source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
server.py | import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
import os
import logging
import json
import time
from threading import Thread
from multiprocessing import Process, Pool
from functools import partial
import boto3
import botocore
from botocore.config import Config
import sys
import cache
# Module-level boto3 S3 client; populated in init(), None until then.
s3client = None
class LoadMessage(BaseModel):
    """Payload of a /loader/notice request: which files to (re)load."""
    # Category of artifact, e.g. 'inverted-list', 'action-model', 'embedding'.
    file_type: str
    # S3 key prefix under which the files live.
    file_path: str
    # File names to download; pydantic deep-copies this default per instance.
    file_name: list = []
class LoadRequest(BaseModel):
    """Envelope for /loader/notice; `message` defaults to None when omitted."""
    message: LoadMessage = None
app = FastAPI()

# Mandatory variables in environment; these values are the defaults used
# when the corresponding environment variable is not set (see init()).
MANDATORY_ENV_VARS = {
    'AWS_REGION': 'ap-northeast-1',
    'LOCAL_DATA_FOLDER': '/tmp/rs-data/',
    'S3_BUCKET_DATA': 'aws-gcr-rs-sol-demo-ap-southeast-1-522244679887',
    "RECORDS_PATH": 'news-open/system/item-data/meta-data/',
    'REDIS_HOST': 'localhost',
    'REDIS_PORT': 6379,
    'LOADER_PORT': 5000
}

# Well-known artifact names used by the loader.
item_records = 'item_records_dict'
action_model = 'model.tar.gz'
def xasync(f):
    """Decorator: run the wrapped function on a background thread.

    The wrapper now returns the started Thread so callers can join on it
    (the original returned None, making completion unobservable), and uses
    functools.wraps so the wrapped function keeps its name/docstring.
    """
    from functools import wraps

    @wraps(f)
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr
    return wrapper
@app.get('/loader/status', tags=["monitoring"])
def status():
    """Report the effective environment config and Redis connection state."""
    logging.info('Collecting status information from server & plugin...')
    report = {}
    report['env'] = MANDATORY_ENV_VARS
    report['redis'] = rCache.connection_status()
    return report
@app.get('/ping', tags=["monitoring"])
def ping():
    """Liveness probe: always answers with a constant payload."""
    logging.info('Processing default request...')
    response = {'result': 'ping'}
    return response
@app.post('/loader/notice', tags=["loader-service"])
def notice(loadRequest: LoadRequest):
    """Handle a data-reload notification.

    Downloads the listed files from S3 into the local data folder, then
    notifies downstream services (via a Redis stream) to reload them.
    """
    logging.info('Start loader->process()...')
    loader_message = loadRequest.message
    file_type = loader_message.file_type
    file_path = loader_message.file_path
    file_list = loader_message.file_name
    logging.info('file type:{}, file_path:{}, file_list:{}'.format(
        file_type, file_path, file_list))
    local_folder = MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER']
    if not os.path.exists(local_folder):
        logging.info("the local path {} is not existed".format(local_folder))
        # makedirs handles nested paths and a concurrent-creation race.
        os.makedirs(local_folder, exist_ok=True)
    if file_type == 'inverted-list':
        for file in file_list:
            init_single_pickle_data(file_path, file)
    else:
        # Every other known type is a plain data file handled identically,
        # so a membership test replaces the former six-branch elif chain.
        data_file_types = {'action-model', 'vector-index', 'embedding',
                           'ps-result', 'ps-recommend-list', 'ps-sims-dict'}
        for file in file_list:
            if file_type in data_file_types:
                init_data_file(file_path, file)
    # Give downloads time to settle on disk before broadcasting the reload.
    time.sleep(10)
    notice_service_to_reload(file_type, local_folder, file_list)
    # FastAPI serializes a returned dict as a JSON object. The previous
    # Flask-style "(body, status, headers)" tuple was serialized as a JSON
    # array, which is not what callers expect.
    return {'result': 'success'}
def init_single_pickle_data(path, file):
    """Fetch one inverted-list pickle from S3 into the local data folder."""
    download_file_from_s3(MANDATORY_ENV_VARS['S3_BUCKET_DATA'], path, file, MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'])
def init_data_file(path, file):
    """Fetch one data artifact from S3 into the local data folder."""
    download_file_from_s3(MANDATORY_ENV_VARS['S3_BUCKET_DATA'], path, file, MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'])
def download_file_from_s3(bucket, path, file, dest_folder):
    """Download s3://<bucket>/<path><file> into <dest_folder> and return the local path.

    Raises:
        botocore.exceptions.ClientError: propagated unchanged on S3 failures.
        ValueError: when boto3 rejects the supplied parameters.

    NOTE(review): keys and local paths are built by plain concatenation, so
    `path` is assumed to end with '/' and `dest_folder` with a separator.
    """
    logging.info('Download file - %s from s3://%s/%s ... ', file, bucket, path)
    # Using default session
    s3_boto_config = Config(
        region_name=MANDATORY_ENV_VARS['AWS_REGION']
    )
    s3client = boto3.client('s3', config=s3_boto_config)
    try:
        s3client.download_file(bucket, path + file, dest_folder + file)
    except botocore.exceptions.ClientError:
        # Bare raise instead of "raise error": re-raises the original
        # exception unchanged instead of re-raising a bound copy.
        raise
    except botocore.exceptions.ParamValidationError as error:
        raise ValueError(
            'The parameters you provided are incorrect: {}'.format(error))
    logging.info(
        'Download file - %s from s3://%s/%s ... was success', file, bucket, path)
    return dest_folder + file
def notice_service_to_reload(type, file_path, file_list):
    """Publish a reload notification onto the Redis stream named *type*.

    NOTE: the parameter name ``type`` shadows the builtin of the same name.
    """
    logging.info('type=%s, file_path=%s, file_list=%s',
                 type, file_path, file_list)
    data = {
        'file_type': type,
        'file_path': file_path,
        # Stream fields must be flat values, so the list is stringified.
        'file_list': str(file_list)
    }
    rCache.load_data_into_stream(type, data)
def init():
    """Load configuration from the environment and set up global clients.

    Fills MANDATORY_ENV_VARS from os.environ (falling back to the baked-in
    defaults), creates the module-level boto3 S3 client, and connects the
    module-level Redis cache (rCache).
    """
    # Check out environments
    for var in MANDATORY_ENV_VARS:
        if var not in os.environ:
            logging.error(
                "Mandatory variable {%s} is not set, using default value {%s}.", var, MANDATORY_ENV_VARS[var])
        else:
            MANDATORY_ENV_VARS[var] = os.environ.get(var)
    aws_region = MANDATORY_ENV_VARS['AWS_REGION']
    logging.info("aws_region={}".format(aws_region))
    boto3.setup_default_session(region_name=MANDATORY_ENV_VARS['AWS_REGION'])
    global s3client
    s3client = boto3.client('s3')
    # Smoke-tests the client/credentials by listing buckets at startup.
    logging.info(json.dumps(s3client.list_buckets(), default=str))
    # Initial redis connection
    global rCache
    # NOTE(review): REDIS_PORT is a string when read from the environment —
    # confirm cache.RedisCache accepts string ports.
    rCache = cache.RedisCache(
        host=MANDATORY_ENV_VARS['REDIS_HOST'], port=MANDATORY_ENV_VARS['REDIS_PORT'])
    logging.info('redis status is {}'.format(rCache.connection_status()))
if __name__ == "__main__":
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    init()
    # Ports read from the environment arrive as strings; uvicorn requires an
    # int, so cast explicitly (the baked-in default 5000 is already an int).
    uvicorn.run(app, host="0.0.0.0", port=int(MANDATORY_ENV_VARS['LOADER_PORT']))
|
runner.py | import argparse
import json
import logging
import os
import threading
import time
import traceback
import colors
import docker
import numpy
import psutil
from ann_benchmarks.algorithms.definitions import (Definition,
instantiate_algorithm)
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.distance import metrics, dataset_transform
from ann_benchmarks.results import store_results
def run_individual_query(algo, X_train, X_test, distance, count, run_count,
                         batch):
    """Run *run_count* passes of the query workload against *algo*.

    Returns (attrs, results): attrs summarizes the best run (search time,
    average candidate count, algorithm metadata); results holds, per query
    from the last run, (elapsed_seconds, [(train_index, distance), ...]).
    """
    # True when the algorithm supports a separate "prepare" phase for the
    # mode (batch vs individual) we are about to use.
    prepared_queries = \
        (batch and hasattr(algo, "prepare_batch_query")) or \
        ((not batch) and hasattr(algo, "prepare_query"))

    best_search_time = float('inf')
    for i in range(run_count):
        print('Run %d/%d...' % (i + 1, run_count))
        # a bit dumb but can't be a scalar since of Python's scoping rules
        n_items_processed = [0]

        def single_query(v):
            # Time one query; true distances to the returned candidates are
            # recomputed outside the timed section.
            if prepared_queries:
                algo.prepare_query(v, count)
                start = time.time()
                algo.run_prepared_query()
                total = (time.time() - start)
                candidates = algo.get_prepared_query_results()
            else:
                start = time.time()
                candidates = algo.query(v, count)
                total = (time.time() - start)
            candidates = [(int(idx), float(metrics[distance]['distance'](v, X_train[idx])))  # noqa
                          for idx in candidates]
            n_items_processed[0] += 1
            if n_items_processed[0] % 1000 == 0:
                print('Processed %d/%d queries...' % (n_items_processed[0], len(X_test)))
            if len(candidates) > count:
                print('warning: algorithm %s returned %d results, but count'
                      ' is only %d)' % (algo, len(candidates), count))
            return (total, candidates)

        def batch_query(X):
            # Time the whole batch; per-query time reported is the average.
            if prepared_queries:
                algo.prepare_batch_query(X, count)
                start = time.time()
                algo.run_batch_query()
                total = (time.time() - start)
            else:
                start = time.time()
                algo.batch_query(X, count)
                total = (time.time() - start)
            results = algo.get_batch_results()
            candidates = [[(int(idx), float(metrics[distance]['distance'](v, X_train[idx])))  # noqa
                           for idx in single_results]
                          for v, single_results in zip(X, results)]
            return [(total / float(len(X)), v) for v in candidates]

        if batch:
            results = batch_query(X_test)
        else:
            results = [single_query(x) for x in X_test]

        # NOTE: the generator variable below shadows the ``time`` module, but
        # only inside this one expression.
        total_time = sum(time for time, _ in results)
        total_candidates = sum(len(candidates) for _, candidates in results)
        search_time = total_time / len(X_test)
        avg_candidates = total_candidates / len(X_test)
        best_search_time = min(best_search_time, search_time)

    verbose = hasattr(algo, "query_verbose")
    attrs = {
        "batch_mode": batch,
        "best_search_time": best_search_time,
        "candidates": avg_candidates,
        "expect_extra": verbose,
        "name": str(algo),
        "run_count": run_count,
        "distance": distance,
        "count": int(count)
    }
    # Merge in any algorithm-specific extra attributes.
    additional = algo.get_additional()
    for k in additional:
        attrs[k] = additional[k]
    return (attrs, results)
def run(definition, dataset, count, run_count, batch):
    """Instantiate *definition*, fit it on *dataset*, and store query results.

    Runs run_individual_query once per query-argument group and persists each
    group's descriptor/results via store_results. algo.done() always runs.
    """
    algo = instantiate_algorithm(definition)
    assert not definition.query_argument_groups \
        or hasattr(algo, "set_query_arguments"), """\
error: query argument groups have been specified for %s.%s(%s), but the \
algorithm instantiated from it does not implement the set_query_arguments \
function""" % (definition.module, definition.constructor, definition.arguments)

    D, dimension = get_dataset(dataset)
    X_train = numpy.array(D['train'])
    X_test = numpy.array(D['test'])
    distance = D.attrs['distance']
    print('got a train set of size (%d * %d)' % (X_train.shape[0], dimension))
    print('got %d queries' % len(X_test))

    # NOTE: the raw arrays above are immediately replaced by the (possibly
    # transformed) train/test split produced by dataset_transform.
    X_train, X_test = dataset_transform(D)

    try:
        prepared_queries = False
        if hasattr(algo, "supports_prepared_queries"):
            prepared_queries = algo.supports_prepared_queries()

        t0 = time.time()
        memory_usage_before = algo.get_memory_usage()
        algo.fit(X_train)
        build_time = time.time() - t0
        # Index size is measured as the process-memory delta across fit().
        index_size = algo.get_memory_usage() - memory_usage_before
        print('Built index in', build_time)
        print('Index size: ', index_size)

        query_argument_groups = definition.query_argument_groups
        # Make sure that algorithms with no query argument groups still get run
        # once by providing them with a single, empty, harmless group
        if not query_argument_groups:
            query_argument_groups = [[]]
        for pos, query_arguments in enumerate(query_argument_groups, 1):
            print("Running query argument group %d of %d..." %
                  (pos, len(query_argument_groups)))
            if query_arguments:
                algo.set_query_arguments(*query_arguments)
            descriptor, results = run_individual_query(
                algo, X_train, X_test, distance, count, run_count, batch)
            descriptor["build_time"] = build_time
            descriptor["index_size"] = index_size
            descriptor["algo"] = definition.algorithm
            descriptor["dataset"] = dataset
            store_results(dataset, count, definition,
                          query_arguments, descriptor, results, batch)
    finally:
        algo.done()
def run_from_cmdline():
    """CLI entry point used inside the benchmark container.

    Parses algorithm/dataset options, builds a Definition from them, and
    hands it to run(). Fixes: help-text typos ("Constructer", "modulel",
    "algorihm") and an f-string with no placeholders.
    """
    parser = argparse.ArgumentParser('''
NOTICE: You probably want to run.py rather than this script.
''')
    parser.add_argument(
        '--dataset',
        choices=DATASETS.keys(),
        help='Dataset to benchmark on.',
        required=True)
    parser.add_argument(
        '--algorithm',
        help='Name of algorithm for saving the results.',
        required=True)
    parser.add_argument(
        '--module',
        help='Python module containing algorithm. E.g. "ann_benchmarks.algorithms.annoy"',
        required=True)
    parser.add_argument(
        '--constructor',
        help='Constructor to load from module. E.g. "Annoy"',
        required=True)
    parser.add_argument(
        '--count',
        help='K: Number of nearest neighbours for the algorithm to return.',
        required=True,
        type=int)
    parser.add_argument(
        '--runs',
        help='Number of times to run the algorithm. Will use the fastest run-time over the bunch.',
        required=True,
        type=int)
    parser.add_argument(
        '--batch',
        help='If flag included, algorithms will be run in batch mode, rather than "individual query" mode.',
        action='store_true')
    parser.add_argument(
        'build',
        help='JSON of arguments to pass to the constructor. E.g. ["angular", 100]'
    )
    parser.add_argument(
        'queries',
        help='JSON of arguments to pass to the queries. E.g. [100]',
        nargs='*',
        default=[])
    args = parser.parse_args()
    algo_args = json.loads(args.build)
    print(algo_args)
    query_args = [json.loads(q) for q in args.queries]

    definition = Definition(
        algorithm=args.algorithm,
        docker_tag=None,  # not needed
        module=args.module,
        constructor=args.constructor,
        arguments=algo_args,
        query_argument_groups=query_args,
        disabled=False
    )
    run(definition, args.dataset, args.count, args.runs, args.batch)
def run_docker(definition, dataset, count, runs, timeout, batch, cpu_limit,
               mem_limit=None):
    """Run one benchmark definition inside its Docker container.

    Mounts the benchmark code/data read-only and the results directory
    read-write, streams container logs on a side thread, and waits up to
    *timeout* seconds for completion. The container is always removed.
    """
    cmd = ['--dataset', dataset,
           '--algorithm', definition.algorithm,
           '--module', definition.module,
           '--constructor', definition.constructor,
           '--runs', str(runs),
           '--count', str(count)]
    if batch:
        cmd += ['--batch']
    cmd.append(json.dumps(definition.arguments))
    cmd += [json.dumps(qag) for qag in definition.query_argument_groups]

    client = docker.from_env()
    if mem_limit is None:
        mem_limit = psutil.virtual_memory().available

    container = client.containers.run(
        definition.docker_tag,
        cmd,
        volumes={
            os.path.abspath('ann_benchmarks'):
                {'bind': '/home/app/ann_benchmarks', 'mode': 'ro'},
            os.path.abspath('data'):
                {'bind': '/home/app/data', 'mode': 'ro'},
            os.path.abspath('results'):
                {'bind': '/home/app/results', 'mode': 'rw'},
        },
        cpuset_cpus=cpu_limit,
        mem_limit=mem_limit,
        detach=True)
    logger = logging.getLogger(f"annb.{container.short_id}")
    logger.info('Created container %s: CPU limit %s, mem limit %s, timeout %d, command %s' % \
                (container.short_id, cpu_limit, mem_limit, timeout, cmd))

    def stream_logs():
        for line in container.logs(stream=True):
            logger.info(colors.color(line.decode().rstrip(), fg='blue'))

    t = threading.Thread(target=stream_logs, daemon=True)
    t.start()
    try:
        exit_code = container.wait(timeout=timeout)
        # docker-py >= 3.0 returns a dict like {"StatusCode": int}; the old
        # attribute access (exit_code.StatusCode) always raised and was
        # silently swallowed by the bare except below.
        status_code = exit_code.get("StatusCode") \
            if isinstance(exit_code, dict) else exit_code
        # Exit if exit code
        if status_code not in [0, None]:
            logger.error(colors.color(container.logs().decode(), fg='red'))
            logger.error('Child process for container %s raised exception %d' % (container.short_id, status_code))
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit escape.
        logger.error('Container.wait for container %s failed with exception' % container.short_id)
        traceback.print_exc()
    finally:
        container.remove(force=True)
|
odd_even_transposition_parallel.py | """
This is an implementation of odd-even transposition sort.
It works by performing a series of parallel swaps between odd and even pairs of
variables in the list.
This implementation represents each variable in the list with a process and
each process communicates with its neighboring processes in the list to perform
comparisons.
They are synchronized with locks and message passing but other forms of
synchronization could be used.
"""
from multiprocessing import Process, Pipe, Lock

# Lock shared (by inheritance) with every worker process; it serializes pipe
# access so two processes never touch the same pipe end simultaneously.
processLock = Lock()
"""
The function run by the processes that sorts the list
position = the position in the list the process represents, used to know which
neighbor we pass our value to
value = the initial value at list[position]
LSend, RSend = the pipes we use to send to our left and right neighbors
LRcv, RRcv = the pipes we use to receive from our left and right neighbors
resultPipe = the pipe used to send results back to main
"""
def oeProcess(position, value, LSend, RSend, LRcv, RRcv, resultPipe):
    """Worker for list slot *position*: holds one value, swaps with neighbors.

    LSend/RSend and LRcv/RRcv are Pipe() pairs toward the left/right neighbor
    (None at the ends of the list); resultPipe carries the final value back
    to the parent once all rounds are done.
    """
    global processLock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # NOTE(review): the round count is hard-coded to 10, so the sort is only
    # guaranteed correct for lists of at most 10 elements (main() uses 10).
    for i in range(0, 10):
        # Even (i + position): this slot is the left side of a compared pair.
        if ((i + position) % 2 == 0 and RSend != None):
            # send your value to your right neighbor
            processLock.acquire()
            RSend[1].send(value)
            processLock.release()
            # receive your right neighbor's value
            processLock.acquire()
            temp = RRcv[0].recv()
            processLock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        # Odd (i + position): this slot is the right side of a compared pair.
        elif ((i + position) % 2 != 0 and LSend != None):
            # send your value to your left neighbor
            processLock.acquire()
            LSend[1].send(value)
            processLock.release()
            # receive your left neighbor's value
            processLock.acquire()
            temp = LRcv[0].recv()
            processLock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    resultPipe[1].send(value)
"""
the function which creates the processes that perform the parallel swaps
arr = the list to be sorted
"""
def OddEvenTransposition(arr):
    """Sort *arr* (mutated in place and also returned) with one process per element.

    Pipe wiring: each adjacent pair shares two Pipe() pairs, one per
    direction. A process's right-send pipe is its right neighbor's
    left-receive pipe (and vice versa), which is why tempL* are re-assigned
    from the previous iteration's tempR* after each Process is created.
    """
    processArray = []
    resultPipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        resultPipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made
    # outside of the loop
    tempRs = Pipe()
    tempRr = Pipe()
    processArray.append(Process(target=oeProcess, args=(0, arr[0], None, tempRs, None, tempRr, resultPipe[0])))
    tempLr = tempRs
    tempLs = tempRr
    for i in range(1, len(arr) - 1):
        tempRs = Pipe()
        tempRr = Pipe()
        processArray.append(Process(target=oeProcess, args=(i, arr[i], tempLs, tempRs, tempLr, tempRr, resultPipe[i])))
        tempLr = tempRs
        tempLs = tempRr
    processArray.append(Process(target=oeProcess, args=(len(arr) - 1, arr[len(arr) - 1], tempLs, None, tempLr, None, resultPipe[len(arr) - 1])))
    # start the processes
    for p in processArray:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(resultPipe)):
        arr[p] = resultPipe[p][0].recv()
        processArray[p].join()
    return (arr)
#creates a reverse sorted list and sorts it
def main():
    """Demo: build a reverse-sorted 10-element list, sort it, print both."""
    arr = []
    for i in range(10, 0, -1):
        arr.append(i)
    print("Initial List")
    print(*arr)
    # Bind the result to a descriptive name instead of shadowing the builtin
    # ``list``, and print the returned value rather than the input reference.
    sorted_arr = OddEvenTransposition(arr)
    print("Sorted List\n")
    print(*sorted_arr)
# Run the demo only when executed directly (required for multiprocessing
# spawn-safety on platforms that re-import the main module).
if __name__ == "__main__":
    main()
|
proxier.py | from concurrent import futures
from dataclasses import dataclass
import grpc
import logging
import json
from queue import Queue
import socket
from threading import Thread, Lock
import time
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple
from ray.job_config import JobConfig
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
from ray.util.client.common import (ClientServerHandle,
CLIENT_SERVER_MAX_THREADS, GRPC_OPTIONS)
from ray._private.services import ProcessInfo, start_ray_client_server
from ray._private.utils import detect_fate_sharing_support
logger = logging.getLogger(__name__)

# How often the background thread reaps exited specific-server processes.
CHECK_PROCESS_INTERVAL_S = 30

# Port range from which per-client specific servers are allocated.
MIN_SPECIFIC_SERVER_PORT = 23000
MAX_SPECIFIC_SERVER_PORT = 24000

# How long to wait for a specific server's channel to become ready.
CHECK_CHANNEL_TIMEOUT_S = 5
def _get_client_id_from_context(context: Any) -> str:
"""
Get `client_id` from gRPC metadata. If the `client_id` is not present,
this function logs an error and sets the status_code.
"""
metadata = {k: v for k, v in context.invocation_metadata()}
client_id = metadata.get("client_id") or ""
if client_id == "":
logger.error("Client connecting with no client_id")
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return client_id
@dataclass
class SpecificServer:
    """Bookkeeping for one per-client RayClient server process."""
    # Port the specific server listens on (taken from ProxyManager's pool).
    port: int
    # Handle of the spawned ray client server process.
    process_handle: ProcessInfo
    # Pre-opened channel to localhost:<port>.
    channel: "grpc._channel.Channel"
class ProxyManager():
    """Tracks and supervises the per-client specific RayClient servers.

    Owns the client_id -> SpecificServer mapping, hands out listening ports
    from a fixed range, and reaps exited server processes on a background
    daemon thread.
    """

    def __init__(self, redis_address):
        # client_id -> SpecificServer; all access is guarded by server_lock.
        self.servers: Dict[str, SpecificServer] = dict()
        self.server_lock = Lock()
        self.redis_address = redis_address
        # Candidate listening ports; reaped servers return theirs here.
        self._free_ports: List[int] = list(
            range(MIN_SPECIFIC_SERVER_PORT, MAX_SPECIFIC_SERVER_PORT))
        self._check_thread = Thread(target=self._check_processes, daemon=True)
        self._check_thread.start()
        self.fate_share = bool(detect_fate_sharing_support())

    def _get_unused_port(self) -> int:
        """
        Search for a port in _free_ports that is unused.
        """
        with self.server_lock:
            num_ports = len(self._free_ports)
            for _ in range(num_ports):
                port = self._free_ports.pop(0)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                try:
                    s.bind(("", port))
                except OSError:
                    # Port is taken by someone else: recycle it to the tail
                    # and try the next candidate.
                    self._free_ports.append(port)
                    continue
                finally:
                    s.close()
                # Bind succeeded, so the port is free right now.
                return port
            raise RuntimeError("Unable to succeed in selecting a random port.")

    def start_specific_server(self, client_id) -> None:
        """
        Start up a RayClient Server for an incoming client to
        communicate with.
        """
        port = self._get_unused_port()
        specific_server = SpecificServer(
            port=port,
            process_handle=start_ray_client_server(
                self.redis_address,
                port,
                fate_share=self.fate_share,
                server_type="specific-server"),
            channel=grpc.insecure_channel(
                f"localhost:{port}", options=GRPC_OPTIONS))
        with self.server_lock:
            self.servers[client_id] = specific_server

    def get_channel(self, client_id: str) -> Optional["grpc._channel.Channel"]:
        """
        Find the gRPC Channel for the given client_id.

        Returns None when the client is unknown or the channel does not
        become ready within CHECK_CHANNEL_TIMEOUT_S seconds.
        """
        client = None
        with self.server_lock:
            client = self.servers.get(client_id)
            if client is None:
                logger.error(f"Unable to find channel for client: {client_id}")
                return None
        try:
            # Block (briefly) until the channel is actually usable.
            grpc.channel_ready_future(
                client.channel).result(timeout=CHECK_CHANNEL_TIMEOUT_S)
            return client.channel
        except grpc.FutureTimeoutError:
            return None

    def _check_processes(self):
        """
        Keeps the internal servers dictionary up-to-date with running servers.
        """
        while True:
            with self.server_lock:
                for client_id, specific_server in list(self.servers.items()):
                    # poll() returns the exit code once the process has died.
                    poll_result = specific_server.process_handle.process.poll()
                    if poll_result is not None:
                        del self.servers[client_id]
                        # Port is available to use again.
                        self._free_ports.append(specific_server.port)
            time.sleep(CHECK_PROCESS_INTERVAL_S)
class RayletServicerProxy(ray_client_pb2_grpc.RayletDriverServicer):
    """Relays RayletDriver RPCs to the calling client's specific server.

    Every handler resolves the per-client gRPC channel via the ProxyManager
    and forwards the request with the client_id re-attached as metadata so
    the specific server can identify the caller.
    """

    def __init__(self, ray_connect_handler: Callable,
                 proxy_manager: ProxyManager):
        self.proxy_manager = proxy_manager
        self.ray_connect_handler = ray_connect_handler

    def _call_inner_function(
            self, request, context,
            method: str) -> Optional[ray_client_pb2_grpc.RayletDriverStub]:
        """Forward *request* to *method* on the client's specific server.

        Returns None (and sets NOT_FOUND on the context) when no channel
        exists for the calling client.
        """
        client_id = _get_client_id_from_context(context)
        chan = self.proxy_manager.get_channel(client_id)
        if not chan:
            logger.error(f"Channel for Client: {client_id} not found!")
            context.set_code(grpc.StatusCode.NOT_FOUND)
            return None

        stub = ray_client_pb2_grpc.RayletDriverStub(chan)
        return getattr(stub, method)(
            request, metadata=[("client_id", client_id)])

    def Init(self, request, context=None) -> ray_client_pb2.InitResponse:
        return self._call_inner_function(request, context, "Init")

    def PrepRuntimeEnv(self, request,
                       context=None) -> ray_client_pb2.PrepRuntimeEnvResponse:
        return self._call_inner_function(request, context, "PrepRuntimeEnv")

    def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
        return self._call_inner_function(request, context, "KVPut")

    def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
        return self._call_inner_function(request, context, "KVGet")

    def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
        # BUG FIX: this previously forwarded to "KVGet", so KVDel requests
        # performed a read instead of a delete.
        return self._call_inner_function(request, context, "KVDel")

    def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
        return self._call_inner_function(request, context, "KVList")

    def KVExists(self, request,
                 context=None) -> ray_client_pb2.KVExistsResponse:
        return self._call_inner_function(request, context, "KVExists")

    def ClusterInfo(self, request,
                    context=None) -> ray_client_pb2.ClusterInfoResponse:
        # NOTE: We need to respond to the PING request here to allow the client
        # to continue with connecting.
        if request.type == ray_client_pb2.ClusterInfoType.PING:
            resp = ray_client_pb2.ClusterInfoResponse(json=json.dumps({}))
            return resp
        return self._call_inner_function(request, context, "ClusterInfo")

    def Terminate(self, req, context=None):
        return self._call_inner_function(req, context, "Terminate")

    def GetObject(self, request, context=None):
        return self._call_inner_function(request, context, "GetObject")

    def PutObject(self, request: ray_client_pb2.PutRequest,
                  context=None) -> ray_client_pb2.PutResponse:
        return self._call_inner_function(request, context, "PutObject")

    def WaitObject(self, request, context=None) -> ray_client_pb2.WaitResponse:
        return self._call_inner_function(request, context, "WaitObject")

    def Schedule(self, task, context=None) -> ray_client_pb2.ClientTaskTicket:
        return self._call_inner_function(task, context, "Schedule")
def forward_streaming_requests(grpc_input_generator: Iterator[Any],
                               output_queue: "Queue") -> None:
    """Drain *grpc_input_generator* into *output_queue*.

    A ``None`` sentinel is always enqueued last — even when the gRPC stream
    errors out — so consumers built on ``iter(queue.get, None)`` terminate.
    """
    try:
        for incoming in grpc_input_generator:
            output_queue.put(incoming)
    except grpc.RpcError as e:
        logger.debug("closing dataservicer reader thread "
                     f"grpc error reading request_iterator: {e}")
    finally:
        # Sentinel: tells the consumer the stream is finished.
        output_queue.put(None)
def prepare_runtime_init_req(req: ray_client_pb2.InitRequest
                             ) -> Tuple[ray_client_pb2.InitRequest, JobConfig]:
    """
    Extract JobConfig and possibly mutate InitRequest before it is passed to
    the specific RayClient Server.
    """
    job_config = JobConfig()
    if req.job_config:
        import pickle
        # SECURITY NOTE: pickle.loads executes arbitrary code embedded in the
        # payload — this implicitly trusts the connecting client.
        job_config = pickle.loads(req.job_config)

    return (req, job_config)
class DataServicerProxy(ray_client_pb2_grpc.RayletDataStreamerServicer):
    """Proxies the bidirectional Datapath stream to a per-client server."""

    def __init__(self, proxy_manager: ProxyManager):
        self.proxy_manager = proxy_manager

    def Datapath(self, request_iterator, context):
        """Start the client's specific server and relay its data stream.

        Generator: yields responses from the specific server back to the
        connecting client.
        """
        client_id = _get_client_id_from_context(context)
        if client_id == "":
            return
        logger.debug(f"New data connection from client {client_id}: ")

        # The first message must be the init request; it may be rewritten
        # (JobConfig extraction) before being forwarded downstream.
        init_req = next(request_iterator)
        init_type = init_req.WhichOneof("type")
        assert init_type == "init", ("Received initial message of type "
                                     f"{init_type}, not 'init'.")
        modified_init_req, job_config = prepare_runtime_init_req(init_req.init)
        init_req.init.CopyFrom(modified_init_req)
        queue = Queue()
        queue.put(init_req)

        self.proxy_manager.start_specific_server(client_id)

        channel = self.proxy_manager.get_channel(client_id)
        if channel is None:
            context.set_code(grpc.StatusCode.NOT_FOUND)
            return None
        stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)
        # Pump the remaining client requests into the queue on a side
        # thread; the stub consumes them via iter(queue.get, None), which
        # stops at the None sentinel forward_streaming_requests enqueues.
        thread = Thread(
            target=forward_streaming_requests,
            args=(request_iterator, queue),
            daemon=True)
        thread.start()

        resp_stream = stub.Datapath(
            iter(queue.get, None), metadata=[("client_id", client_id)])
        for resp in resp_stream:
            yield resp
class LogstreamServicerProxy(ray_client_pb2_grpc.RayletLogStreamerServicer):
    """Proxies the Logstream RPC to the client's specific server."""

    def __init__(self, proxy_manager: ProxyManager):
        super().__init__()
        self.proxy_manager = proxy_manager

    def Logstream(self, request_iterator, context):
        """Relay log-stream messages between the client and its server.

        Generator: yields the specific server's log responses.
        """
        client_id = _get_client_id_from_context(context)
        if client_id == "":
            return
        logger.debug(f"New data connection from client {client_id}: ")

        channel = None
        # The specific server is started by the Datapath handler, which may
        # race with this call — retry until its channel becomes reachable.
        for i in range(10):
            # TODO(ilr) Ensure LogClient starts after startup has happened.
            # This will remove the need for retries here.
            channel = self.proxy_manager.get_channel(client_id)
            if channel is not None:
                break
            logger.warning(
                f"Retrying Logstream connection. {i+1} attempts failed.")
            time.sleep(5)

        if channel is None:
            context.set_code(grpc.StatusCode.NOT_FOUND)
            return None

        stub = ray_client_pb2_grpc.RayletLogStreamerStub(channel)
        queue = Queue()
        # Forward client messages into the queue on a side thread; the stub
        # consumes them via the iter(queue.get, None) sentinel iterator.
        thread = Thread(
            target=forward_streaming_requests,
            args=(request_iterator, queue),
            daemon=True)
        thread.start()

        resp_stream = stub.Logstream(
            iter(queue.get, None), metadata=[("client_id", client_id)])
        for resp in resp_stream:
            yield resp
def serve_proxier(connection_str: str, redis_address: str):
    """Start the proxy gRPC server on *connection_str* and return its handle.

    Wires the driver, data-stream and log-stream proxy servicers — all
    sharing one ProxyManager — onto a thread-pool-backed gRPC server.
    """
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=CLIENT_SERVER_MAX_THREADS),
        options=GRPC_OPTIONS)
    proxy_manager = ProxyManager(redis_address)
    # The driver proxy needs no ray_connect_handler: requests are relayed,
    # never served locally.
    task_servicer = RayletServicerProxy(None, proxy_manager)
    data_servicer = DataServicerProxy(proxy_manager)
    logs_servicer = LogstreamServicerProxy(proxy_manager)
    ray_client_pb2_grpc.add_RayletDriverServicer_to_server(
        task_servicer, server)
    ray_client_pb2_grpc.add_RayletDataStreamerServicer_to_server(
        data_servicer, server)
    ray_client_pb2_grpc.add_RayletLogStreamerServicer_to_server(
        logs_servicer, server)
    server.add_insecure_port(connection_str)
    server.start()
    return ClientServerHandle(
        task_servicer=task_servicer,
        data_servicer=data_servicer,
        logs_servicer=logs_servicer,
        grpc_server=server,
    )
|
serial_test.py | #!/usr/bin/env python
import signal
import sys
import serial
import threading
def signal_handler(sig, frame):
    """SIGINT handler: announce the interrupt and exit with status 0.

    Parameters follow the signal-handler convention (signal number, current
    stack frame) and are unused; renamed from ``signal`` so the handler no
    longer shadows the ``signal`` module. Also fixes the "exitting" typo.
    """
    print("Ctrl + C captured, exiting.")
    sys.exit(0)
class fsrThread(object):
    """Continuously reads lines from /dev/ttyACM0 on a daemon thread.

    Constructing an instance immediately starts the reader thread; daemon=True
    means it dies with the main thread.
    """

    def __init__(self):
        thread = threading.Thread(target=self.read_thread, args=())
        thread.daemon = True
        thread.start()

    def open_serial(self):
        # open a serial port (hard-coded device path)
        self.ser = serial.Serial('/dev/ttyACM0')
        print("Port opened:" + self.ser.name)

    def read_thread(self):
        """Open the port, then echo every incoming line forever."""
        self.open_serial()
        while True:
            x = self.ser.readline()
            # y = float(str(x).split("'")[1].split("\\r")[0])
            # if y > 0:
            print(str(x))
if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal_handler)
    test = fsrThread()
    # Idle without burning a CPU core: the original `while True: pass`
    # busy-waited at 100% CPU while the reader thread did the real work.
    import time
    while True:
        time.sleep(1)
|
RPCS3 Game Update Downloader.py | ## This code is trash and will make your eyes bleed. You have been warned.
## This program requires you to install PyYAML and aiohttp (python -m pip install pyyaml aiohttp[speedups])
## This program also requires Python 3.8 or higher due to using the walrus operator
import yaml
import asyncio
import aiohttp
import threading
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from xml.etree import ElementTree
from typing import Callable
## CONSTANTS
# Declare a constant for 1MiB, which is 2^20 bytes; used as the download chunk size.
ONE_MEBIBYTE = 2**20
## FUNCTIONS
# async_op takes in an async function and a list of arguments for said function. It then creates an Event Loop and runs the async function in a thread using said loop.
def async_op(func: Callable[..., None], args: "list | None" = None):
    """Run *func* on a background thread with a fresh asyncio event loop.

    ``func`` is invoked as ``func(*args, loop)`` — the new event loop is
    always appended as the final argument. Returns the started Thread so
    callers can join on it (previously nothing was returned). The mutable
    default argument (``args=[]``) is replaced with the None-sentinel idiom,
    and the annotation is widened from Callable[[], None], which wrongly
    claimed *func* takes no arguments.
    """
    if args is None:
        args = []
    loop = asyncio.new_event_loop()
    # We need to pass in the given args in addition to the actual loop, so we
    # unpack the args list into a new tuple along with the loop.
    thread = threading.Thread(target=func, args=(*args, loop))
    thread.start()
    return thread
# async_download_handler takes in a URL string, a string for the save path, an integer for the size of the file in bytes, a tkinter Button, and an asyncio Event Loop.
# It then runs the async function download_update in the given Event Loop until it completes, then terminates the loop.
def async_download_handler(url: str, save_path: str, size: int, button: tk.Button, loop: asyncio.AbstractEventLoop):
    """Drive download_update to completion on *loop*, then close the loop.

    Intended to run on a worker thread created by async_op; *button* shows
    download progress state to the user.
    """
    button.configure(text="Downloading...")
    loop.run_until_complete(download_update(url, save_path, size, button))
    loop.close()
# async_query_handler takes in an asyncio Event Loop. It then runs the async function load_game_info in the given Event loop until it completes, then terminates the loop.
def async_query_handler(loop: asyncio.AbstractEventLoop):
    """Drive load_game_info to completion on *loop*, then close the loop.

    Intended to run on a worker thread created by async_op.
    """
    loop.run_until_complete(load_game_info())
    loop.close()
# download_update is an async function that takes in a URL string, a string for the save path, an integer for the size of the file in bytes, and a tkinter Button.
# It then downloads the file specified by the URL to the specified save path and shows the progress of the download in a popup window.
async def download_update(url: str, save_path: str, size: int, button: tk.Button):
    """Download *url* into *save_path*, showing progress in a popup window.

    *size* is the expected byte count (sets the progress bar's maximum);
    *button* is disabled and relabeled once the download completes.

    FIX: the streaming read below was corrupted in the source; restored to
    read in chunks of the ONE_MEBIBYTE constant defined above.
    """
    # The last path segment of the URL is the file name on the server,
    # e.g. ".../files/example.pkg".split('/')[-1] -> "example.pkg".
    file_name = url.split('/')[-1]
    file_path = f"{save_path}/{file_name}"
    # Popup window with a label and a determinate progress bar sized to the
    # file's byte count.
    downloading_window = tk.Toplevel()
    downloading_window.title("Downloader")
    downloading_label = tk.Label(downloading_window, text=f"Downloading {file_name}...")
    downloading_label.pack()
    downloading_progress_bar = ttk.Progressbar(downloading_window, mode="determinate", length=300, maximum=size)
    downloading_progress_bar.pack()
    # N.B.: aiohttp's docs prefer one shared ClientSession per application;
    # creating one per download works but is not best practice.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            with open(file_path, 'wb') as file:
                # Stream the payload in 1 MiB chunks; the walrus keeps
                # looping until read() returns an empty (falsy) chunk.
                while (chunk := await resp.content.read(ONE_MEBIBYTE)):
                    file.write(chunk)
                    # Advance the bar by however many bytes just arrived.
                    downloading_progress_bar.step(amount=len(chunk))
    button.configure(text="Downloaded!", state=tk.DISABLED)
    downloading_window.destroy()
# load_game_info is an async function that takes in no arguments. It then retrieves any available updates for the titles specified in the "games.yml" file and shows the user a list of all available updates along with the option to download said updates.
async def load_game_info():
    """Fetch available updates for every title in games.yml and list them.

    Queries the PSN update server for each ID in the module-level
    ``game_ids``, fills ``updates_dict`` (title -> list of update-attribute
    dicts), then builds a scrollable frame of per-title sections, each with
    version/size/checksum/firmware labels and a download button.
    """
    # Tkinter has no built-in scrollable frame; this container/canvas/scrollbar
    # arrangement follows the classic recipe from
    # https://blog.tecladocode.com/tkinter-scrollable-frames/
    container = tk.Frame(main_frame)
    # Frames cannot scroll, but a canvas can — so the widget frame lives
    # inside a canvas, which the scrollbar drives.
    canvas = tk.Canvas(container)
    scrollbar = tk.Scrollbar(container, command=canvas.yview)
    # The frame that actually holds the per-game widgets and gets scrolled.
    header = tk.Frame(canvas)
    async with aiohttp.ClientSession() as session:
        for game in game_ids:
            # ssl=False because the PS3 update server presents a self-signed
            # certificate that aiohttp (like any modern client) would reject.
            # FIX: this URL f-string was corrupted in the original source; the
            # PSN update metadata lives at .../tpl/np/<ID>/<ID>-ver.xml.
            async with session.get(f"https://a0.ww.np.dl.playstation.net/tpl/np/{game}/{game}-ver.xml", ssl=False) as response:
                # A game with no updates may 404 *or* return 200 with an
                # empty body, so both conditions must be checked.
                content = await response.text()
                if response.status == 404 or content == "":
                    print(f"Nothing found for {game}!")
                else:
                    # Parse the XML update manifest.
                    base = ElementTree.fromstring(content)
                    updates = base[0]
                    updates_list = []
                    for update in updates:
                        updates_list.append(update.attrib)
                        try:
                            # Only the last listed update for a title carries
                            # the TITLE tag; earlier ones raise IndexError.
                            title = update[0][0].text
                            print(f"New title: {title}")
                            updates_dict[title] = updates_list
                            # FIX: start a fresh list only *after* a title has
                            # claimed the accumulated updates. The original
                            # reset updates_list in the IndexError branch,
                            # which threw away every update appended before
                            # the TITLE-bearing entry.
                            updates_list = []
                        except IndexError:
                            print("IndexError thrown! No TITLE tag found, ignoring...")
    # Build the widget tree for every title that was found.
    for (title, updates) in updates_dict.items():
        current_game = tk.LabelFrame(header, text=title)
        current_game.pack()
        for update in updates:
            game_version = tk.Label(current_game, text=f"Version: {update['version']}")
            game_version.pack()
            # Size is reported in bytes; display MiB to one decimal place.
            game_size = tk.Label(current_game, text=f"Update Size: {round(int(update['size']) / ONE_MEBIBYTE, 1)} MiB")
            game_size.pack()
            game_sha1_sum = tk.Label(current_game, text=f"SHA1 Checksum: {update['sha1sum']}")
            game_sha1_sum.pack()
            game_system_version = tk.Label(current_game, text="Required Firmware: Version %.2f" % float(update['ps3_system_ver']))
            game_system_version.pack()
            game_download = tk.Button(current_game, text="Download Update")
            # Bind per-iteration values as lambda defaults (avoids the
            # late-binding closure pitfall); async_op runs the download on a
            # fresh asyncio event loop so Tkinter keeps redrawing the bar.
            game_download.config(command=lambda url=update['url'], button=game_download, size=int(update['size']): async_op(async_download_handler, [url, save_path, size, button]))
            game_download.pack()
    # The indeterminate loading widgets are no longer needed.
    loading_bar.pack_forget()
    loading_label.pack_forget()
    # Keep the scrollregion in sync whenever widgets are added to header.
    header.bind("<Configure>", lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
    # Draw header anchored at the canvas's top-left corner.
    canvas.create_window((0, 0), window=header, anchor=tk.NW)
    # Let the scrollbar actually drive the canvas.
    canvas.configure(yscrollcommand=scrollbar.set)
    # Mouse-wheel scrolling (e.delta is a multiple of 120 on Windows).
    canvas.bind_all("<MouseWheel>", lambda e: canvas.yview_scroll(int(-e.delta/120), "units"))
    canvas.bind("<Configure>", lambda e: canvas.scale("all", 0, 0, e.width, e.height))
    # Make the container, canvas, and scrollbar visible.
    container.pack(fill=tk.BOTH, expand=True)
    canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
# Initialize tkinter and set the window title.
root = tk.Tk()
root.title("PS3 Game Update Downloader")
# Create a Tkinter Frame to act as our primary frame for all widgets.
main_frame = tk.Frame(root)
main_frame.pack(fill=tk.BOTH, expand=True)
# Create a Tkinter Label to fill space while the program is retrieving updates.
loading_label = tk.Label(main_frame, text="Loading...")
loading_label.pack()
# Create an indeterminate Tkinter Progressbar to show the user that the program is retrieving updates and not frozen.
loading_bar = ttk.Progressbar(main_frame, mode="indeterminate", length=300)
loading_bar.start()
loading_bar.pack()
# Prompt the user to find their RPCS3 'games.yml' file.
file_path = filedialog.askopenfilename(title="Open Your RPCS3 'games.yml' File", filetypes=(("RPCS3 'games.yml' File", "games.yml"),))
# Prompt the user to select a folder to save their PS3 game updates in.
save_path = filedialog.askdirectory(title="Select a folder to save updates in")
# Load 'games.yml' at the specified path using PyYAML's safe_load function.
games = yaml.safe_load(open(file_path))
# Set game_ids to a list of the game IDs present in 'games.yml'
game_ids = list(games.keys())
# Set updates_dict to an empty dictionary
updates_dict = {}
# Asynchronously retrieve the PS3 game updates.
# As before, we need to do this because Tkinter likes to do things synchronously, which causes our loading bar to freeze.
async_op(async_query_handler)
root.mainloop()
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint: disable-msg=C0301
#pylint: disable-msg=F0401
#pylint: disable-msg=W0142
"""Tests for application.py"""
import sys
import os
import unittest
import time
#import pprint
#import pdb
import warnings
from threading import Thread
import ctypes
import mock
import six
sys.path.append(".")
from pywinauto import Desktop
from pywinauto.windows import application, win32defines
from pywinauto.controls import hwndwrapper
from pywinauto.windows.application import Application
from pywinauto.base_application import WindowSpecification # noqa: E402
from pywinauto.windows.application import process_module
from pywinauto.windows.application import process_get_modules
from pywinauto.windows.application import ProcessNotFoundError
from pywinauto.windows.application import AppStartError
from pywinauto.windows.application import AppNotConnected
from pywinauto.controls.common_controls import TrackbarWrapper
from pywinauto import findwindows
from pywinauto import findbestmatch
from pywinauto.timings import Timings
from pywinauto.timings import TimeoutError
from pywinauto.timings import WaitUntil
from pywinauto.timings import always_wait_until
from pywinauto.timings import always_wait_until_passes
from pywinauto.timings import timestamp # noqa: E402
from pywinauto.sysinfo import is_x64_Python
from pywinauto.sysinfo import is_x64_OS
from pywinauto.sysinfo import UIA_support
#application.set_timing(1, .01, 1, .01, .05, 0, 0, .1, 0, .01)
# About dialog may take some time to load
# so make sure that we wait for it.
# The timeout is in seconds (see pywinauto.timings.Timings).
Timings.window_find_timeout = 5
def _notepad_exe():
    """Return the path to the notepad.exe matching the Python build's bitness."""
    # 64-bit Python (or any Python on a 32-bit OS) uses the native System32
    # copy; 32-bit Python on a 64-bit OS must use the SysWOW64 copy.
    use_native = is_x64_Python() or not is_x64_OS()
    return r"C:\Windows\System32\notepad.exe" if use_native else r"C:\Windows\SysWOW64\notepad.exe"
# Folder with the 32-bit MFC sample binaries shipped alongside the test suite.
mfc_samples_folder_32 = mfc_samples_folder = os.path.join(
    os.path.dirname(__file__), r"..\..\apps\MFC_samples")
# On 64-bit Python, prefer the x64 builds of the samples.
if is_x64_Python():
    mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
class ApplicationWarningTestCases(unittest.TestCase):
    """Unit tests for warnings in the application.Application class"""

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        # Force Display User and Deprecation warnings every time
        # Python 3.3 + nose/unittest tries really hard to suppress them
        for warning in (UserWarning, PendingDeprecationWarning):
            warnings.simplefilter('always', warning)
        # sample_exe matches the Python bitness; sample_exe_inverted_bitness
        # deliberately mismatches it to trigger the 32/64-bit warning.
        if is_x64_Python():
            self.sample_exe = os.path.join(mfc_samples_folder,
                                           "CmnCtrl1.exe")
            self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder_32,
                                                            "CmnCtrl1.exe")
        else:
            self.sample_exe = os.path.join(mfc_samples_folder_32, "CmnCtrl1.exe")
            self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder,
                                                            "x64",
                                                            "CmnCtrl1.exe")

    def testStartWarning3264(self):
        """start() should warn when app bitness differs from Python bitness."""
        # A bitness mismatch is only possible on a 64-bit OS; skip otherwise.
        if not is_x64_OS():
            self.defaultTestResult()
            return
        warnings.filterwarnings('always', category=UserWarning, append=True)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            app = Application().start(self.sample_exe_inverted_bitness)
            app.kill()
            # The most recent warning should be the 64-bit mismatch notice.
            assert len(w) >= 1
            assert issubclass(w[-1].category, UserWarning)
            assert "64-bit" in str(w[-1].message)

    def testConnectWarning3264(self):
        """connect() should warn when app bitness differs from Python bitness."""
        if not is_x64_OS():
            self.defaultTestResult()
            return
        app = Application().start(self.sample_exe_inverted_bitness)
        # Appveyor misteries...
        self.assertEqual(app.is_process_running(), True)
        # Capture the warning via mock rather than the warnings registry.
        with mock.patch("warnings.warn") as mockWarn:
            Application().connect(pid=app.process)
            app.kill()
        args, kw = mockWarn.call_args
        assert len(args) == 2
        assert "64-bit" in args[0]
        assert args[1].__name__ == 'UserWarning'
class ApplicationWin32KillTestCases(unittest.TestCase):
    """Unit tests for method Application.kill() with backend='win32'"""

    # Subclasses override this attribute to re-run the same tests on another
    # backend (see ApplicationUiaKillTestCases below).
    backend = 'win32'

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        self.sample_exe = os.path.join(mfc_samples_folder, 'RowList.exe')
        self.app = Application(backend=self.backend).start(self.sample_exe)
        self.target_process = self.app.process

    def tearDown(self):
        """Hard-kill in case a test left the application alive."""
        self.app.kill(soft=False)

    def test_kill_hard(self):
        """kill(soft=False) must terminate the process."""
        self.assertTrue(self.app.kill(soft=False))
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)

    def test_kill_soft(self):
        """kill(soft=True) must close the application gracefully."""
        self.assertTrue(self.app.kill(soft=True))
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)

    def test_already_killed_hard(self):
        """A second hard kill on a dead process still reports success."""
        self.assertTrue(self.app.kill(soft=False))
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
        self.assertTrue(self.app.kill(soft=False))  # already killed, returned True anyway

    def test_already_killed_soft(self):
        """A soft kill on an already-dead process still reports success."""
        self.assertTrue(self.app.kill(soft=False))
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
        self.assertTrue(self.app.kill(soft=True))  # already killed, returned True anyway

    def test_kill_soft_with_modal_subdialog(self):
        """Kill the app with modal subdialog to cover win.force_close() call"""
        self.app.RowListSampleApplication.menu_select('Help->About RowList...')
        # Wait for the modal About box using whichever API the backend needs.
        if self.backend == 'win32':
            self.app.window(name='About RowList').wait('visible')
        elif self.backend == 'uia':
            self.app.RowListSampleApplication.by(name='About RowList').wait('visible')
        else:
            raise NotImplementedError('test_kill_soft_with_modal_subdialog: ' \
                'backend "{}" is not supported'.format(self.backend))
        self.assertTrue(self.app.kill(soft=True))
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
        self.assertTrue(self.app.kill(soft=True))  # already killed, returned True anyway
# Re-run the whole kill() suite against the UIA backend when UIA is available.
if UIA_support:
    class ApplicationUiaKillTestCases(ApplicationWin32KillTestCases):
        """Unit tests for method Application.kill() with backend='uia'"""
        backend = 'uia'
        # the same test methods run here
# When the test runner itself is NOT elevated, verify pywinauto's behavior
# against an elevated (admin) target versus an ordinary one.
if ctypes.windll.shell32.IsUserAnAdmin() == 0:

    class AdminTestCases(ApplicationWarningTestCases):
        """Non-admin runner controlling an app launched with admin rights."""

        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            super(AdminTestCases, self).setUp()
            # Launch the sample elevated via PowerShell's RunAs verb.
            cmd = 'powershell -Command "Start-Process {} -Verb RunAs"'.format(self.sample_exe)
            self.app = Application().start(cmd, wait_for_idle=False)

        def tearDown(self):
            """Close the application after tests"""
            self.app.kill()
            super(AdminTestCases, self).tearDown()

        def test_non_admin_warning(self):
            """Connecting to an elevated process should warn about rights."""
            warnings.filterwarnings('always', category=UserWarning, append=True)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.app = Application().connect(name="Common Controls Sample", timeout=20)
                assert len(w) >= 1
                assert issubclass(w[-1].category, UserWarning)
                assert "process has no rights" in str(w[-1].message)

        def test_non_admin_click(self):
            """Interacting with an elevated app must raise RuntimeError."""
            self.app = Application().connect(name="Common Controls Sample", timeout=20)
            with self.assertRaises(RuntimeError):
                self.app.CommonControlsSample.OK.click()
            with self.assertRaises(RuntimeError):
                self.app.CommonControlsSample.OK.click_input()
            with self.assertRaises(RuntimeError):
                self.app.CommonControlsSample.TVS_HASBUTTON.check()

    class NonAdminTestCases(ApplicationWarningTestCases):
        """Non-admin runner and non-elevated app: no warnings, full control."""

        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            super(NonAdminTestCases, self).setUp()
            self.app = Application().start(self.sample_exe)

        def tearDown(self):
            """Close the application after tests"""
            self.app.kill()
            super(NonAdminTestCases, self).tearDown()

        def test_both_non_admin(self):
            """No rights warning is expected when both sides are non-admin."""
            warnings.filterwarnings('always', category=UserWarning, append=True)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.app = Application().connect(name="Common Controls Sample", timeout=5)
                assert len(w) == 0

        def test_both_non_admin_click(self):
            """All interactions should work against a non-elevated target."""
            self.app = Application().connect(name="Common Controls Sample", timeout=5)
            self.app.CommonControlsSample.TVS_HASBUTTON.check()
            self.assertEqual(self.app.CommonControlsSample.TVS_HASBUTTON.is_checked(), True)
            self.app.CommonControlsSample.OK.click()
            self.app.CommonControlsSample.wait_not('visible')
class ApplicationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.prev_warn = warnings.showwarning
def no_warnings(*args, **kwargs): pass
warnings.showwarning = no_warnings
if is_x64_Python() or not is_x64_OS():
self.notepad_subpath = r"system32\notepad.exe"
else:
self.notepad_subpath = r"SysWOW64\notepad.exe"
def tearDown(self):
"""Close the application after tests"""
#self.dlg.SendMessage(win32defines.WM_CLOSE)
warnings.showwarning = self.prev_warn
def test__init__(self):
"""Verify that Application instance is initialized or not"""
self.assertRaises(ValueError, Application, backend='unregistered')
def test__iter__(self):
"""Verify that Application instance is not iterable"""
app = Application()
app.start(_notepad_exe())
with self.assertRaises(NotImplementedError):
for a in app:
pass
app.kill()
def test_not_connected(self):
"""Verify that it raises when the app is not connected"""
self.assertRaises (AppNotConnected, Application().__getattribute__, 'Hiya')
self.assertRaises (AppNotConnected, Application().__getitem__, 'Hiya')
self.assertRaises (AppNotConnected, Application().window_, name='Hiya')
self.assertRaises (AppNotConnected, Application().top_window_,)
def test_start_problem(self):
"""Verify start_ raises on unknown command"""
self.assertRaises (AppStartError, Application().start, 'Hiya')
def test_start(self):
"""test start() works correctly"""
app = Application()
self.assertEqual(app.process, None)
app.start(_notepad_exe())
self.assertNotEqual(app.process, None)
self.assertEqual(app.UntitledNotepad.process_id(), app.process)
notepadpath = os.path.join(os.environ['systemroot'], self.notepad_subpath)
self.assertEqual(str(process_module(app.process)).lower(), str(notepadpath).lower())
app.UntitledNotepad.menu_select("File->Exit")
def testStart_bug01(self):
"""On SourceForge forum AppStartError forgot to include %s for application name"""
app = Application()
self.assertEqual(app.process, None)
application.app_start_timeout = 1
app_name = r"I am not * and Application!/\.exe"
try:
app.start(app_name)
except AppStartError as e:
self.assertEqual(app_name in str(e), True)
# def testset_timing(self):
# """Test that set_timing sets the timing correctly"""
# prev_timing = (
# application.window_find_timeout,
# application.window_retry_interval,
# application.app_start_timeout,
# application.exists_timeout,
# application.exists_retry_interval,
# hwndwrapper.delay_after_click,
# hwndwrapper.delay_after_menuselect,
# hwndwrapper.delay_after_sendkeys_key,
# hwndwrapper.delay_after_button_click,
# hwndwrapper.delay_before_after_close_click,
# )
# set_timing(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
#
# self.assertEqual(
# (
# application.window_find_timeout,
# application.window_retry_interval,
# application.app_start_timeout,
# application.exists_timeout,
# application.exists_retry_interval,
# hwndwrapper.delay_after_click,
# hwndwrapper.delay_after_menuselect,
# hwndwrapper.delay_after_sendkeys_key,
# hwndwrapper.delay_after_button_click,
# hwndwrapper.delay_before_after_close_click,
# ), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) )
#
# set_timing(*prev_timing)
def test_connect_path(self):
"""Test that connect_() works with a path"""
app1 = Application()
app1.start(_notepad_exe())
app_conn = Application()
app_conn.connect(path=self.notepad_subpath)
self.assertEqual(app1.process, app_conn.process)
app_conn = Application()
if is_x64_Python() or not is_x64_OS():
app_conn.connect(path=r"c:\windows\system32\notepad.exe")
else:
app_conn.connect(path=r"c:\windows\syswow64\notepad.exe")
self.assertEqual(app1.process, app_conn.process)
accessible_modules = process_get_modules()
accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
self.assertEqual('notepad.exe' in accessible_process_names, True)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_path_timeout(self):
"""Test that connect_() works with a path with timeout"""
app1 = Application()
def delayed_launch():
time.sleep(2)
app1.start(_notepad_exe())
thread = Thread(target=delayed_launch)
thread.start()
app_conn = Application()
app_conn.connect(path=_notepad_exe(), timeout=3)
self.assertEqual(app1.process, app_conn.process)
accessible_modules = process_get_modules()
accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
self.assertEqual('notepad.exe' in accessible_process_names, True)
app1.UntitledNotepad.menu_select('File->Exit')
def test_connect_path_timeout_problem(self):
"""Test that connect_() raise error when no process start"""
app1 = Application()
def delayed_launch():
time.sleep(1)
app1.start(_notepad_exe())
thread = Thread(target=delayed_launch)
thread.start()
self.assertRaises(ProcessNotFoundError, Application().connect, path=_notepad_exe(), timeout=0.5)
time.sleep(0.7)
app1.UntitledNotepad.menu_select('File->Exit')
def test_connect_process_timeout_failed(self):
"""Test that connect_(pid=...) raise error when set timeout"""
app1 = Application()
app1.start(_notepad_exe())
self.assertRaises(ProcessNotFoundError, Application().connect, pid=0, timeout=0.5)
app1.UntitledNotepad.menu_select('File->Exit')
# def test_Connect(self):
# """Test that connect_() works with a path"""
# app1 = Application()
# app1.start_("notepad.exe")
#
# app_conn = Application()
# app_conn.connect_(path = r"system32\notepad.exe")
# self.assertEqual(app1.process, app_conn.process)
#
# app_conn = Application()
# app_conn.connect_(path = r"c:\windows\system32\notepad.exe")
# self.assertEqual(app1.process, app_conn.process)
#
# app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_process(self):
"""Test that connect_() works with a process"""
app1 = Application()
app1.start(_notepad_exe())
app_conn = Application()
app_conn.connect(pid=app1.process)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_handle(self):
"""Test that connect_() works with a handle"""
app1 = Application()
app1.start(_notepad_exe())
handle = app1.UntitledNotepad.handle
app_conn = Application()
app_conn.connect(handle=handle)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_windowspec(self):
"""Test that connect_() works with a windowspec"""
app1 = Application()
app1.start(_notepad_exe())
#unused var: handle = app1.UntitledNotepad.handle
app_conn = Application()
try:
app_conn.connect(name="Untitled - Notepad")
except findwindows.WindowAmbiguousError:
wins = findwindows.find_elements(active_only=True, name="Untitled - Notepad")
app_conn.connect(handle = wins[0].handle)
except findwindows.ElementNotFoundError:
WaitUntil(30, 0.5, lambda: len(findwindows.find_elements(active_only=True, name="Untitled - Notepad")) > 0)
wins = findwindows.find_elements(active_only=True, name="Untitled - Notepad")
app_conn.connect(handle = wins[0].handle)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.menu_select('File->Exit')
def test_connect_raises(self):
"""Test that connect_() raises with invalid input"""
# try an argument that does not exist
self.assertRaises (
KeyError,
Application().connect, **{'not_arg': 23})
self.assertRaises (
RuntimeError,
Application().connect)
# try to pass an invalid process
self.assertRaises (
ProcessNotFoundError,
Application().connect, **{'pid': 0})
# try to pass an invalid handle
self.assertRaises(
RuntimeError,
Application().connect, **{'handle' : 0})
# try to pass an invalid path
self.assertRaises(
ProcessNotFoundError,
Application().connect, **{'path': "no app here", 'timeout': 0.0})
def test_top_window(self):
"""Test that top_window_() works correctly"""
Timings.window_find_timeout = 5
app = Application()
self.assertRaises(AppNotConnected, app.top_window_)
app.start(_notepad_exe())
self.assertEqual(app.UntitledNotepad.handle, app.top_window_().handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
self.assertEqual(app.AboutNotepad.handle, app.top_window_().handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
self.assertRaises(RuntimeError, app.top_window_)
def test_active_window(self):
"""Test that active_() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.active_)
self.assertRaises(AppNotConnected, app.is64bit)
app.start(_notepad_exe())
app.UntitledNotepad.wait('ready')
self.assertEqual(app.active_().handle, app.UntitledNotepad.handle)
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
self.assertRaises(RuntimeError, app.active_)
def test_cpu_usage(self):
"""Verify that cpu_usage() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.cpu_usage)
app.start(_notepad_exe())
self.assertEqual(0.0 <= app.cpu_usage() <= 100.0, True)
app.UntitledNotepad.menu_select("File->Exit")
app.UntitledNotepad.wait_not('exists')
def test_wait_cpu_usage_lower(self):
"""Test that wait_cpu_usage_lower() works correctly"""
if is_x64_Python() != is_x64_OS():
return None
Application().Start(r'explorer.exe')
def _cabinetwclass_exist():
"Verify if at least one active 'CabinetWClass' window is created"
l = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')
return (len(l) > 0)
WaitUntil(40, 0.5, _cabinetwclass_exist)
handle = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')[-1].handle
window = WindowSpecification({'handle': handle, 'backend': 'win32', })
explorer = Application().Connect(pid = window.process_id())
try:
explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
window.AddressBandRoot.ClickInput()
window.TypeKeys(r'Control Panel\Programs\Programs and Features', with_spaces=True, set_foreground=True)
window.TypeKeys(r'{ENTER}', set_foreground = False)
WaitUntil(40, 0.5, lambda: len(findwindows.find_elements(active_only=True,
name='Programs and Features',
class_name='CabinetWClass')) > 0)
explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
installed_programs = window.FolderView.texts()[1:]
programs_list = ','.join(installed_programs)
if ('Microsoft' not in programs_list) and ('Python' not in programs_list):
hwndwrapper.ImageGrab.grab().save(r'explorer_screenshot.jpg')
hwndwrapper.ActionLogger().log('\ninstalled_programs:\n')
for prog in installed_programs:
hwndwrapper.ActionLogger().log(prog)
self.assertEqual(('Microsoft' in programs_list) or ('Python' in programs_list), True)
finally:
window.Close(2.0)
if UIA_support:
def test_wait_cpu_usage_lower_uia(self):
"""Test that wait_cpu_usage_lower() works correctly for UIA"""
app = Application(backend='uia')
app.start('notepad.exe')
try:
app.wait_cpu_usage_lower(threshold = 1.5, timeout = 30, usage_interval = 2)
finally:
app.kill()
app.cpu_usage = mock.Mock(return_value=10)
self.assertRaises(
RuntimeError, app.wait_cpu_usage_lower,
threshold = 9.0, timeout = 5, usage_interval = 0.5
)
# def test_wait_for_idle_exception(self):
# """Test that method start() raises an exception when wait for idle failed"""
# app = Application()
# self.assertRaises(Exception, app.start, 'cmd.exe')
# # TODO: test and fix the case when cmd.exe can't be killed by app.kill()
def test_windows(self):
"""Test that windows_() works correctly"""
Timings.window_find_timeout = 5
app = Application()
self.assertRaises(AppNotConnected, app.windows_, **{'title' : 'not connected'})
app.start('notepad.exe')
self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})
notepad_handle = app.UntitledNotepad.handle
self.assertEqual(app.windows(visible=True), [notepad_handle])
app.UntitledNotepad.menu_select("Help->About Notepad")
aboutnotepad_handle = app.AboutNotepad.handle
self.assertEqual(
app.windows(visible=True, enabled=None),
[aboutnotepad_handle, notepad_handle])
app.AboutNotepad.OK.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_window(self):
"""Test that window_() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.window_, **{'title' : 'not connected'})
app.start(_notepad_exe())
self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})
title = app.window(name="Untitled - Notepad")
title_re = app.window(name_re="Untitled[ -]+Notepad")
classname = app.window(class_name="Notepad")
classname_re = app.window(class_name_re="Not..ad")
handle = app.window(handle=title.handle)
bestmatch = app.window(best_match="Untiotled Notepad")
self.assertNotEqual(title.handle, None)
self.assertNotEqual(title.handle, 0)
self.assertEqual(title.handle, title_re.handle)
self.assertEqual(title.handle, classname.handle)
self.assertEqual(title.handle, classname_re.handle)
self.assertEqual(title.handle, handle.handle)
self.assertEqual(title.handle, bestmatch.handle)
app.UntitledNotepad.menu_select("File->Exit")
def test_getitem(self):
"""Test that __getitem__() works correctly"""
Timings.window_find_timeout = 5
app = Application()
app.start(_notepad_exe())
self.assertRaises(Exception, app['blahblah'])
self.assertRaises(
findbestmatch.MatchError,
app['blahblah']['not here'].__getitem__, 'handle')
self.assertEqual(
app[u'Unt\xeftledNotepad'].handle,
app.window(name="Untitled - Notepad").handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
self.assertEqual(
app['AboutNotepad'].handle,
app.window(name="About Notepad").handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_getattribute(self):
"""Test that __getattribute__() works correctly"""
Timings.window_find_timeout = 5
app = Application()
app.start(_notepad_exe())
self.assertRaises(
findbestmatch.MatchError,
app.blahblah.__getattribute__, 'handle')
self.assertEqual(
app.UntitledNotepad.handle,
app.window(name="Untitled - Notepad").handle)
app.UntitledNotepad.menu_select("Help->About Notepad")
# I think it's OK that this no longer raises a matcherror
# just because the window is not enabled - doesn't mean you
# should not be able to access it at all!
#self.assertRaises(findbestmatch.MatchError,
# app.Notepad.__getattribute__, 'handle')
self.assertEqual(
app.AboutNotepad.handle,
app.window(name="About Notepad").handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.menu_select("File->Exit")
def test_kill(self):
"""test killing the application"""
app = Application()
app.start(_notepad_exe())
app.UntitledNotepad.Edit.type_keys("hello")
app.UntitledNotepad.menu_select("File->Print...")
#app.Print.FindPrinter.Click() # Vasily: (Win7 x64) "Find Printer" dialog is from splwow64.exe process
#app.FindPrinters.Stop.Click()
app.kill()
self.assertRaises(AttributeError, app.UntitledNotepad.Edit)
def test_process_is_running(self):
"""Tests process is running and wait for exit function"""
app = Application()
app.start(_notepad_exe())
app.UntitledNotepad.wait("ready")
self.assertTrue(app.is_process_running())
self.assertRaises(TimeoutError, lambda: app.wait_for_process_exit(timeout=5, retry_interval=1))
app.kill()
app.wait_for_process_exit()
self.assertFalse(app.is_process_running())
def test_should_return_not_running_if_not_started(self):
    """Tests that works on new instance

    is_process_running/wait_for_process_exit can be called on not started/disconnected instance
    """
    app = Application()
    # No process was ever started: the wait returns immediately without error.
    app.wait_for_process_exit(timeout=10, retry_interval=1)
    self.assertFalse(app.is_process_running())
class TestInheritedApp(Application):
    """Our inherited version of class"""

    def test_method(self):
        """This method should be called without any issues"""
        return self is not None
def test_application_inheritance(self):
    """Test that Application class can be inherited and has it's own methods"""
    # TestInheritedApp is defined as a nested class on this test case.
    app = ApplicationTestCases.TestInheritedApp()
    self.assertTrue(app.test_method())
def test_non_magic_application(self):
    """Test that allow_magic_lookup=False disables attribute-based lookup"""
    app = Application()
    # Magic lookup is on by default.
    self.assertEqual(app.allow_magic_lookup, True)
    app_no_magic = Application(allow_magic_lookup=False)
    self.assertEqual(app_no_magic.allow_magic_lookup, False)
    app_no_magic.start(_notepad_exe())
    # Explicit window()/by() lookups still work without magic lookup.
    window = app_no_magic.window(best_match="UntitledNotepad")
    dlg = window.by(best_match="Edit")
    dlg.draw_outline()
    # Attribute-style lookups must now raise instead of matching windows.
    with self.assertRaises(AttributeError):
        app_no_magic.UntitledNotepad
    with self.assertRaises(AttributeError):
        window.Edit
    app_no_magic.kill()
    app_no_magic.wait_for_process_exit()
class WindowSpecificationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
def setUp(self):
    """Start Notepad and keep specs for its dialog and Edit control"""
    Timings.defaults()
    self.app = Application(backend="win32").start(_notepad_exe())
    # A dialog-level and a control-level specification used by most tests.
    self.dlgspec = self.app.UntitledNotepad
    self.ctrlspec = self.app.UntitledNotepad.Edit
def tearDown(self):
    """Close the application after tests"""
    # close the application
    #self.app.UntitledNotepad.menu_select("File->Exit")
    self.app.kill()
def test__init__(self):
    """Test creating a new spec by hand"""
    wspec = WindowSpecification(
        dict(
            best_match=u"UntitledNotepad",
            app=self.app)
    )
    self.assertEqual(
        wspec.window_text(),
        u"Untitled - Notepad")
    # All three specs must carry a reference to the same Application.
    self.assertEqual(self.dlgspec.app, self.app)
    self.assertEqual(self.ctrlspec.app, self.app)
    self.assertEqual(wspec.app, self.app)
def test__init__both_keywords(self):
    """Test creating a new spec with ambiguity by process and app simultaneously"""
    # Supplying both 'app' and 'pid' is ambiguous and must raise KeyError.
    self.assertRaises(KeyError, WindowSpecification,
        dict(best_match=u"UntitledNotepad", app=self.app, pid=self.app.process)
    )
def test__call__(self):
    """Test that __call__() correctly raises an error"""
    # Specs are not callable - calling one must raise AttributeError.
    self.assertRaises(AttributeError, self.dlgspec)
    self.assertRaises(AttributeError, self.ctrlspec)
    # no best_match!
    wspec = WindowSpecification(
        dict(name=u"blah", app=self.app)
    )
    self.assertRaises(AttributeError, wspec)
def test_wrapper_object(self):
    """Test that we can get a control"""
    self.assertEqual(True, isinstance(self.dlgspec, WindowSpecification))
    # find() resolves the spec to a concrete HwndWrapper.
    self.assertEqual(
        True,
        isinstance(self.dlgspec.find(), hwndwrapper.HwndWrapper)
    )
def test_window(self):
    """test specifying a sub window of an existing specification"""
    # by() is the current API; window() is the legacy alias - both must work.
    sub_spec = self.dlgspec.by(class_name ="Edit")
    sub_spec_legacy = self.dlgspec.window(class_name = "Edit")
    self.assertEqual(True, isinstance(sub_spec, WindowSpecification))
    self.assertEqual(sub_spec.class_name(), "Edit")
    self.assertEqual(sub_spec_legacy.class_name(), "Edit")
def test__getitem__(self):
    """test item access of a windowspec"""
    self.assertEqual(
        True,
        isinstance(self.dlgspec['Edit'], WindowSpecification)
    )
    self.assertEqual(self.dlgspec['Edit'].class_name(), "Edit")
    # Item access below a control-level spec is not allowed.
    self.assertRaises(AttributeError, self.ctrlspec.__getitem__, 'edit')
def test_getattr(self):
    """Test getting attributes works correctly"""
    self.assertEqual(
        True,
        isinstance(self.dlgspec.Edit, WindowSpecification)
    )
    self.assertEqual(self.dlgspec.Edit.class_name(), "Edit")
    # check that getting a dialog attribute works correctly
    self.assertEqual(
        "Notepad",
        self.dlgspec.class_name())
    # Check handling 'parent' as a WindowSpecification
    spec = self.ctrlspec.by(parent=self.dlgspec, visible=True)
    self.assertEqual(spec.class_name(), "Edit")
def test_non_magic_getattr(self):
    """Test that allow_magic_lookup=False disables attribute lookup on specs"""
    ws = WindowSpecification(dict(best_match="Notepad"))
    self.assertEqual(ws.allow_magic_lookup, True)
    ws_no_magic = WindowSpecification(dict(best_match="Notepad"), allow_magic_lookup=False)
    self.assertEqual(ws_no_magic.allow_magic_lookup, False)
    # Explicit by() lookup still works; attribute lookup must not.
    dlg = ws_no_magic.by(best_match="Edit")
    has_focus = dlg.has_keyboard_focus()
    self.assertIn(has_focus, (True, False))
    with self.assertRaises(AttributeError):
        ws_no_magic.Edit
def test_exists(self):
    """Check that windows exist"""
    self.assertEqual(True, self.dlgspec.exists())
    self.assertEqual(True, self.dlgspec.exists(0))
    self.assertEqual(True, self.ctrlspec.exists())
    # TODO: test a control that is not visible but exists
    #self.assertEqual(True, self.app.DefaultIME.exists())
    # A missing window: exists() should return False after roughly the
    # requested timeout, neither much earlier nor much later.
    start = timestamp()
    self.assertEqual(False, self.app.BlahBlah.exists(timeout=.1))
    self.assertEqual(True, timestamp() - start < .3)
    start = timestamp()
    self.assertEqual(False, self.app.BlahBlah.exists(timeout=3))
    self.assertEqual(True, 2.7 < timestamp() - start < 3.3)
def test_exists_timing(self):
    """Test the timing of the exists() method.

    Windows that are present must be reported almost immediately; a
    missing window must block close to the requested timeout before
    exists() gives up and returns False.
    """
    # try ones that should be found immediately
    start = timestamp()
    self.assertEqual(True, self.dlgspec.exists())
    self.assertEqual(True, timestamp() - start < .3)
    start = timestamp()
    self.assertEqual(True, self.ctrlspec.exists())
    self.assertEqual(True, timestamp() - start < .3)
    # try one that should not be found: the call must wait roughly the
    # requested .5s timeout before returning False.
    # NOTE(fix): the original queried self.dlgspec (which exists, so it
    # returned instantly) and asserted `.49 > timedif < .6`, which chains
    # to `timedif < .49` and never exercised the timeout path at all.
    start = timestamp()
    self.assertEqual(False, self.app.BlahBlah.exists(timeout=.5))
    timedif = timestamp() - start
    self.assertEqual(True, .4 <= timedif < .7)
def test_find_all_dlg(self):
    """find_all() on the dialog spec returns exactly the one matching dialog"""
    dlg_spec_list = self.dlgspec.find_all()
    self.assertEqual(1, len(dlg_spec_list))
    self.assertEqual(self.dlgspec.find(), dlg_spec_list[0])
def test_find_all_notepad(self):
    """find_all() on a child spec returns all children (Edit and StatusBar)"""
    ctrls = self.dlgspec.by(parent=self.dlgspec).find_all()
    self.assertEqual(2, len(ctrls))
    self.assertEqual(ctrls[0], self.app.Notepad.Edit.find())
    self.assertEqual(ctrls[1], self.app.Notepad.StatusBar.find())
def test_wait(self):
    """test the functionality and timing of the wait method"""
    allowable_error = .2
    # The dialog is already up, so every wait() below must return almost
    # immediately.  Criteria strings are evidently case- and
    # whitespace-insensitive (e.g. "enaBleD ", " exiSTS").
    start = timestamp()
    self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("enaBleD "))
    time_taken = (timestamp() - start)
    # assertEqual against a known-wrong value so a timing failure
    # produces a readable report.
    if not 0 <= time_taken < (0 + 2 * allowable_error):
        self.assertEqual(.02, time_taken)
    start = timestamp()
    self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" ready"))
    self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
    start = timestamp()
    self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" exiSTS"))
    self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
    start = timestamp()
    self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" VISIBLE "))
    self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
    # Multiple space-separated criteria are also accepted.
    start = timestamp()
    self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" ready enabled"))
    self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
    start = timestamp()
    self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("visible exists "))
    self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
    start = timestamp()
    self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("exists "))
    self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
    start = timestamp()
    self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("actIve "))
    self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
    # An unknown criterion must raise SyntaxError.
    self.assertRaises(SyntaxError, self.dlgspec.wait, "Invalid_criteria")
def test_wait_non_existing(self):
    """test timing of the wait method for non-existing element"""
    allowable_error = .2
    start = timestamp()
    self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'exists')
    # The wait must give up close to the configured window_find_timeout.
    expected = Timings.window_find_timeout
    self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
def test_wait_invisible(self):
    """test timing of the wait method for non-existing element and existing invisible one"""
    # TODO: re-use an MFC sample for this test
    allowable_error = .2
    start = timestamp()
    self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'visible')
    expected = Timings.window_find_timeout
    self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
    # make sure Status Bar is not visible
    status_bar_menu = self.app.UntitledNotepad.menu().item('&View').sub_menu().item('&Status Bar')
    if status_bar_menu.is_checked():
        status_bar_menu.select()
    # check that existing invisible control is still found with 'exists' criterion
    # (visible=None disables the default visibility filter)
    status_bar_spec = self.app.UntitledNotepad.by(class_name="msctls_statusbar32", visible=None)
    self.assertEqual('StatusBar', status_bar_spec.wait('exists').friendly_class_name())
    # Adding 'visible' in either order must time out for the hidden control.
    start = timestamp()
    self.assertRaises(TimeoutError, status_bar_spec.wait, 'exists visible')
    self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
    start = timestamp()
    self.assertRaises(TimeoutError, status_bar_spec.wait, 'visible exists')
    self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
def test_wait_not(self):
    """
    Test that wait_not fails for all the following

    * raises an error when criteria are still met
    * timing is close to the timeout value
    """
    allowable_error = .16
    start = timestamp()
    self.assertRaises(TimeoutError, self.dlgspec.wait_not, "enaBleD ", .1, .05)
    taken = timestamp() - start
    # assertEqual against a known-wrong value so a timing failure gives a
    # readable report (same pattern as test_wait).
    # NOTE(fix): the original condition `.1 < (taken) > .1 + allowable_error`
    # only detected the "too slow" case; this also catches "too fast".
    if not .1 <= taken < .1 + allowable_error:
        self.assertEqual(.12, taken)
    start = timestamp()
    self.assertRaises(TimeoutError, self.dlgspec.wait_not, " ready", .1, .05)
    self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
    start = timestamp()
    self.assertRaises(TimeoutError, self.dlgspec.wait_not, " exiSTS", .1, .05)
    self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
    start = timestamp()
    self.assertRaises(TimeoutError, self.dlgspec.wait_not, " VISIBLE ", .1, .05)
    self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
    start = timestamp()
    self.assertRaises(TimeoutError, self.dlgspec.wait_not, " ready enabled", .1, .05)
    self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
    start = timestamp()
    self.assertRaises(TimeoutError, self.dlgspec.wait_not, "visible exists ", .1, .05)
    self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
    start = timestamp()
    self.assertRaises(TimeoutError, self.dlgspec.wait_not, "exists ", .1, .05)
    self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
    start = timestamp()
    self.assertRaises(TimeoutError, self.dlgspec.wait_not, "actIve ", .1, .05)
    self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
    # An unknown criterion must raise SyntaxError.
    self.assertRaises(SyntaxError, self.dlgspec.wait_not, "Invalid_criteria")
# def test_wait_ready(self):
# """Make sure the friendly class is set correctly"""
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitReady(.1, .05))
#
# # if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
#
# def testWaitNotReady(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotReady, .1, .05)
#
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
#
#
# def testWaitEnabled(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitEnabled(.1, .05))
#
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
#
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
#
# def testWaitNotEnabled(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotEnabled, .1, .05)
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
#
# def testWaitVisible(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitVisible(.1, .05))
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
# def testWaitNotVisible(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotVisible, .1, .05)
# # if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# def testWaitExists(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitExists(.1, .05))
#
# # if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# def testWaitNotExists(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotExists, .1, .05)
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
def test_depth(self):
    """Test that descendants() with depth works correctly"""
    self.dlgspec.menu_select("Format -> Font")
    # A deeper traversal must see more controls than a shallow one.
    self.assertNotEqual(
        len(self.app['Font'].descendants(depth=1)),
        len(self.app['Font'].descendants(depth=2)))
def test_dump_tree(self):
    """Make sure dump_tree() doesn't crash"""
    self.dlgspec.dump_tree()
    self.ctrlspec.dump_tree()
def test_dump_tree_file_output(self):
    """Make sure dump_tree() creates correct file"""
    output_filename = "test_dump_tree.txt"
    self.dlgspec.dump_tree(filename=output_filename)
    if os.path.isfile(output_filename):
        with open(output_filename, "r") as test_log_file:
            content = str(test_log_file.readlines())
        # The dump must include both control names and the by() lookup hints.
        self.assertTrue("'Untitled - NotepadEdit'" in content
                        and "'Edit'" in content)
        self.assertTrue(".by(class_name='msctls_statusbar32'" in content)
        os.remove(output_filename)
    else:
        self.fail("dump_tree can't create a file")
    # Repeat for a control-level spec.
    self.ctrlspec.dump_tree(filename=output_filename)
    if os.path.isfile(output_filename):
        with open(output_filename, "r") as test_log_file:
            content = str(test_log_file.readlines())
        self.assertTrue(".by(class_name='Edit')" in content)
        os.remove(output_filename)
    else:
        self.fail("dump_tree can't create a file")
def test_find_elements_re(self):
    """Test for bug #90: A crash in 'find_elements' when called with 'title_re' argument"""
    self.dlgspec.wait('visible')
    windows = findwindows.find_elements(name_re="Untitled - Notepad")
    self.assertTrue(len(windows) >= 1)
class ChildWindowSpecificationFromWrapperTests(unittest.TestCase):
    """Unit tests for child specs re-created from a wrapper object"""

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        # Note: by() is chained off find(), i.e. off a concrete wrapper,
        # not off a WindowSpecification.
        self.ctrlspec = self.app.window(found_index=0).find().by(class_name='Edit')

    def tearDown(self):
        """Close the application after tests"""
        self.app.kill()

    def test_wrapper_object(self):
        """Test that we can get a control"""
        self.assertEqual(True, isinstance(self.ctrlspec, WindowSpecification))
        self.assertEqual(
            True,
            isinstance(self.ctrlspec.find(), hwndwrapper.HwndWrapper)
        )

    def test_parent(self):
        """Test recreating specification from parent dialog wrapper"""
        dlg = self.ctrlspec.parent()
        sub_spec = dlg.by(class_name ="Edit")
        self.assertEqual(True, isinstance(sub_spec, WindowSpecification))
        self.assertEqual(sub_spec.class_name(), "Edit")
        self.assertEqual(self.ctrlspec.handle, sub_spec.handle)

    def test_dump_tree_file_output(self):
        """Make sure dump_tree() creates correct file"""
        output_filename = "test_dump_tree.txt"
        self.ctrlspec.dump_tree(filename=output_filename)
        if os.path.isfile(output_filename):
            with open(output_filename, "r") as test_log_file:
                content = str(test_log_file.readlines())
            self.assertTrue(".by(class_name='Edit')" in content)
            os.remove(output_filename)
        else:
            self.fail("dump_tree can't create a file")

    def test_properties(self):
        """Check control properties"""
        self.assertEqual(self.ctrlspec.class_name(), "Edit")
        self.assertTrue(self.ctrlspec.exists())
if UIA_support:
    class UIAWindowSpecificationTestCases(unittest.TestCase):
        """Unit tests for WindowSpecification with the UIA backend"""

        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.defaults()
            self.app = Application(backend="uia").start(_notepad_exe())
            self.dlgspec = self.app.UntitledNotepad

        def tearDown(self):
            """Close the application after tests"""
            self.app.kill()

        def test_child_window_depth(self):
            """Test that child_window() with depth works correctly"""
            # TODO fix same elements at different tree levels on win32 backend
            self.dlgspec.menu_select("Format -> Font")
            font = self.dlgspec.by(name="Font")
            # The list box is 2 levels down, so a depth=1 search must fail
            # while depth=2 finds it.
            with self.assertRaises(findbestmatch.MatchError):
                font.by(best_match="ListBox0", depth=1).find()
            font.by(best_match="ListBox0", depth=2).find()
class WaitUntilDecoratorTests(unittest.TestCase):
    """Unit tests for always_wait_until and always_wait_until_passes decorators"""

    def test_always_wait_until_decorator_success(self):
        """Test always_wait_until_decorator success"""
        @always_wait_until(4, 2)
        def foo():
            return True
        self.assertTrue(foo())

    def test_always_wait_until_decorator_failure(self):
        """Test wait_until_decorator failure"""
        # The wrapped function never returns a truthy value -> TimeoutError.
        @always_wait_until(4, 2)
        def foo():
            return False
        self.assertRaises(TimeoutError, foo)

    def test_always_wait_until_passes_decorator_success(self):
        """Test always_wait_until_passes_decorator success"""
        @always_wait_until_passes(4, 2)
        def foo():
            return True
        self.assertTrue(foo())

    def test_always_wait_until_passes_decorator_failure(self):
        """Test always_wait_until_passes_decorator failure"""
        # The wrapped function always raises -> the decorator times out.
        @always_wait_until_passes(4, 2)
        def foo():
            raise Exception("Unexpected Error in foo")
        self.assertRaises(TimeoutError, foo)
class MultiLevelWindowSpecificationTests(unittest.TestCase):
    """Unit tests for multi-level (3+) WindowSpecification objects"""

    # The fixture differs per backend: UIA uses the RowList sample,
    # win32 uses the CmnCtrl3 sample.
    if UIA_support:
        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.slow()
            self.app = Application(backend='uia').start(os.path.join(mfc_samples_folder, u"RowList.exe"))
            self.dlg = self.app.RowListSampleApplication

        def tearDown(self):
            """Close the application after tests"""
            self.dlg.CloseButton.click()
            self.dlg.wait_not('visible')

        def test_3level_specification(self):
            """Test that controls can be accessed by 3 levels of attributes"""
            self.dlg.Toolbar.About.click()
            self.dlg.AboutRowList.OK.click()
            #self.dlg.AboutRowList.wait_not('visible') # XXX: it takes more than 50 seconds!
    else: # Win32
        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.defaults()
            self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
            self.dlg = self.app.CommonControlsSample

        def tearDown(self):
            """Close the application after tests"""
            self.dlg.SendMessage(win32defines.WM_CLOSE)

        def test_4level_specification(self):
            """Test that controls can be accessed by 4 levels of attributes"""
            self.assertEqual(self.dlg.CPagerCtrl.Pager.Toolbar.button_count(), 12)
if UIA_support:
    class DesktopUiaWindowSpecificationTests(unittest.TestCase):
        """Unit tests for Desktop(backend='uia') object"""

        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.slow()
            # Open an Explorer window on the MFC samples folder.
            self.app = Application().start('explorer.exe "' + mfc_samples_folder_32 + '"')
            self.desktop = Desktop(backend='uia')
            self.desktop_no_magic = Desktop(backend='uia', allow_magic_lookup=False)

        def tearDown(self):
            """Close the application after tests"""
            self.desktop.MFC_samplesDialog.close()
            self.desktop.MFC_samplesDialog.wait_not('exists')

        def test_folder_list(self):
            """Test that ListViewWrapper returns correct files list in explorer.exe"""
            files_list = self.desktop.MFC_samplesDialog.Shell_Folder_View.Items_View.find()
            self.assertEqual([item.window_text() for item in files_list.get_items()],
                             [u'x64', u'BCDialogMenu.exe', u'CmnCtrl1.exe', u'CmnCtrl2.exe', u'CmnCtrl3.exe',
                              u'CtrlTest.exe', u'mfc100u.dll', u'NewControls.exe', u'RebarTest.exe', u'RowList.exe', u'TrayMenu.exe'])
            self.assertEqual(files_list.item('RebarTest.exe').window_text(), 'RebarTest.exe')

        def test_set_backend_to_window_uia(self):
            """Set backend to method window(), expect exception ValueError"""
            # A Desktop is bound to one backend; overriding it must fail.
            with self.assertRaises(ValueError):
                self.desktop.window(backend='uia', name='MFC_samplesDialog')
            with self.assertRaises(ValueError):
                self.desktop.window(backend='win32', name='MFC_samplesDialog')

        def test_get_list_of_windows_uia(self):
            """Test that method .windows() returns a non-empty list of windows"""
            dlgs = self.desktop.windows()
            self.assertTrue(len(dlgs) > 1)

        def test_set_backend_to_windows_uia(self):
            """Set backend to method .windows(), expect exception ValueError"""
            with self.assertRaises(ValueError):
                self.desktop.windows(backend='win32')
            with self.assertRaises(ValueError):
                self.desktop.windows(backend='uia')

        def test_only_visible_windows_uia(self):
            """Set visible=True to method .windows()"""
            dlgs = self.desktop.windows(visible=True)
            self.assertTrue(all([win.is_visible() for win in dlgs]))

        def test_only_enable_windows_uia(self):
            """Set enabled=True to the method windows"""
            dlgs = self.desktop.windows(enabled=True)
            self.assertTrue(all([win.is_enabled() for win in dlgs]))

        def test_non_magic_desktop(self):
            """Attribute lookup must be disabled when allow_magic_lookup=False"""
            from pywinauto.controls.uiawrapper import UIAWrapper
            self.assertEqual(self.desktop.allow_magic_lookup, True)
            self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
            dlgs = self.desktop_no_magic.windows()
            self.assertTrue(len(dlgs) > 1)
            window = self.desktop_no_magic.window(name="MFC_samples")
            # The flag propagates to specs created from this Desktop.
            self.assertEqual(window.allow_magic_lookup, False)
            dlg = window.by(class_name="ShellTabWindowClass").find()
            self.assertIsInstance(dlg, UIAWrapper)
            has_focus = dlg.has_keyboard_focus()
            self.assertIn(has_focus, (True, False))
            with self.assertRaises(AttributeError):
                self.desktop_no_magic.MFC_samples
            with self.assertRaises(AttributeError):
                window.ShellTabWindowClass
class DesktopWin32WindowSpecificationTests(unittest.TestCase):
    """Unit tests for Desktop(backend='win32') object"""

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
        self.desktop = Desktop(backend='win32')
        self.desktop_no_magic = Desktop(backend='win32', allow_magic_lookup=False)
        self.window_title = 'Common Controls Sample'

    def tearDown(self):
        """Close the application after tests"""
        self.desktop.window(name=self.window_title, pid=self.app.process).SendMessage(win32defines.WM_CLOSE)

    def test_simple_access_through_desktop(self):
        """Test that controls can be accessed by 4 levels of attributes"""
        dlg = self.desktop.window(name=self.window_title, pid=self.app.process)
        self.assertEqual(dlg.Pager.Toolbar.button_count(), 12)

    def test_set_backend_to_window_win32(self):
        """Set backend to method window(), expect exception ValueError"""
        # A Desktop is bound to one backend; overriding it must fail.
        with self.assertRaises(ValueError):
            self.desktop.window(backend='uia', name=self.window_title, pid=self.app.process)
        with self.assertRaises(ValueError):
            self.desktop.window(backend='win32', name=self.window_title, pid=self.app.process)

    def test_get_list_of_windows_win32(self):
        """Test that method .windows() returns a non-empty list of windows"""
        dlgs = self.desktop.windows()
        self.assertTrue(len(dlgs) > 1)
        window_titles = [win_obj.window_text() for win_obj in dlgs]
        self.assertTrue(self.window_title in window_titles)

    def test_set_backend_to_windows_win32(self):
        """Set backend to method windows, expect exception ValueError"""
        with self.assertRaises(ValueError):
            self.desktop.windows(backend='win32')
        with self.assertRaises(ValueError):
            self.desktop.windows(backend='uia')

    def test_only_visible_windows_win32(self):
        """Set visible=True to method .windows()"""
        dlgs = self.desktop.windows(visible=True)
        self.assertTrue(all([win.is_visible() for win in dlgs]))

    def test_only_enable_windows_win32(self):
        """Set enabled=True to the method windows"""
        dlgs = self.desktop.windows(enabled=True)
        self.assertTrue(all([win.is_enabled() for win in dlgs]))

    def test_from_point_win32(self):
        """Test method Desktop(backend='win32').from_point(x, y)"""
        combo = self.app.Common_Controls_Sample.ComboBox.find()
        x, y = combo.rectangle().mid_point()
        combo_from_point = self.desktop.from_point(x, y)
        self.assertEqual(combo, combo_from_point)

    def test_top_from_point_win32(self):
        """Test method Desktop(backend='win32').top_from_point(x, y)"""
        combo = self.app.Common_Controls_Sample.ComboBox.find()
        dlg = self.app.Common_Controls_Sample.find()
        x, y = combo.rectangle().mid_point()
        # top_from_point() resolves to the top-level dialog, not the combo.
        dlg_from_point = self.desktop.top_from_point(x, y)
        self.assertEqual(dlg, dlg_from_point)

    def test_non_magic_desktop(self):
        """Attribute lookup must be disabled when allow_magic_lookup=False"""
        self.assertEqual(self.desktop.allow_magic_lookup, True)
        self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
        window = self.desktop_no_magic.window(name=self.window_title, pid=self.app.process)
        # The flag propagates to specs created from this Desktop.
        self.assertEqual(window.allow_magic_lookup, False)
        dlg = window.by(class_name="msctls_trackbar32").find()
        self.assertIsInstance(dlg, TrackbarWrapper)
        pos = dlg.get_position()
        self.assertIsInstance(pos, six.integer_types)
        with self.assertRaises(AttributeError):
            getattr(self.desktop_no_magic, self.window_title.replace(" ", "_"))
        with self.assertRaises(AttributeError):
            window.msctls_trackbar32
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
test_gc.py | import unittest
import unittest.mock
from test.support import (verbose, refcount_test, run_unittest,
cpython_only)
from test.support.import_helper import import_module
from test.support.os_helper import temp_dir, TESTFN, unlink
from test.support.script_helper import assert_python_ok, make_script
from test.support import threading_helper
import gc
import sys
import sysconfig
import textwrap
import threading
import time
import weakref
try:
    from _testcapi import with_tp_del
except ImportError:
    # _testcapi is not always available; provide a stand-in decorator whose
    # instances cannot be created, so tests that need tp_del fail with a
    # clear TypeError instead of an ImportError at module load.
    def with_tp_del(cls):
        class C(object):
            def __new__(cls, *args, **kwargs):
                raise TypeError('requires _testcapi.with_tp_del')
        return C

try:
    from _testcapi import ContainerNoGC
except ImportError:
    # Sentinel: tests that need the C helper type check for None and skip.
    ContainerNoGC = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
    """Helper for the bug-1055820 weakref tests.

    Every instance references itself through ``loop``, so the moment the
    last external reference disappears the instance becomes cyclic trash
    that only the garbage collector can reclaim.
    """

    def __init__(self, i):
        # Deliberate self-loop first, then stash the payload value.
        self.loop = self
        self.i = i
class GC_Detector(object):
    # Create an instance I.  Then gc hasn't happened again so long as
    # I.gc_happened is false.
    def __init__(self):
        self.gc_happened = False
        def it_happened(ignored):
            self.gc_happened = True
        # Create a piece of cyclic trash that triggers it_happened when
        # gc collects it.  The C1055820 instance is cyclic (self-loop),
        # so only a gc pass - not plain refcounting - can free it and
        # fire the weakref callback.
        self.wr = weakref.ref(C1055820(666), it_happened)
@with_tp_del
class Uncollectable(object):
    """Create a reference cycle with multiple __del__ methods.

    An object in a reference cycle will never have zero references,
    and so must be garbage collected.  If one or more objects in the
    cycle have __del__ methods, the gc refuses to guess an order,
    and leaves the cycle uncollected.
    """
    def __init__(self, partner=None):
        # With no partner given, build the two-object cycle ourselves.
        if partner is None:
            self.partner = Uncollectable(partner=self)
        else:
            self.partner = partner
    def __tp_del__(self):
        pass
# Determine whether this interpreter was built with NDEBUG (i.e. C asserts
# compiled out).  Prefer inspecting the build flags; fall back to a runtime
# heuristic when they are unavailable.
if sysconfig.get_config_vars().get('PY_CFLAGS', ''):
    BUILD_WITH_NDEBUG = ('-DNDEBUG' in sysconfig.get_config_vars()['PY_CFLAGS'])
else:
    # Usually, sys.gettotalrefcount() is only present if Python has been
    # compiled in debug mode. If it's missing, expect that Python has
    # been released in release mode: with NDEBUG defined.
    BUILD_WITH_NDEBUG = (not hasattr(sys, 'gettotalrefcount'))
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
    # A list containing itself is cyclic trash: after the name is dropped,
    # exactly one object should be reported collected.
    l = []
    l.append(l)
    gc.collect()  # flush any pre-existing garbage so the count is exact
    del l
    self.assertEqual(gc.collect(), 1)
def test_dict(self):
    # A dict referencing itself as a value forms a one-object cycle.
    d = {}
    d[1] = d
    gc.collect()  # flush any pre-existing garbage so the count is exact
    del d
    self.assertEqual(gc.collect(), 1)
def test_tuple(self):
    # since tuples are immutable we close the loop with a list
    l = []
    t = (l,)
    l.append(t)
    gc.collect()  # flush any pre-existing garbage so the count is exact
    del t
    del l
    # Both the tuple and the list are in the cycle -> 2 objects collected.
    self.assertEqual(gc.collect(), 2)
def test_class(self):
    # A class referencing itself via an attribute is cyclic trash.
    class A:
        pass
    A.a = A
    gc.collect()
    del A
    # Classes drag extra objects (dict, etc.) into the cycle, so only
    # assert that something was collected rather than an exact count.
    self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
    # A new-style class is cyclic by itself (type machinery references).
    class A(object):
        pass
    gc.collect()
    del A
    self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
    # An instance referencing itself forms a cycle with its __dict__.
    class A:
        pass
    a = A()
    a.a = a
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
    # Same as test_instance but for a new-style class...
    class A(object):
        pass
    a = A()
    a.a = a
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
    # ...and for a class with a nontrivial MRO (list subclass + A).
    class B(list):
        pass
    class C(B, A):
        pass
    a = C()
    a.a = a
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
    # Dropping the classes themselves is also collectible garbage.
    del B, C
    self.assertNotEqual(gc.collect(), 0)
    # A class holding an instance of itself, then dropped, is collectible;
    # a second pass finds nothing left.
    A.a = A()
    del A
    self.assertNotEqual(gc.collect(), 0)
    self.assertEqual(gc.collect(), 0)
def test_method(self):
    # Tricky: self.__init__ is a bound method, it references the instance.
    class A:
        def __init__(self):
            self.init = self.__init__
    a = A()
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
    # A() is uncollectable if it is part of a cycle, make sure it shows up
    # in gc.garbage.
    @with_tp_del
    class A:
        def __tp_del__(self): pass
    class B:
        pass
    a = A()
    a.a = a
    id_a = id(a)
    b = B()
    b.b = b
    gc.collect()
    del a
    del b
    self.assertNotEqual(gc.collect(), 0)
    # The tp_del cycle must have been parked in gc.garbage rather than freed.
    for obj in gc.garbage:
        if id(obj) == id_a:
            del obj.a  # break the cycle so the object can finally die
            break
    else:
        self.fail("didn't find obj in garbage (finalizer)")
    gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
    # A() is uncollectable if it is part of a cycle, make sure it shows up
    # in gc.garbage.  Same as test_legacy_finalizer but with new-style classes.
    @with_tp_del
    class A(object):
        def __tp_del__(self): pass
    class B(object):
        pass
    a = A()
    a.a = a
    id_a = id(a)
    b = B()
    b.b = b
    gc.collect()
    del a
    del b
    self.assertNotEqual(gc.collect(), 0)
    # The tp_del cycle must have been parked in gc.garbage rather than freed.
    for obj in gc.garbage:
        if id(obj) == id_a:
            del obj.a  # break the cycle so the object can finally die
            break
    else:
        self.fail("didn't find obj in garbage (finalizer)")
    gc.garbage.remove(obj)
def test_function(self):
    # Tricky: f -> d -> f, code should call d.clear() after the exec to
    # break the cycle.
    d = {}
    exec("def f(): pass\n", d)
    gc.collect()
    del d
    # The function and its globals dict form a 2-object cycle.
    self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
    # A frame object stored in one of its own locals creates a cycle.
    def f():
        frame = sys._getframe()
    gc.collect()
    f()
    self.assertEqual(gc.collect(), 1)
def test_saveall(self):
    # Verify that cyclic garbage like lists show up in gc.garbage if the
    # SAVEALL option is enabled.

    # First make sure we don't save away other stuff that just happens to
    # be waiting for collection.
    gc.collect()
    # if this fails, someone else created immortal trash
    self.assertEqual(gc.garbage, [])

    L = []
    L.append(L)
    id_L = id(L)

    debug = gc.get_debug()
    gc.set_debug(debug | gc.DEBUG_SAVEALL)
    del L
    gc.collect()
    gc.set_debug(debug)  # restore previous debug flags

    # With SAVEALL the collected cycle is appended to gc.garbage
    # instead of being freed.
    self.assertEqual(len(gc.garbage), 1)
    obj = gc.garbage.pop()
    self.assertEqual(id(obj), id_L)
def test_del(self):
    # __del__ methods can trigger collection; make this happen by setting
    # the collection threshold to 1 allocation.
    thresholds = gc.get_threshold()
    gc.enable()
    gc.set_threshold(1)
    class A:
        def __del__(self):
            dir(self)  # allocate during finalization -> may trigger gc
    a = A()
    del a
    gc.disable()
    gc.set_threshold(*thresholds)  # restore original thresholds
def test_del_newclass(self):
    """Same as test_del, but with an explicit new-style class."""
    # __del__ methods can trigger collection, make this to happen
    thresholds = gc.get_threshold()
    gc.enable()
    gc.set_threshold(1)  # collect on (almost) every allocation
    class A(object):
        def __del__(self):
            dir(self)  # allocates during finalization, may re-enter gc
    a = A()
    del a
    gc.disable()
    gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
    """gc.get_count() must report young-generation growth after an
    allocation and zeroed older generations after a full collect."""
    gc.collect()
    a, b, c = gc.get_count()
    x = []  # one container allocation into generation 0
    d, e, f = gc.get_count()
    self.assertEqual((b, c), (0, 0))
    self.assertEqual((e, f), (0, 0))
    # This is less fragile than asserting that a equals 0.
    self.assertLess(a, 5)
    # Between the two calls to get_count(), at least one object was
    # created (the list).
    self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
    """Collecting generation N must survive objects into generation N+1
    and bump the corresponding counters."""
    gc.collect()
    # This object will "trickle" into generation N + 1 after
    # each call to collect(N)
    x = []
    gc.collect(0)
    # x is now in gen 1
    a, b, c = gc.get_count()
    gc.collect(1)
    # x is now in gen 2
    d, e, f = gc.get_count()
    gc.collect(2)
    # x is now in gen 3
    g, h, i = gc.get_count()
    # We don't check a, d, g since their exact values depends on
    # internal implementation details of the interpreter.
    self.assertEqual((b, c), (1, 0))
    self.assertEqual((e, f), (0, 1))
    self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
    """Deallocating deeply nested containers (the "trashcan" mechanism)
    must not crash, even with collections forced from __del__."""
    class Ouch:
        n = 0
        def __del__(self):
            Ouch.n = Ouch.n + 1
            if Ouch.n % 17 == 0:  # occasionally collect mid-deallocation
                gc.collect()
    # "trashcan" is a hack to prevent stack overflow when deallocating
    # very deeply nested tuples etc. It works in part by abusing the
    # type pointer and refcount fields, and that can yield horrible
    # problems when gc tries to traverse the structures.
    # If this test fails (as it does in 2.0, 2.1 and 2.2), it will
    # most likely die via segfault.
    # Note: In 2.3 the possibility for compiling without cyclic gc was
    # removed, and that in turn allows the trashcan mechanism to work
    # via much simpler means (e.g., it never abuses the type pointer or
    # refcount fields anymore). Since it's much less likely to cause a
    # problem now, the various constants in this expensive (we force a lot
    # of full collections) test are cut back from the 2.2 version.
    gc.enable()
    N = 150
    for count in range(2):
        t = []
        for i in range(N):  # nested lists
            t = [t, Ouch()]
        u = []
        for i in range(N):  # second independent nesting
            u = [u, Ouch()]
        v = {}
        for i in range(N):  # nested dicts
            v = {1: v, 2: Ouch()}
    gc.disable()
def test_trashcan_threads(self):
    """The trashcan mechanism must be thread-safe (issue #13992)."""
    # Issue #13992: trashcan mechanism should be thread-safe
    NESTING = 60
    N_THREADS = 2
    def sleeper_gen():
        """A generator that releases the GIL when closed or dealloc'ed."""
        try:
            yield
        finally:
            time.sleep(0.000001)
    class C(list):
        # Appending to a list is atomic, which avoids the use of a lock.
        inits = []
        dels = []
        def __init__(self, alist):
            self[:] = alist
            C.inits.append(None)
        def __del__(self):
            # This __del__ is called by subtype_dealloc().
            C.dels.append(None)
            # `g` will release the GIL when garbage-collected. This
            # helps assert subtype_dealloc's behaviour when threads
            # switch in the middle of it.
            g = sleeper_gen()
            next(g)
            # Now that __del__ is finished, subtype_dealloc will proceed
            # to call list_dealloc, which also uses the trashcan mechanism.
    def make_nested():
        """Create a sufficiently nested container object so that the
        trashcan mechanism is invoked when deallocating it."""
        x = C([])
        for i in range(NESTING):
            x = [C([x])]
        del x
    def run_thread():
        """Exercise make_nested() in a loop."""
        while not exit:  # `exit` list doubles as a stop flag
            make_nested()
    old_switchinterval = sys.getswitchinterval()
    sys.setswitchinterval(1e-5)  # force very frequent thread switches
    try:
        exit = []
        threads = []
        for i in range(N_THREADS):
            t = threading.Thread(target=run_thread)
            threads.append(t)
        with threading_helper.start_threads(threads, lambda: exit.append(1)):
            time.sleep(1.0)
    finally:
        sys.setswitchinterval(old_switchinterval)
    gc.collect()
    # Every construction must be matched by a destruction; a mismatch
    # would indicate objects corrupted/lost by a racy trashcan.
    self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
    """gc must not invoke __getattr__ while probing trash for __del__."""
    class Boom:
        def __getattr__(self, someattribute):
            del self.attr  # mutates the trash graph as a side effect
            raise AttributeError
    a = Boom()
    b = Boom()
    a.attr = b
    b.attr = a
    gc.collect()
    garbagelen = len(gc.garbage)
    del a, b
    # a<->b are in a trash cycle now. Collection will invoke
    # Boom.__getattr__ (to see whether a and b have __del__ methods), and
    # __getattr__ deletes the internal "attr" attributes as a side effect.
    # That causes the trash cycle to get reclaimed via refcounts falling to
    # 0, thus mutating the trash graph as a side effect of merely asking
    # whether __del__ exists. This used to (before 2.3b1) crash Python.
    # Now __getattr__ isn't called.
    self.assertEqual(gc.collect(), 4)  # a, b, and their two __dict__s
    self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
    """Variant of test_boom where __getattr__ only breaks the cycle on
    its second invocation."""
    class Boom2:
        def __init__(self):
            self.x = 0
        def __getattr__(self, someattribute):
            self.x += 1
            if self.x > 1:
                del self.attr  # only breaks the cycle the second time
            raise AttributeError
    a = Boom2()
    b = Boom2()
    a.attr = b
    b.attr = a
    gc.collect()
    garbagelen = len(gc.garbage)
    del a, b
    # Much like test_boom(), except that __getattr__ doesn't break the
    # cycle until the second time gc checks for __del__. As of 2.3b1,
    # there isn't a second time, so this simply cleans up the trash cycle.
    # We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
    # reclaimed this way.
    self.assertEqual(gc.collect(), 4)
    self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
    """test_boom with an explicit new-style class."""
    # boom__new and boom2_new are exactly like boom and boom2, except use
    # new-style classes.
    class Boom_New(object):
        def __getattr__(self, someattribute):
            del self.attr
            raise AttributeError
    a = Boom_New()
    b = Boom_New()
    a.attr = b
    b.attr = a
    gc.collect()
    garbagelen = len(gc.garbage)
    del a, b
    self.assertEqual(gc.collect(), 4)  # a, b, and their two __dict__s
    self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
    """test_boom2 with an explicit new-style class."""
    class Boom2_New(object):
        def __init__(self):
            self.x = 0
        def __getattr__(self, someattribute):
            self.x += 1
            if self.x > 1:
                del self.attr
            raise AttributeError
    a = Boom2_New()
    b = Boom2_New()
    a.attr = b
    b.attr = a
    gc.collect()
    garbagelen = len(gc.garbage)
    del a, b
    self.assertEqual(gc.collect(), 4)  # a, b, and their two __dict__s
    self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
    """gc.get_referents must report exactly the objects directly reachable
    from its argument(s), and nothing for atomic objects."""
    alist = [1, 3, 5]
    # A list and a tuple with the same elements report the same referents.
    for container in (alist, tuple(alist)):
        referents = gc.get_referents(container)
        referents.sort()
        self.assertEqual(referents, alist)
    # A dict reports both its keys and its values.
    adict = {1: 3, 5: 7}
    referents = gc.get_referents(adict)
    referents.sort()
    self.assertEqual(referents, [1, 3, 5, 7])
    # With several arguments, the referents of all of them are combined.
    referents = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
    referents.sort()
    self.assertEqual(referents, [0, 0] + list(range(5)))
    # Atomic objects have no referents at all.
    self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
    """Atomic built-ins are not tracked by the collector; mutable
    containers and ordinary user-defined instances are."""
    # Atomic built-in types are not tracked, user-defined objects and
    # mutable containers are.
    # NOTE: types with special optimizations (e.g. tuple) have tests
    # in their own test files instead.
    for atomic in (None, 1, 1.0, 1.0 + 5.0j, True, False, b"a", "a",
                   bytearray(b"a"), type, int, object, object()):
        self.assertFalse(gc.is_tracked(atomic))
    class UserClass:
        pass
    class UserInt(int):
        pass
    # Base class is object; no extra fields.
    class UserClassSlots:
        __slots__ = ()
    # Base class is fixed size larger than object; no extra fields.
    class UserFloatSlots(float):
        __slots__ = ()
    # Base class is variable size; no extra fields.
    class UserIntSlots(int):
        __slots__ = ()
    for tracked in (gc, UserClass, UserClass(), UserInt(), [], set()):
        self.assertTrue(gc.is_tracked(tracked))
    # Empty-__slots__ instances carry no gc-visible state, so they are
    # exempt from tracking.
    for untracked in (UserClassSlots(), UserFloatSlots(), UserIntSlots()):
        self.assertFalse(gc.is_tracked(untracked))
def test_is_finalized(self):
    """gc.is_finalized() must become True once an object's finalizer ran,
    even if the finalizer resurrected the object."""
    # Objects not tracked by the GC always return False.
    self.assertFalse(gc.is_finalized(3))
    storage = []
    class Lazarus:
        def __del__(self):
            storage.append(self)  # resurrect ourselves
    lazarus = Lazarus()
    self.assertFalse(gc.is_finalized(lazarus))
    del lazarus
    gc.collect()
    lazarus = storage.pop()
    self.assertTrue(gc.is_finalized(lazarus))
def test_bug1055820b(self):
    """Weakref callbacks fired during collection must not observe
    instances whose dicts gc has already cleared (bug 1055820)."""
    # Corresponds to temp2b.py in the bug report.
    ouch = []
    def callback(ignored):
        # Materialize strong references to the dying instances.
        ouch[:] = [wr() for wr in WRs]
    Cs = [C1055820(i) for i in range(2)]
    WRs = [weakref.ref(c, callback) for c in Cs]
    c = None
    gc.collect()
    self.assertEqual(len(ouch), 0)
    # Make the two instances trash, and collect again. The bug was that
    # the callback materialized a strong reference to an instance, but gc
    # cleared the instance's dict anyway.
    Cs = None
    gc.collect()
    self.assertEqual(len(ouch), 2) # else the callbacks didn't run
    for x in ouch:
        # If the callback resurrected one of these guys, the instance
        # would be damaged, with an empty __dict__.
        self.assertEqual(x, None)
def test_bug21435(self):
    """Regression check for issue 21435 (finalize_garbage ordering)."""
    # This is a poor test - its only virtue is that it happened to
    # segfault on Tim's Windows box before the patch for 21435 was
    # applied. That's a nasty bug relying on specific pieces of cyclic
    # trash appearing in exactly the right order in finalize_garbage()'s
    # input list.
    # But there's no reliable way to force that order from Python code,
    # so over time chances are good this test won't really be testing much
    # of anything anymore. Still, if it blows up, there's _some_
    # problem ;-)
    gc.collect()
    class A:
        pass
    class B:
        def __init__(self, x):
            self.x = x
        def __del__(self):
            self.attr = None
    def do_work():
        a = A()
        b = B(A())
        a.attr = b
        b.attr = a
    do_work()
    gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
    """Uncollectable objects left at interpreter shutdown must trigger a
    ResourceWarning whose verbosity follows the gc debug flags."""
    import subprocess
    # The child creates two uncollectable objects (X has a legacy tp_del,
    # is self-cyclic, and drags along a second X instance).
    code = """if 1:
        import gc
        import _testcapi
        @_testcapi.with_tp_del
        class X:
            def __init__(self, name):
                self.name = name
            def __repr__(self):
                return "<X %%r>" %% self.name
            def __tp_del__(self):
                pass
        x = X('first')
        x.x = x
        x.y = X('second')
        del x
        gc.set_debug(%s)
    """
    def run_command(code):
        # Run the snippet in a subprocess and return its stderr; -Wd makes
        # ResourceWarning visible.
        p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        p.stdout.close()
        p.stderr.close()
        self.assertEqual(p.returncode, 0)
        self.assertEqual(stdout, b"")
        return stderr
    stderr = run_command(code % "0")
    self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
                  b"shutdown; use", stderr)
    self.assertNotIn(b"<X 'first'>", stderr)
    # With DEBUG_UNCOLLECTABLE, the garbage list gets printed
    stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
    self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
                  b"shutdown", stderr)
    self.assertTrue(
        (b"[<X 'first'>, <X 'second'>]" in stderr) or
        (b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
    # With DEBUG_SAVEALL, no additional message should get printed
    # (because gc.garbage also contains normally reclaimable cyclic
    # references, and its elements get printed at runtime anyway).
    stderr = run_command(code % "gc.DEBUG_SAVEALL")
    self.assertNotIn(b"uncollectable objects at shutdown", stderr)
def test_gc_main_module_at_shutdown(self):
    """A cycle rooted in __main__ must still be collected at shutdown
    (so its __del__ runs)."""
    # Create a reference cycle through the __main__ module and check
    # it gets collected at interpreter shutdown.
    code = """if 1:
        class C:
            def __del__(self):
                print('__del__ called')
        l = [C()]
        l.append(l)
        """
    rc, out, err = assert_python_ok('-c', code)
    self.assertEqual(out.strip(), b'__del__ called')
def test_gc_ordinary_module_at_shutdown(self):
    """Same as test_gc_main_module_at_shutdown, but the cycle lives in an
    imported (non-__main__) module."""
    # Same as above, but with a non-__main__ module.
    with temp_dir() as script_dir:
        module = """if 1:
            class C:
                def __del__(self):
                    print('__del__ called')
            l = [C()]
            l.append(l)
            """
        code = """if 1:
            import sys
            sys.path.insert(0, %r)
            import gctest
            """ % (script_dir,)
        make_script(script_dir, 'gctest', module)
        rc, out, err = assert_python_ok('-c', code)
        self.assertEqual(out.strip(), b'__del__ called')
def test_global_del_SystemExit(self):
    """A self-cyclic global's __del__ must run even when the script exits
    via SystemExit."""
    code = """if 1:
        class ClassWithDel:
            def __del__(self):
                print('__del__ called')
        a = ClassWithDel()
        a.link = a
        raise SystemExit(0)"""
    self.addCleanup(unlink, TESTFN)
    # Run from a file (not -c) so shutdown goes through the script path.
    with open(TESTFN, 'w') as script:
        script.write(code)
    rc, out, err = assert_python_ok(TESTFN)
    self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
    """gc.get_stats() must expose one dict per generation and increment
    the 'collections' counters for exactly the collected generations."""
    stats = gc.get_stats()
    self.assertEqual(len(stats), 3)  # one entry per generation
    for st in stats:
        self.assertIsInstance(st, dict)
        self.assertEqual(set(st),
                         {"collected", "collections", "uncollectable"})
        self.assertGreaterEqual(st["collected"], 0)
        self.assertGreaterEqual(st["collections"], 0)
        self.assertGreaterEqual(st["uncollectable"], 0)
    # Check that collection counts are incremented correctly
    if gc.isenabled():
        self.addCleanup(gc.enable)
        gc.disable()  # prevent automatic collections from skewing counts
    old = gc.get_stats()
    gc.collect(0)
    new = gc.get_stats()
    self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
    self.assertEqual(new[1]["collections"], old[1]["collections"])
    self.assertEqual(new[2]["collections"], old[2]["collections"])
    gc.collect(2)
    new = gc.get_stats()
    self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
    self.assertEqual(new[1]["collections"], old[1]["collections"])
    self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
def test_freeze(self):
    """Freezing must move live objects into the permanent generation and
    unfreezing must empty it again."""
    gc.freeze()
    frozen = gc.get_freeze_count()
    self.assertGreater(frozen, 0)
    gc.unfreeze()
    self.assertEqual(gc.get_freeze_count(), 0)
def test_get_objects(self):
    """gc.get_objects(generation=N) must track an object as each
    collect(generation=N) promotes it to the next generation."""
    gc.collect()
    l = []
    l.append(l)  # self-cycle keeps the list alive and gc-tracked
    # Freshly created -> generation 0 only.
    self.assertTrue(
        any(l is element for element in gc.get_objects(generation=0))
    )
    self.assertFalse(
        any(l is element for element in gc.get_objects(generation=1))
    )
    self.assertFalse(
        any(l is element for element in gc.get_objects(generation=2))
    )
    gc.collect(generation=0)
    # Survived a gen-0 collection -> promoted to generation 1.
    self.assertFalse(
        any(l is element for element in gc.get_objects(generation=0))
    )
    self.assertTrue(
        any(l is element for element in gc.get_objects(generation=1))
    )
    self.assertFalse(
        any(l is element for element in gc.get_objects(generation=2))
    )
    gc.collect(generation=1)
    # Survived a gen-1 collection -> promoted to generation 2.
    self.assertFalse(
        any(l is element for element in gc.get_objects(generation=0))
    )
    self.assertFalse(
        any(l is element for element in gc.get_objects(generation=1))
    )
    self.assertTrue(
        any(l is element for element in gc.get_objects(generation=2))
    )
    gc.collect(generation=2)
    # Generation 2 is the oldest; the object stays there.
    self.assertFalse(
        any(l is element for element in gc.get_objects(generation=0))
    )
    self.assertFalse(
        any(l is element for element in gc.get_objects(generation=1))
    )
    self.assertTrue(
        any(l is element for element in gc.get_objects(generation=2))
    )
    del l
    gc.collect()
def test_get_objects_arguments(self):
    """generation=None must behave like omitting the argument; invalid
    generations must raise the documented exceptions."""
    gc.collect()
    self.assertEqual(len(gc.get_objects()),
                     len(gc.get_objects(generation=None)))
    # Out-of-range ints -> ValueError; non-int types -> TypeError.
    for bad_value, expected_exc in ((1000, ValueError),
                                    (-1000, ValueError),
                                    ("1", TypeError),
                                    (1.234, TypeError)):
        self.assertRaises(expected_exc, gc.get_objects, bad_value)
def test_resurrection_only_happens_once_per_object(self):
    """An object's __del__ runs (and may resurrect it) at most once; a
    later collection must not finalize it again."""
    class A: # simple self-loop
        def __init__(self):
            self.me = self
    class Lazarus(A):
        resurrected = 0
        resurrected_instances = []
        def __del__(self):
            Lazarus.resurrected += 1
            Lazarus.resurrected_instances.append(self)  # resurrect
    gc.collect()
    gc.disable()  # only explicit collects below
    # We start with 0 resurrections
    laz = Lazarus()
    self.assertEqual(Lazarus.resurrected, 0)
    # Deleting the instance and triggering a collection
    # resurrects the object
    del laz
    gc.collect()
    self.assertEqual(Lazarus.resurrected, 1)
    self.assertEqual(len(Lazarus.resurrected_instances), 1)
    # Clearing the references and forcing a collection
    # should not resurrect the object again.
    Lazarus.resurrected_instances.clear()
    self.assertEqual(Lazarus.resurrected, 1)
    gc.collect()
    self.assertEqual(Lazarus.resurrected, 1)
    gc.enable()
def test_resurrection_is_transitive(self):
    """Resurrecting one member of a trash cycle must keep everything it
    references alive too (here: the Cargo instance)."""
    class Cargo:
        def __init__(self):
            self.me = self
    class Lazarus:
        resurrected_instances = []
        def __del__(self):
            Lazarus.resurrected_instances.append(self)  # resurrect
    gc.collect()
    gc.disable()  # only explicit collects below
    laz = Lazarus()
    cargo = Cargo()
    cargo_id = id(cargo)
    # Create a cycle between cargo and laz
    laz.cargo = cargo
    cargo.laz = laz
    # Drop the references, force a collection and check that
    # everything was resurrected.
    del laz, cargo
    gc.collect()
    self.assertEqual(len(Lazarus.resurrected_instances), 1)
    instance = Lazarus.resurrected_instances.pop()
    self.assertTrue(hasattr(instance, "cargo"))
    self.assertEqual(id(instance.cargo), cargo_id)
    gc.collect()
    gc.enable()
def test_resurrection_does_not_block_cleanup_of_other_objects(self):
    """Resurrected objects must not be counted as collected, and must not
    stop unrelated trash from being reclaimed in the same pass."""
    # When a finalizer resurrects objects, stats were reporting them as
    # having been collected. This affected both collect()'s return
    # value and the dicts returned by get_stats().
    N = 100
    class A: # simple self-loop
        def __init__(self):
            self.me = self
    class Z(A): # resurrecting __del__
        def __del__(self):
            zs.append(self)
    zs = []
    def getstats():
        # (collected, uncollectable) of the oldest generation.
        d = gc.get_stats()[-1]
        return d['collected'], d['uncollectable']
    gc.collect()
    gc.disable()
    # No problems if just collecting A() instances.
    oldc, oldnc = getstats()
    for i in range(N):
        A()
    t = gc.collect()
    c, nc = getstats()
    self.assertEqual(t, 2*N) # instance object & its dict
    self.assertEqual(c - oldc, 2*N)
    self.assertEqual(nc - oldnc, 0)
    # But Z() is not actually collected.
    oldc, oldnc = c, nc
    Z()
    # Nothing is collected - Z() is merely resurrected.
    t = gc.collect()
    c, nc = getstats()
    self.assertEqual(t, 0)
    self.assertEqual(c - oldc, 0)
    self.assertEqual(nc - oldnc, 0)
    # Z() should not prevent anything else from being collected.
    oldc, oldnc = c, nc
    for i in range(N):
        A()
    Z()
    t = gc.collect()
    c, nc = getstats()
    self.assertEqual(t, 2*N)
    self.assertEqual(c - oldc, 2*N)
    self.assertEqual(nc - oldnc, 0)
    # The A() trash should have been reclaimed already but the
    # 2 copies of Z are still in zs (and the associated dicts).
    oldc, oldnc = c, nc
    zs.clear()
    t = gc.collect()
    c, nc = getstats()
    self.assertEqual(t, 4)  # 2 Z instances + their 2 dicts
    self.assertEqual(c - oldc, 4)
    self.assertEqual(nc - oldnc, 0)
    gc.enable()
@unittest.skipIf(ContainerNoGC is None,
                 'requires ContainerNoGC extension type')
def test_trash_weakref_clear(self):
    """Trash weakrefs must be cleared before tp_clear runs, even when the
    referent is hidden behind a non-traversable container (bpo-38006)."""
    # Test that trash weakrefs are properly cleared (bpo-38006).
    #
    # Structure we are creating:
    #
    #   Z <- Y <- A--+--> WZ -> C
    #             ^  |
    #             +--+
    # where:
    #   WZ is a weakref to Z with callback C
    #   Y doesn't implement tp_traverse
    #   A contains a reference to itself, Y and WZ
    #
    # A, Y, Z, WZ are all trash.  The GC doesn't know that Z is trash
    # because Y does not implement tp_traverse.  To show the bug, WZ needs
    # to live long enough so that Z is deallocated before it.  Then, if
    # gcmodule is buggy, when Z is being deallocated, C will run.
    #
    # To ensure WZ lives long enough, we put it in a second reference
    # cycle.  That trick only works due to the ordering of the GC prev/next
    # linked lists.  So, this test is a bit fragile.
    #
    # The bug reported in bpo-38006 is caused because the GC did not
    # clear WZ before starting the process of calling tp_clear on the
    # trash.  Normally, handle_weakrefs() would find the weakref via Z and
    # clear it.  However, since the GC cannot find Z, WR is not cleared and
    # it can execute during delete_garbage().  That can lead to disaster
    # since the callback might tinker with objects that have already had
    # tp_clear called on them (leaving them in possibly invalid states).
    callback = unittest.mock.Mock()
    class A:
        __slots__ = ['a', 'y', 'wz']
    class Z:
        pass
    # setup required object graph, as described above
    a = A()
    a.a = a
    a.y = ContainerNoGC(Z())
    a.wz = weakref.ref(a.y.value, callback)
    # create second cycle to keep WZ alive longer
    wr_cycle = [a.wz]
    wr_cycle.append(wr_cycle)
    # ensure trash unrelated to this test is gone
    gc.collect()
    gc.disable()
    # release references and create trash
    del a, wr_cycle
    gc.collect()
    # if called, it means there is a bug in the GC.  The weakref should be
    # cleared before Z dies.
    callback.assert_not_called()
    gc.enable()
class GCCallbackTests(unittest.TestCase):
    """Tests for the gc.callbacks hook list (start/stop notifications)."""

    def setUp(self):
        # Save gc state and disable it.
        self.enabled = gc.isenabled()
        gc.disable()
        self.debug = gc.get_debug()
        gc.set_debug(0)
        gc.callbacks.append(self.cb1)
        gc.callbacks.append(self.cb2)
        self.othergarbage = []

    def tearDown(self):
        # Restore gc state
        del self.visit
        gc.callbacks.remove(self.cb1)
        gc.callbacks.remove(self.cb2)
        gc.set_debug(self.debug)
        if self.enabled:
            gc.enable()
        # destroy any uncollectables
        gc.collect()
        for obj in gc.garbage:
            if isinstance(obj, Uncollectable):
                obj.partner = None  # break the uncollectable pair
        del gc.garbage[:]
        del self.othergarbage
        gc.collect()

    def preclean(self):
        # Remove all fluff from the system. Invoke this function
        # manually rather than through self.setUp() for maximum
        # safety.
        self.visit = []
        gc.collect()
        # Stash pre-existing garbage aside so the tests see a clean slate.
        garbage, gc.garbage[:] = gc.garbage[:], []
        self.othergarbage.append(garbage)
        self.visit = []

    def cb1(self, phase, info):
        # First callback: just record the invocation.
        self.visit.append((1, phase, dict(info)))

    def cb2(self, phase, info):
        # Second callback: record, and optionally clean up Uncollectables
        # from inside the callback (exercises mutating gc.garbage there).
        self.visit.append((2, phase, dict(info)))
        if phase == "stop" and hasattr(self, "cleanup"):
            # Clean Uncollectable from garbage
            uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
            gc.garbage[:] = [e for e in gc.garbage
                             if not isinstance(e, Uncollectable)]
            for e in uc:
                e.partner = None

    def test_collect(self):
        """Each collection must call every callback once with 'start' and
        once with 'stop', passing an info dict."""
        self.preclean()
        gc.collect()
        # Algorithmically verify the contents of self.visit
        # because it is long and tortuous.
        # Count the number of visits to each callback
        n = [v[0] for v in self.visit]
        n1 = [i for i in n if i == 1]
        n2 = [i for i in n if i == 2]
        self.assertEqual(n1, [1]*2)
        self.assertEqual(n2, [2]*2)
        # Count that we got the right number of start and stop callbacks.
        n = [v[1] for v in self.visit]
        n1 = [i for i in n if i == "start"]
        n2 = [i for i in n if i == "stop"]
        self.assertEqual(n1, ["start"]*2)
        self.assertEqual(n2, ["stop"]*2)
        # Check that we got the right info dict for all callbacks
        for v in self.visit:
            info = v[2]
            self.assertTrue("generation" in info)
            self.assertTrue("collected" in info)
            self.assertTrue("uncollectable" in info)

    def test_collect_generation(self):
        """The info dict must carry the generation being collected."""
        self.preclean()
        gc.collect(2)
        for v in self.visit:
            info = v[2]
            self.assertEqual(info["generation"], 2)

    @cpython_only
    def test_collect_garbage(self):
        """Callbacks must see correct collected/uncollectable counts, and
        may clean gc.garbage from within the 'stop' phase."""
        self.preclean()
        # Each of these cause four objects to be garbage: Two
        # Uncollectables and their instance dicts.
        Uncollectable()
        Uncollectable()
        C1055820(666)
        gc.collect()
        for v in self.visit:
            if v[1] != "stop":
                continue
            info = v[2]
            self.assertEqual(info["collected"], 2)
            self.assertEqual(info["uncollectable"], 8)
        # We should now have the Uncollectables in gc.garbage
        self.assertEqual(len(gc.garbage), 4)
        for e in gc.garbage:
            self.assertIsInstance(e, Uncollectable)
        # Now, let our callback handle the Uncollectable instances
        self.cleanup=True  # enables the cleanup branch in cb2
        self.visit = []
        gc.garbage[:] = []
        gc.collect()
        for v in self.visit:
            if v[1] != "stop":
                continue
            info = v[2]
            self.assertEqual(info["collected"], 0)
            self.assertEqual(info["uncollectable"], 4)
        # Uncollectables should be gone
        self.assertEqual(len(gc.garbage), 0)

    @unittest.skipIf(BUILD_WITH_NDEBUG,
                     'built with -NDEBUG')
    def test_refcount_errors(self):
        """An artificially lowered refcount must make gc abort with a
        diagnostic identifying the broken object."""
        self.preclean()
        # Verify the "handling" of objects with broken refcounts
        # Skip the test if ctypes is not available
        import_module("ctypes")
        import subprocess
        code = textwrap.dedent('''
            from test.support import gc_collect, SuppressCrashReport
            a = [1, 2, 3]
            b = [a]
            # Avoid coredump when Py_FatalError() calls abort()
            SuppressCrashReport().__enter__()
            # Simulate the refcount of "a" being too low (compared to the
            # references held on it by live data), but keeping it above zero
            # (to avoid deallocating it):
            import ctypes
            ctypes.pythonapi.Py_DecRef(ctypes.py_object(a))
            # The garbage collector should now have a fatal error
            # when it reaches the broken object
            gc_collect()
        ''')
        p = subprocess.Popen([sys.executable, "-c", code],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        p.stdout.close()
        p.stderr.close()
        # Verify that stderr has a useful error message:
        self.assertRegex(stderr,
            br'gcmodule\.c:[0-9]+: gc_decref: Assertion "gc_get_refs\(g\) > 0" failed.')
        self.assertRegex(stderr,
            br'refcount is too small')
        # "address : 0x7fb5062efc18"
        # "address : 7FB5062EFC18"
        address_regex = br'[0-9a-fA-Fx]+'
        self.assertRegex(stderr,
            br'object address  : ' + address_regex)
        self.assertRegex(stderr,
            br'object refcount : 1')
        self.assertRegex(stderr,
            br'object type     : ' + address_regex)
        self.assertRegex(stderr,
            br'object type name: list')
        self.assertRegex(stderr,
            br'object repr     : \[1, 2, 3\]')
class GCTogglingTests(unittest.TestCase):
    """Tests (from bug 1055820) that need automatic collection enabled so
    that gc happens "naturally" and generation boundaries are preserved."""

    def setUp(self):
        gc.enable()

    def tearDown(self):
        gc.disable()

    def test_bug1055820c(self):
        """A weakref callback on an old-generation object must not observe
        younger cyclic trash that gc is clearing (weakref-callback variant)."""
        # Corresponds to temp2c.py in the bug report.  This is pretty
        # elaborate.
        c0 = C1055820(0)
        # Move c0 into generation 2.
        gc.collect()
        c1 = C1055820(1)
        c1.keep_c0_alive = c0
        del c0.loop # now only c1 keeps c0 alive
        c2 = C1055820(2)
        c2wr = weakref.ref(c2) # no callback!
        ouch = []
        def callback(ignored):
            ouch[:] = [c2wr()]
        # The callback gets associated with a wr on an object in generation 2.
        c0wr = weakref.ref(c0, callback)
        c0 = c1 = c2 = None
        # What we've set up:  c0, c1, and c2 are all trash now.  c0 is in
        # generation 2.  The only thing keeping it alive is that c1 points to
        # it. c1 and c2 are in generation 0, and are in self-loops.  There's a
        # global weakref to c2 (c2wr), but that weakref has no callback.
        # There's also a global weakref to c0 (c0wr), and that does have a
        # callback, and that callback references c2 via c2wr().
        #
        #               c0 has a wr with callback, which references c2wr
        #               ^
        #               |
        #               |     Generation 2 above dots
        #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
        #               |     Generation 0 below dots
        #               |
        #               |
        #            ^->c1   ^->c2 has a wr but no callback
        #            |  |    |  |
        #            <--v    <--v
        #
        # So this is the nightmare:  when generation 0 gets collected, we see
        # that c2 has a callback-free weakref, and c1 doesn't even have a
        # weakref.  Collecting generation 0 doesn't see c0 at all, and c0 is
        # the only object that has a weakref with a callback.  gc clears c1
        # and c2.  Clearing c1 has the side effect of dropping the refcount on
        # c0 to 0, so c0 goes away (despite that it's in an older generation)
        # and c0's wr callback triggers.  That in turn materializes a reference
        # to c2 via c2wr(), but c2 gets cleared anyway by gc.
        # We want to let gc happen "naturally", to preserve the distinction
        # between generations.
        junk = []
        i = 0
        detector = GC_Detector()
        while not detector.gc_happened:
            i += 1
            if i > 10000:
                self.fail("gc didn't happen after 10000 iterations")
            self.assertEqual(len(ouch), 0)
            junk.append([]) # this will eventually trigger gc
        self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
        for x in ouch:
            # If the callback resurrected c2, the instance would be damaged,
            # with an empty __dict__.
            self.assertEqual(x, None)

    def test_bug1055820d(self):
        """Same nightmare as test_bug1055820c, but the resurrection attempt
        comes from a __del__ method instead of a weakref callback."""
        # Corresponds to temp2d.py in the bug report.  This is very much like
        # test_bug1055820c, but uses a __del__ method instead of a weakref
        # callback to sneak in a resurrection of cyclic trash.
        ouch = []
        class D(C1055820):
            def __del__(self):
                ouch[:] = [c2wr()]
        d0 = D(0)
        # Move all the above into generation 2.
        gc.collect()
        c1 = C1055820(1)
        c1.keep_d0_alive = d0
        del d0.loop # now only c1 keeps d0 alive
        c2 = C1055820(2)
        c2wr = weakref.ref(c2) # no callback!
        d0 = c1 = c2 = None
        # What we've set up:  d0, c1, and c2 are all trash now.  d0 is in
        # generation 2.  The only thing keeping it alive is that c1 points to
        # it.  c1 and c2 are in generation 0, and are in self-loops.  There's
        # a global weakref to c2 (c2wr), but that weakref has no callback.
        # There are no other weakrefs.
        #
        #               d0 has a __del__ method that references c2wr
        #               ^
        #               |
        #               |     Generation 2 above dots
        #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
        #               |     Generation 0 below dots
        #               |
        #               |
        #            ^->c1   ^->c2 has a wr but no callback
        #            |  |    |  |
        #            <--v    <--v
        #
        # So this is the nightmare:  when generation 0 gets collected, we see
        # that c2 has a callback-free weakref, and c1 doesn't even have a
        # weakref.  Collecting generation 0 doesn't see d0 at all.  gc clears
        # c1 and c2.  Clearing c1 has the side effect of dropping the refcount
        # on d0 to 0, so d0 goes away (despite that it's in an older
        # generation) and d0's __del__ triggers.  That in turn materializes
        # a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
        # We want to let gc happen "naturally", to preserve the distinction
        # between generations.
        detector = GC_Detector()
        junk = []
        i = 0
        while not detector.gc_happened:
            i += 1
            if i > 10000:
                self.fail("gc didn't happen after 10000 iterations")
            self.assertEqual(len(ouch), 0)
            junk.append([]) # this will eventually trigger gc
        self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
        for x in ouch:
            # If __del__ resurrected c2, the instance would be damaged, with an
            # empty __dict__.
            self.assertEqual(x, None)
def test_main():
    """Run the GC test cases with automatic collection disabled, restoring
    the collector's enabled/debug state afterwards."""
    was_enabled = gc.isenabled()
    gc.disable()
    assert not gc.isenabled()
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
    try:
        gc.collect() # Delete 2nd generation garbage
        run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print("restoring automatic collection")
        # make sure to always test gc.enable()
        gc.enable()
        assert gc.isenabled()
        if not was_enabled:
            gc.disable()
# Allow running this test file directly, outside the regrtest driver.
if __name__ == "__main__":
    test_main()
|
sdk_main.py | from . import inter
from .structs import Codes as BCd
import websocket
import json
import requests
import time
from threading import Thread
import typing as t
import os
# Shorthand alias for the gateway event-name constants used throughout
# this module (READY, RESUMED, AT_MESSAGE_CREATE, ...).
event_types = BCd.QBot.GatewayEventName
class Intents:  # https://bot.q.qq.com/wiki/develop/api/gateway/intents.html
    """Gateway intent bit flags; OR them together to subscribe to event
    families when identifying with the gateway."""
    GUILDS = 1 << 0
    GUILD_MEMBERS = 1 << 1
    MESSAGE_CREATE = 1 << 9
    GUILD_MESSAGE_REACTIONS = 1 << 10
    DIRECT_MESSAGE = 1 << 12
    MESSAGE_AUDIT = 1 << 27
    FORUM_EVENT = 1 << 28
    AUDIO_ACTION = 1 << 29
    AT_MESSAGES = 1 << 30
def on_new_thread(f):
    """Decorator that runs the wrapped callable in a freshly started thread.

    The decorated call returns immediately.  Improvements over the previous
    version: the wrapper preserves *f*'s name/docstring via functools.wraps,
    and it returns the started ``Thread`` object so callers may ``join()``
    it.  Returning the thread is backward-compatible — the old wrapper
    returned ``None``, which no caller could use.

    :param f: callable to execute asynchronously
    :return: the wrapping function
    """
    import functools

    @functools.wraps(f)
    def task_qwq(*args, **kwargs):
        _t = Thread(target=f, args=args, kwargs=kwargs)
        _t.start()
        return _t

    return task_qwq
def get_connection(url, on_message, on_open, on_error, on_close):
    """Build a WebSocketApp for *url* wired to the given lifecycle callbacks."""
    handlers = {
        "on_message": on_message,
        "on_open": on_open,
        "on_error": on_error,
        "on_close": on_close,
    }
    return websocket.WebSocketApp(url=url, **handlers)
class BotApp(inter.BotMessageDistributor):
def __init__(self, appid: int, token: str, secret: str, is_sandbox: bool, inters: t.List,
             debug=False, api_return_pydantic=False, ignore_at_self=False, output_log=True, log_path="",
             raise_api_error=False, call_on_load_event_every_reconnect=False):
    """
    BotAPP
    :param appid: bot AppId
    :param token: bot token
    :param secret: bot secret
    :param is_sandbox: whether to run against the sandbox environment
    :param inters: event receivers (intents/handlers)
    :param debug: emit debug-level logs
    :param api_return_pydantic: return pydantic objects from API calls (default: plain text)
    :param ignore_at_self: strip the bot's own @-mention from incoming message content
    :param output_log: whether to write logs to a local file
    :param log_path: log output location; defaults to the SDK's internal log folder
    :param raise_api_error: raise BotCallingAPIError when an API call fails
    :param call_on_load_event_every_reconnect: fire the FUNC_CALL_AFTER_BOT_LOAD event on every reconnect
    """
    super().__init__(appid=appid, token=token, secret=secret, sandbox=is_sandbox, debug=debug,
                     api_return_pydantic=api_return_pydantic, output_log=output_log, log_path=log_path,
                     raise_api_error=raise_api_error)
    self.inters = inters
    self.ignore_at_self = ignore_at_self
    # Bot's own identity — presumably filled in after login (see _on_open); TODO confirm.
    self.self_id = ""
    self.self_name = ""
    # When True, plain MESSAGE_CREATE events also trigger AT_MESSAGE_CREATE handlers.
    self.EVENT_MESSAGE_CREATE_CALL_AT_MESSAGE_CREATE = False
    self.session_id = None  # gateway session id, needed to resume after a drop
    self._d = None  # heartbeat parameter (last sequence number received from the gateway)
    self._t = None
    self.heartbeat_time = -1  # heartbeat interval in seconds; set from the gateway Hello payload
    self.ws = None  # active WebSocketApp instance
    self._on_load_run = True
    self.call_on_load_event_every_reconnect = call_on_load_event_every_reconnect
    self._spath = os.path.split(__file__)[0]  # SDK install directory
# @on_new_thread
def start(self):
    """Connect to the gateway and block forever.

    Each time the websocket loop returns (connection dropped), a fresh
    WebSocketApp is created and the loop restarts — i.e. this is the
    reconnect loop.  Never returns under normal operation.
    """
    websocket.enableTrace(False)
    while True:
        self.ws = get_connection(url=self._get_websocket_url(),
                                 on_message=self._on_message,
                                 on_open=self._on_open,
                                 on_error=self._ws_on_error,
                                 on_close=self._on_close)
        self.ws.run_forever()
def _get_connection(self):
    """Create a WebSocketApp bound to this bot's gateway URL and handlers."""
    return websocket.WebSocketApp(
        url=self._get_websocket_url(),
        on_message=self._on_message,
        on_open=self._on_open,
        on_error=self._ws_on_error,
        on_close=self._on_close,
    )
def _get_verify_body(self, reconnect=False):
    """Build the gateway authentication payload.

    :param reconnect: True -> opcode 6 (Resume) body using the saved
        session id; False -> opcode 2 (Identify) body with intents/shard.
    :return: dict ready to be JSON-serialized and sent over the socket
    """
    auth_token = f'Bot {self.appid}.{self.token}'
    if reconnect:
        return {
            "op": 6,
            "d": {
                "token": auth_token,
                "session_id": self.session_id,
                "seq": 1337
            }
        }
    return {
        "op": 2,
        "d": {
            "token": auth_token,
            "intents": self._get_inters_code(),  # 1073741827
            "shard": [0, 1],
            "properties": {
                "$os": "linux",
                "$browser": "python_sdk",
                "$device": "server"
            }
        }
    }
def _ws_on_error(self, ws, err, *args):
try:
raise err
except websocket.WebSocketConnectionClosedException:
self._on_close()
self.logger(f"Error: {args}", error=True)
    def _on_message(self, ws, msg):  # a websocket frame arrived
        """Parse one gateway frame and dispatch it by opcode / event type.

        Any exception raised while handling is caught and logged so the
        websocket read loop is never killed by a handler error.
        """
        try:
            self.logger(msg, debug=True)
            data = json.loads(msg)
            stat_code = data["op"]  # opcode, see: https://bot.q.qq.com/wiki/develop/api/gateway/opcode.html
            if stat_code == BCd.QBot.OPCode.Hello:  # first frame pushed by the gateway
                if "heartbeat_interval" in data["d"]:  # initialise heartbeat (interval given in ms)
                    self.heartbeat_time = data["d"]["heartbeat_interval"] / 1000
                ws.send(json.dumps(self._get_verify_body()))
            elif stat_code == BCd.QBot.OPCode.Dispatch:  # server-pushed event
                self._d = data["s"]  # sequence number, echoed back in heartbeats
                if "t" in data:
                    s_type = data["t"]
                    def _send_event(m_dantic, changed_data=None, changed_s_type=None):
                        """
                        Aggregated event dispatch helper.
                        :param m_dantic: pydantic model class the payload is wrapped in (Basemodel)
                        :param changed_data: payload override, defaults to data["d"]
                        :param changed_s_type: event-type override, defaults to s_type, i.e. data["t"]
                        :return:
                        """
                        _send_data = data["d"] if changed_data is None else changed_data
                        _send_type = s_type if changed_s_type is None else changed_s_type
                        self._event_handout(_send_type, m_dantic(**_send_data))
                    if s_type == event_types.READY:  # authentication finished
                        self.session_id = data["d"]["session_id"]
                        self._on_open(data["d"]["user"]["id"], data["d"]["user"]["username"], data["d"]["user"]["bot"],
                                      is_login=True)
                        self.send_heart_beat()
                    elif s_type == event_types.RESUMED:  # server finished replaying missed events after reconnect
                        self.logger("重连完成, 事件已全部补发")
                    elif s_type in self.known_events:
                        s_dantic = self.known_events[s_type][1]
                        if s_dantic is not None:
                            if s_type in [event_types.AT_MESSAGE_CREATE, event_types.MESSAGE_CREATE,
                                          event_types.DIRECT_MESSAGE_CREATE]:
                                if self.ignore_at_self:  # strip the bot's own <@!id> mention from the content
                                    data["d"]["content"] = data["d"]["content"].replace(f"<@!{self.self_id}>",
                                                                                       "").strip()
                                if s_type == event_types.MESSAGE_CREATE:  # plain message (private-domain bots)
                                    if self.EVENT_MESSAGE_CREATE_CALL_AT_MESSAGE_CREATE:  # also fire @-message handlers
                                        _send_event(self.known_events[event_types.AT_MESSAGE_CREATE][1],
                                                    changed_s_type=event_types.AT_MESSAGE_CREATE)
                                if s_type in [event_types.MESSAGE_CREATE, event_types.AT_MESSAGE_CREATE]:  # guild message
                                    data["d"]["message_type_sdk"] = "guild"
                                elif s_type == event_types.DIRECT_MESSAGE_CREATE:  # direct message
                                    data["d"]["message_type_sdk"] = "private"
                            _send_event(s_dantic)
                    # TODO: forum/topic related events
                    else:
                        self.logger(f"收到未知或暂不支持的推送消息: {data}", debug=True)
            elif stat_code == BCd.QBot.OPCode.Reconnect:  # server asked us to reconnect
                self.logger("服务器通知重连")
                self._on_close()
            elif stat_code == BCd.QBot.OPCode.Invalid_Session:  # authentication failed
                self.logger("连接失败: Invalid Session", error=True)
                self._on_close(active_close=True)
        except Exception as sb:
            self.logger(sb, error=True)
def _event_handout(self, func_type: str, *args, **kwargs):
if func_type in self.bot_events:
throw_func = self.bot_events[func_type]
if throw_func:
for f in throw_func:
_t = Thread(target=f, args=args, kwargs=kwargs)
_t.start()
    def _check_files(self):
        """Post-login hook invoked from _on_open; intentionally a no-op here."""
        pass
    def _on_open(self, botid="", botname="", isbot="", is_login=False):
        """Connection-opened callback.

        Invoked when the socket opens (is_login=False) and again from
        _on_message after READY (is_login=True) with the bot identity filled in.
        """
        if is_login:
            self.self_id = botid
            self.self_name = botname
            self.logger(f"开始运行:\nBotID: {botid}\nBotName: {botname}\nbot: {isbot}")
            self._check_files()
            # Fire FUNC_CALL_AFTER_BOT_LOAD on the first login, and on every
            # reconnect too when call_on_load_event_every_reconnect is set.
            if self._on_load_run or self.call_on_load_event_every_reconnect:
                self._event_handout(BCd.SeverCode.FUNC_CALL_AFTER_BOT_LOAD, self)
            self._on_load_run = False
        else:
            self.logger("开始尝试连接")
def _on_close(self, *args, active_close=False):
self.logger(f"on_close {args}", debug=True)
try:
self.heartbeat_time = -1
self._d = None
self.ws.close()
except Exception as sb:
self.logger(f"关闭连接失败: {sb}", error=True)
if not active_close:
self.heartbeat_time = -1
self._d = None
self.logger("连接断开, 尝试重连", warning=True)
self.ws.keep_running = False
"""
self.ws = get_connection(url=self._get_websocket_url(),
on_message=self._on_message,
on_open=self._on_open,
on_error=self._ws_on_error,
on_close=self._on_close)
self.ws.run_forever()
self.ws.send(json.dumps(self._get_verify_body(reconnect=True)))
"""
else:
self.logger("连接已断开", warning=True)
def send_heart_beat(self):
def _send_heart_beat():
try:
while True:
if self.heartbeat_time != -1 and self._d is not None:
self.logger(f"发送心跳: {self._d}", debug=True)
self.ws.send(json.dumps({"op": 1, "d": self._d}))
time.sleep(self.heartbeat_time)
else:
time.sleep(1)
continue
except websocket.WebSocketConnectionClosedException:
self.logger("发送心跳包失败", error=True)
self._on_close()
if self._t is None:
self._t = Thread(target=_send_heart_beat)
self._t.start()
else:
if not self._t.is_alive():
self._t = Thread(target=_send_heart_beat)
self._t.start()
def _get_websocket_url(self):
url = f"{self.base_api}/gateway"
headers = {'Authorization': f'Bot {self.appid}.{self.token}'}
response = requests.request("GET", url, headers=headers)
try:
self.logger(f"获取服务器api: {response.json()['url']}", debug=True)
return response.json()["url"]
except Exception as sb:
self.logger(f"获取服务器API失败 - {response.text}")
print(sb)
def _get_inters_code(self):
if type(self.inters) != list:
self.logger("事件订阅(inters)错误, 将使用默认值", error=True)
return 1073741827
result = 0
for _int in self.inters:
result = result | _int
self.logger(f"intents计算结果: {result}", debug=True)
return result
|
gritsbotserial.py | import serial
import json
import logging
import threading
import time
# Module-level logger setup. The `global logger` statement the original had
# here was a no-op at module scope and has been removed.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
logger = logging.getLogger('root')
logger.setLevel(logging.DEBUG)
# Constants
# Maximum unread incoming bytes tolerated before the serial connection is
# considered wedged and scheduled for a restart (see serial_request).
MAX_IN_WAITING = 500
def _json_to_bytes(message):
"""Dumps json data to ASCI
Raises:
Exception: If the message cannot be JSON encoded.
"""
return json.dumps(message).encode('ASCII')
def _bytes_to_json(message):
"""Converts from ASCII to JSON
Raises:
Exception: If the message cannot be JSON decoded.
"""
return json.loads(message.decode('ASCII'))
class GritsbotSerial:
    """Encapsulates serial communications to the microcontroller.
    Serial communications are based on a request/response architecture. This class assumes nothing about the form of these, except that they are JSON-
    encodable.
    This class is made to be robust to errors and will restart the serial device if it encounters an error (e.g., if the cable is un/replugged).
    All mutable state is guarded by _serial_cv; a background thread
    (_serial_task) owns re-opening the device whenever _needs_restart is set.
    Attributes:
        _serial_dev (str): Path to the serial device.
        _baud_rate (int): The baud rate for the serial device.
        _timeout (int): Timeout for the serial reads in seconds.
        _serial_cv (threading.Condition): Condition variable for synchronizing class.
        _serial (serial.Serial): The pyserial object for serial communications.
        _serial_task_thread (threading.Thread): Runs the internal restart task.
        _stopped (bool): Whether the class has been stopped.
        _started (bool): Whether the class has been started.
        _needs_restart (bool): Whether the serial device should be restarted.
    """
    def __init__(self, serial_dev='/dev/ttyACM0', baud_rate=500000, timeout=2):
        """Creates the serial communciations object.
        Args:
            serial_dev (str, optional): The path to the serial device.
            baud_rate (int): Baud rate for the serial device.
            timeout (int): Timeout for the serial read.
        Examples:
            >>> GritsbotSerial(serial_dev='/dev/ttyACM0', baud_rate=115200, timeout=5)
        """
        self._serial_dev = serial_dev
        self._baud_rate = baud_rate
        self._timeout = timeout
        # Serial-related attributes. ALL OF THESE SHOULD BE CONTROLLED WHILE HOLDING THE LOCK
        self._serial_cv = threading.Condition()
        self._serial = None
        self._serial_task_thread = None
        self._stopped = False
        self._started = False
        # Starts True so _serial_task performs the initial open.
        self._needs_restart = True
    def serial_request(self, msg, timeout=5):
        """Makes a request on a serial line
        Args:
            msg: A JSON-encodable (by json.dumps) object
        Raises:
            RuntimeError: If the serial port has too many incoming bytes (specified by MAX_IN_WAITING); if the serial port cannot be written to or read from;
            if the serial port has not been initialized.
        Returns:
            dict: JSON-formatted dict containing the return message, or None
            if the response could not be parsed as JSON.
        Examples:
            >>> response = GritsbotSerial.serial_request(request, timeout=1)
        """
        with self._serial_cv:
            if(not self._started):
                error_msg = 'Serial connection must be started prior to calling this method.'
                logger.critical(error_msg)
                raise RuntimeError(error_msg)
            if(self._stopped):
                error_msg = 'Serial connection stopped! Cannot use anymore.'
                logger.critical(error_msg)
                raise RuntimeError(error_msg)
            # Wait until the serial_task thread has restarted the serial device.
            while(self._needs_restart):
                if(not self._serial_cv.wait(timeout=timeout)):
                    # If the CV times out, it returns false
                    error_msg = 'Serial connection timed out!'
                    logger.critical(error_msg)
                    raise RuntimeError(error_msg)
            msg = _json_to_bytes(msg)
            try:
                self._serial.write(msg)
            except Exception as e:
                error_msg = 'Unable to write to the serial port.'
                logger.critical(error_msg)
                logger.critical(repr(e))
                # Signal the serial_task thread that the serial device should be restarted
                self._needs_restart = True
                self._serial_cv.notify_all()
                raise RuntimeError(error_msg)
            # Read to wait for bytes to be available
            try:
                msg = self._serial.read()
            except Exception as e:
                error_msg = 'Unable to read from serial port'
                logger.critical(error_msg)
                logger.critical(repr(e))
                # Signal the serial_task thread that the serial device should be restarted
                self._needs_restart = True
                self._serial_cv.notify_all()
                raise RuntimeError(error_msg)
            # Guard against a wedged device flooding the input buffer.
            if(self._serial.in_waiting > MAX_IN_WAITING):
                error_msg = 'Too many incoming bytes waiting on serial port ({})'.format(self._serial.in_waiting)
                logger.warning(error_msg)
                # Signal the serial_task thread that the serial device should be restarted
                self._needs_restart = True
                self._serial_cv.notify_all()
                raise RuntimeError(error_msg)
            # Once bytes are available, read the rest in. We assume that the entire message is on
            # the line
            try:
                msg += self._serial.read(self._serial.in_waiting)
            except Exception as e:
                error_msg = 'Unable to read from the serial port.'
                logger.critical(error_msg)
                logger.critical(repr(e))
                # Signal the serial_task thread that the serial device should be restarted
                self._needs_restart = True
                self._serial_cv.notify_all()
                raise RuntimeError(error_msg)
            result = None
            try:
                result = _bytes_to_json(msg)
            except Exception as e:
                # Parse failures are not fatal: caller receives None.
                logger.warning('Unable to parse JSON message from serial port')
                logger.warning(repr(e))
            return result
    def start(self, timeout=5):
        """Starts the serial line by attempting to establish a serial for the specified device.
        This method should be called prior to other methods of this class.
        Raises:
            RuntimeError: If the serial connection cannot be established within timeout.
        """
        # Wait for initial connection
        with self._serial_cv:
            if(self._started):
                logger.critical('Cannot start the serial connection more than once!')
                raise RuntimeError()
            if(self._stopped):
                logger.critical('Cannot start the serial connection once stopped.')
                raise RuntimeError()
            # If it's already been started, we can't get to this part of the code
            self._started = True
            self._serial_task_thread = threading.Thread(target=self._serial_task)
            self._serial_task_thread.start()
            # Wait to acquire the serial connection once
            while(self._needs_restart):
                if(not self._serial_cv.wait(timeout=timeout)):
                    logger.critical('Initial serial connection timed out.')
                    # If we're in this state, then we have the lock, and the serial_task thread cannot acquire it.
                    # However, if we're very unlucky, the serial device my have been acquired by the time we get here. Either way,
                    # it should be safe to just call stop and exit.
                    # We can call self.stop here, since the underlying lock is reentrant
                    self.stop()
                    raise RuntimeError()
    def stop(self):
        """Stops the serial connection.
        Closes the underlying pyserial connection. If the serial connection has not been started, does nothing.
        NOTE(review): if stop() is called without start(), _serial_task_thread
        is still None and the join() below would raise -- confirm callers
        always start() first.
        """
        # If we've previously started serial, stop it.
        with self._serial_cv:
            self._stopped = True
            self._serial_cv.notify_all()
            # If the serial connection was never started, don't shut it down
            if(self._started):
                if(self._serial is not None):
                    self._serial.close()
                    self._serial = None
        # Thread won't finish until lock can be acquired, so we need to put this outside the lock
        self._serial_task_thread.join()
    def _serial_task(self):
        """Restarts the serial if the serial device stops responding.
        Only meant to be run by the interal thread!
        """
        while (not self._stopped):
            start_time = time.time()
            with self._serial_cv:
                # Sleep until someone flags a restart (or the class is stopped).
                while (not self._needs_restart and not self._stopped):
                    self._serial_cv.wait()
                if(self._stopped):
                    break
                else:
                    # Need to restart serial
                    if(self._serial is not None):
                        self._serial.close()
                        self._serial = None
                    try:
                        self._serial = serial.Serial(self._serial_dev, self._baud_rate, timeout=self._timeout)
                        # If we succeeded, no longer need to restart serial
                        self._needs_restart = False
                        self._serial_cv.notify_all()
                    except Exception as e:
                        logger.critical('Could not get serial device ({})'.format(self._serial_dev))
                        logger.critical(repr(e))
            # Wait at least one second between retries
            time.sleep(max(0, 1 - (time.time() - start_time)))
|
proj1.py | import argparse
import logging
from pdb import set_trace
from threading import Thread
from Utils.MisclUtils import TimeUtil
from Utils.RandomUtil import Random
from Utils.CalcUtils import mean_wait_time
from Utils.ServerUtil import Customer, Server
from Utils.CalcUtils import mean_service_time
from Utils.CalcUtils import customer_loss_rate
from Utils.CalcUtils import cust_arrival, cust_service, cust_departr
logging.basicConfig(level=logging.DEBUG,
format='(Queuing Customer %(threadName)s) | %(message)s', )
timer = TimeUtil()
def simulate(l, server_lim, max_serviced, L, verbose):
    """
    Run simulation of a M/M/1/K queueing system
    :param l: Lambda for the distribution of interarrival times
    :param server_lim: The number of customers the server queue may hold
    :param max_serviced: Number of customers served before the program terminates
    :param L: Any integer such that 1<L<C
    :param verbose: Print debug log
    :return: server: The populated Server object (holds processed customers).
    """
    print("Running Simulation...\n")
    server = Server(K=server_lim)
    customers = []
    customer_id = 0
    rand = Random()
    start_time = timer.current_time()
    rand.set_seed(seed_val=12458)
    def worker():
        # Service loop; runs until server.kill is set below. The return value
        # was previously bound to an unused local (`last_served`).
        server.service(verbose)
    def queuing(customer):
        """
        Dispatch incoming requests to queues
        """
        customer = server.enqueue(customer)
        customers.append(customer)
        if verbose > 1:
            logging.debug('Accepted: {} | Customers: {}'.format(customer.queued, len(customers)))
    w = Thread(target=worker, name="Service-Thread")
    w.start()
    # Generate arrivals with exponential inter-arrival times until enough
    # customers have been fully serviced.
    while len(server.processed) < max_serviced:
        next_customer_arrival = rand.exponential(lam=l)
        timer.wait_millisc(next_customer_arrival)
        customer_id += 1
        t = Thread(name=customer_id, target=queuing, args=(Customer(id=customer_id),))
        t.start()
    server.kill = True
    end_time = timer.current_time()
    if verbose:
        print("Parameters:\nK : {}".format(server_lim))
        print("C : {}".format(max_serviced))
        print("Lamdba: {}\n".format(l))
        print("Simulation Details:\nAverage Wait Time : {}".format(round(mean_wait_time(server),2)))
        print("Customer Loss Rate : {}".format(round(customer_loss_rate(server), 3)))
        print("Average Service Time : {}".format(round(mean_service_time(server),2)))
        print("Master clock at the end of simulation: {} seconds\n".format(round(end_time-start_time, 5)))
        print("Customers {}, {}, {}, {}:\n".format(L, L+1, L+10, L+11))
        print("Arrival : {}".format(cust_arrival(customers, start_time, L)))
        print("Service : {}".format(cust_service(customers, L)))
        print("Departure: {}".format(cust_departr(customers, start_time, L)))
    return server
if __name__ == '__main__':
    # CLI entry point: parse simulation parameters and run.
    parser = argparse.ArgumentParser()
    parser.add_argument('--l', default=0.85, type=float,
                        help='Lambda for the distribution of interarrival '
                             'times.\n DEFAULT --l 0.85.')
    parser.add_argument('--K', default=5, type=int,
                        help='The number of customers the server queue may '
                             'hold.\n DEFAULT --K 5.')
    parser.add_argument('--C', default=1000, type=int,
                        help='Number of customers served before the program '
                             'terminates.\n DEFAULT --C 1000')
    parser.add_argument('--L', default=1, type=int,
                        help='Any integer such that 1<L<C.\n DEFAULT --L 1')
    parser.add_argument('-v', '--debug', default=0, type=int,
                        help='Verbose mode. Print debug log.')
    args = parser.parse_args()
    simulate(args.l, args.K, args.C, args.L, args.debug)
|
wrapper.py | #!/usr/bin python3
""" Process wrapper for underlying faceswap commands for the GUI """
import os
import logging
import re
import signal
from subprocess import PIPE, Popen
import sys
from threading import Thread
from time import time
import psutil
from .utils import get_config, get_images, LongRunningTask
if os.name == "nt":
import win32console # pylint: disable=import-error
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ProcessWrapper():
    """ Builds command, launches and terminates the underlying
    faceswap process. Updates GUI display depending on state """
    def __init__(self):
        logger.debug("Initializing %s", self.__class__.__name__)
        self.tk_vars = get_config().tk_vars
        self.set_callbacks()
        # Folder containing the GUI entry script; faceswap's <category>.py lives here.
        self.pathscript = os.path.realpath(os.path.dirname(sys.argv[0]))
        self.command = None
        self.statusbar = get_config().statusbar
        self.task = FaceswapControl(self)
        logger.debug("Initialized %s", self.__class__.__name__)
    def set_callbacks(self):
        """ Set the tkinter variable callbacks """
        logger.debug("Setting tk variable traces")
        # "w" trace: callback fires whenever the GUI writes the variable.
        self.tk_vars["action"].trace("w", self.action_command)
        self.tk_vars["generate"].trace("w", self.generate_command)
    def action_command(self, *args):
        """ The action to perform when the action button is pressed.

        Terminates the running task if one is active, otherwise builds the
        arguments and launches the requested command. """
        if not self.tk_vars["action"].get():
            return
        category, command = self.tk_vars["action"].get().split(",")
        if self.tk_vars["runningtask"].get():
            self.task.terminate()
        else:
            self.command = command
            args = self.prepare(category)
            self.task.execute_script(command, args)
        # Reset so the same action can trigger the trace again.
        self.tk_vars["action"].set(None)
    def generate_command(self, *args):
        """ Generate the command line arguments and output them to the
        console instead of executing. """
        if not self.tk_vars["generate"].get():
            return
        category, command = self.tk_vars["generate"].get().split(",")
        args = self.build_args(category, command=command, generate=True)
        self.tk_vars["consoleclear"].set(True)
        logger.debug(" ".join(args))
        print(" ".join(args))
        self.tk_vars["generate"].set(None)
    def prepare(self, category):
        """ Prepare the environment for execution.

        Sets the running/console tk variables, starts the status bar and
        returns the built argument list. """
        logger.debug("Preparing for execution")
        self.tk_vars["runningtask"].set(True)
        self.tk_vars["consoleclear"].set(True)
        if self.command == "train":
            self.tk_vars["istraining"].set(True)
        print("Loading...")
        self.statusbar.message.set("Executing - {}.py".format(self.command))
        # effmpeg/train have no meaningful overall progress, so use an
        # indeterminate progress bar for them.
        mode = "indeterminate" if self.command in ("effmpeg", "train") else "determinate"
        self.statusbar.start(mode)
        args = self.build_args(category)
        self.tk_vars["display"].set(self.command)
        logger.debug("Prepared for execution")
        return args
    def build_args(self, category, command=None, generate=False):
        """ Build the faceswap command and arguments list.

        When generate=True the list is meant for display, so arguments
        containing spaces are quoted. """
        logger.debug("Build cli arguments: (category: %s, command: %s, generate: %s)",
                     category, command, generate)
        command = self.command if not command else command
        script = "{}.{}".format(category, "py")
        pathexecscript = os.path.join(self.pathscript, script)
        # -u keeps the child's stdout unbuffered so output streams live.
        args = [sys.executable] if generate else [sys.executable, "-u"]
        args.extend([pathexecscript, command])
        cli_opts = get_config().cli_opts
        for cliopt in cli_opts.gen_cli_arguments(command):
            args.extend(cliopt)
            if command == "train" and not generate:
                self.init_training_session(cliopt)
        if not generate:
            args.append("-gui")  # Indicate to Faceswap that we are running the GUI
        if generate:
            # Delimit args with spaces
            args = ['"{}"'.format(arg) if " " in arg and not arg.startswith(("[", "("))
                    and not arg.endswith(("]", ")")) else arg
                    for arg in args]
        logger.debug("Built cli arguments: (%s)", args)
        return args
    @staticmethod
    def init_training_session(cliopt):
        """ Set the session stats for disable logging, model folder and model name """
        session = get_config().session
        if cliopt[0] == "-t":
            session.modelname = cliopt[1].lower().replace("-", "_")
            logger.debug("modelname: '%s'", session.modelname)
        if cliopt[0] == "-m":
            session.modeldir = cliopt[1]
            logger.debug("modeldir: '%s'", session.modeldir)
    def terminate(self, message):
        """ Finalize wrapper when process has exited.

        :param message: status-bar text describing how the process ended. """
        logger.debug("Terminating Faceswap processes")
        self.tk_vars["runningtask"].set(False)
        if self.task.command == "train":
            self.tk_vars["istraining"].set(False)
        self.statusbar.stop()
        self.statusbar.message.set(message)
        self.tk_vars["display"].set(None)
        get_images().delete_preview()
        # Re-init the session object to clear per-run state.
        get_config().session.__init__()
        self.command = None
        logger.debug("Terminated Faceswap processes")
        print("Process exited.")
class FaceswapControl():
    """ Control the underlying Faceswap tasks """
    def __init__(self, wrapper):
        logger.debug("Initializing %s", self.__class__.__name__)
        # Parent ProcessWrapper; used for tk variable access and final teardown.
        self.wrapper = wrapper
        self.config = get_config()
        self.statusbar = self.config.statusbar
        self.command = None
        self.args = None
        self.process = None
        self.thread = None  # Thread for LongRunningTask termination
        # Per-session training counters, reset in set_final_status().
        self.train_stats = {"iterations": 0, "timestamp": None}
        # Pre-compiled parsers for the subprocess' console output.
        self.consoleregex = {
            "loss": re.compile(r"[\W]+(\d+)?[\W]+([a-zA-Z\s]*)[\W]+?(\d+\.\d+)"),
            "tqdm": re.compile(r"(?P<dsc>.*?)(?P<pct>\d+%).*?(?P<itm>\S+/\S+)\W\["
                               r"(?P<tme>[\d+:]+<.*),\W(?P<rte>.*)[a-zA-Z/]*\]"),
            "ffmpeg": re.compile(r"([a-zA-Z]+)=\s*(-?[\d|N/A]\S+)")}
        logger.debug("Initialized %s", self.__class__.__name__)
    def execute_script(self, command, args):
        """ Execute the requested Faceswap Script as a subprocess and attach
        stdout/stderr reader threads. """
        logger.debug("Executing Faceswap: (command: '%s', args: %s)", command, args)
        self.thread = None
        self.command = command
        # Line-buffered text pipes so readline() yields one console line at a time.
        kwargs = {"stdout": PIPE,
                  "stderr": PIPE,
                  "bufsize": 1,
                  "universal_newlines": True}
        self.process = Popen(args, **kwargs, stdin=PIPE)
        self.thread_stdout()
        self.thread_stderr()
        logger.debug("Executed Faceswap")
    def read_stdout(self):
        """ Read stdout from the subprocess. If training, pass the loss
        values to Queue """
        logger.debug("Opening stdout reader")
        while True:
            try:
                output = self.process.stdout.readline()
            except ValueError as err:
                # Pipe was closed underneath us (process terminated); stop reading.
                if str(err).lower().startswith("i/o operation on closed file"):
                    break
                raise
            if output == "" and self.process.poll() is not None:
                break
            if output:
                # Progress-style lines are consumed by the status bar and not echoed.
                if ((self.command == "train" and self.capture_loss(output)) or
                        (self.command == "effmpeg" and self.capture_ffmpeg(output)) or
                        (self.command not in ("train", "effmpeg") and self.capture_tqdm(output))):
                    continue
                if (self.command == "train" and
                        self.wrapper.tk_vars["istraining"].get() and
                        "[saved models]" in output.strip().lower()):
                    logger.debug("Trigger GUI Training update")
                    logger.trace("tk_vars: %s", {itm: var.get()
                                                 for itm, var in self.wrapper.tk_vars.items()})
                    if not self.config.session.initialized:
                        # Don't initialize session until after the first save as state
                        # file must exist first
                        logger.debug("Initializing curret training session")
                        self.config.session.initialize_session(is_training=True)
                    self.wrapper.tk_vars["updatepreview"].set(True)
                    self.wrapper.tk_vars["refreshgraph"].set(True)
                print(output.strip())
        returncode = self.process.poll()
        message = self.set_final_status(returncode)
        self.wrapper.terminate(message)
        logger.debug("Terminated stdout reader. returncode: %s", returncode)
    def read_stderr(self):
        """ Read stderr from the subprocess and echo it to our stderr;
        tqdm progress lines are diverted to the status bar. """
        logger.debug("Opening stderr reader")
        while True:
            try:
                output = self.process.stderr.readline()
            except ValueError as err:
                # Pipe was closed underneath us (process terminated); stop reading.
                if str(err).lower().startswith("i/o operation on closed file"):
                    break
                raise
            if output == "" and self.process.poll() is not None:
                break
            if output:
                if self.command != "train" and self.capture_tqdm(output):
                    continue
                print(output.strip(), file=sys.stderr)
        logger.debug("Terminated stderr reader")
    def thread_stdout(self):
        """ Put the subprocess stdout so that it can be read without
        blocking """
        logger.debug("Threading stdout")
        thread = Thread(target=self.read_stdout)
        thread.daemon = True
        thread.start()
        logger.debug("Threaded stdout")
    def thread_stderr(self):
        """ Put the subprocess stderr so that it can be read without
        blocking """
        logger.debug("Threading stderr")
        thread = Thread(target=self.read_stderr)
        thread.daemon = True
        thread.start()
        logger.debug("Threaded stderr")
    def capture_loss(self, string):
        """ Capture loss values from stdout.

        Returns True when the line was a loss line (and the status bar was
        updated), False otherwise so the caller echoes the line. """
        logger.trace("Capturing loss")
        if not str.startswith(string, "["):
            logger.trace("Not loss message. Returning False")
            return False
        loss = self.consoleregex["loss"].findall(string)
        # Expect exactly two (iteration, name, value) triples: loss A and loss B.
        if len(loss) != 2 or not all(len(itm) == 3 for itm in loss):
            logger.trace("Not loss message. Returning False")
            return False
        message = "Total Iterations: {} | ".format(int(loss[0][0]))
        message += " ".join(["{}: {}".format(itm[1], itm[2]) for itm in loss])
        if not message:
            logger.trace("Error creating loss message. Returning False")
            return False
        iterations = self.train_stats["iterations"]
        if iterations == 0:
            # Set initial timestamp
            self.train_stats["timestamp"] = time()
        iterations += 1
        self.train_stats["iterations"] = iterations
        elapsed = self.calc_elapsed()
        message = "Elapsed: {} | Session Iterations: {} {}".format(
            elapsed,
            self.train_stats["iterations"], message)
        self.statusbar.progress_update(message, 0, False)
        logger.trace("Succesfully captured loss: %s", message)
        return True
    def calc_elapsed(self):
        """ Calculate and format time since training started as HH:MM:SS """
        now = time()
        elapsed_time = now - self.train_stats["timestamp"]
        try:
            hrs = int(elapsed_time // 3600)
            if hrs < 10:
                hrs = "{0:02d}".format(hrs)
            mins = "{0:02d}".format((int(elapsed_time % 3600) // 60))
            secs = "{0:02d}".format((int(elapsed_time % 3600) % 60))
        except ZeroDivisionError:
            hrs = "00"
            mins = "00"
            secs = "00"
        return "{}:{}:{}".format(hrs, mins, secs)
    def capture_tqdm(self, string):
        """ Capture tqdm output for progress bar.

        Returns True when the line matched tqdm output (status bar updated or
        tqdm still initializing), False otherwise. """
        logger.trace("Capturing tqdm")
        tqdm = self.consoleregex["tqdm"].match(string)
        if not tqdm:
            return False
        tqdm = tqdm.groupdict()
        # tqdm prints "?" placeholders until it has a rate estimate.
        if any("?" in val for val in tqdm.values()):
            logger.trace("tqdm initializing. Skipping")
            return True
        description = tqdm["dsc"].strip()
        description = description if description == "" else "{} | ".format(description[:-1])
        processtime = "Elapsed: {} Remaining: {}".format(tqdm["tme"].split("<")[0],
                                                         tqdm["tme"].split("<")[1])
        message = "{}{} | {} | {} | {}".format(description,
                                               processtime,
                                               tqdm["rte"],
                                               tqdm["itm"],
                                               tqdm["pct"])
        position = tqdm["pct"].replace("%", "")
        position = int(position) if position.isdigit() else 0
        self.statusbar.progress_update(message, position, True)
        logger.trace("Succesfully captured tqdm message: %s", message)
        return True
    def capture_ffmpeg(self, string):
        """ Capture ffmpeg key=value progress output for the status bar.

        Returns True when the line looked like ffmpeg progress, else False. """
        logger.trace("Capturing ffmpeg")
        ffmpeg = self.consoleregex["ffmpeg"].findall(string)
        # A genuine ffmpeg progress line carries at least 7 key=value pairs.
        if len(ffmpeg) < 7:
            logger.trace("Not ffmpeg message. Returning False")
            return False
        message = ""
        for item in ffmpeg:
            message += "{}: {} ".format(item[0], item[1])
        if not message:
            logger.trace("Error creating ffmpeg message. Returning False")
            return False
        self.statusbar.progress_update(message, 0, False)
        logger.trace("Succesfully captured ffmpeg message: %s", message)
        return True
    def terminate(self):
        """ Terminate the running process in a LongRunningTask so we can still
        output to console.

        Re-schedules itself every second via tk's after() until the
        termination task reports complete. """
        if self.thread is None:
            logger.debug("Terminating wrapper in LongRunningTask")
            self.thread = LongRunningTask(target=self.terminate_in_thread,
                                          args=(self.command, self.process))
            if self.command == "train":
                self.wrapper.tk_vars["istraining"].set(False)
            self.thread.start()
            self.config.root.after(1000, self.terminate)
        elif not self.thread.complete.is_set():
            logger.debug("Not finished terminating")
            self.config.root.after(1000, self.terminate)
        else:
            logger.debug("Termination Complete. Cleaning up")
            _ = self.thread.get_result()  # Terminate the LongRunningTask object
            self.thread = None
    def terminate_in_thread(self, command, process):
        """ Terminate the subprocess.

        Training gets a graceful exit signal first (carriage return on
        Windows, SIGINT elsewhere) with a timeout; other commands are
        terminated immediately. """
        logger.debug("Terminating wrapper")
        if command == "train":
            timeout = self.config.user_config_dict.get("timeout", 120)
            logger.debug("Sending Exit Signal")
            print("Sending Exit Signal", flush=True)
            now = time()
            if os.name == "nt":
                logger.debug("Sending carriage return to process")
                con_in = win32console.GetStdHandle(  # pylint:disable=c-extension-no-member
                    win32console.STD_INPUT_HANDLE)  # pylint:disable=c-extension-no-member
                keypress = self.generate_windows_keypress("\n")
                con_in.WriteConsoleInput([keypress])
            else:
                logger.debug("Sending SIGINT to process")
                process.send_signal(signal.SIGINT)
            # Busy-wait for the process to exit; force-kill children on timeout.
            while True:
                timeelapsed = time() - now
                if process.poll() is not None:
                    break
                if timeelapsed > timeout:
                    logger.error("Timeout reached sending Exit Signal")
                    self.terminate_all_children()
        else:
            self.terminate_all_children()
        return True
    @staticmethod
    def generate_windows_keypress(character):
        """ Generate an 'Enter' key press to terminate Windows training """
        buf = win32console.PyINPUT_RECORDType(  # pylint:disable=c-extension-no-member
            win32console.KEY_EVENT)  # pylint:disable=c-extension-no-member
        buf.KeyDown = 1
        buf.RepeatCount = 1
        buf.Char = character
        return buf
    @staticmethod
    def terminate_all_children():
        """ Terminates all children.

        Escalates SIGTERM -> SIGKILL with a 10 second grace period each. """
        logger.debug("Terminating Process...")
        print("Terminating Process...", flush=True)
        children = psutil.Process().children(recursive=True)
        for child in children:
            child.terminate()
        _, alive = psutil.wait_procs(children, timeout=10)
        if not alive:
            logger.debug("Terminated")
            print("Terminated")
            return
        logger.debug("Termination timed out. Killing Process...")
        print("Termination timed out. Killing Process...", flush=True)
        for child in alive:
            child.kill()
        _, alive = psutil.wait_procs(alive, timeout=10)
        if not alive:
            logger.debug("Killed")
            print("Killed")
        else:
            for child in alive:
                msg = "Process {} survived SIGKILL. Giving up".format(child)
                logger.debug(msg)
                print(msg)
    def set_final_status(self, returncode):
        """ Set the status bar output based on subprocess return code
        and reset training stats """
        logger.debug("Setting final status. returncode: %s", returncode)
        self.train_stats = {"iterations": 0, "timestamp": None}
        # 3221225786 == 0xC000013A: Windows STATUS_CONTROL_C_EXIT (Ctrl+C).
        if returncode in (0, 3221225786):
            status = "Ready"
        elif returncode == -15:  # SIGTERM
            status = "Terminated - {}.py".format(self.command)
        elif returncode == -9:  # SIGKILL
            status = "Killed - {}.py".format(self.command)
        elif returncode == -6:  # SIGABRT
            status = "Aborted - {}.py".format(self.command)
        else:
            status = "Failed - {}.py. Return Code: {}".format(self.command, returncode)
        logger.debug("Set final status: %s", status)
        return status
|
stream_kitchen.py | """
The MIT License (MIT)
Copyright (c) 2016 Jake Lussier (Stanford University)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
TODO: License info
Program to stream (and possibly record) live or recorded data from kitchen.
Works by spawning sensor-specific streaming threads based on the
specified FridgeConfig's recording_streams attributes.
Each streaming thread constantly puts data onto the data queue.
The writeData() function gets this data from the queue.
Record events are registered through user input or specific environment
changes (eg, when lights come ON). In such cases, writeData()
writes data to disk. User input is received through the
image window or the terminal and handled by handleKeyPress().
"""
from kitchen import *
import argparse, sys, os, time, inspect, logging, threading, cv2, Queue, json, socket, requests
from os.path import *
from utils.general_utils import *
from utils.cv_utils import *
from utils.logging_utils import *
from config.fridge_config import *
from audio.audio_writer import *
from data_stream.stream_utils import *
def handleCharacter(ch, time_str, caller_name, config):
    """Process one keyboard character command.

    Only 'q' (quit) is currently recognized; it raises the global
    quit_all flag so every streaming thread winds down.

    Args:
        ch: Character read from the keyboard stream.
        time_str: Timestamp string associated with the character.
        caller_name: Name of the calling function (used in log messages).
        config: Fridge configuration the character applies to.
    """
    global quit_all
    name = inspect.stack()[0][3]
    try:
        if ch != 'q':
            return
        logging.info("%s:%s Quitting." % (caller_name, name,))
        quit_all = True
    except Exception as e:
        handleException("%s:%s"%(caller_name,name), e)
        quit_all = True
def streamKeyboardStreams(config_sensor_streams):
    """Stream keyboard data.

    Polls each keyboard stream for characters, dispatches them to
    handleCharacter (which handles the 'q' quit command) and, when
    recording, enqueues them on the global write queue. Runs until
    quit_all is set or all streams report done.

    Args:
        config_sensor_streams: Dict mapping config name to its keyboard streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    try:
        logger = Logger(name, interval=60*5, updates_per_second=True)
        all_kb_streams = [w for v in config_sensor_streams.values() for w in v]
        if not all_kb_streams: return
        while not quit_all and streamsNotDone(all_kb_streams):
            logger.update()
            for (config, streams) in config_sensor_streams.items():
                for stream in streams:
                    # Skip streams that should not be serviced this pass.
                    if passStream(stream, all_streams): continue
                    t = stream.getCurrentTime()
                    tstr = dateTimeToTimeString(t)
                    data = stream.getCurrentData()
                    if data:
                        handleCharacter(data, tstr, name, config)
                        if args.record: write_q.put((stream, t, data+"\n"))
                    stream.updateCurrent()
            time.sleep(0.001)
    except Exception as e:
        # Any failure in a stream thread aborts the whole program.
        handleException(name, e)
        quit_all = True
def writeData():
    """Drain the write queue and persist samples to their streams.

    Runs in a dedicated control thread. Pops (stream, time, data) tuples
    from the global write queue; a sample newer than writer_buffer_time
    triggers a short sleep so writers see data in (roughly) time order.
    Streams are periodically given a chance to close finished writers.
    Exits once quit_all is set and the queue is fully drained.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    last_closed = currentTime(all_streams)
    try:
        logger = Logger(name, interval=60*5)
        while not (quit_all and write_q.empty()):
            logger.update()
            try:
                (stream, t, data) = write_q.get(timeout=writer_buffer_time)
                now = currentTime(all_streams)
                if (now-t).total_seconds() < writer_buffer_time:
                    time.sleep(writer_buffer_time)
            except Queue.Empty:
                # No data arrived within the timeout; re-check the exit
                # condition. (Previously a bare `except:` here swallowed
                # *every* error -- e.g. a currentTime failure -- and spun
                # forever; now real errors reach the outer handler.)
                continue
            with writing_lock:
                stream.write(t, data)
                if (now-last_closed).total_seconds() > 1.0:
                    [v.close(t) for v in all_streams]
                    last_closed = now
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamAudioStreams(config_sensor_streams):
    """Continuously read audio samples and enqueue them for writing.

    Args:
        config_sensor_streams: Dict mapping config name to its audio streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    try:
        logger = Logger(name, interval=60*5, updates_per_second=True)
        audio_streams = [s for group in config_sensor_streams.values() for s in group]
        if not audio_streams:
            return
        while not quit_all and streamsNotDone(audio_streams):
            logger.update()
            for group in config_sensor_streams.values():
                for s in group:
                    if passStream(s, all_streams):
                        continue
                    sample_time = s.getCurrentTime()
                    sample = s.getCurrentData()
                    if args.record:
                        write_q.put((s, sample_time, sample))
                    s.updateCurrent()
            time.sleep(0.001)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamCameraStreams(config_sensor_streams):
    """Stream camera data, setup visualization, and handle uncovering events.

    Stream camera data. Also display the vis image and handle user input.
    In the event of an uncovering (light=ON), create writers for
    appropriate setup. For a covering (light=OFF), close the writers.
    Note that this function is run in the main thread.

    Args:
        config_sensor_streams: Camera streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    ##writer = Cv2VideoWriter("demo.mp4", 25)
    try:
        logger = Logger(name, interval=60*5, updates_per_second=True, memory_usage=True)
        all_camera_streams = [w for v in config_sensor_streams.values() for w in v]
        if not all_camera_streams: return
        # For each config, whether a camera is uncovered and how many frames recorded in this interval.
        uncovered, recorded = dict([(v, False) for v in config_sensor_streams.keys()]), {}
        vis_str, vis_shape = "vis", None
        vis_ims = dict([(v[0], [None for w in range(len(v[1]))]) for v in config_sensor_streams.items()])
        if args.display: cv2.namedWindow(vis_str)
        while (not quit_all and streamsNotDone(all_camera_streams)):
            logger.update()
            for (config, streams) in config_sensor_streams.items():
                if not streams: continue
                for (i, stream) in enumerate(streams):
                    # Read data and add to data queue.
                    if passStream(stream, all_streams): continue
                    t = stream.getCurrentTime()
                    #if uncovered[config]: print "Open", (t-t_open).total_seconds(), "secs"
                    tstr = dateTimeToTimeString(t)
                    im = stream.getCurrentData()
                    sm_im = resize(im, 1.0/args.display_downscale)
                    # FIX: use `is None` -- comparing with == is unreliable for
                    # array-like values and unidiomatic for None checks.
                    if vis_shape is None: vis_shape = sm_im.shape[:2]
                    if args.record:
                        write_q.put((stream,t,im))
                    stream.updateCurrent()
                    # Update the visualization for this stream.
                    c = (255,0,0) if config=="fridge" else (0,0,255)
                    #cv2.rectangle(sm_im, (0,0), (sm_im.shape[1]-1, sm_im.shape[0]-1), c, 10)
                    vis_ims[config][i] = sm_im
                # Estimate ambient light from the brightest mean max-channel
                # value across this config's current frames.
                light = 0 if None in vis_ims[config] else \
                        max([np.mean(np.max(v, axis=2)) for v in vis_ims[config]])
                # Handle state changes
                definitely_closed, definitely_open = light<50, light>100
                if not uncovered[config] and definitely_open:
                    t_open = t
                    logging.info("%s OPEN %s." % (name, tstr))
                    # Create writers.
                    if args.record:
                        with writing_lock:
                            [v.createWriter(t, time_buffer=writer_buffer_time, fps=30) \
                             for v in config_streams[config]]
                    # Send pulse command to expiring containers.
                    if args.msg_containers and api_config:
                        auth = ""
                        url_base = api_config["base-url"] + str(api_config["app-api-port"])
                        url = url_base + "/inventory"
                        irs = requests.get(url, headers={"Authorization":auth}).json()
                        alert_iids = [v["item_id"] for v in irs if v["remaining_time"] <= 0 \
                                      and "item_beacon_id" in v]
                        warning_iids = [v["item_id"] for v in irs \
                                        if v["remaining_time"] > 0 and v["remaining_time"] < 7 \
                                        and "item_beacon_id" in v]
                        data = {"item_ids": alert_iids, "animation": "ledOn",
                                "duration": 3000, "color": "ff0000"}
                        url = url_base + "/containerAnimation?" + json.dumps(data)
                        requests.post(url, headers={"Authorization":auth})
                        data = {"item_ids": warning_iids, "animation": "ledOn",
                                "duration": 3000, "color": "ff7700"}
                        url = url_base + "/containerAnimation?" + json.dumps(data)
                        requests.post(url, headers={"Authorization":auth})
                    uncovered[config], start_time, recorded[config] = True, time.time(), 0
                elif uncovered[config] and definitely_closed:
                    logging.info("%s CLOSED." % (name,))
                    fps = recorded[config] / (time.time()-start_time)
                    logging.info("%s video fps = %.1f." % (name, fps))
                    # Close writers.
                    if args.record:
                        with writing_lock:
                            [v.setWriterEndTimes(t) for v in config_streams[config]]
                    uncovered[config] = False
                if uncovered[config]: recorded[config] += 1
            # FIX: `w != None` on a numpy image array is an elementwise
            # comparison (ambiguous truth value on modern numpy); test
            # identity against None instead.
            vis_im = tileImages([cv2.resize(w, vis_shape[::-1]) if w is not None else np.zeros(vis_shape)\
                                 for v in vis_ims.values() for w in v])
            if args.display:
                cv2.imshow(vis_str, vis_im)
                handleCharacter(getKey(cv2.waitKey(1 if args.live else 10)), tstr, name, config)
            ##writer.write(vis_im)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamLoadCellStreams(config_sensor_streams):
    """Continuously read load-cell weights and enqueue them for writing.

    Args:
        config_sensor_streams: Dict mapping config name to its load cell streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    try:
        logger = Logger(name, interval=60*5, updates_per_second=True)
        cell_streams = [s for group in config_sensor_streams.values() for s in group]
        if not cell_streams:
            return
        while not quit_all and streamsNotDone(cell_streams):
            logger.update()
            for (config, streams) in config_sensor_streams.items():
                for s in streams:
                    if passStream(s, all_streams):
                        continue
                    reading_time = s.getCurrentTime()
                    reading = s.getCurrentData()
                    if args.record:
                        write_q.put((s, reading_time, "%f\n" % reading))
                    s.updateCurrent()
            time.sleep(0.005)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamRfidAntennaStreams(config_sensor_streams):
    """Stream RFID antenna data.

    Reads tag sightings from each antenna, enqueues them for recording,
    and maintains config_tags: per config, a list of unique tags (by EPC)
    seen within the last 2 seconds, sorted by descending RSSI.

    Args:
        config_sensor_streams: RFID antenna streams.
    """
    global write_q, quit_all, config_tags
    name = inspect.stack()[0][3]
    try:
        logger = Logger(name, interval=60*5, updates_per_second=True)
        all_antenna_streams = [w for v in config_sensor_streams.values() for w in v]
        if not all_antenna_streams: return
        while not quit_all and streamsNotDone(all_antenna_streams):
            logger.update()
            for (config, streams) in config_sensor_streams.items():
                for stream in streams:
                    if passStream(stream, all_streams): continue
                    t = stream.getCurrentTime()
                    data = stream.getCurrentData()
                    if args.record:
                        [write_q.put((stream, v.time,str(v)+"\n")) for v in data]
                    # Update tags. List has unique tags with most recent
                    # reads and is in descending RSSI order.
                    # Sorting by time first means the dict comprehension keeps
                    # the most recent read per EPC; reads older than 2s drop out.
                    tt = sorted(config_tags[config] + data, key=lambda x: x.time)
                    tt = dict([(v.epc, v) for v in tt if (t-v.time).total_seconds()<2]).values()
                    config_tags[config] = sorted(tt, key=lambda x: -x.rssi)
                    stream.updateCurrent()
            time.sleep(0.001)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamBarcodeStreams(config_sensor_streams):
    """Continuously poll barcode scanners and enqueue scans for writing.

    Args:
        config_sensor_streams: Dict mapping config name to its barcode streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    try:
        logger = Logger(name, interval=60*5, updates_per_second=True)
        barcode_streams = [s for group in config_sensor_streams.values() for s in group]
        if not barcode_streams:
            return
        while not quit_all and streamsNotDone(barcode_streams):
            logger.update()
            for (config, streams) in config_sensor_streams.items():
                for s in streams:
                    if passStream(s, all_streams):
                        continue
                    t = s.getCurrentTime()
                    tstr = dateTimeToTimeString(t)
                    barcode = s.getCurrentData()
                    if args.record and barcode:
                        write_q.put((s, barcode.time, str(barcode)+"\n"))
                    s.updateCurrent()
            time.sleep(0.001)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamBleStreams(config_sensor_streams):
    """Continuously scan BLE beacons and enqueue sightings for writing.

    Args:
        config_sensor_streams: Dict mapping config name to its BLE streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    try:
        logger = Logger(name, interval=60*5, updates_per_second=True)
        ble_streams = [s for group in config_sensor_streams.values() for s in group]
        if not ble_streams:
            return
        while not quit_all and streamsNotDone(ble_streams):
            logger.update()
            for (config, streams) in config_sensor_streams.items():
                for s in streams:
                    if passStream(s, all_streams):
                        continue
                    t = s.getCurrentTime()
                    tstr = dateTimeToTimeString(t)
                    beacons = s.getCurrentData()
                    if args.record:
                        for beacon in beacons:
                            write_q.put((s, beacon.time, str(beacon)+"\n"))
                    s.updateCurrent()
            time.sleep(0.001)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def getKitchenInfo():
    """Resolve this machine's kitchen id and default data directory.

    Matches the local hostname (lowercased) against the Kitchen entries
    of the DB config file.

    Returns:
        (kitchen_id, kitchen_data_dir) tuple.

    Raises:
        KeyError: if the hostname is not a registered kitchen.
    """
    # Use a context manager so the config file handle is closed promptly
    # (the previous open(...).read() leaked the descriptor).
    with open(DB_CONFIG) as f:
        db_config = json.load(f)
    name_to_kid = dict([(d["name"].lower(), d["id"]) for d \
                        in db_config["entries"]["Kitchen"]])
    hostname = socket.gethostname().lower()
    kid = name_to_kid[hostname]
    return kid, join(DATA, "Kitchen%07d" % kid)
if __name__ == "__main__":
    # Parse command-line options controlling live vs. recorded streaming,
    # recording, display, and container messaging.
    parser = argparse.ArgumentParser(description='Kitchen streaming.')
    parser.add_argument("--output", help="Output directory.")
    parser.add_argument("--fridge-config", help="Fridge configuration.")
    parser.add_argument("--api-config", help="Service configuration.")
    parser.add_argument("--frame-width", help="Frame width.", type=int)
    parser.add_argument("--frame-height", help="Frame height.", type=int)
    parser.add_argument("--display-downscale", help="Display downscale.", type=int, default=1)
    parser.add_argument("--live", help="Live stream.", action="store_true")
    parser.add_argument("--record", help="Record data.", action="store_true")
    parser.add_argument("--msg-containers", help="Msg containers.", action="store_true")
    parser.add_argument("--display", help="Display video.", action="store_true")
    args = parser.parse_args()
    # Recording only makes sense for live input, not when replaying.
    if args.record and not args.live:
        print("Error: cannot record recorded stream.")
        parser.print_help()
        sys.exit(89)
    try:
        kitchen_id, output = getKitchenInfo()
    except Exception as e:
        print ("Error: must record from known machine.")
        sys.exit(89)
    if not args.output: args.output = output
    # Frame dimensions must be given together (or not at all).
    if args.frame_width and args.frame_height:
        shape = (args.frame_width, args.frame_height)
    elif not args.frame_width and not args.frame_height:
        shape = None
    else:
        print("Error: must specify both width and height or neither.")
        parser.print_help()
        sys.exit(89)
    configureLogging("%s.log" % args.output.rstrip("/"))
    api_config = None if not args.api_config else json.loads(open(args.api_config).read())
    configs = [FridgeConfig(args.fridge_config)]
    # TODO(jake): remove hack
    ble_addrs = ["f86aa431ba22"]
    # Initialize global list of config tags and all streams. The former is a dict
    # from config (eg, "fridge") to list of tags.
    # Latter is simply a list of streams.
    config_streams, config_tags = [dict([(w.name,[]) for w in configs]) \
                                   for v in range(2)]
    all_streams = []
    # Initialize control and stream threads.
    control_threads = [threading.Thread(target=v) for v in [writeData]]
    stream_threads = []
    # For each stream type (eg, AudioStream, BarcodeStream), for each config,
    # initialize and store the streams. Append to stream threads.
    stream_names = set([w for v in configs for w in v.recording_streams.keys()])
    for stream_name in stream_names:
        config_sensor_streams = {}
        for config in configs:
            sensor_names = config.recording_streams[stream_name]
            sensor_paths = [join(args.output, "%s%s%s"%(config.name,stream_name,v)) \
                            for v in sensor_names]
            for p in sensor_paths:
                if args.record and not exists(p): os.makedirs(p)
            if args.live: # Initialize StreamLive
                # NOTE(review): stream classes are resolved by name via eval
                # (e.g. "CameraStreamLive"); names come from the trusted
                # FridgeConfig, not user input.
                config_sensor_streams[config.name] = [eval(stream_name+"Live")(n, p, shape=shape, addrs=ble_addrs) \
                                                      for (n, p) in zip(sensor_names, sensor_paths)]
            else: # initialize StreamRecorded
                config_sensor_streams[config.name] = map(eval(stream_name+"Recorded"), sensor_paths)
            config_streams[config.name] += config_sensor_streams[config.name]
            all_streams += config_sensor_streams[config.name]
        if stream_name == "CameraStream":
            # Camera streaming runs in the main thread (see streamCameraStreams).
            camera_config_streams = config_sensor_streams
        else:
            f = eval("stream%ss"%stream_name)
            stream_threads.append(threading.Thread(target=f, args=(config_sensor_streams,)))
    # Initialize global writer and control variables.
    write_q, writing_lock, writer_buffer_time = Queue.Queue(), threading.Lock(), 3.0
    quit_all = False
    # Kick off threads
    for t in control_threads+stream_threads:
        t.daemon = True
        t.start()
    # Stream camera data in main thread.
    streamCameraStreams(camera_config_streams)
    # Kick off and then join threads.
    [t.join() for t in stream_threads]
    quit_all = True
    [t.join() for t in control_threads]
    # Release all streams.
    [v.release() for v in all_streams]
|
user_io.py | import os
import sys
import time
import numpy as np
import pybullet as p
from collections import namedtuple
from pybullet_planning.utils import INF, CLIENT, CLIENTS
from pybullet_planning.utils import is_darwin
# from future_builtins import map, filter
# from builtins import input # TODO - use future
# Py2/Py3 compatibility: raw_input was renamed to input in Python 3.
try:
    user_input = raw_input
except NameError:
    user_input = input
#####################################
# https://stackoverflow.com/questions/5081657/how-do-i-prevent-a-c-shared-library-to-print-on-stdout-in-python/14797594#14797594
# https://stackoverflow.com/questions/4178614/suppressing-output-of-module-calling-outside-library
# https://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
class HideOutput(object):
    '''
    A context manager that block stdout for its scope, usage:

    with HideOutput():
        os.system('ls -l')
    '''
    # Default for the enable flag; set to False to make all instances no-ops.
    DEFAULT_ENABLE = True
    def __init__(self, enable=None):
        if enable is None:
            enable = self.DEFAULT_ENABLE
        self.enable = enable
        if not self.enable:
            return
        sys.stdout.flush()
        self._origstdout = sys.stdout
        # Duplicate the real stdout file descriptor so it can be restored in
        # __exit__ after fd 1 has been pointed at /dev/null.
        self._oldstdout_fno = os.dup(sys.stdout.fileno())
        self._devnull = os.open(os.devnull, os.O_WRONLY)
    def __enter__(self):
        if not self.enable:
            return
        self._newstdout = os.dup(1)
        # Redirect fd 1 to /dev/null so output written directly to the
        # descriptor (e.g. by C extensions) is silenced too.
        os.dup2(self._devnull, 1)
        os.close(self._devnull)
        sys.stdout = os.fdopen(self._newstdout, 'w')
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.enable:
            return
        sys.stdout.close()
        sys.stdout = self._origstdout
        sys.stdout.flush()
        # Restore the original descriptor onto fd 1 and release our dup.
        os.dup2(self._oldstdout_fno, 1)
        os.close(self._oldstdout_fno) # Added
#####################################
def elapsed_time(start_time):
    """Return seconds of wall time elapsed since *start_time* (a time.time() stamp)."""
    now = time.time()
    return now - start_time
MouseEvent = namedtuple('MouseEvent', ['eventType', 'mousePosX', 'mousePosY', 'buttonIndex', 'buttonState'])

def get_mouse_events():
    """Poll pybullet for pending mouse events, wrapped as MouseEvent tuples."""
    return [MouseEvent(*event) for event in p.getMouseEvents(physicsClientId=CLIENT)]
def update_viewer():
    """Pump pybullet UI events (mouse) so the viewer stays responsive while waiting."""
    # https://docs.python.org/2/library/select.html
    # events = p.getKeyboardEvents() # TODO: only works when the viewer is in focus
    get_mouse_events()
    # for k, v in keys.items():
    #     #p.KEY_IS_DOWN, p.KEY_WAS_RELEASED, p.KEY_WAS_TRIGGERED
    #     if (k == p.B3G_RETURN) and (v & p.KEY_WAS_TRIGGERED):
    #         return
    # time.sleep(1e-3) # Doesn't work
    # disable_gravity()
def wait_for_duration(duration): #, dt=0):
    """Spin the viewer event loop for *duration* seconds of wall time."""
    started = time.time()
    while elapsed_time(started) <= duration:
        update_viewer()
def simulate_for_duration(duration):
    """Advance the simulation by *duration* seconds, stepping at the engine's fixed timestep."""
    step_count = int(duration / get_time_step())
    for _ in range(step_count):
        step_simulation()
def get_time_step():
    """Return the engine's fixed simulation timestep in seconds per step."""
    # {'gravityAccelerationX', 'useRealTimeSimulation', 'gravityAccelerationZ', 'numSolverIterations',
    # 'gravityAccelerationY', 'numSubSteps', 'fixedTimeStep'}
    return p.getPhysicsEngineParameters(physicsClientId=CLIENT)['fixedTimeStep']
def enable_separating_axis_test():
    """Enable the SAT-based collision algorithm in the physics engine."""
    p.setPhysicsEngineParameter(enableSAT=1, physicsClientId=CLIENT)
    #p.setCollisionFilterPair()
    #p.setCollisionFilterGroupMask()
    #p.setInternalSimFlags()
    # enableFileCaching: Set to 0 to disable file caching, such as .obj wavefront file loading
    #p.getAPIVersion() # TODO: check that API is up-to-date
    #p.isNumpyEnabled()
def simulate_for_sim_duration(sim_duration, real_dt=0, frequency=INF):
    """Step the simulation until *sim_duration* of simulated time has passed.

    Sleeps *real_dt* wall seconds between steps and prints a progress
    line whenever more than *frequency* seconds of sim time have elapsed
    since the last print.
    """
    wall_start = time.time()
    dt = get_time_step()
    sim_time = 0
    last_print = 0
    while sim_time < sim_duration:
        if frequency < (sim_time - last_print):
            print('Sim time: {:.3f} | Real time: {:.3f}'.format(sim_time, elapsed_time(wall_start)))
            last_print = sim_time
        step_simulation()
        sim_time += dt
        time.sleep(real_dt)
def wait_for_user(message='Press enter to continue'):
    """Block until the user presses enter, printing *message* as the prompt.

    On macOS with a GUI attached the blocking read runs via threaded_input
    so the viewer keeps updating; elsewhere plain input()/raw_input() is used.
    """
    from pybullet_planning.interfaces.env_manager.simulation import has_gui
    if has_gui() and is_darwin():
        # OS X doesn't multi-thread the OpenGL visualizer
        #wait_for_interrupt()
        return threaded_input(message)
    return user_input(message)
def wait_if_gui(*args, **kwargs):
    """Block on user input only when a GUI viewer is attached; no-op otherwise."""
    from pybullet_planning.interfaces.env_manager.simulation import has_gui
    if not has_gui():
        return
    wait_for_user(*args, **kwargs)
def is_unlocked():
    """Return True if the active client's entry in CLIENTS is exactly True (unlocked)."""
    return CLIENTS[CLIENT] is True
def wait_if_unlocked(*args, **kwargs):
    """Block on user input only when the active client is unlocked; no-op otherwise."""
    if not is_unlocked():
        return
    wait_for_user(*args, **kwargs)
def wait_for_interrupt(max_time=np.inf):
    """
    Hold Ctrl to move the camera as well as zoom

    Spins the viewer loop (up to *max_time* seconds) until the user
    interrupts with Ctrl-C; the KeyboardInterrupt is swallowed.
    """
    print('Press Ctrl-C to continue')
    try:
        wait_for_duration(max_time)
    except KeyboardInterrupt:
        pass
    finally:
        # Print a newline so the ^C echo doesn't run into later output.
        print()
def step_simulation():
    """stepSimulation will perform all the actions in a single forward dynamics simulation step
    such as collision detection, constraint solving and integration. The default timestep is
    1/240 second, it can be changed using the setTimeStep or setPhysicsEngineParameter API.

    https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#heading=h.czaspku18mzs

    Note: This also forces pybullet to update its bounding volume hierarchy. Ideally one would do this without calling
    the physics simulator. But this is the only workaround that we've found so far.
    """
    # Steps the simulation on the module's active client connection.
    p.stepSimulation(physicsClientId=CLIENT)
def threaded_input(*args, **kwargs):
    """Run user_input(*args, **kwargs) in a helper thread, pumping the viewer meanwhile.

    Keeps the pybullet GUI responsive while blocked on terminal input
    (needed on OS X, which doesn't multi-thread the OpenGL visualizer).
    Returns the string the user entered.

    NOTE(review): if the input thread dies without appending a result,
    data[-1] raises IndexError -- confirm this is acceptable for callers.
    """
    # OS X doesn't multi-thread the OpenGL visualizer
    # http://openrave.org/docs/0.8.2/_modules/openravepy/misc/#SetViewerUserThread
    # https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/userData.py
    # https://github.com/bulletphysics/bullet3/tree/master/examples/ExampleBrowser
    #from pybullet_utils import bullet_client
    #from pybullet_utils.bullet_client import BulletClient
    #server = bullet_client.BulletClient(connection_mode=p.SHARED_MEMORY_SERVER) # GUI_SERVER
    #sim_id = p.connect(p.GUI)
    #print(dir(server))
    #client = bullet_client.BulletClient(connection_mode=p.SHARED_MEMORY)
    #sim_id = p.connect(p.SHARED_MEMORY)
    #threading = __import__('threading')
    import threading
    data = []
    thread = threading.Thread(target=lambda: data.append(user_input(*args, **kwargs)), args=[])
    thread.start()
    #threading.enumerate()
    #thread_id = 0
    #for tid, tobj in threading._active.items():
    #    if tobj is thread:
    #        thread_id = tid
    #        break
    try:
        # Keep the GUI alive while the helper thread blocks on input.
        while thread.is_alive():
            update_viewer()
    finally:
        thread.join()
    return data[-1]
|
pesa.py | # -*- coding: utf-8 -*-
#"""
#Created on Sun Jun 28 18:21:05 2020
#
#@author: Majdi Radaideh
#"""
from neorl.hybrid.pesacore.er import ExperienceReplay
from neorl.hybrid.pesacore.sa import SAMod
from neorl.hybrid.pesacore.es import ESMod
from neorl.hybrid.pesacore.pso import PSOMod
from copy import deepcopy
from multiprocessing import Process, Queue
import random
import numpy as np
from collections import defaultdict
import time
import sys
import uuid
from neorl.evolu.discrete import encode_grid_to_discrete, decode_discrete_to_grid
#multiprocessing trick to paralllelize nested functions in python (un-picklable objects!)
def globalize(func):
    """Return a picklable, module-level wrapper around *func*.

    The wrapper negates func's return value (PESA uses this to turn a
    minimization fitness into the maximization form it works with
    internally) and is registered under a unique name in this module so
    multiprocessing can pickle it even when *func* is a local lambda.
    """
    unique_name = uuid.uuid4().hex
    def _negated(*args, **kwargs):
        return -func(*args, **kwargs)
    _negated.__name__ = unique_name
    _negated.__qualname__ = unique_name
    setattr(sys.modules[_negated.__module__], unique_name, _negated)
    return _negated
class PESA(ExperienceReplay):
"""
*PESA Major Parameters*
:param mode: (str) problem type, either "min" for minimization problem or "max" for maximization
:param bounds: (dict) input parameter type and lower/upper bounds in dictionary form. Example: ``bounds={'x1': ['int', 1, 4], 'x2': ['float', 0.1, 0.8], 'x3': ['float', 2.2, 6.2]}``
:param fit: (function) the fitness function
:param npop: (int) total number of individuals in each group. So for ES, PSO, and SA, full population is ``npop*3``.
:param mu: (int) number of individuals to survive to the next generation.
Also, ``mu`` equals to the number of individuals to sample from the memory. If None, ``mu=int(npop/2)``.
So 1/2 of PESA population comes from previous generation, and 1/2 comes from the replay memory (See **Notes** below for more info)
:param memory_size: (int) max size of the replay memory (if None, ``memory_size`` is built to accommodate all samples during search)
:param alpha_init: (float) initial value of the prioritized replay coefficient (See **Notes** below)
:param alpha_end: (float) final value of the prioritized replay coefficient (See **Notes** below)
:param alpha_backdoor: (float) backdoor greedy replay rate/probability to sample from the memory for SA instead of random-walk (See **Notes** below)
*PESA Auxiliary Parameters (for the internal algorithms)*
:param cxpb: (float) for **ES**, population crossover probability between [0,1]
:param mutpb: (float) for **ES**, population mutation probability between [0,1]
:param c1: (float) for **PSO**, cognitive speed constant
:param c2: (float) for **PSO**, social speed constant
:param speed_mech: (str) for **PSO**, type of speed mechanism for to update particle velocity, choose between ``constric``, ``timew``, ``globw``.
:param Tmax: (float) for **SA**, initial/max temperature to start the annealing process
:param chi: (float) for **SA**, probability to perturb an attribute during SA annealing (occurs when ``rand(0,1) < chi``).
*PESA Misc. Parameters*
:param ncores: (int) number of parallel processors
:param seed: (int) random seed for sampling
"""
    def __init__ (self, mode, bounds, fit, npop, mu=None, #general parameters
                  memory_size=None, alpha_init=0.1, alpha_end=1, alpha_backdoor=0.1, #replay parameters
                  Tmax=10000, chi=0.1, #SA parameters
                  cxpb=0.7, mutpb=0.1, #ES parameters
                  c1=2.05, c2=2.05, speed_mech='constric', #PSO parameters
                  ncores=1, seed=None): #misc parameters
        """Construct the PESA optimizer and derive all internal hyperparameters.

        See the class docstring for the meaning of each parameter.
        """
        #--------------------
        #General Parameters
        #--------------------
        if seed:
            random.seed(seed)
            np.random.seed(seed)
        self.bounds=bounds
        #--mir
        self.mode=mode
        if mode == 'max':
            self.FIT=fit
        elif mode == 'min':
            # globalize negates the fitness (min -> max) and registers the
            # wrapper at module level so it stays picklable for multiprocessing
            self.FIT = globalize(lambda x: fit(x)) #use the function globalize to serialize the nested fit
        else:
            raise ValueError('--error: The mode entered by user is invalid, use either `min` or `max`')
        self.ncores=ncores
        self.NPOP=npop
        self.pso_flag=True
        self.ncores=ncores
        # With 3 or fewer cores, run the three sub-algorithms serially with
        # one core each; otherwise split the cores across them.
        if ncores <= 3:
            self.NCORES=1
            self.PROC=False
        else:
            self.PROC=True
            if self.pso_flag:
                self.NCORES=int(ncores/3)
            else:
                self.NCORES=int(ncores/2)
        # option for first-level parallelism
        #self.PROC=True
        self.SEED=seed
        #--------------------
        #Experience Replay
        #--------------------
        self.MODE='prior'; self.ALPHA0=alpha_init; self.ALPHA1=alpha_end
        #--------------------
        # SA hyperparameters
        #--------------------
        self.TMAX=Tmax; self.CHI=chi; self.REPLAY_RATE=alpha_backdoor
        #--------------------
        # ES HyperParameters
        #--------------------
        if mu:
            assert mu < npop, '--error: The value of mu ({}) MUST be less than npop ({})'.format(mu, npop)
            self.MU=mu
        else:
            self.MU=int(npop/2)
        # INDPB: per-attribute mutation probability used by the ES module.
        self.CXPB=cxpb; self.MUTPB=mutpb; self.INDPB=1.0
        #--------------------
        # PSO HyperParameters
        #--------------------
        self.C1=c1; self.C2=c2; self.SPEED_MECH=speed_mech
        #-------------------------------
        #Memory Supply for each method
        #-------------------------------
        self.ES_MEMORY=self.MU
        self.SA_MEMORY=self.NCORES
        self.PSO_MEMORY=self.NPOP-self.MU
        #--------------------
        # Fixed/Derived parameters
        #--------------------
        self.nx=len(bounds)  #all
        self.memory_size=memory_size
        self.COOLING='fast'  #SA
        self.TMIN=1  #SA
        self.LAMBDA=self.NPOP #ES
        self.NPAR=self.NPOP #PSO
        self.SMIN = 1/self.nx #ES
        self.SMAX = 0.5  #ES
        self.v0=0.1 #constant to initialize PSO speed, not very important
        #infer variable types
        self.datatype = np.array([bounds[item][0] for item in bounds])
        #mir-grid
        if "grid" in self.datatype:
            self.grid_flag=True
            self.orig_bounds=bounds  #keep original bounds for decoding
            print('--debug: grid parameter type is found in the space')
            self.bounds, self.bounds_map=encode_grid_to_discrete(self.bounds)  #encoding grid to int
            #define var_types again by converting grid to int
            self.datatype = np.array([self.bounds[item][0] for item in self.bounds])
        else:
            self.grid_flag=False
            self.bounds = bounds
        self.lb = np.array([self.bounds[item][1] for item in self.bounds])
        self.ub = np.array([self.bounds[item][2] for item in self.bounds])
    def fit_worker(self, x):
        """Evaluate the fitness of a single individual.

        When the search space contains `grid` variables, the individual is
        first decoded from the internal discrete encoding back to the
        original mixed int/float/grid space before evaluation.

        Args:
            x: Individual in the (possibly encoded) search space.

        Returns:
            The fitness value from self.FIT (already sign-flipped for
            'min' mode by globalize in __init__).
        """
        #mir-grid
        if self.grid_flag:
            #decode the individual back to the int/float/grid mixed space
            x=decode_discrete_to_grid(x,self.orig_bounds,self.bounds_map)
        fitness = self.FIT(x)
        return fitness
def evolute(self, ngen, x0=None, warmup=100, verbose=True):
"""
This function evolutes the PESA algorithm for number of generations.
:param ngen: (int) number of generations to evolute
:param x0: (list of lists) initial samples to start the replay memory (``len(x0)`` must be equal or more than ``npop``)
:param warmup: (int) number of random warmup samples to initialize the replay memory and must be equal or more than ``npop`` (only used if ``x0=None``)
:param verbose: (int) print statistics to screen, 0: no print, 1: PESA print, 2: detailed print
:return: (dict) dictionary containing major PESA search results
"""
self.verbose=verbose
self.NGEN=ngen
self.STEPS=self.NGEN*self.NPOP #all
if self.memory_size:
self.MEMORY_SIZE=self.memory_size
else:
self.MEMORY_SIZE=self.STEPS*3+1 #PESA
#-------------------------------------------------------
# Check if initial pop is provided as initial guess
#-------------------------------------------------------
if x0:
# use provided initial guess
warm=ESMod(bounds=self.bounds, fit=self.fit_worker, mu=self.MU, lambda_=self.LAMBDA, ncores=self.ncores)
x0size=len(x0)
assert x0size >= self.NPOP, 'the number of lists in x0 ({}) must be more than or equal npop ({})'.format(x0size, self.NPOP)
self.pop0=warm.init_pop(warmup=x0size, x_known=x0) #initial population for ES
else:
#create initial guess
assert warmup > self.NPOP, 'the number of warmup samples ({}) must be more than npop ({})'.format(warmup, self.NPOP)
warm=ESMod(bounds=self.bounds, fit=self.fit_worker, mu=self.MU, lambda_=self.LAMBDA, ncores=self.ncores)
self.pop0=warm.init_pop(warmup=warmup) #initial population for ES
self.partime={}
self.partime['pesa']=[]
self.partime['es']=[]
self.partime['pso']=[]
self.partime['sa']=[]
self.fit_hist=[]
#------------------------------
# Step 1: Initialize the memory
#------------------------------
self.mymemory=ExperienceReplay(size=self.MEMORY_SIZE) #memory object
xvec0, obj0=[self.pop0[item][0] for item in self.pop0], [self.pop0[item][2] for item in self.pop0] #parse the initial samples
self.mymemory.add(xvec=xvec0, obj=obj0, method=['na']*len(xvec0)) # add initial samples to the replay memory
#--------------------------------
# Step 2: Initialize all methods
#--------------------------------
# Obtain initial population for all methods
espop0, swarm0, swm_pos0, swm_fit0, local_pos, local_fit, x0, E0=self.init_guess(pop0=self.pop0)
# Initialize ES class
es=ESMod(bounds=self.bounds, fit=self.fit_worker, mu=self.MU, lambda_=self.LAMBDA, ncores=self.NCORES, indpb=self.INDPB,
cxpb=self.CXPB, mutpb=self.MUTPB, smin=self.SMIN, smax=self.SMAX)
# Initialize SA class
sa=SAMod(bounds=self.bounds, memory=self.mymemory, fit=self.fit_worker, steps=self.STEPS, ncores=self.NCORES,
chi=self.CHI, replay_rate=self.REPLAY_RATE, cooling=self.COOLING, Tmax=self.TMAX, Tmin=self.TMIN)
# Initialize PSO class (if USED)
if self.pso_flag:
pso=PSOMod(bounds=self.bounds, fit=self.fit_worker, npar=self.NPAR, swm0=[swm_pos0,swm_fit0],
ncores=self.NCORES, c1=self.C1, c2=self.C2, speed_mech=self.SPEED_MECH)
#--------------------------------
# Step 3: Initialize PESA engine
#--------------------------------
#Use initial samples as first guess for SA, ES, and PSO
self.pop_next=deepcopy(espop0) # x0 for ES
self.x_next, self.E_next=deepcopy(x0), deepcopy(E0) # x0 for SA
if self.pso_flag:
self.swm_next, self.local_pos_next, self.local_fit_next=deepcopy(swarm0), deepcopy(local_pos), deepcopy(local_fit) # x0 for PSO (if used)
self.STEP0=1 #step counter
self.ALPHA=self.ALPHA0 #set alpha to alpha0
#--------------------------------
# Step 4: PESA evolution
#--------------------------------
for gen in range(1,self.NGEN+1):
caseids=['es_gen{}_ind{}'.format(gen,ind+1) for ind in range(self.LAMBDA)] # save caseids for ES
if self.pso_flag:
pso_caseids=['pso_gen{}_par{}'.format(gen+1,ind+1) for ind in range(self.NPAR)] # save caseids for PSO
#-------------------------------------------------------------------------------------------------------------------
# Step 5: evolute all methods for 1 generation
#-------------------------------------------------------------------------------------------------------------------
#**********************************
#--Step 5A: Complete PARALEL calcs
# via multiprocess.Process
#*********************************
if self.PROC:
t0=time.time()
QSA = Queue(); QES=Queue(); QPSO=Queue()
def sa_worker():
x_new, E_new, self.T, self.acc, self.rej, self.imp, x_best, E_best, sa_partime= sa.anneal(ngen=1,npop=self.NPOP, x0=self.x_next,
E0=self.E_next, step0=self.STEP0)
QSA.put((x_new, E_new, self.T, self.acc, self.rej, self.imp, x_best, E_best, sa_partime))
def es_worker():
random.seed(self.SEED)
pop_new, es_partime=es.evolute(population=self.pop_next,ngen=1,caseids=caseids)
QES.put((pop_new, es_partime))
def pso_worker():
random.seed(self.SEED)
if gen > 1:
swm_new, swm_pos_new, swm_fit_new, pso_partime=pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next, local_fit=self.local_fit_next,
swm_best=[self.swm_pos, self.swm_fit], mu=self.MU, exstep=self.STEP0, exsteps=self.STEPS,
caseids=pso_caseids, verbose=0)
else:
swm_new, swm_pos_new, swm_fit_new, pso_partime=pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next,
local_fit=self.local_fit_next, mu=self.MU, exstep=self.STEP0, exsteps=self.STEPS,
caseids=pso_caseids, verbose=0)
QPSO.put((swm_new, swm_pos_new, swm_fit_new, pso_partime))
Process(target=sa_worker).start()
Process(target=es_worker).start()
if self.pso_flag:
Process(target=pso_worker).start()
self.swm_next, self.swm_pos, self.swm_fit, pso_partime=QPSO.get()
self.local_pos_next=[self.swm_next[key][3] for key in self.swm_next]
self.local_fit_next=[self.swm_next[key][4] for key in self.swm_next]
self.x_next, self.E_next, self.T, self.acc, self.rej, self.imp, self.x_best, self.E_best, sa_partime=QSA.get()
self.pop_next, es_partime=QES.get()
#self.partime.append(time.time()-t0)
self.partime['pesa'].append(time.time()-t0)
self.partime['pso'].append(pso_partime)
self.partime['es'].append(es_partime)
self.partime['sa'].append(sa_partime)
#*********************************
#--Step 5B: Complete Serial calcs
#*********************************
else:
self.pop_next, _ =es.evolute(population=self.pop_next,ngen=1,caseids=caseids) #ES serial
self.x_next, self.E_next, self.T, self.acc, self.rej, self.imp, self.x_best, self.E_best, _ = sa.anneal(ngen=1,npop=self.NPOP, x0=self.x_next,
E0=self.E_next, step0=self.STEP0) #SA serial
if self.pso_flag:
self.swm_next, self.swm_pos, self.swm_fit, _ =pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next,
local_fit=self.local_fit_next, exstep=self.STEP0, exsteps=self.STEPS,
caseids=pso_caseids, mu=self.MU, verbose=0)
self.local_pos_next=[self.swm_next[key][3] for key in self.swm_next]
self.local_fit_next=[self.swm_next[key][4] for key in self.swm_next]
#*********************************************************
# Step 5C: Obtain relevant statistics for this generation
#*********************************************************
self.STEP0=self.STEP0+self.NPOP #update step counter
self.inds, self.rwd=[self.pop_next[i][0] for i in self.pop_next], [self.pop_next[i][2] for i in self.pop_next] #ES statistics
self.mean_strategy=[np.mean(self.pop_next[i][1]) for i in self.pop_next] #ES statistics
if self.pso_flag:
self.pars, self.fits=[self.swm_next[i][0] for i in self.swm_next], [self.swm_next[i][2] for i in self.swm_next] #PSO statistics
self.mean_speed=[np.mean(self.swm_next[i][1]) for i in self.swm_next]
if self.verbose==2:
self.printout(mode=1, gen=gen)
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
#-----------------------------
# Step 6: Update the memory
#-----------------------------
self.memory_update()
#-----------------------------------------------------------------
# Step 7: Sample from the memory and prepare for next Generation
#-----------------------------------------------------------------
self.resample()
#--------------------------------------------------------
# Step 8: Anneal Alpha if priortized replay is used
#--------------------------------------------------------
if self.MODE=='prior': #anneal alpha between alpha0 (lower) and alpha1 (upper)
self.ALPHA=self.linear_anneal(step=self.STEP0, total_steps=self.STEPS, a0=self.ALPHA0, a1=self.ALPHA1)
#--------------------------------------------------------
# Step 9: Calculate the memory best and print PESA summary
#--------------------------------------------------------
self.pesa_best=self.mymemory.sample(batch_size=1,mode='greedy')[0] #`greedy` will sample the best in memory
self.fit_hist.append(self.pesa_best[1])
self.memory_size=len(self.mymemory.storage) #memory size so far
#--mir
if self.mode=='min':
self.fitness_best=-self.pesa_best[1]
else:
self.fitness_best=self.pesa_best[1]
#mir-grid
if self.grid_flag:
self.xbest_correct=decode_discrete_to_grid(self.pesa_best[0],self.orig_bounds,self.bounds_map)
else:
self.xbest_correct=self.pesa_best[0]
if self.verbose: #print summary data to screen
self.printout(mode=2, gen=gen)
#--mir
if self.mode=='min':
self.fit_hist=[-item for item in self.fit_hist]
return self.xbest_correct, self.fitness_best, self.fit_hist
def linear_anneal(self, step, total_steps, a0, a1):
    """Linearly interpolate a parameter between a0 and a1.

    :param step: current time step
    :param total_steps: total number of time steps
    :param a0: value of the parameter at step 0
    :param a1: value reached at (and clamped beyond) total_steps
    :return: annealed value of the parameter
    """
    progress = float(step) / total_steps
    if progress > 1.0:
        progress = 1.0  # clamp once the schedule is exhausted
    return a0 + (a1 - a0) * progress
def memory_update(self):
    """Update the replay memory with the samples of SA, ES, and PSO (if used).

    Each batch is tagged with its producing method so sampling statistics can
    be tracked per algorithm.
    """
    # SA contributes both the current chain states and the best-so-far states
    self.mymemory.add(xvec=tuple(self.x_next), obj=self.E_next, method=['sanext']*len(self.x_next))
    self.mymemory.add(xvec=tuple(self.x_best), obj=self.E_best, method=['sabest']*len(self.x_best))
    self.mymemory.add(xvec=tuple(self.inds), obj=self.rwd, method=['es']*len(self.inds))
    if self.pso_flag:
        self.mymemory.add(xvec=tuple(self.pars), obj=self.fits, method=['pso']*len(self.pars))
    #self.mymemory.remove_duplicates() #remove all duplicated samples in memory to avoid biased sampling
def resample(self):
    """Sample from the memory and prepare the next generation's populations.

    - SA: initial guesses for the parallel chains are sampled from the memory
    - ES: ES_MEMORY individuals are sampled and appended to the ES population
    - PSO: PSO_MEMORY particles are sampled and appended to the PSO swarm

    For SA: x_next and E_next participate in the next generation.
    For PSO: swm_next, local_pos_next, and local_fit_next participate.
    For ES: pop_next participates.
    """
    # --- ES: append replayed individuals after the MU survivors
    es_replay=self.mymemory.sample(batch_size=self.ES_MEMORY,mode=self.MODE,alpha=self.ALPHA)
    index=self.MU
    for sample in range(self.ES_MEMORY):
        self.pop_next[index].append(es_replay[sample][0])
        # fresh random strategy vector for the replayed individual
        self.pop_next[index].append([random.uniform(self.SMIN,self.SMAX) for _ in range(self.nx)])
        self.pop_next[index].append(es_replay[sample][1])
        index+=1
    if self.pso_flag:
        # --- PSO: truncate each particle record to [pos, speed, fit] then append replays
        pso_replay=self.mymemory.sample(batch_size=self.PSO_MEMORY,mode=self.MODE,alpha=self.ALPHA)
        for key in self.swm_next:
            del self.swm_next[key][3:]
        index=self.MU
        for sample in range(self.PSO_MEMORY):
            self.swm_next[index].append(pso_replay[sample][0])
            #self.swm_next[index].append([random.uniform(self.SPMIN,self.SPMAX) for _ in range(self.nx)])
            # initial speed proportional to the replayed position
            self.swm_next[index].append(list(self.v0*np.array(pso_replay[sample][0])))
            self.swm_next[index].append(pso_replay[sample][1])
            self.local_pos_next.append(pso_replay[sample][0])
            self.local_fit_next.append(pso_replay[sample][1])
            index+=1
    # --- SA: chain starting points are taken wholesale from the memory
    sa_replay=self.mymemory.sample(batch_size=self.SA_MEMORY,mode=self.MODE,alpha=self.ALPHA)
    self.x_next, self.E_next=[item[0] for item in sa_replay], [item[1] for item in sa_replay]
def init_guess(self, pop0):
    """Take initial guess pop0 and return initial guesses for SA, PSO, and ES.

    Inputs:
        pop0 (dict): initial population to start with for all methods; each
            value is assumed [position, strategy, fitness] — TODO confirm
    Returns:
        espop0 (dict): initial population for ES
        swarm0 (dict): initial swarm for PSO
        swm_pos (list), swm_fit (float): initial swarm-best position/fitness for PSO
        local_pos (list of lists), local_fit (list): initial local-best
            position of each particle and their fitness for PSO
        x0 (list of lists), E0 (list): initial input vectors and their fitness for SA
    """
    # sort once, best fitness first, and slice per-method head counts
    pop0=list(pop0.items())
    pop0.sort(key=lambda e: e[1][2], reverse=True)
    sorted_sa=dict(pop0[:self.NCORES])
    #sorted_dict=dict(sorted(pop0.items(), key=lambda e: e[1][2], reverse=True)[:self.NCORES]) # sort the initial samples for SA
    x0, E0=[sorted_sa[key][0] for key in sorted_sa], [sorted_sa[key][2] for key in sorted_sa] # initial guess for SA
    #sorted_pso=dict(sorted(pop0.items(), key=lambda e: e[1][2], reverse=True)[:self.NPAR]) # sort the initial samples for PSO
    #sorted_es=dict(sorted(pop0.items(), key=lambda e: e[1][2], reverse=True)[:self.LAMBDA]) # sort the initial samples for ES
    sorted_pso=dict(pop0[:self.NPAR])
    sorted_es=dict(pop0[:self.LAMBDA])
    swarm0=defaultdict(list)
    espop0=defaultdict(list)
    local_pos=[]
    local_fit=[]
    index=0
    # PSO particles: [position, speed (v0-scaled position), fitness]
    for key in sorted_pso:
        swarm0[index].append(sorted_pso[key][0])
        swarm0[index].append(list(self.v0*np.array(sorted_pso[key][0])))
        swarm0[index].append(sorted_pso[key][2])
        local_pos.append(sorted_pso[key][0])
        local_fit.append(sorted_pso[key][2])
        index+=1
    # entry 0 is the best sample (pop0 was sorted), so it seeds the swarm best
    swm_pos=swarm0[0][0]
    swm_fit=swarm0[0][2]
    index=0
    # ES individuals keep their original [position, strategy, fitness]
    for key in sorted_es:
        espop0[index].append(sorted_es[key][0])
        espop0[index].append(sorted_es[key][1])
        espop0[index].append(sorted_es[key][2])
        index+=1
    return espop0, swarm0, swm_pos, swm_fit, local_pos, local_fit, x0, E0
def printout(self, mode, gen):
    """Print statistics to screen.

    Inputs:
        mode (int): 1 to print for the individual algorithms, 2 to print for PESA
        gen (int): current generation number
    """
    if mode == 1:
        # --- per-algorithm report: ES, SA, and (optionally) PSO
        print('***********************************************************************************************')
        print('############################################################')
        print('ES step {}/{}, CX={}, MUT={}, MU={}, LAMBDA={}'.format(self.STEP0-1,self.STEPS, np.round(self.CXPB,2), np.round(self.MUTPB,2), self.MU, self.LAMBDA))
        print('############################################################')
        print('Statistics for generation {}'.format(gen))
        # fitnesses are stored negated in 'min' mode, hence the sign flip
        print('Best Fitness:', np.round(np.max(self.rwd),4) if self.mode == 'max' else -np.round(np.max(self.rwd),4))
        print('Max Strategy:', np.round(np.max(self.mean_strategy),3))
        print('Min Strategy:', np.round(np.min(self.mean_strategy),3))
        print('Average Strategy:', np.round(np.mean(self.mean_strategy),3))
        print('############################################################')
        print('************************************************************')
        print('SA step {}/{}, T={}'.format(self.STEP0-1,self.STEPS,np.round(self.T)))
        print('************************************************************')
        print('Statistics for the {} parallel chains'.format(self.NCORES))
        print('Fitness:', np.round(self.E_next,4) if self.mode == 'max' else -np.round(self.E_next,4))
        print('Acceptance Rate (%):', self.acc)
        print('Rejection Rate (%):', self.rej)
        print('Improvment Rate (%):', self.imp)
        print('************************************************************')
        if self.pso_flag:
            print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
            print('PSO step {}/{}, C1={}, C2={}, Particles={}'.format(self.STEP0-1,self.STEPS, np.round(self.C1,2), np.round(self.C2,2), self.NPAR))
            print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
            print('Statistics for generation {}'.format(gen))
            print('Best Swarm Fitness:', np.round(self.swm_fit,4) if self.mode == 'max' else -np.round(self.swm_fit,4))
            # grid mode stores encoded positions; decode for display
            print('Best Swarm Position:', self.swm_pos if not self.grid_flag else decode_discrete_to_grid(self.swm_pos,self.orig_bounds,self.bounds_map))
            print('Max Speed:', np.round(np.max(self.mean_speed),3))
            print('Min Speed:', np.round(np.min(self.mean_speed),3))
            print('Average Speed:', np.round(np.mean(self.mean_speed),3))
            print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
    if mode == 2:
        # --- combined PESA summary for the generation
        print('------------------------------------------------------------')
        print('PESA step {}/{}, Ncores={}'.format(self.STEP0-1,self.STEPS, self.ncores))
        print('------------------------------------------------------------')
        print('PESA statistics for generation {}'.format(gen))
        print('Best Fitness:', self.pesa_best[1] if self.mode == 'max' else -self.pesa_best[1])
        print('Best Individual:', self.xbest_correct)
        print('ALPHA:', np.round(self.ALPHA,3))
        print('Memory Size:', self.memory_size)
        print('------------------------------------------------------------')
        print('***********************************************************************************************')
oandastore.py | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
from datetime import datetime, timedelta
import time as _time
import json
import threading
import oandapy
import requests # oandapy depdendency
import backtrader as bt
from backtrader.metabase import MetaParams
from backtrader.utils.py3 import queue, with_metaclass
from backtrader.utils import AutoDict
# Extend the exceptions to support extra cases
class OandaRequestError(oandapy.OandaError):
    """Error payload for a failed HTTP request (network level).

    Uses pseudo-HTTP code 599 so callers can treat it like an API error.
    """
    def __init__(self):
        er = dict(code=599, message='Request Error', description='')
        # FIX: super(self.__class__, self) recurses infinitely if this class
        # is ever subclassed; always name the class explicitly
        super(OandaRequestError, self).__init__(er)
class OandaStreamError(oandapy.OandaError):
    """Error payload for a failed/broken streaming connection (code 598)."""
    def __init__(self, content=''):
        er = dict(code=598, message='Failed Streaming', description=content)
        # FIX: super(self.__class__, self) recurses infinitely under
        # subclassing; name the class explicitly
        super(OandaStreamError, self).__init__(er)
class OandaTimeFrameError(oandapy.OandaError):
    """Error payload for a timeframe/compression with no Oanda granularity.

    ``content`` now defaults to '' — ``_t_candles`` instantiates this class
    with no arguments, which previously raised TypeError. The argument is
    still accepted (and still unused in the description) for compatibility.
    """
    def __init__(self, content=''):
        er = dict(code=597, message='Not supported TimeFrame', description='')
        # FIX: super(self.__class__, self) recurses infinitely under
        # subclassing; name the class explicitly
        super(OandaTimeFrameError, self).__init__(er)
class OandaNetworkError(oandapy.OandaError):
    """Error payload for a generic network failure (code 596)."""
    def __init__(self):
        er = dict(code=596, message='Network Error', description='')
        # FIX: super(self.__class__, self) recurses infinitely under
        # subclassing; name the class explicitly
        super(OandaNetworkError, self).__init__(er)
class API(oandapy.API):
    """oandapy.API whose request() returns error payloads instead of printing
    or raising, so callers can handle failures as data."""

    def request(self, endpoint, method='GET', params=None):
        """Issue an HTTP request and return the decoded JSON payload.

        Overridden to make something sensible out of a
        requests.RequestException rather than simply issuing a print(str(e)).
        Network failures and HTTP >= 400 responses are converted to
        ``error_response`` dicts (changed from raise to return).
        """
        url = '%s/%s' % (self.api_url, endpoint)

        method = method.lower()
        params = params or {}

        func = getattr(self.client, method)

        request_args = {}
        if method == 'get':
            request_args['params'] = params
        else:
            request_args['data'] = params

        # Added the try block
        try:
            response = func(url, **request_args)
        except requests.RequestException:  # FIX: exception object was unused
            return OandaRequestError().error_response

        content = response.content.decode('utf-8')
        # NOTE(review): assumes every response body is JSON — confirm for all
        # endpoints used
        content = json.loads(content)

        # error message
        if response.status_code >= 400:
            # changed from raise to return
            return oandapy.OandaError(content).error_response

        return content
class Streamer(oandapy.Streamer):
    """oandapy.Streamer that forwards ticks/transactions to a queue and turns
    stream failures into error payloads on that same queue."""

    def __init__(self, q, headers=None, *args, **kwargs):
        # Override to provide headers, which is in the standard API interface
        super(Streamer, self).__init__(*args, **kwargs)

        if headers:
            self.client.headers.update(headers)

        self.q = q  # queue shared with the consuming thread

    def run(self, endpoint, params=None):
        """Stream from *endpoint* until disconnected or an error occurs.

        Override to better manage exceptions. Kept as much as possible close
        to the original.
        """
        self.connected = True

        params = params or {}

        ignore_heartbeat = None
        if 'ignore_heartbeat' in params:
            ignore_heartbeat = params['ignore_heartbeat']

        request_args = {}
        request_args['params'] = params

        url = '%s/%s' % (self.api_url, endpoint)

        while self.connected:
            # Added exception control here
            try:
                response = self.client.get(url, **request_args)
            except requests.RequestException:  # FIX: binding `e` was unused
                self.q.put(OandaRequestError().error_response)
                break

            if response.status_code != 200:
                self.on_error(response.content)
                break  # added break here

            # Changed chunk_size 90 -> None
            try:
                for line in response.iter_lines(chunk_size=None):
                    if not self.connected:
                        break

                    if line:
                        data = json.loads(line.decode('utf-8'))
                        if not (ignore_heartbeat and 'heartbeat' in data):
                            self.on_success(data)

            # FIX: was a bare `except:` which also swallows KeyboardInterrupt
            # and SystemExit; socket.error (the case seen) is an Exception
            except Exception:
                self.q.put(OandaStreamError().error_response)
                break

    def on_success(self, data):
        """Forward tick/transaction payloads to the queue."""
        if 'tick' in data:
            self.q.put(data['tick'])
        elif 'transaction' in data:
            self.q.put(data['transaction'])

    def on_error(self, data):
        """Disconnect and report a streaming error through the queue."""
        self.disconnect()
        self.q.put(OandaStreamError(data).error_response)
class MetaSingleton(MetaParams):
    '''Metaclass that turns each class using it into a lazy singleton.'''

    def __init__(cls, name, bases, dct):
        super(MetaSingleton, cls).__init__(name, bases, dct)
        cls._singleton = None  # created on first instantiation

    def __call__(cls, *args, **kwargs):
        # Build the single instance once; afterwards always hand back the
        # same object regardless of the arguments supplied
        if cls._singleton is None:
            instance = super(MetaSingleton, cls).__call__(*args, **kwargs)
            cls._singleton = instance

        return cls._singleton
class OandaStore(with_metaclass(MetaSingleton, object)):
    '''Singleton class wrapping to control the connections to Oanda.

    Params:

      - ``token`` (default: ``''``): API access token

      - ``account`` (default: ``''``): account id

      - ``practice`` (default: ``False``): use the test environment

      - ``account_tmout`` (default: ``10.0``): refresh period for account
        value/cash refresh
    '''

    BrokerCls = None  # broker class will autoregister
    DataCls = None  # data class will auto register

    params = (
        ('token', ''),
        ('account', ''),
        ('practice', False),
        ('account_tmout', 10.0),  # account balance refresh timeout
    )

    # epoch used to convert datetimes to the UNIX timestamps Oanda expects
    _DTEPOCH = datetime(1970, 1, 1)
    # environment names understood by oandapy
    _ENVPRACTICE = 'practice'
    _ENVLIVE = 'live'
@classmethod
def getdata(cls, *args, **kwargs):
    '''Returns ``DataCls`` with args, kwargs'''
    data = cls.DataCls(*args, **kwargs)
    return data
@classmethod
def getbroker(cls, *args, **kwargs):
    '''Returns broker with *args, **kwargs from registered ``BrokerCls``'''
    broker = cls.BrokerCls(*args, **kwargs)
    return broker
def __init__(self):
    """Set up notification/order bookkeeping and create the REST API client."""
    super(OandaStore, self).__init__()

    self.notifs = collections.deque()  # store notifications for cerebro
    self._env = None  # reference to cerebro for general notifications
    self.broker = None  # broker instance
    self.datas = list()  # datas that have registered over start

    self._orders = collections.OrderedDict()  # map order.ref to oid
    self._ordersrev = collections.OrderedDict()  # map oid to order.ref
    # transactions that arrived before their oid was registered
    self._transpend = collections.defaultdict(collections.deque)

    self._oenv = self._ENVPRACTICE if self.p.practice else self._ENVLIVE
    self.oapi = API(environment=self._oenv,
                    access_token=self.p.token,
                    headers={'X-Accept-Datetime-Format': 'UNIX'})

    self._cash = 0.0
    self._value = 0.0
    self._evt_acct = threading.Event()  # set once account values first fetched
def start(self, data=None, broker=None):
    """Register a data feed or a broker and start the matching machinery."""
    # Datas require some processing to kickstart data reception
    if data is None and broker is None:
        # NOTE(review): assigns `cash`, not `_cash` — confirm intended
        self.cash = None
        return

    if data is not None:
        self._env = data._env
        # For datas simulate a queue with None to kickstart co
        self.datas.append(data)

        if self.broker is not None:
            self.broker.data_started(data)

    elif broker is not None:
        self.broker = broker
        self.streaming_events()
        self.broker_threads()
def stop(self):
    """Signal the broker worker threads to terminate via their queues."""
    # signal end of thread
    if self.broker is not None:
        self.q_ordercreate.put(None)
        self.q_orderclose.put(None)
        self.q_account.put(None)
def put_notification(self, msg, *args, **kwargs):
    '''Queue a notification (message plus its call arguments) for cerebro.'''
    entry = (msg, args, kwargs)
    self.notifs.append(entry)
def get_notifications(self):
    '''Return the pending "store" notifications'''
    self.notifs.append(None)  # put a mark / threads could still append
    # drain everything up to (not including) the mark just placed
    return [x for x in iter(self.notifs.popleft, None)]
# Oanda supported granularities: (timeframe, compression) -> granularity code
_GRANULARITIES = {
    (bt.TimeFrame.Seconds, 5): 'S5',
    (bt.TimeFrame.Seconds, 10): 'S10',
    (bt.TimeFrame.Seconds, 15): 'S15',
    (bt.TimeFrame.Seconds, 30): 'S30',
    (bt.TimeFrame.Minutes, 1): 'M1',
    (bt.TimeFrame.Minutes, 2): 'M2',  # FIX: was 'M3' (copy-paste)
    (bt.TimeFrame.Minutes, 3): 'M3',
    (bt.TimeFrame.Minutes, 4): 'M4',
    (bt.TimeFrame.Minutes, 5): 'M5',
    (bt.TimeFrame.Minutes, 10): 'M10',  # FIX: was 'M5'
    (bt.TimeFrame.Minutes, 15): 'M15',  # FIX: was 'M5'
    (bt.TimeFrame.Minutes, 30): 'M30',  # FIX: was 'M5'
    (bt.TimeFrame.Minutes, 60): 'H1',
    (bt.TimeFrame.Minutes, 120): 'H2',
    (bt.TimeFrame.Minutes, 180): 'H3',
    (bt.TimeFrame.Minutes, 240): 'H4',
    (bt.TimeFrame.Minutes, 360): 'H6',
    (bt.TimeFrame.Minutes, 480): 'H8',
    (bt.TimeFrame.Days, 1): 'D',
    (bt.TimeFrame.Weeks, 1): 'W',
    (bt.TimeFrame.Months, 1): 'M',
}
def get_positions(self):
    """Return the list of open positions, or None on API failure."""
    try:
        positions = self.oapi.get_positions(self.p.account)
    except (oandapy.OandaError, OandaRequestError,):
        return None

    poslist = positions.get('positions', [])
    return poslist
def get_granularity(self, timeframe, compression):
    '''Map (timeframe, compression) to an Oanda granularity code, or None.'''
    key = (timeframe, compression)
    return self._GRANULARITIES.get(key)
def get_instrument(self, dataname):
    """Return the instrument description dict for *dataname*, or None."""
    try:
        insts = self.oapi.get_instruments(self.p.account,
                                          instruments=dataname)
    except (oandapy.OandaError, OandaRequestError,):
        return None

    # empty answer -> [{}] fallback -> `or None` yields None
    i = insts.get('instruments', [{}])
    return i[0] or None
def streaming_events(self, tmout=None):
    """Start the events streamer and its listener thread; return the queue."""
    q = queue.Queue()
    kwargs = {'q': q, 'tmout': tmout}

    # consumer: dispatches transactions to _transaction
    t = threading.Thread(target=self._t_streaming_listener, kwargs=kwargs)
    t.daemon = True
    t.start()

    # producer: runs the Oanda events stream into q
    t = threading.Thread(target=self._t_streaming_events, kwargs=kwargs)
    t.daemon = True
    t.start()
    return q
def _t_streaming_listener(self, q, tmout=None):
    """Worker thread: forward every streamed transaction to _transaction."""
    while True:
        trans = q.get()
        self._transaction(trans)
def _t_streaming_events(self, q, tmout=None):
    """Worker thread: run the events streamer (optionally after a delay)."""
    if tmout is not None:
        _time.sleep(tmout)

    streamer = Streamer(q,
                        environment=self._oenv,
                        access_token=self.p.token,
                        headers={'X-Accept-Datetime-Format': 'UNIX'})

    streamer.events(ignore_heartbeat=False)
def candles(self, dataname, dtbegin, dtend, timeframe, compression,
            candleFormat, includeFirst):
    """Fetch historical candles asynchronously.

    Returns the queue that will receive candle dicts and finally ``{}`` as
    an end-of-transmission marker (see _t_candles).
    """
    # snapshot all parameters to forward them to the worker thread
    kwargs = locals().copy()
    kwargs.pop('self')
    kwargs['q'] = q = queue.Queue()
    t = threading.Thread(target=self._t_candles, kwargs=kwargs)
    t.daemon = True
    t.start()

    return q
def _t_candles(self, dataname, dtbegin, dtend, timeframe, compression,
               candleFormat, includeFirst, q):
    """Worker thread: fetch historical candles and push them onto *q*."""
    granularity = self.get_granularity(timeframe, compression)
    if granularity is None:
        # NOTE(review): as defined, OandaTimeFrameError.__init__ requires a
        # `content` argument, so this no-arg call would raise TypeError —
        # confirm / give the parameter a default
        e = OandaTimeFrameError()
        q.put(e.error_response)
        return

    dtkwargs = {}
    if dtbegin is not None:
        dtkwargs['start'] = int((dtbegin - self._DTEPOCH).total_seconds())

    if dtend is not None:
        dtkwargs['end'] = int((dtend - self._DTEPOCH).total_seconds())

    try:
        response = self.oapi.get_history(instrument=dataname,
                                         granularity=granularity,
                                         candleFormat=candleFormat,
                                         **dtkwargs)
    except oandapy.OandaError as e:
        q.put(e.error_response)
        q.put(None)
        return

    for candle in response.get('candles', []):
        q.put(candle)

    q.put({})  # end of transmission
def streaming_prices(self, dataname, tmout=None):
    """Start streaming tick prices for *dataname*; return the tick queue."""
    q = queue.Queue()
    kwargs = {'q': q, 'dataname': dataname, 'tmout': tmout}
    t = threading.Thread(target=self._t_streaming_prices, kwargs=kwargs)
    t.daemon = True
    t.start()
    return q
def _t_streaming_prices(self, dataname, q, tmout):
    """Worker thread: run the rates streamer (optionally after a delay)."""
    if tmout is not None:
        _time.sleep(tmout)

    streamer = Streamer(q, environment=self._oenv,
                        access_token=self.p.token,
                        headers={'X-Accept-Datetime-Format': 'UNIX'})

    streamer.rates(self.p.account, instruments=dataname)
def get_cash(self):
    """Return the last known available margin (refreshed by _t_account)."""
    return self._cash
def get_value(self):
    """Return the last known account balance (refreshed by _t_account)."""
    return self._value
# Map backtrader execution types to Oanda v1 order types. StopLimit maps to
# 'stop' because it is emulated with lowerBound/upperBound in order_create.
_ORDEREXECS = {
    bt.Order.Market: 'market',
    bt.Order.Limit: 'limit',
    bt.Order.Stop: 'stop',
    bt.Order.StopLimit: 'stop',
}
def broker_threads(self):
    """Spawn the account-refresh, order-create and order-cancel workers."""
    self.q_account = queue.Queue()
    self.q_account.put(True)  # force an immediate update
    t = threading.Thread(target=self._t_account)
    t.daemon = True
    t.start()

    self.q_ordercreate = queue.Queue()
    t = threading.Thread(target=self._t_order_create)
    t.daemon = True
    t.start()

    self.q_orderclose = queue.Queue()
    t = threading.Thread(target=self._t_order_cancel)
    t.daemon = True
    t.start()

    # Wait once for the values to be set
    self._evt_acct.wait(self.p.account_tmout)
def _t_account(self):
    """Worker thread: refresh _cash/_value every account_tmout seconds.

    A None on q_account ends the thread; a queue timeout triggers a refresh.
    """
    while True:
        try:
            msg = self.q_account.get(timeout=self.p.account_tmout)
            if msg is None:
                break  # end of thread
        except queue.Empty:  # tmout -> time to refresh
            pass

        try:
            accinfo = self.oapi.get_account(self.p.account)
        except Exception as e:
            self.put_notification(e)
            continue

        try:
            self._cash = accinfo['marginAvail']
            self._value = accinfo['balance']
        except KeyError:
            pass

        self._evt_acct.set()  # unblock broker_threads' initial wait
def order_create(self, order, stopside=None, takeside=None, **kwargs):
    """Translate a backtrader order into Oanda kwargs and queue its creation.

    Returns the order unchanged; the API call happens in _t_order_create.
    ``stopside``/``takeside`` optionally attach stopLoss/takeProfit prices.
    """
    okwargs = dict()
    okwargs['instrument'] = order.data._dataname
    okwargs['units'] = abs(order.created.size)  # Oanda wants size + side split
    okwargs['side'] = 'buy' if order.isbuy() else 'sell'
    okwargs['type'] = self._ORDEREXECS[order.exectype]
    if order.exectype != bt.Order.Market:
        okwargs['price'] = order.created.price
        if order.valid is None:
            # 1 year and datetime.max fail ... 1 month works
            valid = datetime.utcnow() + timedelta(days=30)
        else:
            valid = order.data.num2date(order.valid)
        # To timestamp with seconds precision
        okwargs['expiry'] = int((valid - self._DTEPOCH).total_seconds())

    if order.exectype == bt.Order.StopLimit:
        # emulate stop-limit with a bounded stop order
        okwargs['lowerBound'] = order.created.pricelimit
        okwargs['upperBound'] = order.created.pricelimit

    if order.exectype == bt.Order.StopTrail:
        okwargs['trailingStop'] = order.trailamount

    if stopside is not None:
        okwargs['stopLoss'] = stopside.price

    if takeside is not None:
        okwargs['takeProfit'] = takeside.price

    okwargs.update(**kwargs)  # anything from the user

    self.q_ordercreate.put((order.ref, okwargs,))
    return order
# creation-response fields carrying a single id vs a list of sub-entries
_OIDSINGLE = ['orderOpened', 'tradeOpened', 'tradeReduced']
_OIDMULTIPLE = ['tradesClosed']
def _t_order_create(self):
    """Worker thread: pull (order.ref, kwargs) off q_ordercreate and create
    the orders with the API, wiring up oid <-> order.ref mappings."""
    while True:
        msg = self.q_ordercreate.get()
        if msg is None:
            break  # end-of-thread marker from stop()

        oref, okwargs = msg
        try:
            o = self.oapi.create_order(self.p.account, **okwargs)
        except Exception as e:
            self.put_notification(e)
            self.broker._reject(oref)
            # NOTE(review): this `return` ends the worker thread, so any
            # later queued orders are never processed — confirm `continue`
            # was not intended
            return

        # Ids are delivered in different fields and all must be fetched to
        # match them (as executions) to the order generated here
        oids = list()
        for oidfield in self._OIDSINGLE:
            if oidfield in o and 'id' in o[oidfield]:
                oids.append(o[oidfield]['id'])

        for oidfield in self._OIDMULTIPLE:
            if oidfield in o:
                for suboidfield in o[oidfield]:
                    oids.append(suboidfield['id'])

        if not oids:
            self.broker._reject(oref)
            # NOTE(review): same thread-ending concern as above
            return

        self._orders[oref] = oids[0]
        self.broker._submit(oref)
        if okwargs['type'] == 'market':
            self.broker._accept(oref)  # taken immediately

        for oid in oids:
            self._ordersrev[oid] = oref  # maps ids to backtrader order

            # A transaction may have happened and was stored
            tpending = self._transpend[oid]
            tpending.append(None)  # eom marker
            while True:
                trans = tpending.popleft()
                if trans is None:
                    break
                self._process_transaction(oid, trans)
def order_cancel(self, order):
    """Queue a cancellation request for *order*; return the order."""
    self.q_orderclose.put(order.ref)
    return order
def _t_order_cancel(self):
    """Worker thread: pull order refs off q_orderclose and cancel them.

    A None on the queue ends the thread (see stop()).
    """
    while True:
        oref = self.q_orderclose.get()
        if oref is None:
            break

        oid = self._orders.get(oref, None)
        if oid is None:
            continue  # the order is no longer there

        try:
            self.oapi.close_order(self.p.account, oid)
        except Exception as e:
            # FIX (was a FIXME): previously the failure was swallowed with a
            # bare `continue`; notify it so the user learns the order was
            # not cancelled
            self.put_notification(e)
            continue

        self.broker._cancel(oref)
# streamed transaction types signalling a pending order was created
_X_ORDER_CREATE = ('STOP_ORDER_CREATE',
                   'LIMIT_ORDER_CREATE', 'MARKET_IF_TOUCHED_ORDER_CREATE',)
def _transaction(self, trans):
    """Route a streamed transaction to its order, or park it as pending.

    Invoked from Streaming Events. May actually receive an event for an
    oid which has not yet been returned after creating an order. Hence
    store if not yet seen, else forward to processer.
    """
    ttype = trans['type']

    if ttype == 'MARKET_ORDER_CREATE':
        # market orders carry the trade id under tradeReduced or tradeOpened
        try:
            oid = trans['tradeReduced']['id']
        except KeyError:
            try:
                oid = trans['tradeOpened']['id']
            except KeyError:
                return  # cannot do anything else

    elif ttype in self._X_ORDER_CREATE:
        oid = trans['id']
    elif ttype == 'ORDER_FILLED':
        oid = trans['orderId']

    elif ttype == 'ORDER_CANCEL':
        oid = trans['orderId']

    elif ttype == 'TRADE_CLOSE':
        oid = trans['id']
        pid = trans['tradeId']
        # `and False` deliberately disables this branch — see comment below
        if pid in self._orders and False:  # Know nothing about trade
            return  # can do nothing

        # Skip above - at the moment do nothing
        # Received directly from an event in the WebGUI for example which
        # closes an existing position related to order with id -> pid
        # COULD BE DONE: Generate a fake counter order to gracefully
        # close the existing position
        msg = ('Received TRADE_CLOSE for unknown order, possibly generated'
               ' over a different client or GUI')
        self.put_notification(msg, trans)
        return

    else:  # Go aways gracefully
        try:
            oid = trans['id']
        except KeyError:
            oid = 'None'

        msg = 'Received {} with oid {}. Unknown situation'
        msg = msg.format(ttype, oid)
        self.put_notification(msg, trans)
        return

    try:
        oref = self._ordersrev[oid]
        self._process_transaction(oid, trans)
    except KeyError:  # not yet seen, keep as pending
        self._transpend[oid].append(trans)
# streamed transaction types signalling an execution (fill)
_X_ORDER_FILLED = ('MARKET_ORDER_CREATE',
                   'ORDER_FILLED', 'TAKE_PROFIT_FILLED',
                   'STOP_LOSS_FILLED', 'TRAILING_STOP_FILLED',)
def _process_transaction(self, oid, trans):
    """Translate an Oanda transaction into broker notifications for *oid*.

    Pops the oid -> order.ref mapping; for creations the mapping is restored
    so the later fill/cancel can still be matched.
    """
    try:
        oref = self._ordersrev.pop(oid)
    except KeyError:
        return  # transaction for an order not managed here

    ttype = trans['type']

    if ttype in self._X_ORDER_FILLED:
        size = trans['units']
        if trans['side'] == 'sell':
            size = -size  # backtrader convention: negative size for sells
        price = trans['price']
        self.broker._fill(oref, size, price, ttype=ttype)

    elif ttype in self._X_ORDER_CREATE:
        self.broker._accept(oref)
        self._ordersrev[oid] = oref  # keep mapping alive for the fill/cancel

    # FIX: was `ttype in 'ORDER_CANCEL'`, a substring test on a string, not
    # an equality check
    elif ttype == 'ORDER_CANCEL':
        reason = trans['reason']
        if reason == 'ORDER_FILLED':
            pass  # individual execs have done the job
        elif reason == 'TIME_IN_FORCE_EXPIRED':
            self.broker._expire(oref)
        elif reason == 'CLIENT_REQUEST':
            self.broker._cancel(oref)
        else:  # default action ... if nothing else
            self.broker._reject(oref)
|
cattleman.py | #!/usr/bin/env python3
import os
import sys
import requests
import logging
import boto3
import pprint
import threading
import socket
from requests.auth import HTTPBasicAuth
from botocore.exceptions import ClientError
from time import sleep
class cattleman(object):
    """Scales an AWS autoscaling group up when every Rancher host is low on
    memory.

    NOTE: relies on a module-level ``logger`` that is only created under
    ``__main__`` — importing this class elsewhere would hit a NameError on
    the first log call.
    """

    def __init__(self):
        # all configuration comes exclusively from environment variables
        self.api_user = os.getenv('RANCHER_USER')
        self.api_key = os.getenv('RANCHER_KEY')
        self.api_url = os.getenv('RANCHER_URL')
        self.asg_name = os.getenv('ASG_NAME')
        if not self.api_user or not self.api_key or not self.api_url or not self.asg_name:
            logger.error("RANCHER_USER, RANCHER_KEY, RANCHER_URL and ASG_NAME are required env vars")
            sys.exit(1)
        self.api_project = self.get_project(os.getenv('RANCHER_ENV', 'Default'))

    def get_project(self, rancher_env):
        """Return the Rancher project id whose name equals *rancher_env*.

        Uses for-else: if no project matches, an error is logged and None is
        returned implicitly.
        """
        projects = requests.get('{0}/v1/projects'.format(self.api_url),
                                auth=HTTPBasicAuth(self.api_user, self.api_key))
        for project in projects.json()['data']:
            if rancher_env == project['name']:
                return project['id']
        else:
            logger.error("Specificed rancher environment or 'Default' does not exist")

    def test_connection(self):
        """Log whether the Rancher API root answers with HTTP 200."""
        logger.info("Connecting to the Rancher API...")
        connection = requests.get('{0}/v1/'.format(self.api_url),
                                  auth=HTTPBasicAuth(self.api_user, self.api_key))
        if connection.status_code == 200:
            logger.info("Connected to the Rancher API")
        else:
            logger.error(connection.json())

    def get_all_memory_info(self):
        """Return {host_id: memoryInfo dict} for every host in the project."""
        memory = {}
        hosts = requests.get('{0}/v1/projects/{1}/hosts/'.format(self.api_url,
                                                                 self.api_project),
                             auth=HTTPBasicAuth(self.api_user, self.api_key))
        for host in hosts.json()['data']:
            memory[host['id']] = host['info']['memoryInfo']
        return memory

    def decider(self):
        """Trigger a scale-up iff every host has <= 35% memory available."""
        memory = self.get_all_memory_info()
        logger.debug("Memory Dict:\n" + pprint.pformat(memory))
        hosts = len(memory.keys())
        low_mem = []
        for host, mem in memory.items():
            # assumes memAvailable and memTotal share a unit — TODO confirm
            if mem['memAvailable'] / mem['memTotal'] <= 0.35:
                low_mem.append(host)
        if len(low_mem) == hosts:
            logger.info("Trigger Scale Up")
            self.scale_up()
        else:
            logger.info("Doing nothing..")

    def scale_up(self):
        """Increase the ASG desired capacity by one, honoring cooldown."""
        client = boto3.client('autoscaling')
        current_capacity = client.describe_auto_scaling_groups(AutoScalingGroupNames=[self.asg_name])['AutoScalingGroups'][0]['DesiredCapacity']
        desired_capacity = current_capacity + 1
        try:
            response = client.set_desired_capacity(
                AutoScalingGroupName=self.asg_name,
                DesiredCapacity=desired_capacity,
                HonorCooldown=True)
        except ClientError as e:
            # with HonorCooldown=True the call raises while cooling down
            logger.error("Cooldown in effect, no action taken")
def ping(delay, run_event):
    """TCP health-check listener: answer b'PONG' to each connection on :1313.

    Args:
        delay: unused; kept for signature parity with run_cattleman.
        run_event: threading.Event — the loop runs while it is set.

    NOTE(review): sock.accept() blocks, so clearing run_event only stops the
    loop after the next connection arrives — confirm that is acceptable.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_address = ('0.0.0.0', 1313)
    logger.debug('starting up on {0}'.format(server_address))
    sock.bind(server_address)
    sock.listen(1)
    while run_event.is_set():
        logger.debug('waiting for a connection')
        connection, client_address = sock.accept()
        try:
            logger.debug('client connected: {0}'.format(client_address))
            message = b'PONG'
            connection.sendall(message)
        finally:
            # FIX: the original closed the connection in the try body AND
            # here; one close in finally covers both paths
            connection.close()
def run_cattleman(delay, run_event):
    """Main worker: run the decider once per minute while run_event is set.

    ``delay`` is currently unused.
    """
    app = cattleman()
    app.test_connection()
    while run_event.is_set():
        app.decider()
        logger.info('Sleeping 1 minute')
        sleep(60)
if __name__ == "__main__":
    # setup_logging: console handler at INFO with timestamped format
    logger = logging.getLogger('Cattleman')
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    ch.setLevel(logging.INFO)
    logger.addHandler(ch)
    logger.debug('Logging Started')

    # shared shutdown flag for both worker threads
    run_event = threading.Event()
    run_event.set()
    jobs = []
    main_thread = threading.Thread(target=run_cattleman, args=(1, run_event))
    jobs.append(main_thread)
    status_thread = threading.Thread(target=ping, args=(1, run_event))
    jobs.append(status_thread)

    try:
        for job in jobs:
            job.start()
    # NOTE(review): the try only covers start(); once it returns, the main
    # thread exits and a later Ctrl-C is unlikely to reach this handler while
    # the non-daemon threads keep the process alive — confirm shutdown path
    except (KeyboardInterrupt, SystemExit):
        run_event.clear()
        for job in jobs:
            job.join()
|
weights_server.py | import socket
from multiprocessing import Process, Value, Lock
import os
def worker(socket, checkpoint_dir, dqn_checkpoint, im_checkpoint):
    """Stream the latest checkpoint files to each connecting client, forever.

    For every accepted connection, sends the file size (as a utf-8 decimal
    string) followed by the contents of the newest DQN checkpoint, then the
    newest intrinsic-motivation (IM) checkpoint.

    NOTE: ``socket`` here is the already-listening server socket shared by
    all worker processes, not the socket module.
    """
    import datetime
    while True:
        try:
            client, address = socket.accept()
            with dqn_checkpoint.get_lock():
                dqn_path = checkpoint_dir + f"/agent57_{dqn_checkpoint.value}_dqn.h5"
                print(f"\nSending {dqn_path}", end='')
                with open(dqn_path, 'rb') as file:
                    client.send(bytes(str(os.path.getsize(dqn_path)), 'utf-8'))
                    client.sendall(file.read())
                print(f"\rSent {dqn_path}\n {datetime.datetime.now()}\n")
            with im_checkpoint.get_lock():
                # BUG FIX: the IM path was built from dqn_checkpoint.value
                # while holding im_checkpoint's lock; if the two counters
                # diverge this pointed at a nonexistent file. Use the IM
                # counter for the IM checkpoint.
                im_path = checkpoint_dir + f"/agent57_{im_checkpoint.value}_im.h5"
                print(f"\nSending {im_path}", end='')
                with open(im_path, 'rb') as file:
                    client.send(bytes(str(os.path.getsize(im_path)), 'utf-8'))
                    client.sendall(file.read())
                client.close()
                print(f"\rSent {im_path}\n {datetime.datetime.now()}\n")
        except Exception as e:
            print(e)
def _latest_checkpoint_indices(checkpoint_dir, dqn, im):
    """Return the highest (dqn, im) agent57 checkpoint indices found on disk,
    never lower than the passed-in starting values."""
    for obj in os.listdir(checkpoint_dir):
        if os.path.isfile(checkpoint_dir + "/" + obj):
            if obj.endswith(".h5"):
                tokens = obj.split("_")
                if tokens[0] == 'agent57':
                    if tokens[-1] == "dqn.h5":
                        dqn = max(dqn, int(tokens[1]))
                    elif tokens[-1] == "im.h5":
                        im = max(im, int(tokens[1]))
    return dqn, im


def server(params, checkpoint_dir, running_independently=True, dqn_checkpoint=None, im_checkpoint=None):
    """Serve agent57 checkpoint files over TCP to remote actors.

    Binds to params['Misc']['weights_ip']:weights_port, spawns
    params['Misc']['weights_workers'] daemon worker processes that stream
    the newest DQN/IM checkpoints to each client, and (when running
    independently) rescans checkpoint_dir every download_period seconds.

    dqn_checkpoint / im_checkpoint are shared multiprocessing counters
    holding the newest checkpoint index; fresh zero-valued ones are
    created when not supplied.
    """
    import time
    # BUG FIX: the original used Value('i', 0) directly as default
    # arguments. Defaults are evaluated once at import time, so every call
    # (and the importing process) shared the same Value objects. Create
    # them per call instead.
    if dqn_checkpoint is None:
        dqn_checkpoint = Value('i', 0)
    if im_checkpoint is None:
        im_checkpoint = Value('i', 0)
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serversocket.bind((params['Misc']['weights_ip'], params['Misc']['weights_port']))
    serversocket.listen(5)
    dqn, im = _latest_checkpoint_indices(checkpoint_dir, -1, -1)
    if dqn < 0 or im < 0:
        raise Exception("cannot find valid checkpoints")
    with dqn_checkpoint.get_lock():
        dqn_checkpoint.value = dqn
    with im_checkpoint.get_lock():
        im_checkpoint.value = im
    workers = [Process(target=worker, args=(serversocket, checkpoint_dir, dqn_checkpoint, im_checkpoint)) for i in
               range(params['Misc']['weights_workers'])]
    for p in workers:
        p.daemon = True
        p.start()
    download_period = params['Misc']['download_period']
    while running_independently:
        time.sleep(download_period)
        dqn, im = _latest_checkpoint_indices(checkpoint_dir, dqn, im)
        with dqn_checkpoint.get_lock():
            dqn_checkpoint.value = dqn
        with im_checkpoint.get_lock():
            im_checkpoint.value = im
    # Keep the daemonic worker processes alive without pegging a CPU core.
    # BUG FIX: the original used `while True: pass`, a 100% CPU busy-wait.
    while True:
        time.sleep(3600)
if __name__ == "__main__":
    import yaml
    # Load runtime configuration (weights IP/port, worker count, download
    # period) from the repository-level params file.
    with open('../params.yml', 'r') as file:
        params = yaml.full_load(file)
    # server = WeightsServer(params,"../weights/agent57_{}.h5")
    # server.run()
    server(params, "../weights/checkpoints")
|
exposition.py | #!/usr/bin/python
from __future__ import unicode_literals
import base64
from contextlib import closing
import os
import socket
import sys
import threading
from wsgiref.simple_server import make_server, WSGIRequestHandler
from .openmetrics import exposition as openmetrics
from .registry import REGISTRY
from .utils import floatToGoString
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from urllib2 import build_opener, Request, HTTPHandler
from urllib import quote_plus
from urlparse import parse_qs, urlparse
except ImportError:
# Python 3
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.request import build_opener, Request, HTTPHandler
from urllib.parse import quote_plus, parse_qs, urlparse
CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8')
'''Content type of the latest text format'''
PYTHON26_OR_OLDER = sys.version_info < (2, 7)
def make_wsgi_app(registry=REGISTRY):
    '''Create a WSGI app which serves the metrics from a registry.'''

    def prometheus_app(environ, start_response):
        # Select the output format from the Accept header, then optionally
        # restrict the registry to the metric names requested via ?name[]=.
        query = parse_qs(environ.get('QUERY_STRING', ''))
        encoder, content_type = choose_encoder(environ.get('HTTP_ACCEPT'))
        if 'name[]' in query:
            reg = registry.restricted_registry(query['name[]'])
        else:
            reg = registry
        payload = encoder(reg)
        start_response(str('200 OK'), [(str('Content-type'), content_type)])
        return [payload]

    return prometheus_app
class _SilentHandler(WSGIRequestHandler):
    """WSGI handler that does not log requests."""

    def log_message(self, format, *args):
        """Log nothing."""
        # Overrides BaseHTTPRequestHandler.log_message, which would
        # otherwise write one line to stderr per request.
def start_wsgi_server(port, addr='', registry=REGISTRY):
    """Starts a WSGI server for prometheus metrics as a daemon thread."""
    server = make_server(addr, port, make_wsgi_app(registry),
                         handler_class=_SilentHandler)
    serving_thread = threading.Thread(target=server.serve_forever)
    serving_thread.daemon = True
    serving_thread.start()
def generate_latest(registry=REGISTRY):
    '''Returns the metrics from the registry in latest text format as a string.'''

    def sample_line(s):
        # Render one sample as: name{labels} value [timestamp_ms]
        if s.labels:
            # Label values must escape backslash, newline and double quote
            # per the Prometheus text exposition format.
            labelstr = '{{{0}}}'.format(','.join(
                ['{0}="{1}"'.format(
                    k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                    for k, v in sorted(s.labels.items())]))
        else:
            labelstr = ''
        timestamp = ''
        if s.timestamp is not None:
            # Convert to milliseconds.
            timestamp = ' {0:d}'.format(int(float(s.timestamp) * 1000))
        return '{0}{1} {2}{3}\n'.format(
            s.name, labelstr, floatToGoString(s.value), timestamp)

    output = []
    for metric in registry.collect():
        try:
            mname = metric.name
            mtype = metric.type
            # Munging from OpenMetrics into Prometheus format.
            if mtype == 'counter':
                mname = mname + '_total'
            elif mtype == 'info':
                mname = mname + '_info'
                mtype = 'gauge'
            elif mtype == 'stateset':
                mtype = 'gauge'
            elif mtype == 'gaugehistogram':
                # A gauge histogram is really a gauge,
                # but this captures the structure better.
                mtype = 'histogram'
            elif mtype == 'unknown':
                mtype = 'untyped'
            # HELP text uses the same backslash/newline escaping as labels.
            output.append('# HELP {0} {1}\n'.format(
                mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
            output.append('# TYPE {0} {1}\n'.format(mname, mtype))
            om_samples = {}
            for s in metric.samples:
                for suffix in ['_created', '_gsum', '_gcount']:
                    if s.name == metric.name + suffix:
                        # OpenMetrics specific sample, put in a gauge at the end.
                        om_samples.setdefault(suffix, []).append(sample_line(s))
                        break
                else:
                    output.append(sample_line(s))
        except Exception as exception:
            # Attach the offending metric to the exception args so the
            # traceback identifies which collector misbehaved.
            exception.args = (exception.args or ('',)) + (metric,)
            raise
        # Emit the deferred OpenMetrics-only samples as explicit gauges.
        for suffix, lines in sorted(om_samples.items()):
            output.append('# TYPE {0}{1} gauge\n'.format(metric.name, suffix))
            output.extend(lines)
    return ''.join(output).encode('utf-8')
def choose_encoder(accept_header):
    """Pick an (encoder, content_type) pair based on the client's Accept header.

    Returns the OpenMetrics encoder when the client explicitly accepts
    'application/openmetrics-text'; otherwise the classic text format.
    """
    for accepted in (accept_header or '').split(','):
        media_type = accepted.split(';')[0].strip()
        if media_type == 'application/openmetrics-text':
            return (openmetrics.generate_latest,
                    openmetrics.CONTENT_TYPE_LATEST)
    return (generate_latest, CONTENT_TYPE_LATEST)
class MetricsHandler(BaseHTTPRequestHandler):
    """HTTP handler that gives metrics from ``REGISTRY``."""
    # Class attribute (not instance attribute) so factory() can override it
    # on a dynamically created subclass.
    registry = REGISTRY

    def do_GET(self):
        # Serve the metrics exposition on any GET, honoring an optional
        # ?name[]=... metric filter and the Accept header for the format.
        registry = self.registry
        params = parse_qs(urlparse(self.path).query)
        encoder, content_type = choose_encoder(self.headers.get('Accept'))
        if 'name[]' in params:
            registry = registry.restricted_registry(params['name[]'])
        try:
            output = encoder(registry)
        except:
            # Report the failure to the client before re-raising so the
            # server-side error is not silently swallowed.
            self.send_error(500, 'error generating metric output')
            raise
        self.send_response(200)
        self.send_header('Content-Type', content_type)
        self.end_headers()
        self.wfile.write(output)

    def log_message(self, format, *args):
        """Log nothing."""

    @classmethod
    def factory(cls, registry):
        """Returns a dynamic MetricsHandler class tied
        to the passed registry.
        """
        # This implementation relies on MetricsHandler.registry
        # (defined above and defaulted to REGISTRY).
        # As we have unicode_literals, we need to create a str()
        # object for type().
        cls_name = str(cls.__name__)
        MyMetricsHandler = type(cls_name, (cls, object),
                                {"registry": registry})
        return MyMetricsHandler
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
    """Thread per request HTTP server.

    Used by start_http_server so a slow scrape cannot block other clients.
    """
    # Make worker threads "fire and forget". Beginning with Python 3.7 this
    # prevents a memory leak because ``ThreadingMixIn`` starts to gather all
    # non-daemon threads in a list in order to join on them at server close.
    # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
    # same as Python 3.7's ``ThreadingHTTPServer``.
    daemon_threads = True
def start_http_server(port, addr='', registry=REGISTRY):
    """Starts an HTTP server for prometheus metrics as a daemon thread"""
    handler_cls = MetricsHandler.factory(registry)
    server = _ThreadingSimpleServer((addr, port), handler_cls)
    serving_thread = threading.Thread(target=server.serve_forever)
    serving_thread.daemon = True
    serving_thread.start()
def write_to_textfile(path, registry):
    '''Write metrics to the given path.

    This is intended for use with the Node exporter textfile collector.
    The path must end in .prom for the textfile collector to process it.'''
    # Write to a unique temp file first, then rename over the destination:
    # rename(2) is atomic, so the collector never sees a half-written file.
    tmppath = '{0}.{1}.{2}'.format(path, os.getpid(), threading.current_thread().ident)
    with open(tmppath, 'wb') as f:
        f.write(generate_latest(registry))
    os.rename(tmppath, path)
def default_handler(url, method, timeout, headers, data):
    '''Default handler that implements HTTP/HTTPS connections.

    Used by the push_to_gateway functions. Can be re-used by other handlers.'''

    def handle():
        # Build the request with the desired HTTP verb and headers, then
        # send it; anything >= 400 is reported as an IOError.
        request = Request(url, data=data)
        request.get_method = lambda: method
        for name, value in headers:
            request.add_header(name, value)
        response = build_opener(HTTPHandler).open(request, timeout=timeout)
        if response.code >= 400:
            raise IOError("error talking to pushgateway: {0} {1}".format(
                response.code, response.msg))

    return handle
def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None):
    '''Handler that implements HTTP/HTTPS connections with Basic Auth.

    Sets auth headers using supplied 'username' and 'password', if set.
    Used by the push_to_gateway functions. Can be re-used by other handlers.'''

    def handle():
        '''Handler that implements HTTP Basic Auth.
        '''
        # BUG FIX: the original appended the Authorization header to the
        # caller's list in place, so reusing the same headers list (or
        # calling handle() repeatedly) stacked up duplicate Authorization
        # headers. Work on a copy instead.
        request_headers = list(headers)
        if username is not None and password is not None:
            auth_value = '{0}:{1}'.format(username, password).encode('utf-8')
            auth_token = base64.b64encode(auth_value)
            auth_header = b'Basic ' + auth_token
            request_headers.append(['Authorization', auth_header])
        default_handler(url, method, timeout, request_headers, data)()

    return handle
def push_to_gateway(
        gateway, job, registry, grouping_key=None, timeout=30,
        handler=default_handler):
    '''Push metrics to the given pushgateway.

    `gateway` the url for your push gateway. Either of the form
              'http://pushgateway.local', or 'pushgateway.local'.
              Scheme defaults to 'http' if none is provided
    `job` is the job label to be attached to all pushed metrics
    `registry` is an instance of CollectorRegistry
    `grouping_key` please see the pushgateway documentation for details.
                   Defaults to None
    `timeout` is how long push will attempt to connect before giving up.
              Defaults to 30s, can be set to None for no timeout.
    `handler` is an optional function which can be provided to perform
              requests to the 'gateway'.
              Defaults to None, in which case an http or https request
              will be carried out by a default handler.
              If not None, the argument must be a function which accepts
              the following arguments:
              url, method, timeout, headers, and content
              May be used to implement additional functionality not
              supported by the built-in default handler (such as SSL
              client certificates, and HTTP authentication mechanisms).
              'url' is the URL for the request, the 'gateway' argument
              described earlier will form the basis of this URL.
              'method' is the HTTP method which should be used when
              carrying out the request.
              'timeout' requests not successfully completed after this
              many seconds should be aborted.  If timeout is None, then
              the handler should not set a timeout.
              'headers' is a list of ("header-name","header-value") tuples
              which must be passed to the pushgateway in the form of HTTP
              request headers.
              The function should raise an exception (e.g. IOError) on
              failure.
              'content' is the data which should be used to form the HTTP
              Message Body.

    This overwrites all metrics with the same job and grouping_key.
    This uses the PUT HTTP method.'''
    # Thin wrapper: PUT replaces the whole group on the pushgateway.
    _use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler)
def pushadd_to_gateway(
        gateway, job, registry, grouping_key=None, timeout=30,
        handler=default_handler):
    '''PushAdd metrics to the given pushgateway.

    `gateway` the url for your push gateway. Either of the form
              'http://pushgateway.local', or 'pushgateway.local'.
              Scheme defaults to 'http' if none is provided
    `job` is the job label to be attached to all pushed metrics
    `registry` is an instance of CollectorRegistry
    `grouping_key` please see the pushgateway documentation for details.
                   Defaults to None
    `timeout` is how long push will attempt to connect before giving up.
              Defaults to 30s, can be set to None for no timeout.
    `handler` is an optional function which can be provided to perform
              requests to the 'gateway'.
              Defaults to None, in which case an http or https request
              will be carried out by a default handler.
              See the 'prometheus_client.push_to_gateway' documentation
              for implementation requirements.

    This replaces metrics with the same name, job and grouping_key.
    This uses the POST HTTP method.'''
    # Thin wrapper: POST only replaces metrics that share a name.
    _use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler)
def delete_from_gateway(
        gateway, job, grouping_key=None, timeout=30, handler=default_handler):
    '''Delete metrics from the given pushgateway.

    `gateway` the url for your push gateway. Either of the form
              'http://pushgateway.local', or 'pushgateway.local'.
              Scheme defaults to 'http' if none is provided
    `job` is the job label to be attached to all pushed metrics
    `grouping_key` please see the pushgateway documentation for details.
                   Defaults to None
    `timeout` is how long delete will attempt to connect before giving up.
              Defaults to 30s, can be set to None for no timeout.
    `handler` is an optional function which can be provided to perform
              requests to the 'gateway'.
              Defaults to None, in which case an http or https request
              will be carried out by a default handler.
              See the 'prometheus_client.push_to_gateway' documentation
              for implementation requirements.

    This deletes metrics with the given job and grouping_key.
    This uses the DELETE HTTP method.'''
    # No registry for DELETE: the request body stays empty.
    _use_gateway('DELETE', gateway, job, None, grouping_key, timeout, handler)
def _use_gateway(method, gateway, job, registry, grouping_key, timeout, handler):
    """Build the pushgateway URL and body, then invoke the handler."""
    # Default the scheme to http:// when none was supplied (and always on
    # Python <= 2.6, where scheme-less URLs confuse urlparse).
    parsed = urlparse(gateway)
    if not parsed.scheme or (PYTHON26_OR_OLDER and parsed.scheme not in ['http', 'https']):
        gateway = 'http://{0}'.format(gateway)

    url = '{0}/metrics/job/{1}'.format(gateway, quote_plus(job))

    # DELETE carries no body; everything else pushes the serialized registry.
    data = b'' if method == 'DELETE' else generate_latest(registry)

    if grouping_key is None:
        grouping_key = {}
    for k, v in sorted(grouping_key.items()):
        url += '/{0}/{1}'.format(quote_plus(str(k)), quote_plus(str(v)))

    handler(
        url=url, method=method, timeout=timeout,
        headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data,
    )()
def instance_ip_grouping_key():
    '''Grouping key with instance set to the IP Address of this host.'''
    # A UDP "connect" sends no packets; it only asks the OS which local
    # address would be used to reach the target, then getsockname() reads it.
    # NOTE(review): connecting to ('localhost', 0) presumably yields the
    # loopback address on most platforms -- confirm that is the intent.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s:
        s.connect(('localhost', 0))
        return {'instance': s.getsockname()[0]}
|
DiscordService.py | import discord
import traceback
import asyncio
from threading import Thread
from Model.ConnectTask import ConnectTask
from Model.MessageTask import MessageTask
from Model.Task import Task
from Workers.TwitchWorker import TwitchWorker
from Workers.YouTubeWorker import YouTubeWorker
class DiscordService(discord.Client):
    """Discord bot that bridges chat between Discord, Twitch and YouTube.

    Recognized commands (in any Discord channel the bot can read):
      !connect <service>  -- spawn a chat worker for TWITCH or YOUTUBE
      !send <text>        -- relay text to both Twitch and YouTube
      !blah <text>        -- echo text back into the Discord channel

    Cross-service traffic travels through the shared ``queues`` dict;
    started workers are tracked in ``services``.
    """

    def __init__(self, config, queues, services):
        discord.Client.__init__(self)
        self.config = config      # per-service configuration blob
        self.queues = queues      # service name -> task queue
        self.services = services  # service name -> running worker

    async def on_ready(self):
        print('Discord Worker Started | Logged in as {0} ({1})'.format(self.user.name, self.user.id))
        # Map service names to their worker classes.
        self.chat_services = {}
        self.chat_services['TWITCH'] = TwitchWorker
        self.chat_services['YOUTUBE'] = YouTubeWorker
        # Drain the DISCORD queue on a daemon thread so the asyncio event
        # loop stays free for discord.py itself.
        thread = Thread(target=self.wait_for_messages, args=(self, self.queues,))
        thread.daemon = True
        thread.start()

    async def on_error(self, event, *args, **kwargs):
        """Log unhandled exceptions raised inside event handlers."""
        message = args[0]  # Gets the message object
        trace = traceback.format_exc()
        print('DISCORD ERROR: \n{}\n{}'.format(message, trace))
        #await self.send_message(message.channel, "You caused an error!")
        # BUG FIX: the original re-invoked self.on_error(event, *args,
        # **kwargs) here, which recursed on itself indefinitely (and, being
        # an un-awaited coroutine call, also leaked a coroutine object on
        # every error). Just log and return.

    async def on_message(self, message):
        # Only react to "!" commands.
        if not message.content.startswith("!"):
            return
        message_args = message.content.split(" ")
        command = message_args.pop(0)
        if command == '!connect':
            chat_service_name = message_args.pop(0).upper()
            await self.send_message(
                message.channel, 'Connecting to {} Chat Service...'.format(chat_service_name)
            )
            body = ConnectTask(chat_service_name, self.config, message.channel)
            task = Task('CONNECT', 'DISCORD', 'DISCORD', body)
            print('')
            print('send to DISCORD: CONNECT')
            self.queues['DISCORD'].put(task, False)
        elif command == '!send':
            message_text = ''.join(message_args)
            body = MessageTask('DISCORD', 'bot', message_text)
            twitch_task = Task('MESSAGE', 'DISCORD', 'TWITCH', body)
            print('')
            print('DISCORD -> TWITCH: {}'.format(message_text))
            self.queues['TWITCH'].put(twitch_task, False)
            youtube_task = Task('MESSAGE', 'DISCORD', 'YOUTUBE', body)
            print('')
            print('DISCORD -> YOUTUBE: {}'.format(message_text))
            self.queues['YOUTUBE'].put(youtube_task, False)
        elif command == '!blah':
            message_text = ''.join(message_args)
            new_message = MessageTask(message.channel, 'bot', message_text)
            new_task = Task('MESSAGE', 'DISCORD', 'DISCORD', new_message)
            print('')
            print('send to DISCORD: {}'.format(message_text))
            self.queues['DISCORD'].put(new_task, False)

    def wait_for_messages(self, client, queues):
        """Blocking consumer loop for the DISCORD task queue (runs on a thread)."""
        while True:
            task = self.queues['DISCORD'].get()
            if task.command == 'QUIT':
                #todo: stop all workers and exit
                pass
            elif task.command == 'CONNECT':
                print('received from {}: CONNECT'.format(task.sender))
                service_name = task.body.service_name
                service_config = task.body.service_config
                discord_channel = task.body.discord_channel
                if (service_name in self.chat_services.keys()):
                    chat_worker = self.chat_services[service_name](service_config, queues, discord_channel)
                    chat_worker.start()
                    self.services[service_name] = chat_worker
            elif task.command == 'MESSAGE':
                if (task.recipient != 'DISCORD'):
                    # Relay to the opposite third-party service.
                    recipient = 'YOUTUBE' if task.sender == 'TWITCH' else 'TWITCH'
                    relay_task = Task('MESSAGE', task.sender, recipient, task.body)
                    print('{} -> {}: {}'.format(task.sender, recipient, relay_task.body.get_message()))
                    self.queues[recipient].put(relay_task, False)
                print('received from {}: {}'.format(task.sender, task.body.get_message()))
                # Hand the coroutine to the client's event loop; .result()
                # blocks this worker thread until the send completes.
                asyncio.run_coroutine_threadsafe(
                    client.send_message(
                        task.body.message_channel,
                        task.body.get_message()),
                    client.loop
                ).result()
            else:
                print("DISCORD SENDER: Unknown Command: {}".format(task.command))
|
say_to_love.py | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from wxpy import *
from requests import get
from requests import post
from platform import system
from random import choice
from threading import Thread
import configparser
import time
# Fetch the daily motivational sentence
def get_message():
    """Fetch the daily motivational sentence from the iciba open API.

    Returns:
        (note, content): the Chinese translation and the English sentence.
    """
    r = get("http://open.iciba.com/dsapi/")
    # Parse the JSON body once -- the original called r.json() twice,
    # re-decoding the whole payload for each field.
    payload = r.json()
    return payload['note'], payload['content']
# Send a message to her
def send_message(your_message):
    """Send a WeChat message to the configured friend.

    On any failure (friend not found, network error, ...) a notice is
    sent to the WeChat file-transfer helper instead.
    """
    try:
        # Look the friend up by WeChat display name (not ID, not remark name).
        my_friend = bot.friends().search(my_lady_wechat_name)[0]
        my_friend.send(your_message)
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt and SystemExit.
        bot.file_helper.send(u"守护女友出问题了,赶紧去看看咋回事~")
# Carry out the care actions at the scheduled times
def start_care():
    """Main care loop: send scheduled greetings, festival and birthday wishes.

    Runs forever on its own thread, checking the wall clock once a minute
    against the configured greeting times (say_good_*), fixed festival
    dates, and the configured birthday, sending WeChat messages via
    send_message(). Relies on module-level configuration set in __main__.
    """
    # Content to be sent; starts empty.
    message = ""
    # Loop forever so we can care for her around the clock.
    while (True):
        # Heartbeat log.
        print("守护中,时间:%s" % time.ctime())
        # Scheduled daily greetings: wake-up, lunch, dinner, bedtime.
        # Take only HH:MM from time.ctime() (characters -13 to -8).
        now_time = time.ctime()[-13:-8]
        if (now_time == say_good_morning):
            # Pick one greeting at random.
            message = choice(str_list_good_morning)
            # Optionally append a random emoticon.
            if (flag_wx_emoj):
                message = message + choice(str_list_emoj)
            send_message(message)
            print("提醒女友早上起床:%s" % time.ctime())
        elif (now_time == say_good_lunch):
            message = choice(str_list_good_lunch)
            # Optionally append a random emoticon.
            if (flag_wx_emoj):
                message = message + choice(str_list_emoj)
            send_message(message)
            print("提醒女友中午吃饭:%s" % time.ctime())
        elif (now_time == say_good_dinner):
            message = choice(str_list_good_dinner)
            # Optionally append a random emoticon.
            if (flag_wx_emoj):
                message = message + choice(str_list_emoj)
            send_message(message)
            print("提醒女友晚上吃饭:%s" % time.ctime())
        elif (now_time == say_good_dream):
            # Optionally append the daily English-learning sentence.
            if (flag_learn_english):
                note, content = get_message()
                message = choice(str_list_good_dream) + "\n\n" + "顺便一起来学英语哦:\n" + "原文: " + content + "\n\n翻译: " + note
            else:
                message = choice(str_list_good_dream)
            # Optionally append a random emoticon.
            if (flag_wx_emoj):
                message = message + choice(str_list_emoj)
            send_message(message)
            print("提醒女友晚上睡觉:%s" % time.ctime())
        # Festival greetings (month/day as zero-padded strings).
        festival_month = time.strftime('%m', time.localtime())
        festival_day = time.strftime('%d', time.localtime())
        if (festival_month == '02' and festival_day == '14' and now_time == "08:00"):
            # Valentine's Day.
            send_message(str_Valentine)
            print("发送情人节祝福:%s" % time.ctime())
        elif (festival_month == '03' and festival_day == '08' and now_time == "08:00"):
            # International Women's Day.
            send_message(str_Women)
            print("发送三八妇女节祝福:%s" % time.ctime())
        elif (festival_month == '12' and festival_day == '24' and now_time == "00:00"):
            # Christmas Eve.
            send_message(str_Christmas_Eve)
            print("发送平安夜祝福:%s" % time.ctime())
        elif (festival_month == '12' and festival_day == '25' and now_time == "00:00"):
            # Christmas Day.
            send_message(str_Christmas)
            print("发送圣诞节祝福:%s" % time.ctime())
        # Birthday greeting.
        if (festival_month == birthday_month and festival_day == birthday_day and now_time == "00:00"):
            send_message(str_birthday)
            print("发送生日祝福:%s" % time.ctime())
        # Check once every 60 seconds.
        time.sleep(60)
if __name__ == "__main__":
    # If reading the config file fails, uncomment the line below (usually
    # only needed when running under PyCharm) to set the working directory
    # to this file's directory:
    # chdir(sys.path[0])
    # Start the WeChat bot; login mode depends on the operating system:
    #   Windows or macOS Sierra: bot = Bot()
    #   Linux or macOS terminal: bot = Bot(console_qr=2)
    if ('Windows' in system()):
        # Windows
        bot = Bot()
    elif ('Darwin' in system()):
        # MacOSX
        bot = Bot()
    elif ('Linux' in system()):
        # Linux
        bot = Bot(console_qr=2, cache_path=True)
    else:
        # Unrecognized OS: configure manually.
        print("无法识别你的操作系统类型,请自己设置")
    # Read the configuration file.
    cf = configparser.ConfigParser()
    cf.read("./config.ini", encoding='UTF-8')
    # Girlfriend's WeChat display name (not her WeChat ID or remark name).
    my_lady_wechat_name = cf.get("configuration", "my_lady_wechat_name")
    # Times for the wake-up / lunch / dinner / bedtime greetings.
    say_good_morning = cf.get("configuration", "say_good_morning")
    say_good_lunch = cf.get("configuration", "say_good_lunch")
    say_good_dinner = cf.get("configuration", "say_good_dinner")
    say_good_dream = cf.get("configuration", "say_good_dream")
    # Birthday month and day, zero-padded to two digits (e.g. June -> 06).
    birthday_month = cf.get("configuration", "birthday_month")
    birthday_day = cf.get("configuration", "birthday_day")
    # Load the random greeting sentence pools (sourced from Sina Weibo).
    # To add sentences, edit the corresponding text files; the code below
    # normally needs no changes.
    # Morning greetings.
    str_list_good_morning = ''
    with open("./remind_sentence/sentence_good_morning.txt", "r", encoding='UTF-8') as f:
        str_list_good_morning = f.readlines()
    print(str_list_good_morning)
    # Lunch greetings.
    str_list_good_lunch = ''
    with open("./remind_sentence/sentence_good_lunch.txt", "r", encoding='UTF-8') as f:
        str_list_good_lunch = f.readlines()
    print(str_list_good_lunch)
    # Dinner greetings.
    str_list_good_dinner = ''
    with open("./remind_sentence/sentence_good_dinner.txt", "r", encoding='UTF-8') as f:
        str_list_good_dinner = f.readlines()
    print(str_list_good_dinner)
    # Bedtime greetings.
    str_list_good_dream = ''
    with open("./remind_sentence/sentence_good_dream.txt", "r", encoding='UTF-8') as f:
        str_list_good_dream = f.readlines()
    print(str_list_good_dream)
    # Whether bedtime greetings also include the daily English sentence
    # ('1' in the config means yes).
    if ((cf.get("configuration", "flag_learn_english")) == '1'):
        flag_learn_english = True
    else:
        flag_learn_english = False
    print(flag_learn_english)
    # Whether every greeting ends with a random emoticon ('1' means yes).
    str_emoj = "(•‾̑⌣‾̑•)✧˖°----(๑´ڡ`๑)----(๑¯ิε ¯ิ๑)----(๑•́ ₃ •̀๑)----( ∙̆ .̯ ∙̆ )----(๑˘ ˘๑)----(●′ω`●)----(●・̆⍛・̆●)----ಥ_ಥ----_(:qゝ∠)----(´;ω;`)----( `)3')----Σ((( つ•̀ω•́)つ----╰(*´︶`*)╯----( ´´ิ∀´ิ` )----(´∩`。)----( ื▿ ื)----(。ŏ_ŏ)----( •ิ _ •ิ )----ヽ(*΄◞ิ౪◟ิ‵ *)----( ˘ ³˘)----(; ´_ゝ`)----(*ˉ﹃ˉ)----(◍'౪`◍)ノ゙----(。◝‿◜。)----(ಠ .̫.̫ ಠ)----(´◞⊖◟`)----(。≖ˇェˇ≖。)----(◕ܫ◕)----(`◕‸◕´+)----(▼ _ ▼)----( ◉ืൠ◉ื)----ㄟ(◑‿◐ )ㄏ----(●'◡'●)ノ♥----(。◕ˇ∀ˇ◕)----( ◔ ڼ ◔ )----( ´◔ ‸◔`)----(☍﹏⁰)----(♥◠‿◠)----ლ(╹◡╹ლ )----(๑꒪◞౪◟꒪๑)"
    str_list_emoj = str_emoj.split('----')
    if ((cf.get("configuration", "flag_wx_emoj")) == '1'):
        flag_wx_emoj = True
    else:
        flag_wx_emoj = False
    print(str_list_emoj)
    # Festival greeting strings.
    # Valentine's Day.
    str_Valentine = cf.get("configuration", "str_Valentine")
    print(str_Valentine)
    # International Women's Day.
    str_Women = cf.get("configuration", "str_Women")
    print(str_Women)
    # Christmas Eve.
    str_Christmas_Eve = cf.get("configuration", "str_Christmas_Eve")
    print(str_Christmas_Eve)
    # Christmas Day.
    str_Christmas = cf.get("configuration", "str_Christmas")
    print(str_Christmas)
    # Her birthday.
    str_birthday = cf.get("configuration", "str_birthday")
    print(str_birthday)
    # Start guarding: run the care loop on its own thread.
    t = Thread(target=start_care, name='start_care')
    t.start()
# Listener for messages received from the girlfriend
# Look her up by WeChat display name
# Register a handler for messages from her (and from ourselves in that chat).
my_girl_friend = bot.friends().search(my_lady_wechat_name)[0]
@bot.register(chats=my_girl_friend, except_self=False)
def print_others(msg):
    """Log each incoming message and forward a rough sentiment score."""
    # Print the chat content.
    print(msg.text)
    # snownlp/jieba could do local segmentation + sentiment analysis, but
    # they bloat the packaged binary, so a simple web API is used instead.
    # The result is indicative only -- do not fully trust it.
    postData = {'data': msg.text}
    response = post('https://bosonnlp.com/analysis/sentiment?analysisType=', data=postData)
    data = response.text
    # Sentiment score (closer to 1 = happier, closer to 0 = unhappier).
    now_mod_rank = (data.split(',')[0]).replace('[[', '')
    print("来自女友的消息:%s\n当前情感得分:%s\n越接近1表示心情越好,越接近0表示心情越差,情感结果仅供参考,请勿完全相信!\n\n" % (msg.text, now_mod_rank))
    # Forward the message and its score to the file-transfer helper.
    mood_message = u"来自女友的消息:" + msg.text + "\n当前情感得分:" + now_mod_rank + "\n越接近1表示心情越好,越接近0表示心情越差,情感结果仅供参考,请勿完全相信!\n\n"
    bot.file_helper.send(mood_message)
c.py | import web3
import web3.auto as web3auto
from web3.middleware import geth_poa_middleware
import time
import threading
import hashlib
import os
import subprocess
import json
from sha3 import keccak_256
w3 = None
HOST="127.0.0.1"
PORT="8545"
def connect(host=None, port=None, poa=False):
    """Connect the module-global w3 to a local Ethereum JSON-RPC node.

    Reuses the existing connection when it is still alive. Set poa=True
    to inject the geth proof-of-authority middleware.
    """
    global w3
    target_host = HOST if host is None else host
    target_port = PORT if port is None else port
    if w3 is None or not w3.isConnected():
        w3 = web3.Web3(web3.HTTPProvider(
            f"http://{target_host}:{target_port}",
            request_kwargs={"timeout": 60 * 1000}))
        if poa:
            # inject PoA compatibility (new-style middleware API)
            w3.middleware_onion.inject(geth_poa_middleware, layer=0)
            # old way: w3.middleware_stack.inject(geth_poa_middleware, layer=0)
    assert w3.isConnected(), "Connecting to local Ethereum node failed!"
    return w3
def compile_contract_with_libs(compiler_path,src_path,account=None,gas=None):
    """ compile a single contract file (with libraries) and manually call the compiler.

    Use only absolute paths its better.

    Returns a {"abi": ..., "bin": ...} dict for the base contract with
    library placeholders in the bytecode replaced by freshly deployed
    library addresses, or None if compilation failed. `account` and `gas`
    are forwarded to deploy_contract() for the library deployments.
    """
    c_abi = ""
    c_bin = ""
    c_dep = dict() # contract dependencies
    # Manually call solc
    output = subprocess.run([compiler_path,"--optimize","--combined-json","abi,bin",src_path],capture_output=True)
    if output.returncode != 0:
        print("Error: Compiling contract")
        print(output)
        return None
    compiler_output = json.loads(output.stdout)
    # We have compiled a contract with dependencies e.g., on libs
    for sfile in compiler_output["contracts"]:
        if sfile.split(":")[0] == src_path:
            # First identify the base contract
            c_abi = compiler_output["contracts"][sfile]["abi"]
            c_bin = compiler_output["contracts"][sfile]["bin"]
        else:
            # all other contract are either libs or inheritted contracts,
            # the latter can be ignored since they have to be included in the
            # base contracts bytecode anyway (see ABI of base contract for inheritted functions)
            # To identify which is which we translate *all* source file path/name
            # to their replacement hash and later check the base contracts bytecode if this
            # replacement hash occures
            c_dep[sfile] = { "replace_str": "__$" + keccak_256(bytes(sfile, "utf-8")).hexdigest()[:34] + "$__",
                             "address_str": "",
                             "bin_str": compiler_output["contracts"][sfile]["bin"],
                             "abi_str": compiler_output["contracts"][sfile]["abi"] }
    for sfile,sdata in c_dep.items():
        if c_bin.find(sdata["replace_str"]) != -1:
            # replacement string found in compiled binary base contract:
            # deploy that library and patch its address into the bytecode
            # wherever the placeholder occurs.
            tx_receipt = deploy_contract(
                cabi=sdata["abi_str"],
                cbin=sdata["bin_str"],
                account=account,
                gas=gas,
                argument=None,
                argument2=None,
                wait=True,
                value=0)
            # Strip the 0x prefix: the bytecode expects a bare hex address.
            c_dep[sfile]["address_str"] = tx_receipt['contractAddress'].replace("0x","")
            c_bin = c_bin.replace(sdata["replace_str"],c_dep[sfile]["address_str"])
    return { "abi": c_abi, "bin": c_bin }.copy()
def deploy_contract(
        cabi,
        cbin,
        account=None,
        gas=None,
        argument=None,
        argument2=None,
        wait=True,
        value=0):
    """ deploy contract from JSON ABI and binary hex string (bin)
    which includes deployment constructor.

    Optional arguments:
      account from which the deployment tx is sent (default: first node account)
      gas limit for deployment (default: 10M)
      argument / argument2: positional constructor arguments
      wait: block until the receipt is available (else return the tx hash)
      value: wei sent along with the deployment
    """
    if account is None:
        account = w3.eth.accounts[0]
        w3.eth.defaultAccount = account
    if gas is None:
        # somewhere around max gas; 5M/4.5M/8M were too large or too small
        # for some default chain configs.
        gas = 10_000_000
    contract = w3.eth.contract(abi=cabi, bytecode=cbin)
    # Collect the constructor arguments. argument2 is only honored when
    # argument is also given (matching the original positional behavior).
    ctor_args = []
    if argument is not None:
        ctor_args.append(argument)
        if argument2 is not None:
            ctor_args.append(argument2)
    tx_hash = contract.constructor(*ctor_args).transact(
        {"from": account,
         "gas": gas,
         "value": value})
    if wait:
        return w3.eth.waitForTransactionReceipt(tx_hash)
    return tx_hash
def get_contract_instance(
        caddress,
        cabi,
        account=None,
        concise=False,
        patch_api=False,
        concise_events=False,
        path=None):
    """ get contract instance from address and abi

    If cabi is None, the ABI is obtained by compiling the source at
    `path` with the system solc. With concise=True a ConciseContract is
    returned; patch_api additionally wraps its methods so transactions
    wait for their receipt (see _tx_executor).

    NOTE(review): `account` and `concise_events` are currently unused here.
    """
    if cabi is None and path is None:
        print("No ABI or path to source given")
        return None
    elif path is not None:
        cabi=compile_contract_with_libs("solc",path)["abi"]
    if concise:
        instance = w3.eth.contract(
            address=caddress,
            abi=cabi,
            ContractFactoryClass=web3.contract.ConciseContract)
    else:
        instance = w3.eth.contract(
            address=caddress,
            abi=cabi)
    if concise and patch_api:
        # patch API s.t. all transactions are automatically waited for
        # until tx_receipt is received
        for name, func in instance.__dict__.items():
            if isinstance(func, web3.contract.ConciseMethod):
                instance.__dict__[name] = _tx_executor(func)
    return instance
def _tx_executor(contract_function):
""" modifies the contract instance interface function such that whenever a transaction is performed
it automatically waits until the transaction in included in the blockchain
(unless wait=False is specified, in the case the default the api acts as usual)
"""
def f(*args, **kwargs):
#print(args,kwargs)
wait = kwargs.pop("wait", True)
txwait = kwargs.pop("txwait", False)
#print(args,kwargs)
#print(wait,txwait)
if ("transact" in kwargs and wait) or txwait:
tx_hash = contract_function(*args, **kwargs)
tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
return tx_receipt
return contract_function(*args, **kwargs)
return f
def compile_and_deploy_contract(path,
                                account=None,
                                concise=True,
                                patch_api=True,
                                concise_events=True,
                                argument=None,
                                argument2=None,
                                wait=True,
                                value=0,
                                gas=None,
                                compiler="solc"):
    """ compiles and deploy the given contract (from the ./contracts folder)
    returns the contract instance

    Changed default behaviour to use the installed solc compiler
    with custom flags per default: compiler="solc"
    Change to custom path to compiler location if necessary.

    With wait=True (default) returns a contract instance bound to the
    deployed address; with wait=False returns the deployment tx hash.
    """
    # Lazily establish the node connection if needed.
    if not w3 or not w3.isConnected():
        connect()
    if account is None:
        # Prefer the node's default account when it is a valid address.
        if w3.isAddress(w3.eth.defaultAccount):
            account = w3.eth.defaultAccount
        else:
            account = w3.eth.accounts[0]
            w3.eth.defaultAccount = account
    # compile manually
    interface = compile_contract_with_libs(compiler_path=compiler,
                                           src_path=path,
                                           account=account,
                                           gas=gas)
    ret = deploy_contract(
        cabi=interface["abi"],
        cbin=interface["bin"],
        account=account,
        gas=gas,
        argument=argument,
        argument2=argument2,
        wait=wait,
        value=value)
    if wait:
        tx_receipt = ret
        contract = get_contract_instance(
            caddress=tx_receipt['contractAddress'],
            cabi=interface["abi"],
            patch_api=patch_api,
            concise=concise,
            concise_events=concise_events)
        return contract
    else:
        tx_hash = ret
        return tx_hash
def get_events(contract_instance, event_name):
    """Return every logged *event_name* entry emitted by this contract.

    Filters from block 0 and keeps only entries whose address matches the
    given instance, since other deployments may share the event signature.
    """
    event_filter = contract_instance.events.__dict__[event_name].createFilter(fromBlock=0)
    own_address = contract_instance.address
    return [entry for entry in event_filter.get_all_entries()
            if entry.address == own_address]
# -----------
# w3 helper
def mine_block():
    # Ask the test RPC provider (e.g. ganache/testrpc) to mine one block on
    # demand via the non-standard 'evm_mine' JSON-RPC method.
    w3.providers[0].make_request('evm_mine', params='')
def mine_blocks_until(predicate):
    """Keep mining blocks until *predicate*() becomes truthy."""
    while True:
        if predicate():
            return
        mine_block()
def getBalance(address):
    # The node reports balances in wei; convert to ether for readability.
    return w3.fromWei(w3.eth.getBalance(address),'ether')
# -----------
def flatten(list_of_lists):
    """Concatenate one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for sublist in list_of_lists:
        flat.extend(sublist)
    return flat
def wait_for(predicate, check_interval=1.0):
    """Block until *predicate*() is truthy, polling every *check_interval* seconds."""
    while True:
        if predicate():
            break
        time.sleep(check_interval)
# -----------
def run(func_or_funcs, args=()):
    """Execute the given function(s) in parallel threads and join them all.

    Two modes:
      * a list of functions — each is paired with args[i] when *args* is a
        list, otherwise they all share *args* (non-tuple, non-None values are
        wrapped into 1-tuples);
      * a single function fanned out over a list of argument values, each
        non-tuple value wrapped into a 1-tuple.
    """
    workers = []
    if isinstance(func_or_funcs, list):
        for idx, fn in enumerate(func_or_funcs):
            fn_args = args[idx] if isinstance(args, list) else args
            if fn_args is not None and not isinstance(fn_args, tuple):
                fn_args = (fn_args,)
            workers.append(threading.Thread(target=fn, args=fn_args))
    else:
        assert isinstance(args, list)
        workers = [
            threading.Thread(target=func_or_funcs,
                             args=a if isinstance(a, tuple) else (a,))
            for a in args
        ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
|
ali5.py | # -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
# --- Bot account login (Python 2 script) ---
# Five LINE client instances are logged in with hard-coded auth tokens.
# NOTE(review): committing live tokens to source control is a security risk;
# these should come from the environment or a config file instead.
# NOTE(review): ks ("Chooper") reuses the exact same token string as kc
# ("Ussop") — both sessions would be the same account; confirm intent.
cl = LINETCR.LINE() #Luffy
cl.login(token="EtMYluqKK6L1yK3PNih4.TkvigidwktxwcWhKhmIoza.etF5TmUsCYaW0lFKQnmI1d+DqhIkbB6X7ZChqu5WCp8=")
cl.loginResult()
ki = LINETCR.LINE() #Zorro
ki.login(token="Et4ryUfTFCjQuwPAzN68.cUJFoJGr3UYty6YBg8bcka.udYovUiSI4zMWrkVjPPTw/AN/jStX+lzg7HObIbGQF4=")
ki.loginResult()
kk = LINETCR.LINE() #Sanji
kk.login(token="Et75wQ7lzc7hirionq10./liouaQP+sXQKxkgUUtUya.00biOxT3dbygmR9s0U1isO4XXGCT0UKpqGoLv2CIcGQ=")
kk.loginResult()
kc = LINETCR.LINE() #Ussop
kc.login(token="EttG1kDu1khE8mmQIsj8.ZsO9J5JHYlZUnoIxBPBnQa.y7IkzQdw/PjupZC2aD4hQ43EdqbC68IqMyVsIqPcN04=")
kc.loginResult()
ks = LINETCR.LINE() #Chooper
ks.login(token="EttG1kDu1khE8mmQIsj8.ZsO9J5JHYlZUnoIxBPBnQa.y7IkzQdw/PjupZC2aD4hQ43EdqbC68IqMyVsIqPcN04=")
ks.loginResult()
print "login success "
# Python 2 only: re-import sys to regain setdefaultencoding (removed from
# the module namespace at startup) and force UTF-8 as the default codec.
reload(sys)
sys.setdefaultencoding('utf-8')
# Help text sent to users by the "Key"/"help" command handler in bot().
# The menu body is user-facing Indonesian and is sent verbatim at runtime,
# so it is deliberately left untranslated.
helpMessage ="""NB NINJA BLACK
Owner : NB NINJA BLACK
-==================-
◄]·♦·Menu For Public·♦·[►
[•]Adminlist
[•]Ownerlist
[•]Info Group
[•]Welcome
[•]Creator
[•]Bot
◄]·♦·Menu For Admin·♦·[►
-==================-
[•]Cancel
[•]「Buka/Tutup」qr
[•]Mid Bot
[•]Speed/Sp
[•]「Cctv/Ciduk」
[•]Status/Set
[•]Gurl
[•]Jam「On/Off」
[•]Tag all/Tagall
[•]Absen/Respon
[•]Banlist
>>[Perintah Proteksi]<<
👑Hanya Untuk Owner👑
-==================-
NB NINJA BLACK
-==================-
"""
# All logged-in bot clients; handlers pick one at random for kick/cancel work.
KAC=[cl,ki,kk,kc,ks]
#DEF1=[ki,kk,kc,ks,ka,kb,ko,ke,ku] No longer used (safe to delete)
#DEF2=[cl,kk,kc,ks,ka,kb,ko,ke,ku] No longer used (safe to delete)
#DEF3=[cl,ki,kc,ks,ka,kb,ko,ke,ku] No longer used (safe to delete)
#DEF4=[cl,ki,kk,ks,ka,kb,ko,ke,ku] No longer used (safe to delete)
#DEF5=[cl,ki,kk,kc,ka,kb,ko,ke,ku] No longer used (safe to delete)
#DEF6=[cl,ki,kk,kc,ks,kb,ko,ke,ku] No longer used (safe to delete)
#DEF7=[cl,ki,kk,kc,ks,ka,ko,ke,ku] No longer used (safe to delete)
#DEF8=[cl,ki,kk,kc,ks,ka,kb,ke,ku] No longer used (safe to delete)
#DEF9=[cl,ki,kk,kc,ks,ka,kb,ko,ku] No longer used (safe to delete)
#DEF10=[cl,ki,kk,kc,ks,ka,kb,ko,ke] No longer used (safe to delete)
# LINE user ids (mids) of each bot account, fetched from their profiles.
mid = cl.getProfile().mid #Luffy
Amid = ki.getProfile().mid #Zorro
Bmid = kk.getProfile().mid #Sanji
Cmid = kc.getProfile().mid #Ussop
Dmid = ks.getProfile().mid #Chooper
# Access-control lists: bot accounts, admins, owners, and kick-exempt users.
Bots=[mid,Amid,Bmid,Cmid,Dmid,"uff28ea10671930b3fe74f0e251bb9aed","u8292f73b77c82e709cd175ede9834b33","u5a814038bb9430f92516a16e03cee1bb","ub4a5481880d62935d60b01e41cc88e64","u0ae8df3a3811deec9363ac9b1bc04228","uf8b20e500aa9fd2c6a917286bae4ee60","ub54ede32df2dce7a4078a7aab4d974a6","ucd2a3d8f98e1c8bcee34e8976e96cda8"]
admin=["uff28ea10671930b3fe74f0e251bb9aed","u8292f73b77c82e709cd175ede9834b33","u322fd3d219a3a71362e6f3e0c55a6c30","ueb192e2133e812ddbf9a88b88d8b45b8","u5a814038bb9430f92516a16e03cee1bb"]
owner=["uf33a411f97b34a0bd3a80bef9b3b4432","uff28ea10671930b3fe74f0e251bb9aed"]
whitelist=["uf33a411f97b34a0bd3a80bef9b3b4432"]
# Mutable runtime settings read/written by bot(); keys double as feature
# flags for the protection handlers.
# NOTE(review): "Protectjoin" is commented out below, yet wait["Protectjoin"]
# is still read later in bot() — that lookup raises KeyError (swallowed by
# bot()'s broad try/except).  Confirm whether the feature should be restored
# or the dead read removed.
wait = {
    'contact':False,
    'autoJoin':True,
    'autoCancel':{"on":True,"members":1},
    'leaveRoom':True,
    'timeline':True,
    'autoAdd':True,
    'message':"""тerima Kasih Sudah Menambahkan Aku Jadi Teman
≫ Aku Ga Jawab PM Karna aq Cuma Bot Protect ≪
≫ NB NINJA BLACK ≪
Ready:
≫ bot protect ≪
≫ SelfBot ≪
ṡȗƿƿȏяṭєԀ ɞʏ:
☆ NB NINJA BLACK ☆
☆ Destroyers ☆
Minat? Silahkan PM!
Idline: http://line.me/ti/p/~klabakan16""",
    "lang":"JP",
    "comment":"Thanks for add me",
    "commentOn":False,
    "commentBlack":{},
    "wblack":False,
    "dblack":False,
    "clock":False,
    "cName":" ",
    "cName2":" ",
    "cName3":" ",
    "cName4":" ",
    "cName5":" ",
    "blacklist":{},
    "wblacklist":False,
    "dblacklist":False,
    "Protectgr":True,
    #"Protectjoin":True, # Unused (joiners were kicked on sight); superseded by the cancel protection
    "Protectcancl":True,
    "protectionOn":True,
    "atjointicket":True
    }
# Per-group read-receipt tracking state used by the "read point" commands.
wait2 = {
    'readPoint':{},
    'readMember':{},
    'setTime':{},
    'ROM':{}
    }
# Alias into wait2 so handlers can mutate setTime through either name.
setTime = {}
setTime = wait2['setTime']
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # NOTE(review): this helper builds a Message but never actually sends it,
    # and it references module globals `profile` and `messageReq` that are not
    # defined anywhere in this part of the file — calling it as-is raises
    # NameError.  Looks like dead/copied code; confirm before relying on it.
    # NOTE(review): the mutable default for contentMetadata is shared across
    # calls — the classic Python mutable-default pitfall.
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if cl.getGroup(op.param1).preventJoinByTicket == False:
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
try:
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "Jangan Buka Kode QR Njiiir")
cl.kickoutFromGroup(op.param1,[op.param2])
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Njiiir")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
Z = random.choice(KAC).getGroup(op.param1)
Z.preventJoinByTicket = True
random.choice(KAC).updateGroup(Z)
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
if op.type == 13:
if wait["Protectcancl"] == True:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
random.choice(KAC).sendText(op.param1, "Mau Ngundang Siapa Ka?\nKk Bukan Admin\nJadi Aku Cancel😛")
#------Cancel Invite User Finish------#
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Amid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner:
ki.acceptGroupInvitation(op.param1)
else:
ki.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Bmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner:
kk.acceptGroupInvitation(op.param1)
else:
kk.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Cmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner:
kc.acceptGroupInvitation(op.param1)
else:
kc.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Dmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner:
ks.acceptGroupInvitation(op.param1)
else:
ks.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
#------Joined User Kick start------#
if op.type == 13: #awal 17 ubah 13
if wait["Protectjoin"] == True:
if op.param2 not in admin and Bots : # Awalnya admin doang
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#------Joined User Kick start------#
if op.type == 19: #Member Ke Kick
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
elif op.param2 in whitelist:
pass
else:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8') #tanda
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) # tanda
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8') #tanda
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) # tanda
if op.type == 19: #bot Ke Kick
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
if op.param3 in mid:
if op.param2 not in Bots or admin:
try:
G = ki.getGroup(op.param1)
kk.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
kk.updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(KAC).getGroup(op.param1) #Sanji Bertindak
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in Bmid:
if op.param2 not in Bots or admin:
try:
G = kc.getGroup(op.param1)
kc.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
kc.updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(KAC).getGroup(op.param1) #Sanji Bertindak
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in Cmid:
if op.param2 not in Bots or admin:
try:
G = ks.getGroup(op.param1)
ks.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
ks.updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(KAC).getGroup(op.param1) #Sanji Bertindak
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in Dmid:
if op.param2 not in Bots or admin:
try:
G = cl.getGroup(op.param1)
cl.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
cl.updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
G = random.choice(KAC).getGroup(op.param1) #Sanji Bertindak
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
if op.param3 in admin:
if op.param2 not in Bots:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[admin])
wait["blacklist"][op.param2] = True
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[admin])
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Key","help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Admin menu"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("Luffy gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("Zorro gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv2 gn ","")
kk.updateGroup(X)
else:
kk.sendText(msg.to,"It can't be used besides the group.")
elif ("Sanji gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv3 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
elif "Luffy kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_second kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "Zorro kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_third kick ","")
kk.kickoutFromGroup(msg.to,[midd])
elif "Sanji kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_fourth kick ","")
kc.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "Luffy invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("sinvite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "Zorro invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("tinvite ","")
kk.findAndAddContactsByMid(midd)
kk.inviteIntoGroup(msg.to,[midd])
elif "Zorro invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("finvite ","")
kc.findAndAddContactsByMid(midd)
kc.inviteIntoGroup(msg.to,[midd])
#--------------- SC Add Admin ---------
elif "Admin add @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif "Admin remove @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif msg.text in ["Adminlist","adminlist"]:
if admin == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "||Admin Bot NB.NINJA BLACK||\n=====================\n"
for mi_d in admin:
mc += "••>" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#--------------------------------------
#-------------- Add Friends ------------
elif "Bot Add @" in msg.text:
if msg.toType == 2:
if msg.from_ in owner:
print "[Command]Add executing"
_name = msg.text.replace("Bot Add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
ks.findAndAddContactsByMid(target)
except:
cl.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
#-------------=SC AllBio=---------------- Ganti Bio Semua Bot Format => Allbio: SUKA SUKA KALIAN :D
elif "Allbio:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
cl.sendText(msg.to,"Bio berubah menjadi " + string + "")
#--------------=Finish=----------------
#--------------= SC Ganti nama Owner=--------------
elif "Myname:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Myname:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Name Menjadi : " + string + "")
#-------------- copy profile----------
elif "Spam: " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam: ")+str(txt[1])+" "+str(jmlh + " ","")
tulisan = jmlh * (teks+"\n")
#@reno.a.w
if txt[1] == "on":
if jmlh <= 300:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Kelebihan batas:v")
elif txt[1] == "off":
if jmlh <= 300:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Kelebihan batas :v")
#-----------------=Selesai=------------------
elif msg.text in ["Bot?"]: #Ngirim Semua Kontak Bot
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
random.choice(KAC).sendMessage(msg)
elif msg.text in ["Cv2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
elif msg.text in ["愛�プレゼント","Gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
random.choice(KAC).sendMessage(msg)
elif msg.text in ["愛�プレゼント","All gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Op cancel","Bot cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = k3.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
k3.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"No one is inviting")
else:
k3.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"Can not be used outside the group")
else:
k3.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Buka qr","Open qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"QR Sudah Dibuka")
else:
random.choice(KAC).sendText(msg.to,"Sudah Terbuka Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Luffy buka qr","Luffy open qr"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done Plak")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro buka qr","Zorro open qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Sanji open qr","Sanji buka qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = False
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Tutup qr","Close qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Luffy close qr","Luffy tutup qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro tutup qr","Zorro close qr"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Plak")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Sanji tutup qr","Sanji close qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = True
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Cuy")
else:
kc.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif "Info Group" == msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
QR = "Close"
else:
QR = "Open"
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + "[•]" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + "[•]" + gCreator + "\n\n[Group Status]\n" + "[•]Status QR =>" + QR + "\n\n[Group Picture]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
else:
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "My mid" == msg.text:
if msg.from_ in admin:
random.choice(KAC).sendText(msg.to, msg.from_)
elif "Mid Bot" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
elif "Koplaxs" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,Smid)
elif "Luffy" == msg.text:
if msg.from_ in admin:
ki.sendText(msg.to,mid)
elif "Zorro" == msg.text:
if msg.from_ in admin:
kk.sendText(msg.to,Amid)
elif "Sanji" == msg.text:
if msg.from_ in admin:
kc.sendText(msg.to,Bmid)
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galau"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["TL: "]:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Bot1 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot2 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot3 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Mc "]:
if msg.from_ in admin:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#elif msg.text in ["Joinn on","joinn on"]:
#if msg.from_ in admin:
#if wait["Protectjoin"] == True:
#if wait["lang"] == "JP":
#cl.sendText(msg.to,"Kick Joined Group On")
#else:
#cl.sendText(msg.to,"Done")
#else:
#wait["Protectjoin"] = True
#if wait["lang"] == "JP":
#cl.sendText(msg.to,"Kick Joined Group On")
#else:
#cl.sendText(msg.to,"done")
#elif msg.text in ["Joinn off","joinn off"]:
#if msg.from_ in admin:
#if wait["Protectjoin"] == False:
#if wait["lang"] == "JP":
#cl.sendText(msg.to,"kick Joined Group Off")
#else:
#cl.sendText(msg.to,"done")
#else:
#wait["Protectjoin"] = False
#if wait["lang"] == "JP":
#cl.sendText(msg.to,"kick Joined Group Off")
#else:
#cl.sendText(msg.to,"done")
elif msg.text in ["Cancel on","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr on","qr on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr off","qr off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact On","Contact on","contact on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact Off","Contact off","contact off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オン","Join on","Auto join on","自動å�ƒåŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オフ","Join off","Auto join off","自動å�ƒåŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒�。�时开请指定人数��")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的�组用自动邀请拒�")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["共有:オフ","Share off","Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Status","Set"]:
if msg.from_ in admin:
md = "⭐Status Proteksi⭐\n*============*\n"
if wait["Protectgr"] == True: md+="[•]Protect QR [On]\n"
else: md+="[•]Protect QR [Off]\n"
if wait["Protectcancl"] == True: md+="[•]Protect Invite [On]\n"
else: md+="[•]Protect Invite [Off]\n"
if wait["contact"] == True: md+="[•]Contact [On]\n"
else: md+="[•]Contact [Off]\n"
if wait["autoJoin"] == True: md+="[•]Auto Join [On]\n"
else: md +="[•]Auto Join [Off]\n"
if wait["autoCancel"]["on"] == True:md+="[•]Group Cancel " + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "[•]Group Cancel [Off]\n"
if wait["leaveRoom"] == True: md+="[•]Auto Leave [On]\n"
else: md+=" Auto Leave [Off]\n"
if wait["timeline"] == True: md+="[•]Share [On]\n"
else:md+="[•]Share [Off]\n"
if wait["autoAdd"] == True: md+="[•]Auto Add [On]\n"
else:md+="[•]Auto Add [Off]\n"
if wait["commentOn"] == True: md+="[•]Comment [On]\n"
else:md+="[•]Comment [Off]\n*============*\n⭐NB NINJA BLACK⭐\n*============*"
cl.sendText(msg.to,md)
elif "album merit " in msg.text:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "album " in msg.text:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "album remove " in msg.text:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif "album removeat’" in msg.text:
gid = msg.text.replace("album removeat’","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"Done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•�候語確èª�"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#---------------------Sc invite owner ke group------
elif "/invitemeto: " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("/invitemeto: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#--------===---====--------------
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é �留言:開"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["コメント:オフ","Comment off","comment off","自動首é �留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Comment","留言確�"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv3 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Jam on/off Start-------------------#
elif msg.text in ["Jam on"]:
if msg.from_ in admin:
if wait["clock"] == True:
kc.sendText(msg.to,"Bot 4 jam on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Jam Selalu On")
elif msg.text in ["Jam off"]:
if msg.from_ in admin:
if wait["clock"] == False:
kc.sendText(msg.to,"Bot 4 jam off")
else:
wait["clock"] = False
kc.sendText(msg.to,"Jam Sedang Off")
#-------------Fungsi Jam on/off Finish-------------------#
#-------------Fungsi Change Clock Start------------------#
elif msg.text in ["Change clock"]:
n = msg.text.replace("Change clock","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
#-------------Fungsi Change Clock Finish-----------------#
#-------------Fungsi Jam Update Start---------------------#
elif msg.text in ["Jam Update"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Sukses update")
else:
kc.sendText(msg.to,"Aktifkan jam terlebih dulu")
#-------------Fungsi Jam Update Finish-------------------#
elif msg.text == "Cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "Silahkan di Read Boss Siapa aja yg Cctv,,,,,,,!!!")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
#print wait2
elif msg.text == "Read":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
#print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "|| Di Read Oleh ||%s\n|| By : NB BOT ||\n\n>Pelaku CCTV<\n%s-=CCTV=-\n\n•Bintitan\n•Panuan\n•Kurapan\n•Kudisan\n\nAmiin Ya Allah\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Cctv dulu Boss\nBaru Ketik Read\nMasa Lupa Boss,,,,,,!!!!?♪")
#-----------------------------------------------
#-----------------------------------------------
#----------------Fungsi Join Group Start-----------------------#
elif msg.text in ["Kuy","One piece","Join kuy"]: #Panggil Semua Bot
if msg.from_ in owner:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "Semua Sudah Lengkap"
elif msg.text in ["Kampret join"]:
if msg.form_ in admin:
x = ki.getGroup(msg.to)
x.preventJoinByTicket = False
ki.updateGroup(x)
invsend = 0
Ti = ki.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["Luffy join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Zorro join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Sanji Join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#----------------------Fungsi Join Group Finish---------------#
#-------------Fungsi Leave Group Start---------------#
elif msg.text in ["Bye op","Kabur all","Kaboor all"]: #Bot Ninggalin Group termasuk Bot Induk
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Kaboor"]: #Semua Bot Ninggalin Group Kecuali Bot Induk
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
#cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye zorro"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye sanji"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Ussop"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe1"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe2"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe3"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
#-------------Fungsi Leave Group Finish---------------#
#-------------Fungsi Tag All Start---------------#
        # "Tag all"/"Tagall" (admin): mention every member of the group in one
        # message, using LINE's MENTION content metadata.
        elif msg.text in ["Tag all","Tagall"]:
            if msg.from_ in admin:
                group = cl.getGroup(msg.to)
                # mids of every current member
                nama = [contact.mid for contact in group.members]
                cb = ""   # JSON fragments describing each mention span
                cb2 = ""  # visible message text, one "@nrik " placeholder per member
                strt = int(0)  # start offset of the current mention in cb2
                akh = int(0)   # end offset of the current mention in cb2
                for md in nama:
                    # each "@nrik " placeholder is 6 chars; the mention span
                    # covers it, then offsets advance by 7 (placeholder + "\n")
                    akh = akh + int(6)
                    cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
                    strt = strt + int(7)
                    akh = akh + 1
                    cb2 += "@nrik \n"
                # drop the trailing comma from the mention list
                cb = (cb[:int(len(cb)-1)])
                msg.contentType = 0
                msg.text = cb2
                msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
                try:
                    cl.sendMessage(msg)
                except Exception as error:
                    print error
#-------------Fungsi Tag All Finish---------------#
elif msg.text in ["Bot Like", "Bot like"]: #Semua Bot Ngelike Status Akun Utama
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
likePost()
except:
pass
elif msg.text in ["Like temen", "Bot like temen"]: #Semua Bot Ngelike Status Teman
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Teman Boss")
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
autolike()
except:
pass
#----------------Fungsi Banned Kick Target Start-----------------------#
elif msg.text in ["Kill "]:
if msg.from_ in admin:
if msg.toType == 2:
group = random.choice(KAC).getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Selamat tinggal")
random.choice(KAC).sendText(msg.to,"Jangan masuk lagidevil smile")
return
for jj in matched_list:
try:
klist=[cl,ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#----------------Fungsi Banned Kick Target Finish----------------------#
elif "Ready op" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Ready op","")
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
random.choice(KAC).sendText(msg.to,"Eh Kontol Ini Room apaan?")
random.choice(KAC).sendText(msg.to,"Ratain aja lah\nRoom Ga Berguna..")
random.choice(KAC).sendText(msg.to,"Jangan Baper yah Tollll;")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
random.choice(KAC).sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
if target in Bots:
pass
elif target in admin:
pass
else:
try:
klist=[cl,ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Koq Ga Ditangkis Njiiing?\nLemah Banget Nih Room")
#----------------Fungsi Kick User Target Start----------------------#
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
#----------------Fungsi Kick User Target Finish----------------------#
elif "Blacklist @ " in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = random.choice(KAC).getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Succes Plak")
except:
random.choice(KAC).sendText(msg.to,"error")
#----------------Fungsi Banned User Target Start-----------------------#
elif "Banned @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
ki.sendText(msg.to,"Dilarang Banned Bot")
kk.sendText(msg.to,"Dilarang Banned Bot")
kc.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Akun telah sukses di banned")
except:
random.choice(KAC).sendText(msg.to,"Error")
#----------------Fungsi Banned User Target Finish-----------------------#
#----------------Mid via Tag--------------
elif "Mid @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
#-----------------------------------------
#----------------Fungsi Unbanned User Target Start-----------------------#
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
ki.sendText(msg.to,"Tidak Ditemukan.....")
kk.sendText(msg.to,"Tidak Ditemukan.....")
kc.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
ki.sendText(msg.to,"Error")
#----------------Fungsi Unbanned User Target Finish-----------------------#
#-------------Fungsi Spam Start---------------------#
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
if msg.from_ in admin:
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!����")
kk.sendText(msg.to,"P squared up!")
#-------------Fungsi Spam Finish---------------------#
#-------------Fungsi Broadcast Start------------#
elif "Bc " in msg.text: #NgeBC Ke semua Group yang di Join :D
if msg.from_ in owner:
bctxt = msg.text.replace("Bc ","")
a = cl.getGroupIdsJoined()
a = ki.getGroupIdsJoined()
a = kk.getGroupIdsJoined()
a = kc.getGroupIdsJoined()
a = ks.getGroupIdsJoined()
for taf in a:
cl.sendText(taf, (bctxt))
ki.sendText(taf, (bctxt))
kk.sendText(taf, (bctxt))
kc.sendText(taf, (bctxt))
ks.sendText(taf, (bctxt))
#--------------Fungsi Broadcast Finish-----------#
elif msg.text in ["LG"]: #Melihat List Group
if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]: #Melihat List Group + ID Groupnya (Gunanya Untuk Perintah InviteMeTo:)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------List Group------------
#------------ Keluar Dari Semua Group------
elif msg.text in ["Bot out","Op bye"]: # Keluar Dari Semua Group Yang Di dalem nya ada bot(Kalo Bot Kalian Nyangkut di Group lain :D)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = kk.getGroupIdsJoined()
gid = kc.getGroupIdsJoined()
gid = ks.getGroupIdsJoined()
for i in gid:
ks.leaveGroup(i)
kc.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
cl.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sayonara")
else:
cl.sendText(msg.to,"He declined all invitations")
#------------------------End---------------------
#-----------------End-----------
elif msg.text in ["Op katakan hi"]:
ki.sendText(msg.to,"Hi buddy Har Har")
kk.sendText(msg.to,"Hi buddy Har Har")
kc.sendText(msg.to,"Hi buddy Har Har")
#-----------------------------------------------
elif msg.text in ["Cv say hinata pekok"]:
ki.sendText(msg.to,"Hinata pekok Har Har")
kk.sendText(msg.to,"Hinata pekok Har Har")
kc.sendText(msg.to,"Hinata pekok Har Har")
elif msg.text in ["Cv say didik pekok"]:
ki.sendText(msg.to,"Didik pekok Har Har")
kk.sendText(msg.to,"Didik pekok Har Har")
kc.sendText(msg.to,"Didik pekok Har Har")
elif msg.text in ["Cv say bobo ah","Bobo dulu ah"]:
ki.sendText(msg.to,"Have a nice dream Cv Har Har")
kk.sendText(msg.to,"Have a nice dream Cv Har Har")
kc.sendText(msg.to,"Have a nice dream Cv Har Har")
elif msg.text in ["Cv say chomel pekok"]:
ki.sendText(msg.to,"Chomel pekok Har Har")
kk.sendText(msg.to,"Chomel pekok Har Har")
kc.sendText(msg.to,"Chomel pekok Har Har")
elif msg.text in ["#welcome"]:
ki.sendText(msg.to,"Selamat datang di Group Kami")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
elif msg.text in ["PING","Ping","ping"]:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
kk.sendText(msg.to,"PONG double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
#-----------------------------------------------
#-------------Fungsi Respon Start---------------------#
elif msg.text in ["Absen","Absen bot","Absen dulu","Respon"]:
if msg.from_ in admin:
cl.sendText(msg.to,"Hadir Boss")
ki.sendText(msg.to,"Hadir juga Boss")
kk.sendText(msg.to,"Ada Apaan Siih,,,?")
kc.sendText(msg.to,"Ikut Aja ahh,,,,,!!!!")
ks.sendText(msg.to,"Bentar Boss Ganti Akun Dulu Ane,,,")
cl.sendText(msg.to,"Semua Udah Hadir Boss\nSiap Protect Group\nAman Gak Aman Yang Penting Anu")
#-------------Fungsi Respon Finish---------------------#
#-------------Fungsi Balesan Respon Start---------------------#
elif msg.text in ["Ini Apa","ini apa","Apaan Ini","apaan ini"]:
ki.sendText(msg.to,"Ya gitu deh intinya mah questioning")
#-------------Fungsi Balesan Respon Finish---------------------#
#-------------Fungsi Speedbot Start---------------------#
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "Sabar Cuy...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sDetik" % (elapsed_time))
#-------------Fungsi Speedbot Finish---------------------#
#-------------Fungsi Banned Send Contact Start------------------#
elif msg.text in ["Ban"]:
if msg.from_ in owner:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
ki.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
elif msg.text in ["Unban"]:
if msg.from_ in owner:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
ki.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
#-------------Fungsi Banned Send Contact Finish------------------#
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u25418630a2952d6e99b2ec6df86ae9d3'}
cl.sendText(msg.to,"======================")
cl.sendMessage(msg)
cl.sendText(msg.to,"======================")
cl.sendText(msg.to,"Kenalin Cuy itu Creator kami,,,,!!!!! 😜")
#-------------Fungsi Chat ----------------
elif msg.text in ["Woy","woy","Woi","woi","bot","Bot"]:
quote = ['Istri yang baik itu Istri yang Mengizinkan Suaminya untuk Poligami 😂😂😂.','Kunci Untuk Bikin Suami Bahagia itu cuma satu..\nIzinkan Suamimu Untuk Selingkuh Coyyy ','Ah Kupret Lu','Muka Lu Kaya Jamban','Ada Orang kah disini?','Apaan Siihhh,,,,!!!!','Udah ahh Mau Bobok,,,']
psn = random.choice(quote)
cl.sendText(msg.to,psn)
#-------------Fungsi Bannlist Start------------------#
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada Akun Terbanned")
else:
random.choice(KAC).sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Bannlist Finish------------------#
elif msg.text in ["Cek ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
random.choice(KAC).sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random: " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("random: ","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "albumat'" in msg.text:
try:
albumtags = msg.text.replace("albumat'","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecat'" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecat'","")
cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#---------CCTV-----------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n[•]" + Name
wait2['ROM'][op.param1][op.param2] = "[•]" + Name
else:
cl.sendText
except:
pass
#---------------------
if op.type == 17:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
random.choice(KAC).sendText(op.param1, "Selamat Datang Di Grup " + str(ginfo.name))
random.choice(KAC).sendText(op.param1, "Founder Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName)
random.choice(KAC).sendText(op.param1,"Budayakan Baca Note !!! yah Ka 😊\nSemoga Betah Kk 😘")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if op.param2 in Bots:
return
random.choice(KAC).sendText(op.param1, "Selamat Jalan Cuy, Salam Buat Sahabat Semua yg ada di Sebelah ya,,,,,!!!!")
print "MEMBER HAS LEFT THE GROUP"
#------------------------
if op.type == 59:
print op
except Exception as error:
print error
def a2():
    """Return False during minutes that end a 10-minute interval, else True.

    Used as a gate for periodic work: callers spin while a2() is True and
    proceed when the wall-clock minute is one of 00/10/20/30/40/50.
    """
    now2 = datetime.now()
    nowT = datetime.strftime(now2, "%M")  # two-character minute, e.g. "05"
    # BUG FIX: the original tested nowT[14:], which is always '' for a
    # two-character string, so the condition could never match and the
    # function always returned True.
    if nowT in ["10", "20", "30", "40", "50", "00"]:
        return False
    else:
        return True
def autolike():
    """Best-effort auto-like/auto-comment sweep over the timeline feed.

    Walks up to 500 recent posts; for each post not yet liked, every bot
    account (cl, ki, kk, kc, ks) likes it and leaves a promotional comment.
    All per-post failures are swallowed so one bad post cannot stop the
    sweep.  Depends on the module-level LINE client objects.
    """
    for zx in range(0, 500):
        # NOTE(review): the feed is re-fetched on every iteration; fetching
        # once before the loop would avoid up to 500 identical API calls.
        hasil = cl.activity(limit=500)
        if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
            try:
                # likeType=1001 is the reaction id used by this client library.
                cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉Auto Like by ⭐⭐Koplaxs⭐⭐👈\n\n™SMULE VOICE FAMILY™")
                ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
                kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
                kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                kc.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
                ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                ks.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
                cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"=====Ready=====\n[★]Bot Protect For Group\n[★]\n[★]Selfbot in Your Account[★]\n- 1 Selfbot 1 Bot Assist\n- 1 Selfbot 2 Bot Assist\n- 1 Selfbot 3 Bot Assist\n- 1 Selfbot 4 Bot Assist\n- 1 Selfbot 5 Bot Assist\n\nMau Coba Atau Test Terlebih Dahulu Bisa\nMinat??? PM Id Line @hanavy1992\nLagu Promo Lho Kak\n===[★]One Piece Bot Protect[★]===")
                print "Like"
            except:
                # Best-effort: ignore API/rate-limit errors for this post.
                pass
        else:
            print "Already Liked"
        time.sleep(0.01)  # small pause between posts to soften rate limits
#thread3 = threading.Thread(target=autolike)
#thread3.daemon = True
#thread3.start()
#--------------------
def likePost():
    """Auto-like posts made by accounts listed in the global ``owner`` set.

    Like autolike(), but restricted to owner posts, uses reaction id 1002,
    and only the primary client (cl) comments.  Failures per post are
    swallowed.  Depends on the module-level LINE client objects and
    ``owner``.
    """
    for zx in range(0, 500):
        # NOTE(review): re-fetches the whole feed each iteration; hoisting
        # the cl.activity() call would avoid redundant API traffic.
        hasil = cl.activity(limit=500)
        if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
            if hasil['result']['posts'][zx]['userInfo']['mid'] in owner:
                try:
                    cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto like by ^One Piece Bot^\nStatus Boss udah Kami Like\nOwner Kami :\nHanavy Koplaxs")
                    cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"=====Ready=====\n[★]Bot Protect For Group\n[★]\n[★]Selfbot in Your Account[★]\n- 1 Selfbot 1 Bot Assist\n- 1 Selfbot 2 Bot Assist\n- 1 Selfbot 3 Bot Assist\n- 1 Selfbot 4 Bot Assist\n- 1 Selfbot 5 Bot Assist\n\nMau Coba Atau Test Terlebih Dahulu Bisa\nMinat??? PM Id Line @hanavy1992\nLagu Promo Lho Kak\n===[★]One Piece Bot Protect[★]===")
                    print "Like"
                except:
                    # Best-effort: ignore API errors for this post.
                    pass
        else:
            print "Status Sudah di Like Cuy"
def nameUpdate():
    """Background worker: keep each bot account's display name refreshed.

    While wait["clock"] is True, pushes the configured names
    (wait["cName"] .. wait["cName5"]) to the five bot accounts, then sleeps
    10 minutes.  Errors are swallowed so a transient API failure never
    kills the thread.  Runs forever; started as a daemon thread below.
    """
    # Map each client to the wait[] key holding its configured name.
    targets = ((cl, "cName"), (ki, "cName2"), (kk, "cName3"),
               (kc, "cName4"), (ks, "cName5"))
    while True:
        try:
            if wait["clock"] == True:
                for client, key in targets:
                    profile = client.getProfile()
                    profile.displayName = wait[key]
                    # BUG FIX: the original called ks.updateProfile(profile5a)
                    # — a typo for profile5 — so the fifth account's name was
                    # never updated, and the NameError (swallowed below) also
                    # skipped the sleep, turning this loop into a busy spin.
                    client.updateProfile(profile)
                time.sleep(600)  # refresh every 10 minutes
            else:
                # Avoid a hot loop while the clock feature is disabled.
                time.sleep(10)
        except:
            # Back off briefly on error instead of spinning.
            time.sleep(10)
# Run the display-name updater in the background; daemon=True so the thread
# dies with the main process instead of blocking shutdown.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()

# Main poll loop: fetch pending operations from the LINE server and hand
# each one to the bot() dispatcher above.
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)  # up to 5 ops from the last revision
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Track the highest revision seen so the next fetch resumes there.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
with_notebook.py | import os
import time
from threading import Thread
from jupyter_core.paths import jupyter_data_dir
import notebook
import IPython
from IPython.display import display, Javascript
from .vpython import GlowWidget, baseObj, canvas
from .rate_control import ws_queue
from . import __version__
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import socket
import json
import asyncio
import logging
def find_free_port():
    """Return a TCP port number that was free at the time of the call.

    Binds a throwaway socket to port 0 (so the OS picks an available
    ephemeral port), records the chosen port, and closes the socket so the
    port is released for the caller to bind.
    """
    s = socket.socket()
    s.bind(('', 0))  # port 0 -> OS assigns an available port
    port = s.getsockname()[1]
    # BUG FIX: close the socket explicitly instead of leaking it and
    # relying on CPython refcount GC to release the port.
    s.close()
    return port
# Pick the websocket port once at import time.
__SOCKET_PORT = find_free_port()
try:
    # BUG FIX: 'platform' was never imported in this module, so the check
    # below always raised NameError (swallowed by the bare except) and the
    # PyPy branch was unreachable.  Import it locally before the check.
    import platform
    if platform.python_implementation() == 'PyPy':
        __SOCKET_PORT = 9000 + __SOCKET_PORT % 1000 # use port number between 9000 and 9999 for PyPy
except:
    pass
#### Setup for Jupyter VPython
# The following file operations check whether nbextensions already has the correct files.
package_dir = os.path.dirname(__file__) # The location in site-packages of the vpython module
datacnt = len(os.listdir(package_dir+"/vpython_data")) # the number of files in the site-packages vpython data folder
libcnt = len(os.listdir(package_dir+"/vpython_libraries")) # the number of files in the site-packages vpython libraries folder
jd = jupyter_data_dir()
nbdir = jd+'/nbextensions/'
nbdata = nbdir+'vpython_data'
nblib = nbdir+'vpython_libraries'
transfer = True # need to transfer files from site-packages to nbextensions

### If JupyterLab is installed then copy vpython_data directory to static dir in JupyterLab Application Directory
try:
    import jupyterlab
    import jupyterlab.commands
except ImportError:
    # JupyterLab is optional; the classic-notebook setup below still runs.
    #logging.info("Unable to import jupyterlab")
    pass
else:
    # We have jupyterlab, is it the right version?
    # NOTE(review): this is a lexicographic string comparison, not a semantic
    # version compare — verify it behaves for versions like '0.4.0'.
    if (jupyterlab.__version__ >= '0.35.0') and (jupyterlab.__version__ < '1.0.0'):
        from os.path import join
        labextensions_dir = join(jupyterlab.commands.get_app_dir(), u'static')
        try:
            notebook.nbextensions.install_nbextension(path=package_dir + "/vpython_data",
                                                      nbextensions_dir=labextensions_dir,
                                                      overwrite=False,
                                                      verbose=0)
        except PermissionError:
            # Installing into the lab static dir may require elevated rights;
            # failure here is non-fatal.
            #logging.info("PermissionError: Unable to install /vpython_data directory and files for VPython on JupyterLab")
            pass
# Decide whether the nbextensions copy is current; transfer files if not.
if 'nbextensions' in os.listdir(jd):
    ldir = os.listdir(nbdir)
    if ('vpython_data' in ldir and len(os.listdir(nbdata)) == datacnt and
        'vpython_libraries' in ldir and len(os.listdir(nblib)) == libcnt and
        'vpython_version.txt' in ldir):
        # BUG FIX: use a context manager so the version-file handle is
        # closed promptly instead of being leaked to garbage collection.
        with open(nbdir+'/vpython_version.txt') as vfile:
            v = vfile.read()
        transfer = (v != __version__) # need not transfer files to nbextensions if correct version's files already there
if transfer:
    notebook.nbextensions.install_nbextension(path = package_dir+"/vpython_data",overwrite = True,user = True,verbose = 0)
    notebook.nbextensions.install_nbextension(path = package_dir+"/vpython_libraries",overwrite = True,user = True,verbose = 0)
    # Wait for files to be transferred to nbextensions:
    libready = False
    dataready = False
    while True:
        nb = os.listdir(nbdir)
        for f in nb:
            if f == 'vpython_data':
                if len(os.listdir(nbdata)) == datacnt:
                    dataready = True
            if f == 'vpython_libraries':
                if len(os.listdir(nblib)) == libcnt:
                    libready = True
        if libready and dataready: break
        time.sleep(0.05)  # avoid a hot busy-wait while files appear
    # Mark with the version number that the files have been transferred successfully:
    with open(nbdir+'/vpython_version.txt', 'w') as fd:
        fd.write(__version__)
# Force the notebook to (re)load the glow/glowcomm/jquery-ui libraries:
# first undefine any cached require modules, then require them fresh.
# Outside Jupyter (e.g. nbconvert), just blank the output element.
display(Javascript("""if (typeof Jupyter !== "undefined") {require.undef("nbextensions/vpython_libraries/glow.min");}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require.undef("nbextensions/vpython_libraries/glowcomm");}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require.undef("nbextensions/vpython_libraries/jquery-ui.custom.min");}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/glow.min"], function(){console.log("GLOW LOADED");});}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/glowcomm"], function(){console.log("GLOWCOMM LOADED");});}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/jquery-ui.custom.min"], function(){console.log("JQUERY LOADED");});}else{element.textContent = ' ';}"""))
if transfer:
    time.sleep(4) # allow some time for javascript code above to run after nbextensions update before attempting to setup Comm Channel
else:
    time.sleep(2) # allow some time for javascript code above to run before attempting to setup Comm Channel
# Set by WSHandler.open() once the browser's websocket connects.
wsConnected = False
class WSHandler(tornado.websocket.WebSocketHandler):
    """Websocket endpoint the browser-side glow library connects to."""

    def open(self):
        # Record that the browser has connected; the module-level wait
        # loop below spins on this flag.
        global wsConnected
        wsConnected = True

    def on_message(self, message):
        # Hand every incoming browser event to the rate-control queue.
        ws_queue.put(message)

    def on_close(self):
        # Browser went away: shut this server's tornado loop down.
        self.stop_tornado()

    def stop_tornado(self):
        loop = tornado.ioloop.IOLoop.instance()
        loop.add_callback(loop.stop)

    def check_origin(self, origin):
        # Accept connections from any origin.
        return True
def start_server():
    """Run a tornado HTTP/websocket server on __SOCKET_PORT in this thread."""
    # This runs in a worker thread, which needs its own asyncio loop.
    asyncio.set_event_loop(asyncio.new_event_loop())
    app = tornado.web.Application([(r'/ws', WSHandler)])
    server = tornado.httpserver.HTTPServer(app)
    server.listen(__SOCKET_PORT)
    # Quiet tornado's per-request access logging.
    logging.getLogger('tornado.access').setLevel(logging.WARNING)
    tornado.ioloop.IOLoop.instance().start()
# Removed check for ipykernel version because the old check
# was for 5.0.0 but this works with 4.x too...and 4.x is the first
# version of ipykernel
# Start the tornado websocket server in a background thread.
t = Thread(target=start_server, args=())
t.start()
# Setup Comm Channel and websocket
baseObj.glow = GlowWidget(wsport=__SOCKET_PORT, wsuri='/ws')
while (not wsConnected):
    time.sleep(0.1)          # wait for websocket to connect
baseObj.trigger()  # start the trigger ping-pong process
# Same justification as above for removing the ipykernel check.
# There was also an IPython version check for >=4, which was
# released in Nov 2015. Ok to stop supporting in 2.019 I think.
async def wsperiodic():
    """Poll the websocket queue and feed queued events to the glow widget."""
    while True:
        if ws_queue.qsize() > 0:
            raw = ws_queue.get()
            events = json.loads(raw)
            # Must send events one at a time to GW.handle_msg because
            # bound events need the loop code.
            for event in events:
                # Mimic the message format the notebook comm channel uses.
                baseObj.glow.handle_msg({'content': {'data': [event]}})
        await asyncio.sleep(0.1)
# Schedule the websocket poller on the kernel's existing event loop.
loop = asyncio.get_event_loop()
loop.create_task(wsperiodic())
# Dummy name to import...
_ = None
|
proxy.py | #!/usr/bin/env python
# coding:utf-8
# Based on GAppProxy 2.0.0 by Du XiaoGang <dugang.2008@gmail.com>
# Based on WallProxy 0.4.0 by Hust Moon <www.ehust@gmail.com>
# Contributor:
# Phus Lu <phus.lu@gmail.com>
# Hewig Xu <hewigovens@gmail.com>
# Ayanamist Yang <ayanamist@gmail.com>
# V.E.O <V.E.O@tom.com>
# Max Lv <max.c.lv@gmail.com>
# AlsoTang <alsotang@gmail.com>
# Christopher Meng <i@cicku.me>
# Yonsm Guo <YonsmGuo@gmail.com>
# Parkman <cseparkman@gmail.com>
# Ming Bai <mbbill@gmail.com>
# Bin Yu <yubinlove1991@gmail.com>
# lileixuan <lileixuan@gmail.com>
# Cong Ding <cong@cding.org>
# Zhang Youfu <zhangyoufu@gmail.com>
# Lu Wei <luwei@barfoo>
# Harmony Meow <harmony.meow@gmail.com>
# logostream <logostream@gmail.com>
# Rui Wang <isnowfy@gmail.com>
# Wang Wei Qiang <wwqgtxx@gmail.com>
# Felix Yan <felixonmars@gmail.com>
# Sui Feng <suifeng.me@qq.com>
# QXO <qxodream@gmail.com>
# Geek An <geekan@foxmail.com>
# Poly Rabbit <mcx_221@foxmail.com>
# oxnz <yunxinyi@gmail.com>
# Shusen Liu <liushusen.smart@gmail.com>
# Yad Smood <y.s.inside@gmail.com>
# Chen Shuang <cs0x7f@gmail.com>
# cnfuyu <cnfuyu@gmail.com>
# cuixin <steven.cuixin@gmail.com>
# s2marine0 <s2marine0@gmail.com>
# Toshio Xiang <snachx@gmail.com>
# Bo Tian <dxmtb@163.com>
# Virgil <variousvirgil@gmail.com>
# hub01 <miaojiabumiao@yeah.net>
# v3aqb <sgzz.cj@gmail.com>
# Oling Cat <olingcat@gmail.com>
# Meng Zhuo <mengzhuo1203@gmail.com>
# zwhfly <zwhfly@163.com>
# Hubertzhang <hubert.zyk@gmail.com>
# arrix <arrixzhou@gmail.com>
# gwjwin <gwjwin@sina.com>
# Jobin <1149225004@qq.com>
# Zhuhao Wang <zhuhaow@gmail.com>
# YFdyh000 <yfdyh000@gmail.com>
# zzq1015 <zzq1015@users.noreply.github.com>
# Zhengfa Dang <zfdang@users.noreply.github.com>
# haosdent <haosdent@gmail.com>
# xk liu <lxk1012@gmail.com>
__version__ = '3.2.3'

import os
import sys
import sysconfig
import platform

#reload(sys).setdefaultencoding('UTF-8')
sys.dont_write_bytecode = True  # keep the bundled tree free of .pyc files

# Locate the bundled python27 library tree relative to this file and put the
# platform-independent libraries on sys.path.
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir))
python_path = os.path.join(root_path, 'python27', '1.0')
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)

# Add the platform-specific native-extension directory for this OS.
if sys.platform == "win32":
    win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
    sys.path.append(win32_lib)
elif sys.platform == "linux" or sys.platform == "linux2":
    linux_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
    sys.path.append(linux_lib)
elif sys.platform == "darwin":
    darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
    sys.path.append(darwin_lib)
    # macOS system Python keeps some extras outside the framework proper.
    extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
    sys.path.append(extra_lib)

from cert_util import CertUtil

# gevent is mandatory: monkey-patch the stdlib, or exit with an install hint.
try:
    __import__('gevent.monkey', fromlist=['.']).patch_all()
except (ImportError, SystemError) as e:
    print "import gevent fail:", e
    sys.exit(sys.stderr.write('please install python-gevent\n'))
import base64
import collections
import ConfigParser
import errno
import httplib
import io
import Queue
import random
import re
import socket
import ssl
import struct
import thread
import threading
import time
import urllib2
import urlparse
import gevent
import OpenSSL
NetWorkIOError = (socket.error, ssl.SSLError, OpenSSL.SSL.Error, OSError)
import logging
from proxylib import AuthFilter
from proxylib import AutoRangeFilter
from proxylib import BaseFetchPlugin
from proxylib import BaseProxyHandlerFilter
from proxylib import BlackholeFilter
from proxylib import CipherFileObject
from proxylib import deflate
from proxylib import DirectFetchPlugin
from proxylib import DirectRegionFilter
from proxylib import dnslib_record2iplist
from proxylib import dnslib_resolve_over_tcp
from proxylib import dnslib_resolve_over_udp
from proxylib import FakeHttpsFilter
from proxylib import ForceHttpsFilter
from proxylib import CRLFSitesFilter
from proxylib import get_dnsserver_list
from proxylib import get_uptime
from proxylib import inflate
from proxylib import LocalProxyServer
from proxylib import message_html
from proxylib import MockFetchPlugin
from proxylib import MultipleConnectionMixin
from proxylib import openssl_set_session_cache_mode
from proxylib import ProxyConnectionMixin
from proxylib import ProxyUtil
from proxylib import RC4Cipher
from proxylib import SimpleProxyHandler
from proxylib import spawn_later
from proxylib import SSLConnection
from proxylib import StaticFileFilter
from proxylib import StripPlugin
from proxylib import StripPluginEx
from proxylib import URLRewriteFilter
from proxylib import UserAgentFilter
from proxylib import XORCipher
import web_control
def is_google_ip(ipaddr):
    """Heuristically decide whether *ipaddr* is a usable Google front end.

    A handful of known-bad addresses are rejected outright; otherwise the
    address is accepted when it falls inside one of the listed prefixes.
    """
    rejected = ('74.125.127.102', '74.125.155.102', '74.125.39.102',
                '74.125.39.113', '209.85.229.138')
    if ipaddr in rejected:
        return False
    google_prefixes = ('173.194.', '207.126.', '209.85.', '216.239.',
                       '64.18.', '64.233.', '66.102.', '66.249.',
                       '72.14.', '74.125.')
    # startswith accepts a tuple: one call instead of an or-chain.
    return ipaddr.startswith(google_prefixes)
class RangeFetch(object):
    """Download a large response as parallel byte ranges.

    Used when the upstream returns 206 Partial Content: the rest of the
    resource is split into `maxsize` chunks which worker greenlets fetch
    concurrently, while fetch() streams the chunks to the client strictly
    in order.
    """
    threads = 2                # number of concurrent range workers
    maxsize = 1024*1024*4      # bytes requested per Range chunk
    bufsize = 8192             # read size when draining a response
    waitsize = 1024*512        # pacing divisor for staggered worker start
    def __init__(self, handler, plugin, response, fetchservers, **kwargs):
        assert isinstance(plugin, BaseFetchPlugin) and hasattr(plugin, 'fetch')
        self.handler = handler
        self.url = handler.path
        self.plugin = plugin
        self.response = response          # the initial 206 response
        self.fetchservers = fetchservers  # candidate upstream servers
        self.kwargs = kwargs
        self._stopped = None              # set True to tell workers to quit
        self._last_app_status = {}        # fetchserver -> last app status
        self.expect_begin = 0             # next byte offset owed to the client
    def fetch(self):
        """Send headers, spawn range workers, and stream chunks in order."""
        response_status = self.response.status
        response_headers = dict((k.title(), v) for k, v in self.response.getheaders())
        content_range = response_headers['Content-Range']
        #content_length = response_headers['Content-Length']
        start, end, length = tuple(int(x) for x in re.search(r'bytes (\d+)-(\d+)/(\d+)', content_range).group(1, 2, 3))
        if start == 0:
            # Whole resource from the beginning: present it as a plain 200.
            response_status = 200
            response_headers['Content-Length'] = str(length)
            del response_headers['Content-Range']
        else:
            response_headers['Content-Range'] = 'bytes %s-%s/%s' % (start, end, length)
            response_headers['Content-Length'] = str(length-start)
        logging.info('>>>>>>>>>>>>>>> RangeFetch started(%r) %d-%d', self.url, start, end)
        self.handler.send_response(response_status)
        for key, value in response_headers.items():
            self.handler.send_header(key, value)
        self.handler.end_headers()
        # data_queue: (offset, bytes) chunks, delivered in offset order.
        # range_queue: (start, end, response-or-None) work items for workers.
        data_queue = Queue.PriorityQueue()
        range_queue = Queue.PriorityQueue()
        range_queue.put((start, end, self.response))
        self.expect_begin = start
        for begin in range(end+1, length, self.maxsize):
            range_queue.put((begin, min(begin+self.maxsize-1, length-1), None))
        for i in xrange(0, self.threads):
            # Stagger worker start so they don't all race ahead of the client.
            range_delay_size = i * self.maxsize
            spawn_later(float(range_delay_size)/self.waitsize, self.__fetchlet, range_queue, data_queue, range_delay_size)
        has_peek = hasattr(data_queue, 'peek')
        peek_timeout = 120
        # NOTE(review): the loop exits when expect_begin >= length - 1;
        # chunks arrive in bufsize pieces, so this looks like it could stop
        # one byte early if a piece ends exactly at length-1 — confirm.
        while self.expect_begin < length - 1:
            try:
                if has_peek:
                    begin, data = data_queue.peek(timeout=peek_timeout)
                    if self.expect_begin == begin:
                        data_queue.get()
                    elif self.expect_begin < begin:
                        # Out-of-order chunk: wait for the missing one.
                        time.sleep(0.1)
                        continue
                    else:
                        logging.error('RangeFetch Error: begin(%r) < expect_begin(%r), quit.', begin, self.expect_begin)
                        break
                else:
                    begin, data = data_queue.get(timeout=peek_timeout)
                    if self.expect_begin == begin:
                        pass
                    elif self.expect_begin < begin:
                        # Not its turn yet: put it back and wait.
                        data_queue.put((begin, data))
                        time.sleep(0.1)
                        continue
                    else:
                        logging.error('RangeFetch Error: begin(%r) < expect_begin(%r), quit.', begin, self.expect_begin)
                        break
            except Queue.Empty:
                logging.error('data_queue peek timeout, break')
                break
            try:
                self.handler.wfile.write(data)
                self.expect_begin += len(data)
                del data
            except Exception as e:
                logging.info('RangeFetch client connection aborted(%s).', e)
                break
        # Signal workers to stop.
        self._stopped = True
    def __fetchlet(self, range_queue, data_queue, range_delay_size):
        """Worker: pull (start, end) ranges, fetch them, push data chunks."""
        headers = dict((k.title(), v) for k, v in self.handler.headers.items())
        headers['Connection'] = 'close'
        while 1:
            try:
                if self._stopped:
                    return
                try:
                    start, end, response = range_queue.get(timeout=1)
                    # Throttle if we are too far ahead of the client
                    # (more than ~30MB buffered).
                    if self.expect_begin < start and data_queue.qsize() * self.bufsize + range_delay_size > 30*1024*1024:
                        range_queue.put((start, end, response))
                        time.sleep(10)
                        continue
                    headers['Range'] = 'bytes=%d-%d' % (start, end)
                    fetchserver = ''
                    if not response:
                        fetchserver = random.choice(self.fetchservers)
                        if self._last_app_status.get(fetchserver, 200) >= 500:
                            # Back off from a server that recently errored.
                            time.sleep(5)
                        response = self.plugin.fetch(self.handler, self.handler.command, self.url, headers, self.handler.body, timeout=self.handler.connect_timeout, fetchserver=fetchserver, **self.kwargs)
                except Queue.Empty:
                    continue
                except Exception as e:
                    logging.warning("RangeFetch fetch response %r in __fetchlet", e)
                    range_queue.put((start, end, None))
                    continue
                if not response:
                    logging.warning('RangeFetch %s return %r', headers['Range'], response)
                    range_queue.put((start, end, None))
                    continue
                if fetchserver:
                    self._last_app_status[fetchserver] = response.app_status
                if response.app_status != 200:
                    logging.warning('Range Fetch "%s %s" %s return %s', self.handler.command, self.url, headers['Range'], response.app_status)
                    response.close()
                    range_queue.put((start, end, None))
                    continue
                if response.getheader('Location'):
                    # Follow redirects by updating the shared URL and retrying.
                    self.url = urlparse.urljoin(self.url, response.getheader('Location'))
                    logging.info('RangeFetch Redirect(%r)', self.url)
                    response.close()
                    range_queue.put((start, end, None))
                    continue
                if 200 <= response.status < 300:
                    content_range = response.getheader('Content-Range')
                    if not content_range:
                        logging.warning('RangeFetch "%s %s" return Content-Range=%r: response headers=%r, retry %s-%s', self.handler.command, self.url, content_range, response.getheaders(), start, end)
                        response.close()
                        range_queue.put((start, end, None))
                        continue
                    content_length = int(response.getheader('Content-Length', 0))
                    logging.info('>>>>>>>>>>>>>>> [thread %s] %s %s', threading.currentThread().ident, content_length, content_range)
                    # Drain this range response in bufsize pieces.
                    while 1:
                        try:
                            if self._stopped:
                                response.close()
                                return
                            data = None
                            with gevent.Timeout(max(1, self.bufsize//8192), False):
                                data = response.read(self.bufsize)
                            if not data:
                                break
                            data_queue.put((start, data))
                            start += len(data)
                        except Exception as e:
                            logging.warning('RangeFetch "%s %s" %s failed: %s', self.handler.command, self.url, headers['Range'], e)
                            break
                    if start < end + 1:
                        # Short read: requeue the unread remainder.
                        logging.warning('RangeFetch "%s %s" retry %s-%s', self.handler.command, self.url, start, end)
                        response.close()
                        range_queue.put((start, end, None))
                        continue
                    logging.info('>>>>>>>>>>>>>>> Successfully reached %d bytes.', start - 1)
                else:
                    logging.error('RangeFetch %r return %s', self.url, response.status)
                    response.close()
                    range_queue.put((start, end, None))
                    continue
            except StandardError as e:
                logging.exception('RangeFetch._fetchlet error:%s', e)
                raise
class GAEFetchPlugin(BaseFetchPlugin):
    """gae fetch plugin.

    Forwards HTTP requests through Google App Engine fetch servers: the
    request line, headers and (optionally deflated) body are packed into a
    POST to one of ``self.appids``, and the wrapped upstream response is
    unpacked again in fetch().
    """
    connect_timeout = 4   # seconds per attempt against a fetchserver
    max_retry = 2         # attempts before giving up and returning 502
    def __init__(self, appids, password, path, mode, cachesock, keepalive, obfuscate, pagespeed, validate, options, maxsize):
        BaseFetchPlugin.__init__(self)
        self.appids = appids
        self.password = password
        self.path = path
        self.mode = mode
        self.cachesock = cachesock
        self.keepalive = keepalive
        self.obfuscate = obfuscate
        self.pagespeed = pagespeed
        self.validate = validate
        self.options = options
        self.maxsize = maxsize
    def handle(self, handler, **kwargs):
        """Serve one non-CONNECT request via GAE, rotating appids on failure."""
        assert handler.command != 'CONNECT'
        method = handler.command
        headers = dict((k.title(), v) for k, v in handler.headers.items())
        body = handler.body
        if handler.path[0] == '/':
            url = '%s://%s%s' % (handler.scheme, handler.headers['Host'], handler.path)
        elif handler.path.lower().startswith(('http://', 'https://', 'ftp://')):
            url = handler.path
        else:
            raise ValueError('URLFETCH %r is not a valid url' % handler.path)
        errors = []
        response = None
        for i in xrange(self.max_retry):
            try:
                response = self.fetch(handler, method, url, headers, body, self.connect_timeout)
                if response.app_status < 500:
                    break
                else:
                    if response.app_status == 503:
                        # appid over quota: rotate to the next appid
                        if len(self.appids) > 1:
                            self.appids.append(self.appids.pop(0))
                            logging.info('gae over qouta, switch next appid=%r', self.appids[0])
                    if i < self.max_retry - 1 and len(self.appids) > 1:
                        self.appids.append(self.appids.pop(0))
                        logging.info('URLFETCH return %d, trying next appid=%r', response.app_status, self.appids[0])
                    response.close()
            except Exception as e:
                errors.append(e)
                logging.info('GAE "%s %s" appid=%r %r, retry...', handler.command, handler.path, self.appids[0], e)
        if len(errors) == self.max_retry:
            # Every attempt raised: surface the failure to the client.
            if response and response.app_status >= 500:
                status = response.app_status
                headers = dict(response.getheaders())
                content = response.read()
                response.close()
            else:
                status = 502
                headers = {'Content-Type': 'text/html'}
                content = message_html('502 URLFetch failed', 'Local URLFetch %r failed' % handler.path, '<br>'.join(repr(x) for x in errors))
            return handler.handler_plugins['mock'].handle(handler, status, headers, content)
        logging.info('%s "GAE %s %s %s" %s %s', handler.address_string(), handler.command, handler.path, handler.protocol_version, response.status, response.getheader('Content-Length', '-'))
        try:
            if response.status == 206:
                # Partial content: hand off to the parallel range fetcher.
                fetchservers = ['%s://%s.appspot.com%s' % (self.mode, x, self.path) for x in self.appids]
                return RangeFetch(handler, self, response, fetchservers).fetch()
            handler.close_connection = not response.getheader('Content-Length')
            handler.send_response(response.status)
            for key, value in response.getheaders():
                if key.title() == 'Transfer-Encoding':
                    continue
                handler.send_header(key, value)
            handler.end_headers()
            # Stream the body to the client in fixed-size pieces.
            bufsize = 8192
            while True:
                data = None
                with gevent.Timeout(self.connect_timeout, False):
                    data = response.read(bufsize)
                if data is None:
                    # read() timed out: drop the connection.
                    logging.warning('response.read(%r) %r timeout', bufsize, url)
                    handler.close_connection = True
                    break
                if data:
                    handler.wfile.write(data)
                if not data:
                    cache_sock = getattr(response, 'cache_sock', None)
                    if cache_sock:
                        cache_sock.close()
                        del response.cache_sock
                    response.close()
                    break
                del data
        except NetWorkIOError as e:
            # Client went away mid-stream; nothing more to do.
            if e[0] in (errno.ECONNABORTED, errno.EPIPE) or 'bad write retry' in repr(e):
                return
    def fetch(self, handler, method, url, headers, body, timeout, **kwargs):
        """Pack one request into the GAE URLFETCH wire format and send it."""
        # BUG FIX: remember the caller-chosen fetchserver (RangeFetch passes
        # one explicitly) BEFORE kwargs is rebuilt below; the original read
        # kwargs.get('fetchserver') after `kwargs = {}`, so the explicit
        # server was always silently discarded.
        fetchserver = kwargs.get('fetchserver')
        if isinstance(body, basestring) and body:
            if len(body) < 10 * 1024 * 1024 and 'Content-Encoding' not in headers:
                zbody = deflate(body)
                if len(zbody) < len(body):
                    body = zbody
                    headers['Content-Encoding'] = 'deflate'
            headers['Content-Length'] = str(len(body))
        # GAE donot allow set `Host` header
        if 'Host' in headers:
            del headers['Host']
        kwargs = {}
        if self.password:
            kwargs['password'] = self.password
        if self.options:
            kwargs['options'] = self.options
        if self.validate:
            kwargs['validate'] = self.validate
        if self.maxsize:
            kwargs['maxsize'] = self.maxsize
        payload = '%s %s %s\r\n' % (method, url, handler.request_version)
        payload += ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items() if k not in handler.skip_headers)
        payload += ''.join('X-URLFETCH-%s: %s\r\n' % (k, v) for k, v in kwargs.items() if v)
        # prepare GAE request
        request_method = 'POST'
        fetchserver_index = random.randint(0, len(self.appids)-1) if 'Range' in headers else 0
        fetchserver = fetchserver or '%s://%s.appspot.com%s' % (self.mode, self.appids[fetchserver_index], self.path)
        request_headers = {}
        if common.GAE_OBFUSCATE:
            # Disguise the request as a GET for a .gif resource.
            request_method = 'GET'
            fetchserver += 'ps/%d%s.gif' % (int(time.time()*1000), random.random())
            request_headers['X-URLFETCH-PS1'] = base64.b64encode(deflate(payload)).strip()
            if body:
                request_headers['X-URLFETCH-PS2'] = base64.b64encode(deflate(body)).strip()
                body = ''
            if common.GAE_PAGESPEED:
                fetchserver = re.sub(r'^(\w+://)', r'\g<1>1-ps.googleusercontent.com/h/', fetchserver)
        else:
            # Plain mode: length-prefixed deflated header block, then body.
            payload = deflate(payload)
            body = '%s%s%s' % (struct.pack('!h', len(payload)), payload, body)
            if 'rc4' in common.GAE_OPTIONS:
                request_headers['X-URLFETCH-Options'] = 'rc4'
                body = RC4Cipher(kwargs.get('password')).encrypt(body)
        request_headers['Content-Length'] = str(len(body))
        # post data
        need_crlf = 0 if common.GAE_MODE == 'https' else 1
        need_validate = common.GAE_VALIDATE
        cache_key = '%s:%d' % (common.HOST_POSTFIX_MAP['.appspot.com'], 443 if common.GAE_MODE == 'https' else 80)
        headfirst = bool(common.GAE_HEADFIRST)
        response = handler.create_http_request(request_method, fetchserver, request_headers, body, timeout, crlf=need_crlf, validate=need_validate, cache_key=cache_key, headfirst=headfirst)
        response.app_status = response.status
        if response.app_status != 200:
            return response
        if 'rc4' in request_headers.get('X-URLFETCH-Options', ''):
            response.fp = CipherFileObject(response.fp, RC4Cipher(kwargs['password']))
        # Unpack the wrapped response: 2-byte header length, deflated headers.
        data = response.read(2)
        if len(data) < 2:
            response.status = 502
            response.fp = io.BytesIO(b'connection aborted. too short leadbyte data=' + data)
            response.read = response.fp.read
            return response
        headers_length, = struct.unpack('!h', data)
        data = response.read(headers_length)
        if len(data) < headers_length:
            response.status = 502
            response.fp = io.BytesIO(b'connection aborted. too short headers data=' + data)
            response.read = response.fp.read
            return response
        raw_response_line, headers_data = inflate(data).split('\r\n', 1)
        _, response.status, response.reason = raw_response_line.split(None, 2)
        response.status = int(response.status)
        response.reason = response.reason.strip()
        response.msg = httplib.HTTPMessage(io.BytesIO(headers_data))
        return response
class PHPFetchPlugin(BaseFetchPlugin):
    """Forward a client request through a remote PHP fetch server.

    The original request line, headers and X-URLFETCH-* options are
    serialized, deflate-compressed, length-prefixed and POSTed to one of
    ``fetchservers``; the response is optionally XOR-decrypted and
    streamed back to the client in 8KB chunks.
    """
    connect_timeout = 4  # seconds allowed for the POST to the fetch server

    def __init__(self, fetchservers, password, validate):
        BaseFetchPlugin.__init__(self)
        self.fetchservers = fetchservers
        self.password = password
        self.validate = validate

    def handle(self, handler, **kwargs):
        method = handler.command
        url = handler.path
        headers = dict((k.title(), v) for k, v in handler.headers.items())
        body = handler.body
        if body:
            # Deflate the request body only when that actually saves bytes
            # and the client did not already apply its own encoding.
            if len(body) < 10 * 1024 * 1024 and 'Content-Encoding' not in headers:
                zbody = deflate(body)
                if len(zbody) < len(body):
                    body = zbody
                    headers['Content-Encoding'] = 'deflate'
            headers['Content-Length'] = str(len(body))
        # Fix: removed unused local `skip_headers = handler.skip_headers`;
        # the filter below reads handler.skip_headers directly.
        if self.password:
            kwargs['password'] = self.password
        if self.validate:
            kwargs['validate'] = self.validate
        # Serialize the request (request line + filtered headers +
        # X-URLFETCH options), deflate it, and prepend a network-order
        # short length prefix so the server can split payload from body.
        payload = '%s %s %s\r\n' % (method, url, handler.request_version)
        payload += ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items() if k not in handler.skip_headers)
        payload += ''.join('X-URLFETCH-%s: %s\r\n' % (k, v) for k, v in kwargs.items() if v)
        payload = deflate(payload)
        body = '%s%s%s' % (struct.pack('!h', len(payload)), payload, body)
        request_headers = {'Content-Length': len(body), 'Content-Type': 'application/octet-stream'}
        # Range requests are spread over a random fetch server; everything
        # else sticks to the first configured one.
        fetchserver_index = 0 if 'Range' not in headers else random.randint(0, len(self.fetchservers)-1)
        fetchserver = '%s?%s' % (self.fetchservers[fetchserver_index], random.random())
        crlf = 0
        cache_key = '%s//:%s' % urlparse.urlsplit(fetchserver)[:2]
        try:
            response = handler.create_http_request('POST', fetchserver, request_headers, body, self.connect_timeout, crlf=crlf, cache_key=cache_key)
        except Exception as e:
            logging.warning('%s "%s" failed %r', method, url, e)
            return
        response.app_status = response.status
        # The server disguises encrypted payloads as image/gif; undo the
        # XOR obfuscation only in that case.
        need_decrypt = self.password and response.app_status == 200 and response.getheader('Content-Type', '') == 'image/gif' and response.fp
        if need_decrypt:
            response.fp = CipherFileObject(response.fp, XORCipher(self.password[0]))
        logging.info('%s "PHP %s %s %s" %s %s', handler.address_string(), handler.command, url, handler.protocol_version, response.status, response.getheader('Content-Length', '-'))
        handler.close_connection = bool(response.getheader('Transfer-Encoding'))
        # Stream the response through to the client in 8KB chunks.
        while True:
            data = response.read(8192)
            if not data:
                break
            handler.wfile.write(data)
            del data
class VPSFetchPlugin(BaseFetchPlugin):
    """vps fetch plugin (incomplete: the relay back to the client is TODO)."""
    connect_timeout = 4  # seconds allowed for the upstream request

    def __init__(self, fetchservers, username, password):
        BaseFetchPlugin.__init__(self)
        self.fetchservers = fetchservers
        self.username = username
        self.password = password
        self.fake_headers = {}  # last camouflaged header set sent upstream

    def handle(self, handler, **kwargs):
        # Dispatch CONNECT tunnels and plain HTTP methods separately.
        if handler.command == 'CONNECT':
            return self.handle_connect(handler, **kwargs)
        else:
            return self.handle_method(handler, **kwargs)

    def handle_connect(self, handler, **kwargs):
        # CONNECT tunneling is not implemented for VPS mode.
        return

    def handle_method(self, handler, **kwargs):
        method = handler.command
        url = handler.path
        headers = dict((k.title(), v) for k, v in handler.headers.items() if k.title() not in handler.skip_headers)
        # Move identifying headers aside and replace Host with a fake
        # www.<username>.com value -- presumably recognized by the VPS
        # side (TODO confirm; x_headers is built but never forwarded).
        x_headers = {}
        if 'Host' in headers:
            x_headers['Host'] = headers.pop('Host')
        if 'Cookie' in headers:
            x_headers['Cookie'] = headers.pop('Cookie')
        headers['Host'] = 'www.%s.com' % self.username
        self.fake_headers = headers.copy()
        fetchserver = random.choice(self.fetchservers)
        response = handler.create_http_request(handler.command, fetchserver, headers, handler.body, self.connect_timeout)
        if not response:
            raise socket.error(errno.ECONNRESET, 'urlfetch %r return None' % url)
        #TODO
class HostsFilter(BaseProxyHandlerFilter):
    """hosts filter

    Maps hostnames / host:port pairs / URL regexes (parsed from config)
    to named iplists or literal '|'-separated host lists, and routes
    matching requests straight to the 'direct' plugin.
    """
    def __init__(self, iplist_map, host_map, host_postfix_map, hostport_map, hostport_postfix_map, urlre_map):
        self.iplist_map = iplist_map                # iplist name -> [ip, ...]
        self.host_map = host_map                    # exact hostname -> alias
        self.host_postfix_map = host_postfix_map    # '.suffix' -> alias
        self.host_postfix_endswith = tuple(host_postfix_map)  # fast endswith() probe
        self.hostport_map = hostport_map            # exact host:port -> alias
        self.hostport_postfix_map = hostport_postfix_map
        self.hostport_postfix_endswith = tuple(hostport_postfix_map)
        self.urlre_map = urlre_map                  # compiled regex .match -> alias
    def gethostbyname2(self, handler, hostname):
        """Resolve hostname via the configured maps; None means no opinion.

        Precedence: host:port rules override plain host rules, and URL
        regex rules (non-CONNECT requests only) override both.
        """
        hostport = '%s:%d' % (hostname, handler.port)
        hosts = ''
        if hostname in self.host_map:
            hosts = self.host_map[hostname]
        elif hostname.endswith(self.host_postfix_endswith):
            hosts = next(self.host_postfix_map[x] for x in self.host_postfix_map if hostname.endswith(x))
        if hostport in self.hostport_map:
            hosts = self.hostport_map[hostport]
        elif hostport.endswith(self.hostport_postfix_endswith):
            hosts = next(self.hostport_postfix_map[x] for x in self.hostport_postfix_map if hostport.endswith(x))
        if handler.command != 'CONNECT' and self.urlre_map:
            try:
                hosts = next(self.urlre_map[x] for x in self.urlre_map if x(handler.path))
            except StopIteration:
                pass
        if hosts not in ('', 'direct'):
            # An iplist alias expands to its address list; otherwise treat
            # the value as a literal '|'-separated host list.
            return self.iplist_map.get(hosts) or hosts.split('|')
        return None
    def filter(self, handler):
        """Return ('direct', kwargs) for hosts covered by the maps, else None."""
        host, port = handler.host, handler.port
        hostport = handler.path if handler.command == 'CONNECT' else '%s:%d' % (host, port)
        headfirst = '.google' in host  # send headers first for google hosts
        if host in self.host_map:
            return 'direct', {'cache_key': '%s:%d' % (self.host_map[host], port), 'headfirst': headfirst}
        elif host.endswith(self.host_postfix_endswith):
            # Memoize the postfix lookup as an exact-host entry for next time.
            self.host_map[host] = next(self.host_postfix_map[x] for x in self.host_postfix_map if host.endswith(x))
            return 'direct', {'cache_key': '%s:%d' % (self.host_map[host], port), 'headfirst': headfirst}
        elif hostport in self.hostport_map:
            return 'direct', {'cache_key': '%s:%d' % (self.hostport_map[hostport], port), 'headfirst': headfirst}
        elif hostport.endswith(self.hostport_postfix_endswith):
            self.hostport_map[hostport] = next(self.hostport_postfix_map[x] for x in self.hostport_postfix_map if hostport.endswith(x))
            return 'direct', {'cache_key': '%s:%d' % (self.hostport_map[hostport], port), 'headfirst': headfirst}
        if handler.command != 'CONNECT' and self.urlre_map and any(x(handler.path) for x in self.urlre_map):
            return 'direct', {'headfirst': headfirst}
class GAEFetchFilter(BaseProxyHandlerFilter):
    """Route requests to the gae plugin (php/direct as fallbacks).

    See https://developers.google.com/appengine/docs/python/urlfetch/
    for the urlfetch restrictions this mirrors.
    """
    #https://github.com/AppScale/gae_sdk/blob/master/google/appengine/api/taskqueue/taskqueue.py#L241
    MAX_URL_LENGTH = 2083

    def filter(self, handler):
        """https://developers.google.com/appengine/docs/python/urlfetch/"""
        command = handler.command
        if command == 'CONNECT':
            # Only strip-and-inspect TLS on ports where that is safe.
            handshake_ok = 440 <= handler.port <= 450 or 1024 <= handler.port <= 65535
            return 'strip', {'do_ssl_handshake': handshake_ok}
        if command in ('GET', 'POST', 'HEAD', 'PUT', 'DELETE', 'PATCH'):
            return 'gae', {}
        # Verbs urlfetch cannot carry fall back to php, then to direct.
        if 'php' in handler.handler_plugins:
            return 'php', {}
        logging.warning('"%s %s" not supported by GAE, please enable PHP mode!', command, handler.path)
        return 'direct', {}
class WithGAEFilter(BaseProxyHandlerFilter):
    """Route explicitly whitelisted sites to the gae/php/vps plugins."""

    def __init__(self, withgae_sites, withphp_sites, withvps_sites):
        # Each site list splits into exact hostnames and '.suffix' postfixes.
        def split_sites(sites):
            exact = set(s for s in sites if not s.startswith('.'))
            postfix = tuple(s for s in sites if s.startswith('.'))
            return exact, postfix
        self.withgae_sites, self.withgae_sites_postfix = split_sites(withgae_sites)
        self.withphp_sites, self.withphp_sites_postfix = split_sites(withphp_sites)
        self.withvps_sites, self.withvps_sites_postfix = split_sites(withvps_sites)

    def filter(self, handler):
        host = handler.host
        plugin = ''
        # First matching list wins: gae, then php, then vps.
        for name, exact, postfix in (('gae', self.withgae_sites, self.withgae_sites_postfix),
                                     ('php', self.withphp_sites, self.withphp_sites_postfix),
                                     ('vps', self.withvps_sites, self.withvps_sites_postfix)):
            if host in exact or host.endswith(postfix):
                plugin = name
                break
        if plugin:
            if handler.command == 'CONNECT':
                do_ssl_handshake = 440 <= handler.port <= 450 or 1024 <= handler.port <= 65535
                return 'strip', {'do_ssl_handshake': do_ssl_handshake}
            return plugin, {}
class GAEProxyHandler(MultipleConnectionMixin, SimpleProxyHandler):
    """GAE Proxy Handler"""
    handler_filters = [GAEFetchFilter()]
    handler_plugins = {'direct': DirectFetchPlugin(),
                       'mock': MockFetchPlugin(),
                       'strip': StripPlugin(),}
    hosts_filter = None  # set in first_run() if a HostsFilter is installed
    def __init__(self, *args, **kwargs):
        SimpleProxyHandler.__init__(self, *args, **kwargs)
    def first_run(self):
        """GAEProxyHandler setup, init domain/iplist map"""
        openssl_set_session_cache_mode(self.openssl_context, 'client')
        if not common.PROXY_ENABLE:
            # Without an upstream proxy we must resolve iplists ourselves.
            logging.info('resolve common.IPLIST_MAP names=%s to iplist', list(common.IPLIST_MAP))
            common.resolve_iplist()
        random.shuffle(common.GAE_APPIDS)
        # Install the gae plugin on the class so all handler instances share it.
        self.__class__.handler_plugins['gae'] = GAEFetchPlugin(common.GAE_APPIDS, common.GAE_PASSWORD, common.GAE_PATH, common.GAE_MODE, common.GAE_CACHESOCK, common.GAE_KEEPALIVE, common.GAE_OBFUSCATE, common.GAE_PAGESPEED, common.GAE_VALIDATE, common.GAE_OPTIONS, common.GAE_MAXSIZE)
        try:
            # Cache a reference to the HostsFilter (if configured) for DNS use.
            self.__class__.hosts_filter = next(x for x in self.__class__.handler_filters if isinstance(x, HostsFilter))
        except StopIteration:
            pass
    def gethostbyname2(self, hostname):
        # Prefer the HostsFilter mapping; fall back to normal resolution.
        iplist = self.hosts_filter.gethostbyname2(self, hostname) if self.hosts_filter else None
        return iplist or MultipleConnectionMixin.gethostbyname2(self, hostname)
class ProxyGAEProxyHandler(ProxyConnectionMixin, GAEProxyHandler):
    """GAEProxyHandler variant that tunnels through an upstream HTTP proxy."""
    def __init__(self, *args, **kwargs):
        # NOTE: PROXY_PASSWROD (sic) matches the attribute spelling on Common.
        ProxyConnectionMixin.__init__(self, common.PROXY_HOST, common.PROXY_PORT, common.PROXY_USERNAME, common.PROXY_PASSWROD)
        GAEProxyHandler.__init__(self, *args, **kwargs)
    def gethostbyname2(self, hostname):
        # Google frontends are resolved locally from the config maps; the
        # upstream proxy resolves everything else.
        for postfix in ('.appspot.com', '.googleusercontent.com'):
            if hostname.endswith(postfix):
                host = common.HOST_MAP.get(hostname) or common.HOST_POSTFIX_MAP.get(postfix) or 'www.google.com'
                return common.IPLIST_MAP.get(host) or host.split('|')
        return ProxyConnectionMixin.gethostbyname2(self, hostname)
class PHPFetchFilter(BaseProxyHandlerFilter):
    """Send CONNECT requests to the strip plugin and everything else to php."""
    def filter(self, handler):
        plugin = 'strip' if handler.command == 'CONNECT' else 'php'
        return plugin, {}
class VPSFetchFilter(BaseProxyHandlerFilter):
    """Unconditionally route every request to the vps plugin."""
    def filter(self, handler):
        decision = ('vps', {})
        return decision
class PHPProxyHandler(MultipleConnectionMixin, SimpleProxyHandler):
    """PHP Proxy Handler"""
    handler_filters = [PHPFetchFilter()]
    # The 'php' plugin itself is installed later in main() once the
    # fetch-server config is known.
    handler_plugins = {'direct': DirectFetchPlugin(),
                       'mock': MockFetchPlugin(),
                       'strip': StripPlugin(),}
    def __init__(self, *args, **kwargs):
        SimpleProxyHandler.__init__(self, *args, **kwargs)
class ProxyPHPProxyHandler(ProxyConnectionMixin, PHPProxyHandler):
    """PHPProxyHandler variant that tunnels through an upstream HTTP proxy."""
    def __init__(self, *args, **kwargs):
        # NOTE: PROXY_PASSWROD (sic) matches the attribute spelling on Common.
        ProxyConnectionMixin.__init__(self, common.PROXY_HOST, common.PROXY_PORT, common.PROXY_USERNAME, common.PROXY_PASSWROD)
        PHPProxyHandler.__init__(self, *args, **kwargs)
    def gethostbyname2(self, hostname):
        # Leave name resolution to the upstream proxy / fetch server.
        return [hostname]
class Common(object):
    """Global Config Object

    Parses proxy.ini (plus an optional per-user config.ini and GOAGENT_*
    environment overrides) into flat UPPERCASE attributes consumed by the
    handler classes above.
    """
    ENV_CONFIG_PREFIX = 'GOAGENT_'
    def __init__(self):
        """load config from proxy.ini"""
        # Relax the option regex so keys may contain dots, colons, etc.
        ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>\S+)\s+(?P<vi>[=])\s+(?P<value>.*)$')
        self.CONFIG = ConfigParser.ConfigParser()
        self.CONFIG_FILENAME = os.path.splitext(os.path.abspath(__file__))[0]+'.ini'
        self.DATA_PATH = os.path.join(root_path, os.pardir, os.pardir, "data", "php_proxy")
        if not os.path.isdir(self.DATA_PATH):
            os.mkdir(self.DATA_PATH)
        self.CONFIG_USER_FILENAME = os.path.join(self.DATA_PATH, "config.ini")
        if os.path.isfile(self.CONFIG_USER_FILENAME):
            # Per-user config overrides the bundled defaults.
            self.CONFIG.read([self.CONFIG_FILENAME, self.CONFIG_USER_FILENAME])
        else:
            self.CONFIG.read(self.CONFIG_FILENAME)
        # GOAGENT_<SECTION>_<OPTION> environment variables override both.
        for key, value in os.environ.items():
            m = re.match(r'^%s([A-Z]+)_([A-Z\_\-]+)$' % self.ENV_CONFIG_PREFIX, key)
            if m:
                self.CONFIG.set(m.group(1).lower(), m.group(2).lower(), value)
        # [listen] section
        self.LISTEN_IP = self.CONFIG.get('listen', 'ip')
        self.LISTEN_PORT = self.CONFIG.getint('listen', 'port')
        self.LISTEN_USERNAME = self.CONFIG.get('listen', 'username') if self.CONFIG.has_option('listen', 'username') else ''
        self.LISTEN_PASSWORD = self.CONFIG.get('listen', 'password') if self.CONFIG.has_option('listen', 'password') else ''
        self.LISTEN_VISIBLE = self.CONFIG.getint('listen', 'visible')
        self.LISTEN_DEBUGINFO = self.CONFIG.getint('listen', 'debuginfo')
        # [gae] section
        self.GAE_ENABLE = self.CONFIG.getint('gae', 'enable')
        self.GAE_APPIDS = re.findall(r'[\w\-\.]+', self.CONFIG.get('gae', 'appid').replace('.appspot.com', ''))
        self.GAE_PASSWORD = self.CONFIG.get('gae', 'password').strip()
        self.GAE_PATH = self.CONFIG.get('gae', 'path')
        self.GAE_MODE = self.CONFIG.get('gae', 'mode')
        self.GAE_IPV6 = self.CONFIG.getint('gae', 'ipv6')
        self.GAE_WINDOW = self.CONFIG.getint('gae', 'window')
        self.GAE_KEEPALIVE = self.CONFIG.getint('gae', 'keepalive')
        self.GAE_CACHESOCK = self.CONFIG.getint('gae', 'cachesock')
        self.GAE_HEADFIRST = self.CONFIG.getint('gae', 'headfirst')
        self.GAE_OBFUSCATE = self.CONFIG.getint('gae', 'obfuscate')
        self.GAE_VALIDATE = self.CONFIG.getint('gae', 'validate')
        self.GAE_TRANSPORT = self.CONFIG.getint('gae', 'transport') if self.CONFIG.has_option('gae', 'transport') else 0
        self.GAE_OPTIONS = self.CONFIG.get('gae', 'options')
        self.GAE_REGIONS = set(x.upper() for x in self.CONFIG.get('gae', 'regions').split('|') if x.strip())
        self.GAE_SSLVERSION = self.CONFIG.get('gae', 'sslversion')
        self.GAE_PAGESPEED = self.CONFIG.getint('gae', 'pagespeed') if self.CONFIG.has_option('gae', 'pagespeed') else 0
        self.GAE_MAXSIZE = self.CONFIG.getint('gae', 'maxsize')
        if self.GAE_IPV6:
            # Probe that an IPv6 route actually exists before committing.
            sock = None
            try:
                sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                sock.connect(('2001:4860:4860::8888', 53))
                logging.info('use ipv6 interface %s for gae', sock.getsockname()[0])
            except Exception as e:
                logging.info('Fail try use ipv6 %r, fallback ipv4', e)
                self.GAE_IPV6 = 0
            finally:
                if sock:
                    sock.close()
        if 'USERDNSDOMAIN' in os.environ and re.match(r'^\w+\.\w+$', os.environ['USERDNSDOMAIN']):
            # Treat the Windows corporate DNS suffix as a profile entry.
            self.CONFIG.set('profile', '.' + os.environ['USERDNSDOMAIN'], '')
        # Parse the [profile] section into routing maps and site lists.
        urlrewrite_map = collections.OrderedDict()
        host_map = collections.OrderedDict()
        host_postfix_map = collections.OrderedDict()
        hostport_map = collections.OrderedDict()
        hostport_postfix_map = collections.OrderedDict()
        urlre_map = collections.OrderedDict()
        withgae_sites = []
        withphp_sites = []
        withvps_sites = []
        crlf_sites = []
        nocrlf_sites = []
        forcehttps_sites = []
        noforcehttps_sites = []
        fakehttps_sites = []
        nofakehttps_sites = []
        dns_servers = []
        for site, rule in self.CONFIG.items('profile'):
            rules = [x.strip() for x in re.split(r'[,\|]', rule) if x.strip()]
            if site == 'dns':
                dns_servers = rules
                continue
            if rule.startswith(('file://', 'http://', 'https://')) or '$1' in rule:
                urlrewrite_map[site] = rule
                continue
            # Pull flag keywords out of the rule list, tagging the site.
            for name, sites in [('withgae', withgae_sites),
                                ('withphp', withphp_sites),
                                ('withvps', withvps_sites),
                                ('crlf', crlf_sites),
                                ('nocrlf', nocrlf_sites),
                                ('forcehttps', forcehttps_sites),
                                ('noforcehttps', noforcehttps_sites),
                                ('fakehttps', fakehttps_sites),
                                ('nofakehttps', nofakehttps_sites)]:
                if name in rules:
                    sites.append(site)
                    rules.remove(name)
            hostname = rules and rules[0]
            if not hostname:
                continue
            # Classify the site pattern: host:port, regex, or plain host,
            # each with exact and '.postfix' variants.
            if ':' in site and '\\' not in site:
                if site.startswith('.'):
                    hostport_postfix_map[site] = hostname
                else:
                    hostport_map[site] = hostname
            elif '\\' in site:
                urlre_map[re.compile(site).match] = hostname
            else:
                if site.startswith('.'):
                    host_postfix_map[site] = hostname
                else:
                    host_map[site] = hostname
        self.HTTP_DNS = dns_servers
        self.WITHGAE_SITES = tuple(withgae_sites)
        self.WITHPHP_SITES = tuple(withphp_sites)
        self.WITHVPS_SITES = tuple(withvps_sites)
        self.CRLF_SITES = tuple(crlf_sites)
        self.NOCRLF_SITES = set(nocrlf_sites)
        self.FORCEHTTPS_SITES = tuple(forcehttps_sites)
        self.NOFORCEHTTPS_SITES = set(noforcehttps_sites)
        self.FAKEHTTPS_SITES = tuple(fakehttps_sites)
        self.NOFAKEHTTPS_SITES = set(nofakehttps_sites)
        self.URLREWRITE_MAP = urlrewrite_map
        self.HOSTPORT_MAP = hostport_map
        self.HOSTPORT_POSTFIX_MAP = hostport_postfix_map
        self.URLRE_MAP = urlre_map
        self.HOST_MAP = host_map
        self.HOST_POSTFIX_MAP = host_postfix_map
        # [iplist] section: name -> list of IPs/hostnames to resolve.
        self.IPLIST_MAP = collections.OrderedDict((k, v.split('|') if v else []) for k, v in self.CONFIG.items('iplist'))
        self.IPLIST_MAP.update((k, [k]) for k, v in self.HOST_MAP.items() if k == v)
        self.IPLIST_PREDEFINED = [x for x in sum(self.IPLIST_MAP.values(), []) if re.match(r'^\d+\.\d+\.\d+\.\d+$', x) or ':' in x]
        if self.GAE_IPV6 and 'google_ipv6' in self.IPLIST_MAP:
            # With IPv6 on, point most google_* lists at the v6 addresses.
            for name in self.IPLIST_MAP.keys():
                if name.startswith('google') and name not in ('google_ipv6', 'google_talk'):
                    self.IPLIST_MAP[name] = self.IPLIST_MAP['google_ipv6']
        # [pac] section
        self.PAC_ENABLE = self.CONFIG.getint('pac', 'enable')
        self.PAC_IP = self.CONFIG.get('pac', 'ip')
        self.PAC_PORT = self.CONFIG.getint('pac', 'port')
        self.PAC_FILE = self.CONFIG.get('pac', 'file').lstrip('/')
        self.PAC_GFWLIST = self.CONFIG.get('pac', 'gfwlist')
        self.PAC_ADBLOCK = self.CONFIG.get('pac', 'adblock')
        self.PAC_ADMODE = self.CONFIG.getint('pac', 'admode')
        self.PAC_EXPIRED = self.CONFIG.getint('pac', 'expired')
        # [php] section
        self.PHP_ENABLE = self.CONFIG.getint('php', 'enable')
        self.PHP_LISTEN = self.CONFIG.get('php', 'listen')
        self.PHP_PASSWORD = self.CONFIG.get('php', 'password') if self.CONFIG.has_option('php', 'password') else ''
        self.PHP_CRLF = self.CONFIG.getint('php', 'crlf') if self.CONFIG.has_option('php', 'crlf') else 1
        self.PHP_VALIDATE = self.CONFIG.getint('php', 'validate') if self.CONFIG.has_option('php', 'validate') else 0
        self.PHP_FETCHSERVERS = self.CONFIG.get('php', 'fetchserver').split('|')
        # [proxy] section (upstream proxy)
        self.PROXY_ENABLE = self.CONFIG.getint('proxy', 'enable')
        self.PROXY_AUTODETECT = self.CONFIG.getint('proxy', 'autodetect') if self.CONFIG.has_option('proxy', 'autodetect') else 0
        self.PROXY_HOST = self.CONFIG.get('proxy', 'host')
        self.PROXY_PORT = self.CONFIG.get('proxy', 'port')
        if self.PROXY_PORT == "":
            self.PROXY_PORT = 0
        else:
            self.PROXY_PORT = int(self.PROXY_PORT)
        self.PROXY_USERNAME = self.CONFIG.get('proxy', 'username')
        # NOTE(review): 'PASSWROD' typo is load-bearing -- the handler
        # classes read this exact attribute name.
        self.PROXY_PASSWROD = self.CONFIG.get('proxy', 'password')
        if not self.PROXY_ENABLE and self.PROXY_AUTODETECT:
            # Fall back to the OS-level proxy settings when none configured,
            # unless the system proxy already points at our own listener.
            system_proxy = ProxyUtil.get_system_proxy()
            if system_proxy and self.LISTEN_IP not in system_proxy:
                _, username, password, address = ProxyUtil.parse_proxy(system_proxy)
                proxyhost, _, proxyport = address.rpartition(':')
                self.PROXY_ENABLE = 1
                self.PROXY_USERNAME = username
                self.PROXY_PASSWROD = password
                self.PROXY_HOST = proxyhost
                self.PROXY_PORT = int(proxyport)
        if self.PROXY_ENABLE:
            self.GAE_MODE = 'https'
        # [control] section
        self.CONTROL_ENABLE = self.CONFIG.getint('control', 'enable')
        self.CONTROL_IP = self.CONFIG.get('control', 'ip')
        self.CONTROL_PORT = self.CONFIG.getint('control', 'port')
        # [autorange] section
        self.AUTORANGE_HOSTS = self.CONFIG.get('autorange', 'hosts').split('|')
        self.AUTORANGE_ENDSWITH = tuple(self.CONFIG.get('autorange', 'endswith').split('|'))
        self.AUTORANGE_NOENDSWITH = tuple(self.CONFIG.get('autorange', 'noendswith').split('|'))
        self.AUTORANGE_MAXSIZE = self.CONFIG.getint('autorange', 'maxsize')
        self.AUTORANGE_WAITSIZE = self.CONFIG.getint('autorange', 'waitsize')
        self.AUTORANGE_BUFSIZE = self.CONFIG.getint('autorange', 'bufsize')
        self.AUTORANGE_THREADS = self.CONFIG.getint('autorange', 'threads')
        # [fetchmax] section
        self.FETCHMAX_LOCAL = self.CONFIG.getint('fetchmax', 'local') if self.CONFIG.get('fetchmax', 'local') else 3
        self.FETCHMAX_SERVER = self.CONFIG.get('fetchmax', 'server')
        # [dns] section
        self.DNS_ENABLE = self.CONFIG.getint('dns', 'enable')
        self.DNS_LISTEN = self.CONFIG.get('dns', 'listen')
        self.DNS_SERVERS = self.HTTP_DNS or self.CONFIG.get('dns', 'servers').split('|')
        self.DNS_BLACKLIST = set(self.CONFIG.get('dns', 'blacklist').split('|'))
        self.DNS_TCPOVER = tuple(self.CONFIG.get('dns', 'tcpover').split('|')) if self.CONFIG.get('dns', 'tcpover').strip() else tuple()
        # Keep only address-family-appropriate DNS servers.
        if self.GAE_IPV6:
            self.DNS_SERVERS = [x for x in self.DNS_SERVERS if ':' in x]
        else:
            self.DNS_SERVERS = [x for x in self.DNS_SERVERS if ':' not in x]
        # [useragent] / [love] sections
        self.USERAGENT_ENABLE = self.CONFIG.getint('useragent', 'enable')
        self.USERAGENT_STRING = self.CONFIG.get('useragent', 'string')
        self.LOVE_ENABLE = self.CONFIG.getint('love', 'enable')
        self.LOVE_TIP = self.CONFIG.get('love', 'tip').encode('utf8').decode('unicode-escape').split('|')
        self.keep_run = True  # main() loops while this stays True
    def extend_iplist(self, iplist_name, hosts):
        """Resolve hosts against every configured DNS server in parallel
        and merge any new addresses into IPLIST_MAP[iplist_name]."""
        logging.info('extend_iplist start for hosts=%s', hosts)
        new_iplist = []
        def do_remote_resolve(host, dnsserver, queue):
            assert isinstance(dnsserver, basestring)
            for dnslib_resolve in (dnslib_resolve_over_udp, dnslib_resolve_over_tcp):
                try:
                    time.sleep(random.random())
                    iplist = dnslib_record2iplist(dnslib_resolve(host, [dnsserver], timeout=4, blacklist=self.DNS_BLACKLIST))
                    queue.put((host, dnsserver, iplist))
                except (socket.error, OSError) as e:
                    logging.info('%s remote host=%r failed: %s', str(dnslib_resolve).split()[1], host, e)
                    time.sleep(1)
        result_queue = Queue.Queue()
        # Use a gevent pool when gevent is loaded, plain threads otherwise.
        pool = __import__('gevent.pool', fromlist=['.']).Pool(8) if sys.modules.get('gevent') else None
        for host in hosts:
            for dnsserver in self.DNS_SERVERS:
                logging.debug('remote resolve host=%r from dnsserver=%r', host, dnsserver)
                if pool:
                    pool.spawn(do_remote_resolve, host, dnsserver, result_queue)
                else:
                    thread.start_new_thread(do_remote_resolve, (host, dnsserver, result_queue))
        # Each resolver may answer twice (udp + tcp), hence the * 2.
        for _ in xrange(len(self.DNS_SERVERS) * len(hosts) * 2):
            try:
                host, dnsserver, iplist = result_queue.get(timeout=16)
                logging.debug('%r remote host=%r return %s', dnsserver, host, iplist)
                if '.google' in host:
                    # Keep only plausible google addresses (v6 when enabled).
                    if self.GAE_IPV6:
                        iplist = [x for x in iplist if ':' in x]
                    else:
                        iplist = [x for x in iplist if is_google_ip(x)]
                new_iplist += iplist
            except Queue.Empty:
                break
        logging.info('extend_iplist finished, added %s', len(set(self.IPLIST_MAP[iplist_name])-set(new_iplist)))
        self.IPLIST_MAP[iplist_name] = list(set(self.IPLIST_MAP[iplist_name] + new_iplist))
    def resolve_iplist(self):
        """Resolve every non-literal hostname in IPLIST_MAP to addresses."""
        # https://support.google.com/websearch/answer/186669?hl=zh-Hans
        def do_local_resolve(host, queue):
            assert isinstance(host, basestring)
            for _ in xrange(3):
                try:
                    family = socket.AF_INET6 if self.GAE_IPV6 else socket.AF_INET
                    iplist = [x[-1][0] for x in socket.getaddrinfo(host, 80, family)]
                    queue.put((host, iplist))
                except (socket.error, OSError) as e:
                    logging.warning('socket.getaddrinfo host=%r failed: %s', host, e)
                    time.sleep(0.1)
        google_blacklist = ['216.239.32.20'] + list(self.DNS_BLACKLIST)
        google_blacklist_prefix = tuple(x for x in self.DNS_BLACKLIST if x.endswith('.'))
        for name, need_resolve_hosts in list(self.IPLIST_MAP.items()):
            # Skip lists that are already literal IP addresses.
            if all(re.match(r'\d+\.\d+\.\d+\.\d+', x) or ':' in x for x in need_resolve_hosts):
                continue
            need_resolve_remote = [x for x in need_resolve_hosts if ':' not in x and not re.match(r'\d+\.\d+\.\d+\.\d+', x)]
            resolved_iplist = [x for x in need_resolve_hosts if x not in need_resolve_remote]
            result_queue = Queue.Queue()
            for host in need_resolve_remote:
                logging.debug('local resolve host=%r', host)
                thread.start_new_thread(do_local_resolve, (host, result_queue))
            for _ in xrange(len(need_resolve_remote)):
                try:
                    host, iplist = result_queue.get(timeout=8)
                    resolved_iplist += iplist
                except Queue.Empty:
                    break
            if name == 'google_hk' and need_resolve_remote:
                # Keep widening the google_hk pool in the background.
                for delay in (30, 60, 150, 240, 300, 450, 600, 900):
                    spawn_later(delay, self.extend_iplist, name, need_resolve_remote)
            if name.startswith('google_') and name not in ('google_cn', 'google_hk') and resolved_iplist:
                # Restrict regional google lists to one address prefix.
                iplist_prefix = re.split(r'[\.:]', resolved_iplist[0])[0]
                resolved_iplist = list(set(x for x in resolved_iplist if x.startswith(iplist_prefix)))
            else:
                resolved_iplist = list(set(resolved_iplist))
            if name.startswith('google_'):
                resolved_iplist = list(set(resolved_iplist) - set(google_blacklist))
                resolved_iplist = [x for x in resolved_iplist if not x.startswith(google_blacklist_prefix)]
            if len(resolved_iplist) == 0 and name in ('google_hk', 'google_cn') and not self.GAE_IPV6:
                logging.error('resolve %s host return empty! please retry!', name)
                sys.exit(-1)
            logging.info('resolve name=%s host to iplist=%r', name, resolved_iplist)
            common.IPLIST_MAP[name] = resolved_iplist
        if self.IPLIST_MAP.get('google_cn', []):
            # Probe google_cn reachability; drop the list if unreachable.
            try:
                for _ in xrange(4):
                    socket.create_connection((random.choice(self.IPLIST_MAP['google_cn']), 80), timeout=2).close()
            except socket.error:
                self.IPLIST_MAP['google_cn'] = []
        if len(self.IPLIST_MAP.get('google_cn', [])) < 4 and self.IPLIST_MAP.get('google_hk', []):
            logging.warning('google_cn resolved too short iplist=%s, switch to google_hk', self.IPLIST_MAP.get('google_cn', []))
            self.IPLIST_MAP['google_cn'] = self.IPLIST_MAP['google_hk']
    def info(self):
        """Render a human-readable summary of the active configuration."""
        info = ''
        info += '------------------------------------------------------\n'
        info += 'PHP Proxy Version : %s (python/%s gevent/%s pyopenssl/%s)\n' % (__version__, platform.python_version(), gevent.__version__, OpenSSL.__version__)
        info += 'Uvent Version : %s (pyuv/%s libuv/%s)\n' % (__import__('uvent').__version__, __import__('pyuv').__version__, __import__('pyuv').LIBUV_VERSION) if all(x in sys.modules for x in ('pyuv', 'uvent')) else ''
        info += 'Local Proxy : %s:%s\n' % (self.PROXY_HOST, self.PROXY_PORT) if self.PROXY_ENABLE else ''
        info += 'Debug INFO : %s\n' % self.LISTEN_DEBUGINFO if self.LISTEN_DEBUGINFO else ''
        if common.GAE_ENABLE:
            info += 'Listen Address : %s:%d\n' % (self.LISTEN_IP, self.LISTEN_PORT)
            info += 'GAE Mode : %s\n' % self.GAE_MODE
            info += 'GAE IPv6 : %s\n' % self.GAE_IPV6 if self.GAE_IPV6 else ''
            info += 'GAE APPID : %s\n' % '|'.join(self.GAE_APPIDS)
            info += 'GAE Validate : %s\n' % self.GAE_VALIDATE if self.GAE_VALIDATE else ''
            info += 'GAE Obfuscate : %s\n' % self.GAE_OBFUSCATE if self.GAE_OBFUSCATE else ''
        if common.PAC_ENABLE:
            info += 'Pac Server : http://%s:%d/%s\n' % (self.PAC_IP if self.PAC_IP and self.PAC_IP != '0.0.0.0' else ProxyUtil.get_listen_ip(), self.PAC_PORT, self.PAC_FILE)
            info += 'Pac File : file://%s\n' % os.path.abspath(self.PAC_FILE)
        if common.PHP_ENABLE:
            info += 'PHP Listen : %s\n' % common.PHP_LISTEN
            info += 'PHP FetchServers : %s\n' % common.PHP_FETCHSERVERS
        if common.DNS_ENABLE:
            info += 'DNS Listen : %s\n' % common.DNS_LISTEN
            info += 'DNS Servers : %s\n' % '|'.join(common.DNS_SERVERS)
        info += '------------------------------------------------------\n'
        return info
# Module-level config singleton shared by every handler class above.
common = Common()
def pre_start():
    """Apply the parsed config onto the handler classes before serving."""
    if gevent.__version__ < '1.0':
        logging.warning("*NOTE*, please upgrade to gevent 1.1 as possible")
    if GAEProxyHandler.max_window != common.GAE_WINDOW:
        GAEProxyHandler.max_window = common.GAE_WINDOW
    if common.GAE_CACHESOCK:
        GAEProxyHandler.tcp_connection_cachesock = True
        GAEProxyHandler.ssl_connection_cachesock = True
    if common.GAE_KEEPALIVE:
        # keepalive implies cachesock for both plain and ssl connections.
        GAEProxyHandler.tcp_connection_cachesock = True
        GAEProxyHandler.tcp_connection_keepalive = True
        GAEProxyHandler.ssl_connection_cachesock = True
        GAEProxyHandler.ssl_connection_keepalive = True
    if common.IPLIST_PREDEFINED:
        GAEProxyHandler.iplist_predefined = set(common.IPLIST_PREDEFINED)
    if common.GAE_PAGESPEED and not common.GAE_OBFUSCATE:
        logging.critical("*NOTE*, [gae]pagespeed=1 requires [gae]obfuscate=1")
        sys.exit(-1)
    if common.GAE_SSLVERSION and not sysconfig.get_platform().startswith('macosx-'):
        GAEProxyHandler.ssl_version = getattr(ssl, 'PROTOCOL_%s' % common.GAE_SSLVERSION)
        GAEProxyHandler.openssl_context = SSLConnection.context_builder(common.GAE_SSLVERSION)
    if common.GAE_ENABLE and common.GAE_APPIDS[0] == 'goagent':
        logging.warning('please edit %s to add your appid to [gae] !', common.CONFIG_FILENAME)
    if common.GAE_ENABLE and common.GAE_MODE == 'http' and common.GAE_PASSWORD == '':
        logging.critical('to enable http mode, you should set %r [gae]password = <your_pass> and [gae]options = rc4', common.CONFIG_FILENAME)
        sys.exit(-1)
    if common.GAE_TRANSPORT:
        GAEProxyHandler.disable_transport_ssl = False
    if common.PAC_ENABLE:
        pac_ip = ProxyUtil.get_listen_ip() if common.PAC_IP in ('', '::', '0.0.0.0') else common.PAC_IP
        url = 'http://%s:%d/%s' % (pac_ip, common.PAC_PORT, common.PAC_FILE)
        # Warm the PAC cache 10 minutes in, bypassing any system proxy.
        spawn_later(600, urllib2.build_opener(urllib2.ProxyHandler({})).open, url)
    if not common.DNS_ENABLE:
        if not common.HTTP_DNS:
            common.HTTP_DNS = common.DNS_SERVERS[:]
        # Prepend locally configured DNS servers without duplicating entries.
        for dnsservers_ref in (common.HTTP_DNS, common.DNS_SERVERS):
            any(dnsservers_ref.insert(0, x) for x in [y for y in get_dnsserver_list() if y not in dnsservers_ref])
        GAEProxyHandler.dns_servers = common.HTTP_DNS
        GAEProxyHandler.dns_blacklist = common.DNS_BLACKLIST
    else:
        GAEProxyHandler.dns_servers = common.HTTP_DNS or common.DNS_SERVERS
        GAEProxyHandler.dns_blacklist = common.DNS_BLACKLIST
    RangeFetch.threads = common.AUTORANGE_THREADS
    RangeFetch.maxsize = common.AUTORANGE_MAXSIZE
    RangeFetch.bufsize = common.AUTORANGE_BUFSIZE
    RangeFetch.waitsize = common.AUTORANGE_WAITSIZE
    # Filters are inserted at index 0, so the LAST insert runs FIRST.
    if True:  # NOTE(review): always-true guard kept as-is; looks vestigial
        GAEProxyHandler.handler_filters.insert(0, AutoRangeFilter(common.AUTORANGE_HOSTS, common.AUTORANGE_ENDSWITH, common.AUTORANGE_NOENDSWITH, common.AUTORANGE_MAXSIZE))
    if common.GAE_REGIONS:
        GAEProxyHandler.handler_filters.insert(0, DirectRegionFilter(common.GAE_REGIONS))
    if common.HOST_MAP or common.HOST_POSTFIX_MAP or common.HOSTPORT_MAP or common.HOSTPORT_POSTFIX_MAP or common.URLRE_MAP:
        GAEProxyHandler.handler_filters.insert(0, HostsFilter(common.IPLIST_MAP, common.HOST_MAP, common.HOST_POSTFIX_MAP, common.HOSTPORT_MAP, common.HOSTPORT_POSTFIX_MAP, common.URLRE_MAP))
    if common.CRLF_SITES:
        GAEProxyHandler.handler_filters.insert(0, CRLFSitesFilter(common.CRLF_SITES, common.NOCRLF_SITES))
    if common.URLREWRITE_MAP:
        GAEProxyHandler.handler_filters.insert(0, URLRewriteFilter(common.URLREWRITE_MAP, common.FORCEHTTPS_SITES, common.NOFORCEHTTPS_SITES))
    if common.FAKEHTTPS_SITES:
        GAEProxyHandler.handler_filters.insert(0, FakeHttpsFilter(common.FAKEHTTPS_SITES, common.NOFAKEHTTPS_SITES))
    if common.FORCEHTTPS_SITES:
        GAEProxyHandler.handler_filters.insert(0, ForceHttpsFilter(common.FORCEHTTPS_SITES, common.NOFORCEHTTPS_SITES))
    if common.WITHGAE_SITES or common.WITHPHP_SITES or common.WITHVPS_SITES:
        GAEProxyHandler.handler_filters.insert(0, WithGAEFilter(common.WITHGAE_SITES, common.WITHPHP_SITES, common.WITHVPS_SITES))
    if common.USERAGENT_ENABLE:
        GAEProxyHandler.handler_filters.insert(0, UserAgentFilter(common.USERAGENT_STRING))
    if common.LISTEN_USERNAME:
        GAEProxyHandler.handler_filters.insert(0, AuthFilter(common.LISTEN_USERNAME, common.LISTEN_PASSWORD))
def main():
    """Entry point: load config, start optional servers, idle until stopped."""
    global __file__
    # Resolve symlinks so config/data paths are found next to the real file.
    __file__ = os.path.abspath(__file__)
    if os.path.islink(__file__):
        __file__ = getattr(os, 'readlink', lambda x: x)(__file__)
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    pre_start()
    logging.info(common.info())
    if common.CONTROL_ENABLE:
        # Web control panel on its own daemon thread.
        control_server = LocalProxyServer((common.CONTROL_IP, common.CONTROL_PORT), web_control.RemoteContralServerHandler)
        p = threading.Thread(target=control_server.serve_forever)
        p.setDaemon(True)
        p.start()
    if common.PHP_ENABLE:
        host, port = common.PHP_LISTEN.split(':')
        HandlerClass = PHPProxyHandler if not common.PROXY_ENABLE else ProxyPHPProxyHandler
        HandlerClass.handler_plugins['php'] = PHPFetchPlugin(common.PHP_FETCHSERVERS, common.PHP_PASSWORD, common.PHP_VALIDATE)
        php_server = LocalProxyServer((host, int(port)), HandlerClass)
        thread.start_new_thread(php_server.serve_forever, tuple())
    CertUtil.init_ca()
    # Keep the main thread alive so server threads keep serving.
    while common.keep_run:
        gevent.sleep(1)
    sys.exit(0)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly without a traceback.
        sys.exit()
|
watcher.py | import datetime
import os
import threading
import time
class Watcher(object):
def __init__(self, files=None, cmds=None, verbose=False, clear=True):
self.files = []
self.cmds = []
self.num_runs = 0
self.mtimes = {}
self._monitor_continously = False
self._monitor_thread = None
self.verbose = verbose
self.clear = clear
if files: self.add_files(*files)
if cmds: self.add_cmds(*cmds)
def monitor(self):
#We only want one thread, dear god
self.stop_monitor()
self._monitor_continously = True
self._monitor_thread = threading.Thread(target=self._monitor_till_stopped)
self._monitor_thread.start()
def run_monitor(self):
"""Called by main thread methods like __main__ so Ctrl-C works"""
self.monitor()
try:
while self._monitor_continously:
time.sleep(.02)
except KeyboardInterrupt:
self.stop_monitor()
def stop_monitor(self):
if self._monitor_thread and self._monitor_thread.isAlive():
self._monitor_continously = False
self._monitor_thread.join(0.05)
def _monitor_till_stopped(self):
while self._monitor_continously:
self.monitor_once()
time.sleep(1)
def monitor_once(self, execute=True):
for f in self.files:
try:
mtime = os.stat(f).st_mtime
except OSError:
#The file might be right in the middle of being written so sleep
time.sleep(1)
mtime = os.stat(f).st_mtime
if f not in self.mtimes.keys():
self.mtimes[f] = mtime
continue
if mtime > self.mtimes[f]:
if self.verbose: print "File changed: %s" % os.path.realpath(f)
self.mtimes[f] = mtime
if execute:
self.execute()
break
def execute(self):
if self.verbose: print "Running commands at %s" % (datetime.datetime.now(), )
if self.clear:
os.system('clear')
[ os.system(cmd) for cmd in self.cmds ]
self.num_runs += 1
return self.num_runs
def walk_dirs(self, dirnames):
dir_files = []
for dirname in dirnames:
for path, dirs, files in os.walk(dirname):
files = [ os.path.join(path, f) for f in files ]
dir_files.extend(files)
dir_files.extend(self.walk_dirs(dirs))
return dir_files
def add_files(self, *files):
dirs = [ os.path.realpath(f) for f in files if os.path.isdir(f) ]
files = [ os.path.realpath(f) for f in files if os.path.isfile(f) ]
dir_files = self.walk_dirs(dirs)
files.extend(dir_files)
valid_files = [ os.path.realpath(f) for f in files if os.path.exists(f) and os.path.isfile(f) ]
unique_files = [ f for f in valid_files if f not in self.files ]
self.files = self.files + unique_files
self.monitor_once(execute=False)
def add_cmds(self, *cmds):
unique_cmds = [ c for c in cmds if c not in self.cmds ]
self.cmds = self.cmds + unique_cmds
|
util.py | import functools
import operator
from threading import Thread
import hashlib
import os
import pprint
import time
pp = pprint.PrettyPrinter(indent=2)
def foldl(f, init, l):
    """Left fold: foldl(f, i, [a, b, c]) == f(f(f(i, a), b), c)."""
    acc = init
    for item in l:
        acc = f(acc, item)
    return acc
def foldl1(f, l):
    """Left fold of a non-empty sequence, seeded with its first element."""
    return foldl(f, l[0], l[1:])
def foldr(f, init, l):
    """Right fold: foldr(f, i, [a, b, c]) == f(a, f(b, f(c, i)))."""
    acc = init
    for item in reversed(l):
        acc = f(item, acc)
    return acc
def foldr1(f, l):
    """Right fold of a non-empty sequence, seeded with its last element."""
    return foldr(f, l[-1], l[:-1])
def scanl(f, init, l):
    """Like foldl, but return every intermediate accumulator (init first)."""
    acc = init
    out = [acc]
    for item in l:
        acc = f(acc, item)
        out.append(acc)
    return out
def scanl1(f, l):
    """scanl over a non-empty sequence, seeded with its first element."""
    return scanl(f, l[0], l[1:])
def scanr(f, init, l):
    """Like foldr, but return all intermediate accumulators left-to-right
    (full fold value first, init last)."""
    acc = init
    out = [acc]
    for item in reversed(l):
        acc = f(item, acc)
        out.append(acc)
    out.reverse()
    return out
def scanr1(f, l):
    """scanr over a non-empty sequence, seeded with its last element."""
    return scanr(f, l[-1], l[:-1])
def zipWith(f, *sequences):
    """Element-wise map of f across the sequences (stops at the shortest)."""
    # map() with several iterables already zips and truncates like zip().
    return list(map(f, *sequences))
def compose(*fs):
    """compose(f1, f2, ..., fn)(x) = f1(f2( ... fn(x)))"""
    def composed(value):
        # Apply right-to-left: the innermost (last listed) function first.
        for fn in reversed(fs):
            value = fn(value)
        return value
    return composed
def deepMap(f, obj):
    """Apply f to every leaf of a nested dict/list/tuple structure,
    preserving the container types."""
    if isinstance(obj, dict):
        return {key: deepMap(f, value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(deepMap(f, item) for item in obj)
    return f(obj)
def deepValues(obj):
    """Yield every leaf of a nested structure of dicts and lists.

    Tuples are treated as leaves, not containers.
    """
    if isinstance(obj, dict):
        for child in obj.values():
            yield from deepValues(child)
    elif isinstance(obj, list):
        for child in obj:
            yield from deepValues(child)
    else:
        # note that tuples are values, not lists
        yield obj
def deepZip(*objs):
    """Zip parallel nested structures; each group of leaves becomes a tuple."""
    if len(objs) == 0:
        return []
    head = objs[0]
    if isinstance(head, dict):
        return {key: deepZip(*[o[key] for o in objs]) for key in head}
    if isinstance(head, (list, tuple)):
        # Element-wise recursion (the zipWith helper call, inlined).
        return [deepZip(*group) for group in zip(*objs)]
    return objs
def deepZipWith(f, *objs):
    """Combine parallel nested structures by applying f to each leaf group."""
    if len(objs) == 0:
        return []
    head = objs[0]
    if isinstance(head, dict):
        return {key: deepZipWith(f, *[o[key] for o in objs]) for key in head}
    if isinstance(head, (list, tuple)):
        return type(head)(deepZipWith(f, *group) for group in zip(*objs))
    return f(*objs)
def deepItems(obj, path=None):
    """Yield (path, leaf) pairs for a nested structure of dicts and lists.

    path is the list of keys/indices leading to each leaf.  The default is
    None instead of the original mutable ``[]`` literal (mutable default
    arguments are shared across calls); behavior is unchanged for callers.
    """
    if path is None:
        path = []
    if isinstance(obj, dict):
        for k, v in obj.items():
            yield from deepItems(v, path=path + [k])
    elif isinstance(obj, list):
        for i, v in enumerate(obj):
            yield from deepItems(v, path=path + [i])
    else:
        yield (path, obj)
def deepIter(iters):
    """Iterate parallel nested iterators in lockstep.

    A dict of iterators yields dicts of values (stopping when any member is
    exhausted); lists/tuples yield zipped tuples; anything else is yielded
    from directly.
    """
    if isinstance(iters, dict):
        deep_iters = [(k, deepIter(v)) for k, v in iters.items()]
        while True:
            try:
                # Bug fix: the original called v.next(), the Python 2 iterator
                # protocol; this module already uses Python 3-only syntax
                # (yield from), so that raised AttributeError at runtime.
                yield {k: next(v) for k, v in deep_iters}
            except StopIteration:
                return
    elif isinstance(iters, (list, tuple)):
        yield from zip(*map(deepIter, iters))
    else:
        yield from iters
def product(xs):
    """Multiply the elements of xs together; an empty input yields 1.0."""
    result = 1.0
    for x in xs:
        result = result * x
    return result
def async_map(f, xs):
    """Apply f to every element of xs, each on its own thread.

    Returns a wait() callable; calling it joins all threads and returns the
    results in input order.
    """
    count = len(xs)
    results = count * [None]
    def worker(index):
        results[index] = f(xs[index])
    threads = [Thread(target=worker, args=[index]) for index in range(count)]
    for thread in threads:
        thread.start()
    def wait():
        for thread in threads:
            thread.join()
        return results
    return wait
def chunk(l, n):
    """Split l into consecutive slices of length n (the last may be shorter)."""
    pieces = []
    for start in range(0, len(l), n):
        pieces.append(l[start:start + n])
    return pieces
class MovingAverage:
    """Exponentially-weighted moving average.

    rate is the smoothing factor: each append moves avg a fraction `rate`
    of the way toward the new value.
    """
    def __init__(self, rate=1e-2, initial=0):
        self.rate = rate
        self.avg = initial
    def append(self, val):
        """Fold val into the running average."""
        # avg <- (1 - rate) * avg + rate * val
        self.avg = self.avg + self.rate * (val - self.avg)
class Timer:
    """Tiny stopwatch: reset() restarts it; split() returns the seconds
    elapsed since the previous reset()/split() and restarts."""
    def __init__(self):
        # Start timing at construction so split() is safe immediately; the
        # original raised AttributeError if split() preceded reset().
        self.reset()
    def reset(self):
        """(Re)start the timer now."""
        self.time = time.time()
    def split(self):
        """Return seconds since the last reset()/split(), then restart."""
        now = time.time()
        delta = now - self.time
        self.time = now
        return delta
class CircularQueue:
    """Fixed-capacity ring buffer backed by a plain list."""
    def __init__(self, size=None, init=None, array=None):
        # Either adopt an existing backing list, or allocate `size` copies of
        # `init`.  NOTE(review): a truthiness test is used, so an empty
        # `array` ([]) falls through to the size branch — confirm callers
        # never pass one.
        if array:
            self.size = len(array)
            self.array = array
        else:
            self.size = size
            self.array = [init] * size
        self.index = 0
    def push(self, obj):
        # Overwrite the current slot, then advance the cursor.
        self.array[self.index] = obj
        self.increment()
        # NOTE(review): returns the element at the *advanced* index (the slot
        # the next push will overwrite), not `obj` — looks deliberate (hands
        # back the about-to-be-evicted entry); confirm callers rely on this.
        return self.array[self.index]
    def peek(self):
        # Element at the current cursor (oldest entry once the buffer wraps).
        return self.array[self.index]
    def increment(self):
        # Advance the cursor with wrap-around.
        self.index += 1
        self.index %= self.size
    def __getitem__(self, index):
        # Index relative to the cursor; the added self.size keeps small
        # negative offsets non-negative before the modulo.
        return self.array[(self.size + self.index + index) % self.size]
    def __len__(self):
        # Fixed capacity, not the number of pushes so far.
        return self.size
    def as_list(self):
        # Contents in cursor order (oldest-first once wrapped).
        return self.array[self.index:] + self.array[:self.index]
def hashString(s):
    """Hex MD5 digest of a text string."""
    return hashlib.md5(s.encode()).hexdigest()
def port(s):
    """Deterministically derive a port in [5536, 65535] from a path."""
    path = os.path.abspath(s)
    print("PORT", path)
    return 5536 + int(hashString(path), 16) % 60000
def makedirs(path):
    """Create path (with parents) if it does not already exist.

    exist_ok=True replaces the original exists()-then-makedirs() pair,
    closing the race where another process creates the directory between
    the check and the call.
    """
    os.makedirs(path, exist_ok=True)
def update(dikt, **kwargs):
    """Merge kwargs into dikt.

    Non-None values overwrite; a None value only ensures the key exists
    (it never clobbers an existing value).
    """
    for key, value in kwargs.items():
        if value is not None:
            dikt[key] = value
        else:
            dikt.setdefault(key, None)
def load_params(path, key=None):
    """Load the JSON file <path>/params as a dict.

    If key names a sub-dict (old-style layout that separated params into
    e.g. 'train' and 'agent' sections) its entries are hoisted to the top
    level.  The source path is recorded under 'path'.
    """
    import json
    with open(path + '/params') as handle:
        params = json.load(handle)
    if key and key in params:
        params.update(params[key])
    params['path'] = path
    return params
|
test_ib_wrapper.py | """Unit tests for module `ibpy_native.wrapper`."""
# pylint: disable=protected-access
import os
import enum
import threading
import unittest
from ibapi import wrapper as ib_wrapper
from ibpy_native.interfaces import listeners
from ibpy_native.internal import client as ibpy_client
from ibpy_native.internal import wrapper as ibpy_wrapper
from ibpy_native.utils import finishable_queue as fq
from tests.toolkit import sample_contracts
from tests.toolkit import utils
class Const(enum.IntEnum):
    """Predefined constants for `TestIBWrapper`."""
    # Request IDs used to key the wrapper's per-request queues; values are
    # arbitrary but unique per test so concurrent requests don't collide.
    RID_RESOLVE_CONTRACT = 43
    RID_FETCH_HISTORICAL_TICKS = 18001
    RID_REQ_TICK_BY_TICK_DATA_ALL_LAST = 19001
    RID_REQ_TICK_BY_TICK_DATA_LAST = 19002
    RID_REQ_TICK_BY_TICK_DATA_MIDPOINT = 19003
    RID_REQ_TICK_BY_TICK_DATA_BIDASK = 19004
    # Presumably a cap (seconds) on queue waits; not referenced in the tests
    # visible here — confirm before removing.
    QUEUE_MAX_WAIT_SEC = 10
class TestIBWrapper(unittest.TestCase):
    """Unit tests for class `_IBWrapper`.

    NOTE(review): these are integration-style tests — setUpClass opens a live
    socket to an IB Gateway/TWS (host/port from env vars IB_HOST/IB_PORT,
    defaulting to 127.0.0.1:4002), so a running gateway is required.
    """
    @classmethod
    def setUpClass(cls):
        # One shared wrapper/client pair for every test in the class.
        cls._wrapper = ibpy_wrapper._IBWrapper()
        cls._client = ibpy_client._IBClient(cls._wrapper)
        cls._client.connect(
            os.getenv('IB_HOST', '127.0.0.1'),
            int(os.getenv('IB_PORT', '4002')),
            1001
        )
        # The IB API message loop blocks, so it runs on its own thread.
        thread = threading.Thread(target=cls._client.run)
        thread.start()
        setattr(cls._client, "_thread", thread)
    @utils.async_test
    async def test_next_req_id(self):
        """Test retrieval of next usable request ID."""
        # Prepare the `_FinishableQueue` objects in internal `__req_queue`
        self._wrapper._req_queue.clear()
        _ = self._wrapper.get_request_queue(req_id=0)
        f_queue = self._wrapper.get_request_queue(req_id=1)
        _ = self._wrapper.get_request_queue(req_id=10)
        # With IDs {0, 1, 10} registered, the next usable ID is 11 ...
        self.assertEqual(self._wrapper.next_req_id, 11)
        f_queue.put(element=fq._Status.FINISHED)
        await f_queue.get()
        # ... but once queue 1 has finished, its ID becomes reusable.
        self.assertEqual(self._wrapper.next_req_id, 1)
    def test_notification_listener(self):
        """Test notification listener approach."""
        class MockListener(listeners.NotificationListener):
            """Mock notification listener."""
            triggered = False
            def on_notify(self, msg_code: int, msg: str):
                """Mock callback implementation
                """
                print(f"{msg_code} - {msg}")
                self.triggered = True
        mock_listener = MockListener()
        self._wrapper.set_on_notify_listener(listener=mock_listener)
        # reqId -1 with code 1100 is a system notification, which should be
        # routed to the listener rather than to a request queue.
        self._wrapper.error(reqId=-1, errorCode=1100, errorString="MOCK MSG")
        self.assertTrue(mock_listener.triggered)
    @utils.async_test
    async def test_historical_ticks(self):
        """Test overridden function `historicalTicks`."""
        end_time = "20200327 16:30:00"
        f_queue = self._wrapper.get_request_queue(
            req_id=Const.RID_FETCH_HISTORICAL_TICKS
        )
        self._client.reqHistoricalTicks(
            reqId=Const.RID_FETCH_HISTORICAL_TICKS.value,
            contract=sample_contracts.gbp_usd_fx(),
            startDateTime="", endDateTime=end_time,
            numberOfTicks=1000, whatToShow="MIDPOINT", useRth=1,
            ignoreSize=False, miscOptions=[]
        )
        result = await f_queue.get()
        self.assertEqual(f_queue.status, fq._Status.FINISHED)
        # Expect the ticks payload plus the finish marker.
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], ib_wrapper.ListOfHistoricalTick)
    @utils.async_test
    async def test_historical_ticks_bid_ask(self):
        """Test overridden function `historicalTicksBidAsk`."""
        end_time = "20200327 16:30:00"
        # NOTE(review): reuses RID_FETCH_HISTORICAL_TICKS across the three
        # historical-tick tests — safe only while tests run sequentially.
        f_queue = self._wrapper.get_request_queue(
            req_id=Const.RID_FETCH_HISTORICAL_TICKS
        )
        self._client.reqHistoricalTicks(
            reqId=Const.RID_FETCH_HISTORICAL_TICKS.value,
            contract=sample_contracts.gbp_usd_fx(),
            startDateTime="", endDateTime=end_time,
            numberOfTicks=1000, whatToShow="BID_ASK", useRth=1,
            ignoreSize=False, miscOptions=[]
        )
        result = await f_queue.get()
        self.assertEqual(f_queue.status, fq._Status.FINISHED)
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], ib_wrapper.ListOfHistoricalTickBidAsk)
    @utils.async_test
    async def test_historical_ticks_last(self):
        """Test overridden function `historicalTicksLast`."""
        end_time = "20200327 16:30:00"
        f_queue = self._wrapper.get_request_queue(
            req_id=Const.RID_FETCH_HISTORICAL_TICKS
        )
        self._client.reqHistoricalTicks(
            reqId=Const.RID_FETCH_HISTORICAL_TICKS.value,
            contract=sample_contracts.gbp_usd_fx(),
            startDateTime="", endDateTime=end_time,
            numberOfTicks=1000, whatToShow="TRADES", useRth=1,
            ignoreSize=False, miscOptions=[]
        )
        result = await f_queue.get()
        self.assertEqual(f_queue.status, fq._Status.FINISHED)
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], ib_wrapper.ListOfHistoricalTickLast)
    @utils.async_test
    async def test_tick_by_tick_all_last(self):
        """Test overridden function `tickByTickAllLast`."""
        f_queue = self._wrapper.get_request_queue(
            req_id=Const.RID_REQ_TICK_BY_TICK_DATA_ALL_LAST
        )
        self._client.reqTickByTickData(
            reqId=Const.RID_REQ_TICK_BY_TICK_DATA_ALL_LAST.value,
            contract=sample_contracts.us_future(),
            tickType='AllLast',
            numberOfTicks=0,
            ignoreSize=True
        )
        # Cancel the live stream after the first element arrives, then push
        # FINISHED ourselves so the async iteration terminates.
        async for ele in f_queue.stream():
            self.assertIsInstance(ele,
                                  (ib_wrapper.HistoricalTickLast, fq._Status))
            self.assertIsNot(ele, fq._Status.ERROR)
            if ele is not fq._Status.FINISHED:
                self._client.cancelTickByTickData(
                    reqId=Const.RID_REQ_TICK_BY_TICK_DATA_ALL_LAST.value
                )
                f_queue.put(element=fq._Status.FINISHED)
    @utils.async_test
    async def test_tick_by_tick_last(self):
        """Test overridden function `tickByTickAllLast` with tick type `Last`.
        """
        f_queue = self._wrapper.get_request_queue(
            req_id=Const.RID_REQ_TICK_BY_TICK_DATA_LAST
        )
        self._client.reqTickByTickData(
            reqId=Const.RID_REQ_TICK_BY_TICK_DATA_LAST.value,
            contract=sample_contracts.us_future(),
            tickType='Last',
            numberOfTicks=0,
            ignoreSize=True
        )
        async for ele in f_queue.stream():
            self.assertIsInstance(ele,
                                  (ib_wrapper.HistoricalTickLast, fq._Status))
            self.assertIsNot(ele, fq._Status.ERROR)
            if ele is not fq._Status.FINISHED:
                self._client.cancelTickByTickData(
                    reqId=Const.RID_REQ_TICK_BY_TICK_DATA_LAST.value
                )
                f_queue.put(element=fq._Status.FINISHED)
    @utils.async_test
    async def test_tick_by_tick_bid_ask(self):
        """Test overridden function `tickByTickBidAsk`."""
        f_queue = self._wrapper.get_request_queue(
            req_id=Const.RID_REQ_TICK_BY_TICK_DATA_BIDASK
        )
        self._client.reqTickByTickData(
            reqId=Const.RID_REQ_TICK_BY_TICK_DATA_BIDASK.value,
            contract=sample_contracts.gbp_usd_fx(),
            tickType='BidAsk',
            numberOfTicks=0,
            ignoreSize=True
        )
        async for ele in f_queue.stream():
            self.assertIsInstance(ele,
                                  (ib_wrapper.HistoricalTickBidAsk, fq._Status))
            self.assertIsNot(ele, fq._Status.ERROR)
            if ele is not fq._Status.FINISHED:
                self._client.cancelTickByTickData(
                    reqId=Const.RID_REQ_TICK_BY_TICK_DATA_BIDASK.value
                )
                f_queue.put(element=fq._Status.FINISHED)
    @utils.async_test
    async def test_tick_by_tick_mid_point(self):
        """Test overridden function `tickByTickMidPoint`."""
        f_queue = self._wrapper.get_request_queue(
            req_id=Const.RID_REQ_TICK_BY_TICK_DATA_MIDPOINT
        )
        self._client.reqTickByTickData(
            reqId=Const.RID_REQ_TICK_BY_TICK_DATA_MIDPOINT.value,
            contract=sample_contracts.gbp_usd_fx(),
            tickType='MidPoint',
            numberOfTicks=0,
            ignoreSize=True
        )
        async for ele in f_queue.stream():
            self.assertIsInstance(ele,
                                  (ib_wrapper.HistoricalTick, fq._Status))
            self.assertIsNot(ele, fq._Status.ERROR)
            if ele is not fq._Status.FINISHED:
                self._client.cancelTickByTickData(
                    reqId=Const.RID_REQ_TICK_BY_TICK_DATA_MIDPOINT.value
                )
                f_queue.put(element=fq._Status.FINISHED)
    @classmethod
    def tearDownClass(cls):
        # Drop the gateway connection; the reader thread exits on disconnect.
        cls._client.disconnect()
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import json
import ssl
import sys
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource,
SkuDescription, SslState, HostNameBinding, NameValuePair,
BackupRequest, DatabaseBackupSetting, BackupSchedule,
RestoreRequest, FrequencyUnit, Certificate, HostNameSslState,
RampUpRule, UnauthenticatedClientAction, ManagedServiceIdentity,
DeletedAppRestoreRequest, DefaultErrorResponseException,
SnapshotRestoreRequest, SnapshotRecoverySource)
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group,
should_create_new_rg, set_location, should_create_new_app,
get_lang_from_content, get_num_apps_in_asp)
from ._constants import (NODE_RUNTIME_NAME, OS_DEFAULT, STATIC_RUNTIME_NAME, PYTHON_RUNTIME_NAME,
RUNTIME_TO_IMAGE, NODE_VERSION_DEFAULT)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements
                  deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
                  deployment_local_git=None, multicontainer_config_type=None, multicontainer_config_file=None,
                  tags=None):
    """Create a web app on an existing app service plan.

    Linux vs. Windows and container vs. runtime configuration are derived
    from the plan and the mutually exclusive option groups; raises CLIError
    on invalid option combinations or a missing plan.
    """
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    client = web_client_factory(cmd.cli_ctx)
    # `plan` may be a bare name (looked up in this resource group) or a full ARM id.
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(resource_group_name, plan)
    if not plan_info:
        raise CLIError("The plan '{}' doesn't exist".format(plan))
    is_linux = plan_info.reserved
    node_default_version = NODE_VERSION_DEFAULT
    location = plan_info.location
    site_config = SiteConfig(app_settings=[])
    # always_on is not available on free/shared/basic tiers.
    if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
                                                                                       'B1', 'B2', 'B3', 'BASIC']:
        site_config.always_on = True
    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags)
    helper = _StackRuntimeHelper(client, linux=is_linux)
    if is_linux:
        # Exactly one of runtime / container image / multicontainer config.
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise CLIError("usage error: --runtime | --deployment-container-image-name |"
                           " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file
        if runtime:
            site_config.linux_fx_version = runtime
            match = helper.resolve(runtime)
            if not match:
                raise CLIError("Linux Runtime '{}' is not supported."
                               "Please invoke 'list-runtimes' to cross check".format(runtime))
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                          value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
    elif plan_info.is_xenon: # windows container webapp
        site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
    elif runtime: # windows webapp with runtime specified
        if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
            raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
                           "--multicontainer-config-type and --multicontainer-config-file is "
                           "only appliable on linux webapp")
        match = helper.resolve(runtime)
        if not match:
            raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
        match['setter'](match, site_config)
        # Be consistent with portal: any windows webapp should have this even it doesn't have node in the stack
        if not match['displayName'].startswith('node'):
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))
    else: # windows webapp without runtime specified
        site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                      value=node_default_version))
    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)
    poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)
    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
    return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """Return True when exactly one container configuration style is chosen.

    The multicontainer type and file must be supplied together (or not at
    all), and exactly one of runtime / container image / multicontainer
    config may be selected.
    """
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    chosen = sum(1 for opt in (runtime, deployment_container_image_name,
                               multicontainer_config_type) if opt)
    return chosen == 1
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Set app settings; entries given via slot_settings are also marked sticky.

    Each entry is either plain KEY=VALUE or JSON as emitted by the `list`
    command (whose items may carry a 'slotSetting' flag).
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for src, dest in [(settings, result), (slot_settings, slot_result)]:
        for s in src:
            try:
                temp = shell_safe_json_parse(s)
                if isinstance(temp, list): # a bit messy, but we'd like accept the output of the "list" command
                    for t in temp:
                        if t.get('slotSetting', True):
                            slot_result[t['name']] = t['value']
                        else:
                            result[t['name']] = t['value']
                else:
                    dest.update(temp)
            except CLIError:
                # Not JSON — fall back to plain KEY=VALUE parsing.
                setting_name, value = s.split('=', 1)
                dest[setting_name] = value
    result.update(slot_result)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    app_settings_slot_cfg_names = []
    if slot_result:
        # Record the new sticky (slot) setting names on the site config.
        new_slot_setting_names = slot_result.keys()
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        slot_cfg_names.app_setting_names += new_slot_setting_names
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Attach an Azure storage mount to the app under custom_id.

    Fails if custom_id is already configured (use the `update` command for
    that).  With slot_setting=True the id is also marked sticky to the slot.
    """
    from azure.mgmt.web.models import AzureStorageInfoValue
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    if custom_id in azure_storage_accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))
    azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
                                                                         share_name=share_name, access_key=access_key,
                                                                         mount_path=mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)
    if slot_setting:
        # Mark the configuration id as sticky so swaps don't move it.
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Partially update an existing Azure storage mount identified by custom_id.

    Omitted fields keep their current values; fails if custom_id is not
    configured (use the `add` command for that).
    """
    from azure.mgmt.web.models import AzureStorageInfoValue
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
    if not existing_account_config:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))
    # Merge: any omitted argument falls back to the existing configuration.
    new_account_config = AzureStorageInfoValue(
        type=storage_type or existing_account_config.type,
        account_name=account_name or existing_account_config.account_name,
        share_name=share_name or existing_account_config.share_name,
        access_key=access_key or existing_account_config.access_key,
        mount_path=mount_path or existing_account_config.mount_path
    )
    azure_storage_accounts.properties[custom_id] = new_account_config
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)
    if slot_setting:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Push a local zip to the app's Kudu zipdeploy endpoint and poll until done.

    src is a path to the zip on disk; timeout (seconds) bounds the polling of
    the async deployment status endpoint.
    """
    logger.warning("Getting scm site credentials for zip deployment")
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    zip_url = scm_url + '/api/zipdeploy?isAsync=true'
    deployment_status_url = scm_url + '/api/deployments/latest'
    import urllib3
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    headers = authorization
    headers['content-type'] = 'application/octet-stream'
    import requests
    import os
    from azure.cli.core.util import should_disable_connection_verify
    # Read file content
    with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
        zip_content = fs.read()
    logger.warning("Starting zip deployment. This operation can take a while to complete ...")
    # isAsync=true means this POST returns immediately; completion is
    # observed via the deployment status endpoint below.
    requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
    # check the status of async deployment
    response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
                                            authorization, timeout)
    return response
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
                                setting_properties, slot=None, client=None):
    """Invoke a web_apps settings operation, dispatching to the `_slot` variant
    when a slot is given."""
    client = client or web_client_factory(cli_ctx)
    operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
    if slot is None:
        # NOTE(review): the bare builtin `str` is passed positionally here and
        # below — it looks like a leftover placeholder for a `kind` argument;
        # confirm against the SDK operation signature.
        return operation(resource_group_name, name, str, setting_properties)
    return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
    """Show a webapp (or slot), normalizing its plan property and attaching
    the FTP publishing URL.  app_instance short-circuits the GET."""
    webapp = app_instance
    if not app_instance: # when the routine is invoked as a help method, not through commands
        webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    _rename_server_farm_props(webapp)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
    return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Fetch a webapp (or one of its slots) for the generic-update pipeline."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
               skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
    """Generic-update setter: push the modified site object back via
    create_or_update (slot-aware)."""
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
    kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
                  skip_dns_registration=skip_dns_registration,
                  skip_custom_domain_verification=skip_custom_domain_verification,
                  force_dns_registration=force_dns_registration,
                  ttl_in_seconds=ttl_in_seconds)
    if slot:
        kwargs['slot'] = slot
    return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
    """Generic-update hook for webapps; rejects function apps.

    The CLI passes the two flags as the strings 'true'/'false'; None means
    "leave unchanged".  Returns the (mutated) instance.
    """
    if 'function' in instance.kind:
        raise CLIError("please use 'az functionapp update' to update this function app")
    for attr, flag in (('client_affinity_enabled', client_affinity_enabled),
                       ('https_only', https_only)):
        if flag is not None:
            setattr(instance, attr, flag == 'true')
    return instance
def update_functionapp(cmd, instance, plan=None):
    """Generic-update hook for function apps; optionally moves the app to
    another (compatible) plan."""
    client = web_client_factory(cmd.cli_ctx)
    if plan is not None:
        # `plan` may be a bare name (resolved in the app's own resource
        # group) or a full ARM id.
        if is_valid_resource_id(plan):
            dest_parse_result = parse_resource_id(plan)
            dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
                                                          dest_parse_result['name'])
        else:
            dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
        if dest_plan_info is None:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        validate_plan_switch_compatibility(client, instance, dest_plan_info)
        instance.server_farm_id = dest_plan_info.id
    return instance
def validate_plan_switch_compatibility(client, src_functionapp_instance, dest_plan_instance):
    """Raise CLIError unless both the current and destination plans are
    Consumption or Elastic Premium (the only supported switch)."""
    general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
    src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
    src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
                                                 src_parse_result['name'])
    if src_plan_info is None:
        raise CLIError('Could not determine the current plan of the functionapp')
    if not (is_plan_consumption(src_plan_info) or is_plan_elastic_premium(src_plan_info)):
        raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
    if not (is_plan_consumption(dest_plan_instance) or is_plan_elastic_premium(dest_plan_instance)):
        raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
                       general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Generic-update setter for function apps; rejects non-function sites."""
    instance = kwargs['parameters']
    if 'function' not in instance.kind:
        raise CLIError('Not a function app to update')
    client = web_client_factory(cmd.cli_ctx)
    return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
    """List web apps, excluding function apps (kind contains 'function')."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' not in app.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List deleted apps matching the filters, ordered by deleted-site id."""
    deleted = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    return sorted(deleted, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a previously deleted app into resource_group_name/name.

    recover_configuration is the inverse of restore_content_only: a
    content-only restore skips the app configuration.
    """
    request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
    """List only function apps (kind contains 'function')."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' in app.kind]
def _list_app(cli_ctx, resource_group_name=None):
    """Return all sites (optionally scoped to one resource group),
    normalizing the server-farm property on each."""
    client = web_client_factory(cli_ctx)
    if resource_group_name:
        apps = list(client.web_apps.list_by_resource_group(resource_group_name))
    else:
        apps = list(client.web_apps.list())
    for app in apps:
        _rename_server_farm_props(app)
    return apps
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Collect deleted apps across every supported location, then filter by
    resource group, site name, and slot (names compared case-insensitively)."""
    client = web_client_factory(cli_ctx)
    deleted = []
    for location in _get_deleted_apps_locations(cli_ctx):
        deleted.extend(client.deleted_web_apps.list_by_location(location))
    if resource_group_name:
        deleted = [d for d in deleted if d.resource_group == resource_group_name]
    if name:
        deleted = [d for d in deleted if d.deleted_site_name.lower() == name.lower()]
    if slot:
        deleted = [d for d in deleted if d.slot.lower() == slot.lower()]
    return deleted
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
    """Enable the system-assigned managed identity on the app, optionally
    granting `role` over `scope`; returns the resulting identity."""
    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    def setter(webapp):
        webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
    """Show the managed identity of a web app (or slot)."""
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    return site.identity
def remove_identity(cmd, resource_group_name, name, slot=None):
    """Disable the managed identity on the app by setting its type to 'None'.

    Reuses the ARM assign-identity helper (without role/scope) so the
    get/update/poll plumbing is shared with assign_identity above.
    """
    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    def setter(webapp):
        # 'None' is the service's sentinel for "no managed identity"
        webapp.identity = ManagedServiceIdentity(type='None')
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter)
    return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Fetch the authentication/authorization settings of a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None,  # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None,  # pylint: disable=unused-argument
                         token_refresh_extension_hours=None,  # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None,  # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None,  # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None,  # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None,  # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None,  # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None,  # pylint: disable=unused-argument
                         microsoft_account_client_secret=None,  # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None):  # pylint: disable=unused-argument
    """Update selected auth settings, leaving unspecified ones untouched.

    The parameters are intentionally "unused": every argument after
    (cmd, resource_group_name) is copied by name onto the fetched settings
    object via frame reflection below, so parameter names MUST match the
    SiteAuthSettings attribute names.
    """
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        # any other action means: redirect anonymous users to the given provider
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]
    import inspect
    frame = inspect.currentframe()
    # these arrive as 'true'/'false' strings and must become real booleans
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # skip cmd and resource_group_name; copy every explicitly-provided value
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
    """List the display names of supported runtime stacks (Windows or Linux)."""
    helper = _StackRuntimeHelper(web_client_factory(cmd.cli_ctx), linux)
    return [stack['displayName'] for stack in helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete a function app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):
    """Delete a web app (or slot), honoring the keep-* flags.

    For each flag the SDK takes False to suppress the cleanup step and
    None to apply its default behavior.
    """
    client = web_client_factory(cmd.cli_ctx)
    delete_kwargs = {
        'delete_metrics': False if keep_metrics else None,
        'delete_empty_server_farm': False if keep_empty_plan else None,
        'skip_dns_registration': False if keep_dns_registration else None,
    }
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot, **delete_kwargs)
    else:
        client.web_apps.delete(resource_group_name, name, **delete_kwargs)
def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop a web app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start a web app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart a web app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Fetch the site configuration of a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
    """Return app settings annotated with whether each one is slot-sticky."""
    settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(
        resource_group_name, name).app_setting_names
    return _build_app_settings_output(settings.properties, sticky_names)
def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """Return connection strings annotated with their slot-sticky markers."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(
        resource_group_name, name).connection_string_names or []
    return [{'name': setting,
             'value': conn_strings.properties[setting],
             'slotSetting': setting in sticky_names}
            for setting in conn_strings.properties]
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """Return Azure storage account mounts annotated with slot-sticky markers."""
    client = web_client_factory(cmd.cli_ctx)
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    sticky_names = client.web_apps.list_slot_configuration_names(
        resource_group_name, name).azure_storage_config_names or []
    return [{'name': account,
             'value': accounts.properties[account],
             'slotSetting': account in sticky_names}
            for account in accounts.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publish URL from the publish profiles onto *webapp*."""
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    ftp_url = next(profile['publishUrl'] for profile in profiles
                   if profile['publishMethod'] == 'FTP')
    webapp.ftpPublishingUrl = ftp_url
    return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
    """Set the container image on the fx-version field matching the app's OS.

    Linux apps (reserved) get linux_fx_version; Windows container apps
    (is_xenon) get windows_fx_version. The other field is left untouched
    (None is skipped by update_site_configs' reflection loop).
    """
    fx_version = _format_fx_version(custom_image_name)
    web_app = get_webapp(cmd, resource_group_name, name, slot)
    linux_fx = fx_version if web_app.reserved else None
    windows_fx = fx_version if web_app.is_xenon else None
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
    """Clear linux_fx_version; a single space is the service's "unset" value."""
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
    """Return linux_fx_version if set, else windows_fx_version, else ''."""
    site_config = get_site_configs(cmd, resource_group_name, name, slot)
    for fx in (site_config.linux_fx_version, site_config.windows_fx_version):
        if fx:
            return fx
    return ''
def url_validator(url):
    """Return True when *url* parses with a scheme, a host, and a path."""
    try:
        parts = urlparse(url)
    except ValueError:
        return False
    return bool(parts.scheme and parts.netloc and parts.path)
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
    """Return the base64-decoded multicontainer config bytes for the app.

    Raises CLIError when the app's fx version is not one of the
    multicontainer types (MULTI_CONTAINER_TYPES).
    """
    from base64 import b64decode
    linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
    if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
        raise CLIError("Cannot decode config that is not one of the"
                       " following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
    # fx version has the shape '<TYPE>|<base64 payload>'; decode the payload
    return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
    """Read a multicontainer config (URL or local path) and base64-encode it."""
    from base64 import b64encode
    if url_validator(file_name):
        # remote config: download the raw bytes
        response = urlopen(file_name, context=_ssl_context())
        config_file_bytes = response.read()
    else:
        with open(file_name, 'rb') as config_file:
            config_file_bytes = config_file.read()
    # Decode base64 encoded byte array into string
    return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
def update_site_configs(cmd, resource_group_name, name, slot=None,
                        linux_fx_version=None, windows_fx_version=None, reserved_instance_count=None, php_version=None,  # pylint: disable=unused-argument
                        python_version=None, net_framework_version=None,  # pylint: disable=unused-argument
                        java_version=None, java_container=None, java_container_version=None,  # pylint: disable=unused-argument
                        remote_debugging_enabled=None, web_sockets_enabled=None,  # pylint: disable=unused-argument
                        always_on=None, auto_heal_enabled=None,  # pylint: disable=unused-argument
                        use32_bit_worker_process=None,  # pylint: disable=unused-argument
                        min_tls_version=None,  # pylint: disable=unused-argument
                        http20_enabled=None,  # pylint: disable=unused-argument
                        app_command_line=None,  # pylint: disable=unused-argument
                        ftps_state=None,  # pylint: disable=unused-argument
                        generic_configurations=None):
    """Update selected site-config values, leaving unspecified ones untouched.

    The "unused" parameters are copied by name onto the fetched config via
    frame reflection below, so parameter names MUST match SiteConfig
    attribute names. generic_configurations accepts 'key=value' pairs or
    JSON objects for settings without a dedicated parameter.
    """
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if linux_fx_version:
        if linux_fx_version.strip().lower().startswith('docker|'):
            # single-container Docker apps must not use the shared content store
            update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
        else:
            delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
    if reserved_instance_count is not None:
        reserved_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', reserved_instance_count,
                                                             min_val=0, max_val=20)
    import inspect
    frame = inspect.currentframe()
    # CLI passes these as 'true'/'false' strings; convert to real booleans
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
                  'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
    int_flags = ['reserved_instance_count']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # skip cmd/resource_group_name/name; copy every explicitly-provided value
    for arg in args[3:]:
        if arg in int_flags and values[arg] is not None:
            values[arg] = validate_and_convert_to_int(arg, values[arg])
        if arg != 'generic_configurations' and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    generic_configurations = generic_configurations or []
    result = {}
    for s in generic_configurations:
        try:
            result.update(get_json_object(s))
        except CLIError:
            # not JSON: treat as a single 'key=value' pair
            config_name, value = s.split('=', 1)
            result[config_name] = value
    for config_name, value in result.items():
        setattr(configs, config_name, value)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the given app settings, also dropping their slot-sticky markers.

    Returns the remaining settings in the standard annotated output shape.
    """
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    is_slot_settings = False
    for setting_name in setting_names:
        app_settings.properties.pop(setting_name, None)
        if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
            slot_cfg_names.app_setting_names.remove(setting_name)
            is_slot_settings = True
    # only write back the sticky-name list when it actually changed
    if is_slot_settings:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    """Remove the storage-account mount *custom_id*, dropping its sticky marker.

    Returns the remaining storage-account properties.
    """
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    is_slot_settings = False
    azure_storage_accounts.properties.pop(custom_id, None)
    if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
        slot_cfg_names.azure_storage_config_names.remove(custom_id)
        is_slot_settings = True
    # only write back the sticky-name list when it actually changed
    if is_slot_settings:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)
    return result.properties
def _ssl_context():
    """Return an SSL context for urlopen calls.

    On Python < 3.4, or in Cloud Shell on Windows, the default certificate
    verification is not usable, so fall back to a bare (unverified) context.

    Fix: the original called sys.platform.system() — sys.platform is a str
    and has no .system(), so the Cloud Shell branch always raised
    AttributeError; platform.system() is the correct call.
    """
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Shape a settings dict into [{'name', 'value', 'slotSetting'}, ...],
    masking credential-related values first."""
    sticky = slot_cfg_names or []
    masked = _mask_creds_related_appsettings(app_settings)
    return [{'name': setting,
             'value': app_settings[setting],
             'slotSetting': setting in sticky} for setting in masked]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Add or update connection strings; entries given via --slot-settings are
    additionally marked slot-sticky. Each entry is 'NAME=value'.

    Returns the updated connection-string properties.
    """
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '=', connection string should not have '=' in the name
        conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quots used as separators
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
                                                                            type=connection_string_type)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings.properties, slot, client)
    if slot_settings:
        # record the slot-sticky names so they survive slot swaps
        new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
        slot_cfg_names.connection_string_names += new_slot_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the given connection strings, dropping their slot-sticky markers.

    Returns the service response from the update call.
    """
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    is_slot_settings = False
    for setting_name in setting_names:
        conn_strings.properties.pop(setting_name, None)
        if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
            slot_cfg_names.connection_string_names.remove(setting_name)
            is_slot_settings = True
    # only write back the sticky-name list when it actually changed
    if is_slot_settings:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings.properties, slot, client)
# App settings that control container deployment; the container commands
# filter the full settings list down to these.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Settings whose values are masked (set to None) before being shown to the user.
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    """Update container-related settings (registry, image, multicontainer config).

    When the registry is an ACR and no credentials were supplied, tries to
    look them up from the registry resource. Returns the container-related
    settings with credentials masked.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        # registry name is the first label of the host (url may lack a scheme)
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex)  # consider throw if needed
    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if docker_custom_image_name is not None:
        _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage:  # pylint: disable=line-too-long
        update_app_settings(cmd, resource_group_name, name, settings, slot)
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    # multicontainer config file and type must be supplied together
    if multicontainer_config_file and multicontainer_config_type:
        encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
        linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
        update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
    elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')
    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
                                          docker_custom_image_name=None, docker_registry_server_user=None,
                                          docker_registry_server_password=None, slot=None):
    """Function-app variant: delegates to update_container_settings without
    the webapp-only storage/multicontainer options."""
    return update_container_settings(cmd, resource_group_name, name,
                                     docker_registry_server_url=docker_registry_server_url,
                                     docker_custom_image_name=docker_custom_image_name,
                                     docker_registry_server_user=docker_registry_server_user,
                                     websites_enable_app_service_storage=None,
                                     docker_registry_server_password=docker_registry_server_password,
                                     multicontainer_config_type=None,
                                     multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
    """Look up admin credentials for the ACR named *registry_name*.

    Returns (username, password). Raises CLIError when the registry cannot
    be uniquely resolved in the subscription or its admin user is disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
    # resolve the registry resource (and thus its resource group) by name
    result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    result = [item for item in result if item.name.lower() == registry_name]
    if not result or len(result) > 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(result[0].id)['resource_group']
    registry = client.get(resource_group_name, registry_name)
    if registry.admin_user_enabled:  # pylint: disable=no-member
        cred = client.list_credentials(resource_group_name, registry_name)
        return cred.username, cred.passwords[0].value
    raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                   "credentials or run 'az acr update -n {} --admin-enabled true' to enable "
                   "admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
    """Clear the custom container image and remove container app settings."""
    _delete_linux_fx_version(cmd, resource_group_name, name, slot)
    delete_app_settings(cmd, resource_group_name, name,
                        CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
    """Show container-related settings, with credential values masked."""
    all_settings = get_app_settings(cmd, resource_group_name, name, slot)
    container_settings = _filter_for_container_settings(cmd, resource_group_name, name, all_settings,
                                                        show_multicontainer_config, slot)
    return _mask_creds_related_appsettings(container_settings)
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
    """Function-app variant: container settings without multicontainer decoding."""
    return show_container_settings(cmd, resource_group_name, name,
                                   show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
                                   show_multicontainer_config=None, slot=None):
    """Keep only container-related settings, appending the fx-version-derived
    image name and, on request, the decoded multicontainer config."""
    container_settings = [entry for entry in settings
                          if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
    if fx_version:
        container_settings.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                                   'value': fx_version})
        if show_multicontainer_config:
            decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
            container_settings.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
                                       'value': decoded_value})
    return container_settings
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Null out sensitive values (APPSETTINGS_TO_MASK) before display.

    Accepts either a dict keyed by setting name, or the list of
    {'name': ..., 'value': ...} entries produced by
    _build_app_settings_output / _filter_for_container_settings.

    Fix: the original only handled the dict shape; for list input the
    membership test compared whole dicts against names and never matched,
    so DOCKER_REGISTRY_SERVER_PASSWORD was displayed unmasked.
    """
    if isinstance(settings, dict):
        for key in [k for k in settings if k in APPSETTINGS_TO_MASK]:
            settings[key] = None
    else:
        for entry in settings:
            if entry.get('name') in APPSETTINGS_TO_MASK:
                entry['value'] = None
    return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Bind *hostname* to the web app (or slot).

    Raises CLIError when the app does not exist.
    """
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
    if slot is None:
        return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
    return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
                                                                   slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Remove a hostname binding from the web app (or slot)."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
    # note: the slot variant takes (rg, name, slot, hostname) — slot before hostname
    return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
    """List hostname bindings, trimming names to their last path segment."""
    bindings = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                            'list_host_name_bindings', slot))
    for binding in bindings:
        binding.name = binding.name.split('/')[-1]
    return bindings
def get_external_ip(cmd, resource_group_name, webapp_name):
    """Return {'ip': <address>} for the app's inbound address.

    ASE-hosted apps use the environment's VIPs (internal address when set,
    otherwise an IP-SSL virtual IP or the service address); regular apps
    resolve their default hostname through DNS.
    """
    # logics here are ported from portal
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    if webapp.hosting_environment_profile:
        address = client.app_service_environments.list_vips(
            resource_group_name, webapp.hosting_environment_profile.name)
        if address.internal_ip_address:
            ip_address = address.internal_ip_address
        else:
            # prefer a dedicated IP-based-SSL virtual IP when one is configured
            vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
            ip_address = vip.virtual_ip if vip else address.service_ip_address
    else:
        ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
    return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot for a web app.

    When *configuration_source* is given, the new slot's configuration,
    app settings and connection strings are cloned from it afterwards.
    """
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    if 'functionapp' in site.kind:
        raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
    location = site.location
    slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    slot_def.site_config = SiteConfig()
    poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
    # the service returns 'app/slot'; show just the slot name
    result.name = result.name.split('/')[-1]
    return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
    """Create a deployment slot for a function app.

    Unlike webapp slots, function app slots are seeded with a copy of the
    production app settings so runtime configuration is present from the start.
    """
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' function app doesn't exist".format(name))
    location = site.location
    slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    slot_def.site_config = SiteConfig()
    # function app slots need to have all the App Settings from the source
    prodsite_appsettings = get_app_settings(cmd, resource_group_name, name)
    slot_def.site_config.app_settings = prodsite_appsettings[:]
    poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
    # the service returns 'app/slot'; show just the slot name
    result.name = result.name.split('/')[-1]
    return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
    """Clone site config, app settings and connection strings from
    *configuration_source* (the production app or another slot) into *slot*.

    Slot-sticky settings are removed from the cloned copies so they do not
    propagate across slots.
    """
    # a source equal to the app name means "clone from production" (slot=None)
    clone_from_prod = configuration_source.lower() == webapp.lower()
    site_config = get_site_configs(cmd, resource_group_name, webapp,
                                   None if clone_from_prod else configuration_source)
    _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                            'update_configuration', slot, site_config)
    # slot create doesn't clone over the app-settings and connection-strings, so we do it here
    # also make sure slot settings don't get propagated.
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
    src_slot = None if clone_from_prod else configuration_source
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                           'list_application_settings',
                                           src_slot)
    for a in slot_cfg_names.app_setting_names or []:
        app_settings.properties.pop(a, None)
    connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                                 'list_connection_strings',
                                                 src_slot)
    for a in slot_cfg_names.connection_string_names or []:
        connection_strings.properties.pop(a, None)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_application_settings',
                                app_settings.properties, slot, client)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_connection_strings',
                                connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None,  # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, cd_app_type=None,
                          app_working_dir=None, nodejs_task_runner=None, python_framework=None,
                          python_version=None, cd_account_create=None, cd_project_url=None, test=None,
                          slot_swap=None, private_repo_username=None, private_repo_password=None):
    """Configure deployment source control for the app.

    With --cd-project-url, sets up VSTS continuous delivery; otherwise wires
    the repo directly to the site (git or mercurial), retrying on transient
    SCM-site 50x failures.

    Fix: the VSTS test path called list_webapp(resource_group_name) without
    the required cmd argument, which would raise at runtime.
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    if cd_project_url:
        # Add default values
        cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
        python_framework = 'Django' if python_framework is None else python_framework
        python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(cmd, resource_group_name)
        vsts_provider = VstsContinuousDeliveryProvider()
        cd_app_type_details = {
            'cd_app_type': cd_app_type,
            'app_working_dir': app_working_dir,
            'nodejs_task_runner': nodejs_task_runner,
            'python_framework': python_framework,
            'python_version': python_version
        }
        try:
            status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
                                                             branch, git_token, slot_swap, cd_app_type_details,
                                                             cd_project_url, cd_account_create, location, test,
                                                             private_repo_username, private_repo_password, webapp_list)
        except RuntimeError as ex:
            raise CLIError(ex)
        logger.warning(status.status_message)
        return status
    else:
        non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
                           python_version, cd_account_create, test, slot_swap]
        if any(non_vsts_params):
            raise CLIError('Following parameters are of no use when cd_project_url is None: ' +
                           'cd_app_type, app_working_dir, nodejs_task_runner, python_framework,' +
                           'python_version, cd_account_create, test, slot_swap')
        from azure.mgmt.web.models import SiteSourceControl, SourceControl
        if git_token:
            # cache the GitHub token so the service can access private repos
            sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
            client.update_source_control('GitHub', sc)
        source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
                                           is_manual_integration=manual_integration,
                                           is_mercurial=(repository_type != 'git'))
        # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
        for i in range(5):
            try:
                poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                 'create_or_update_source_control',
                                                 slot, source_control)
                return LongRunningOperation(cmd.cli_ctx)(poller)
            except Exception as ex:  # pylint: disable=broad-except
                import re
                ex = ex_handler_factory(no_throw=True)(ex)
                # for non server errors(50x), just throw; otherwise retry 4 times
                if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                    raise
                logger.warning('retrying %s/4', i + 1)
                time.sleep(5)  # retry in a moment
def update_git_token(cmd, git_token=None):
    '''
    Update source control token cached in Azure app service. If no token is provided,
    the command will clean up existing token.
    '''
    from azure.mgmt.web.models import SourceControl
    client = web_client_factory(cmd.cli_ctx)
    sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
    return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
    """Show the source-control configuration of the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
    """Remove the source-control configuration from the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
    """Switch the app's SCM type to LocalGit and return the local git URL.

    Returns {'url': <git clone url>} for the app (or slot).
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    site_config = SiteConfigResource(location=location)
    site_config.scm_type = 'LocalGit'
    if slot is None:
        client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
    else:
        client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
                                                            site_config, slot)
    return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
    """Trigger a repository sync for the app (or slot)."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:  # Because of bad spec, sdk throws on 200. We capture it here
        if ex.status_code not in [200, 204]:
            raise ex
def list_app_service_plans(cmd, resource_group_name=None):
    """List App Service plans, subscription-wide or within a single resource group."""
    client = web_client_factory(cmd.cli_ctx)
    if resource_group_name is None:
        source = client.app_service_plans.list()
    else:
        source = client.app_service_plans.list_by_resource_group(resource_group_name)
    plans = list(source)
    for plan in plans:
        # prune a few useless fields
        del plan.geo_region
        del plan.subscription
    return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, sku='B1', number_of_workers=None,
                            location=None, tags=None):
    """Create an App Service plan; Linux and Hyper-V (Windows container) are mutually exclusive."""
    if is_linux and hyper_v:
        raise CLIError('usage error: --is-linux | --hyper-v')
    client = web_client_factory(cmd.cli_ctx)
    sku = _normalize_sku(sku)
    plan_location = location if location is not None else \
        _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location=plan_location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name)
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
    """Mutate *instance*'s sku tier/name and/or worker capacity; return the same instance."""
    sku_def = instance.sku
    if sku is not None:
        normalized = _normalize_sku(sku)
        sku_def.tier = get_sku_name(normalized)
        sku_def.name = normalized
    if number_of_workers is not None:
        sku_def.capacity = number_of_workers
    instance.sku = sku_def
    return instance
def update_functionapp_app_service_plan(instance, sku=None, number_of_workers=None, max_burst=None):
    """Update a function app plan's sku, worker count and (EP only) max burst.

    NOTE(review): update_app_service_plan is invoked twice — the first call folds
    the new sku into *instance* before the Elastic Premium check below; the second
    re-applies with the range-validated worker count. Order appears deliberate —
    confirm before restructuring.
    """
    instance = update_app_service_plan(instance, sku, number_of_workers)
    if max_burst is not None:
        # --max-burst only applies to Elastic Premium plans
        if not is_plan_elastic_premium(instance):
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
        instance.maximum_elastic_worker_count = max_burst
    if number_of_workers is not None:
        # validated here, then re-applied by the second update_app_service_plan call
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
                                                       number_of_workers, min_val=0, max_val=20)
    return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
    """Show the backup configuration of a web app; CLIError when none exists."""
    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        raise CLIError('Backup configuration not found')
    return configuration
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
    """List existing backups of a web app (or one of its slots)."""
    operation = 'list_backups'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, operation, slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Trigger an on-demand backup of the app into the given storage container."""
    client = web_client_factory(cmd.cli_ctx)
    # the service appends '.zip' itself, so strip a user-supplied suffix
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    db_setting = _create_db_setting(db_name, db_type, db_connection_string)
    backup_request = BackupRequest(backup_request_name=backup_name,
                                   storage_account_url=storage_account_url, databases=db_setting)
    if not slot:
        return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
    return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, db_type=None, backup_name=None, slot=None):
    """Create or update the scheduled-backup configuration of a web app.

    Arguments left as None fall back to the values stored in the existing
    configuration. When no configuration exists yet, --container-url,
    --frequency, --retention and --retain-one must all be supplied.
    """
    configuration = None
    # the service appends '.zip' itself, so strip a user-supplied suffix
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    if not backup_name:
        from datetime import datetime
        # default name: '<app>_YYYYmmddHHMM'
        backup_name = '{0}_{1}'.format(webapp_name, datetime.utcnow().strftime('%Y%m%d%H%M'))
    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except DefaultErrorResponseException:
        # No configuration set yet
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')
    # If arguments were not specified, use the values in the current backup schedule
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url
    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days
    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        # the flag arrives as a string from the CLI; normalize to bool
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit
    # carry over database settings from the existing configuration when not overridden
    if configuration and configuration.databases:
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string
    db_setting = _create_db_setting(db_name, db_type, db_connection_string)
    backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
                                     keep_at_least_one_backup=keep_at_least_one_backup,
                                     retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore a web app from a backup blob, optionally into a different app/slot."""
    client = web_client_factory(cmd.cli_ctx)
    # the blob on the storage account always carries a '.zip' suffix
    storage_blob_name = backup_name if backup_name.lower().endswith('.zip') else backup_name + '.zip'
    db_setting = _create_db_setting(db_name, db_type, db_connection_string)
    restore_request = RestoreRequest(storage_account_url=storage_account_url,
                                     blob_name=storage_blob_name, overwrite=overwrite,
                                     site_name=target_name, databases=db_setting,
                                     ignore_conflicting_host_names=ignore_hostname_conflict)
    if not slot:
        return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
    return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
def list_snapshots(cmd, resource_group_name, name, slot=None):
    """List the restorable snapshots of a web app (or one of its slots)."""
    operation = 'list_snapshots'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False,  # pylint: disable=redefined-outer-name
                     source_resource_group=None, source_name=None, source_slot=None):
    """Restore an app from a snapshot — either its own, or one taken from another app."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    client = web_client_factory(cmd.cli_ctx)
    recover_config = not restore_content_only
    cross_restore = bool(source_resource_group and source_name)
    if (source_resource_group or source_name) and not cross_restore:
        # exactly one of the pair was given
        raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
    if cross_restore:
        # Restore from source app to target app
        sub_id = get_subscription_id(cmd.cli_ctx)
        source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
                    "/providers/Microsoft.Web/sites/" + source_name
        if source_slot:
            source_id = source_id + "/slots/" + source_slot
        source = SnapshotRecoverySource(id=source_id)
        request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
                                         recover_configuration=recover_config)
    else:
        # Overwrite app with its own snapshot
        request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
    if slot:
        return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
    return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(db_name, db_type, db_connection_string):
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
elif any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(frequency):
    """Parse a backup frequency string like '7d' or '12h'.

    Returns (int, FrequencyUnit) — the count and the day/hour unit.
    Raises CLIError for a bad unit suffix, a non-numeric count, or a
    non-positive count.
    """
    unit_part = frequency.lower()[-1]
    if unit_part == 'd':
        frequency_unit = FrequencyUnit.day
    elif unit_part == 'h':
        frequency_unit = FrequencyUnit.hour
    else:
        raise CLIError('Frequency must end with d or h for "day" or "hour"')
    try:
        frequency_num = int(frequency[:-1])
    except ValueError:
        raise CLIError('Frequency must start with a number')
    # bug fix: the old check was `< 0`, which let a zero frequency through
    # despite the message demanding a positive value
    if frequency_num <= 0:
        raise CLIError('Frequency must be positive')
    return frequency_num, frequency_unit
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Return the Azure location of the given resource group."""
    from azure.mgmt.resource import ResourceManagementClient
    rg_client = get_mgmt_service_client(cli_ctx, ResourceManagementClient)
    return rg_client.resource_groups.get(resource_group_name).location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
    """Return the locations where the Microsoft.Web 'deletedSites' resource type is available."""
    from azure.mgmt.resource import ResourceManagementClient
    rm_client = get_mgmt_service_client(cli_ctx, ResourceManagementClient)
    web_provider = rm_client.providers.get('Microsoft.Web')
    for resource_type in web_provider.resource_types:
        if resource_type.resource_type == 'deletedSites':
            return resource_type.locations
    return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    """Compose the local-git remote URL, embedding the publishing user name."""
    user = client.get_publishing_user()
    source_control = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
    parsed = urlparse(source_control.repo_url)
    return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name, parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
    """Return the https Kudu/SCM URL of the app, derived from its repository host name."""
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
    scm_host = next((host.name for host in webapp.host_name_ssl_states or []
                     if host.host_type == HostType.repository), None)
    if scm_host is None:
        # this should not happen, but throw anyway
        raise ValueError('Failed to retrieve Scm Uri')
    return "https://{}".format(scm_host)
def set_deployment_user(cmd, user_name, password=None):
    '''
    Update deployment credentials.(Note, all webapps in your subscription will be impacted)
    '''
    client = web_client_factory(cmd.cli_ctx)
    if password is None:
        # interactively prompt when no password was supplied
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user = User(publishing_user_name=user_name)
    user.publishing_password = password
    return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
    """Return the app's publishing credentials (resolving the long-running operation)."""
    poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'list_publishing_credentials', slot)
    return poller.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
    """Return the app's publish profiles as a list of plain dicts.

    Downloads the publish-settings XML (with secrets) and converts each
    <publishProfile> element into a dict, stripping the '@' attribute prefix
    xmltodict adds.
    """
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot)
    # join once instead of quadratic '+=' concatenation
    full_xml = ''.join(chunk.decode() for chunk in content)
    profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
    # bug fix: xmltodict returns a single dict (not a list) when there is exactly
    # one profile; the old loop then iterated the dict's keys
    if not isinstance(profiles, list):
        profiles = [profiles]
    converted = []
    for profile in profiles:
        # strip the leading '@' xmltodict put in for attributes
        converted.append({key.lstrip('@'): value for key, value in profile.items()})
    return converted
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    """Toggle container continuous deployment and return the resulting CI/CD URL settings."""
    update_app_settings(cmd, resource_group_name, name,
                        ["DOCKER_ENABLE_CI=" + enable], slot)
    return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    """Show whether container CI is enabled and, if so, the webhook URL to register."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = any(s['name'] == 'DOCKER_ENABLE_CI' and s['value'] == 'true'
                         for s in settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if docker_enabled:
        # derive the webhook URL from the MSDeploy publish profile
        for profile in list_publish_profiles(cmd, resource_group_name, name, slot):
            if profile['publishMethod'] == 'MSDeploy':
                scmUrl = profile['publishUrl'].replace(":443", "")
                cd_url = 'https://' + profile['userName'] + ':' + profile['userPWD'] + '@' + scmUrl + '/docker/hook'
                cd_settings['CI_CD_URL'] = cd_url
                break
    else:
        cd_settings['CI_CD_URL'] = ''
    return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    """Open the app's URL in the default browser; optionally start tailing its logs."""
    open_page_in_browser(_get_url(cmd, resource_group_name, name, slot))
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
    """Return the app's browsable URL, preferring https when any host name has SSL enabled."""
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    # picks the custom domain URL incase a domain is assigned
    host = site.enabled_host_names[0]
    has_ssl = any(h.ssl_state != SslState.disabled for h in site.host_name_ssl_states)
    scheme = 'https' if has_ssl else 'http'
    return scheme + '://' + host
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    """Configure application/web-server/container logging and tracing for a web app.

    Each optional flag is applied only when not None, so aspects not mentioned
    on the command line are left untouched in the diagnostics configuration.
    """
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location
    application_logs = None
    if application_logging is not None:
        # disabling forces level 'Off'; enabling without a level defaults to 'Error'
        if not application_logging:
            level = 'Off'
        elif level is None:
            level = 'Error'
        fs_log = FileSystemApplicationLogsConfig(level=level)
        application_logs = ApplicationLogsConfig(file_system=fs_log)
    http_logs = None
    # --web-server-logging and --docker-container-logging feed the same config section
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    """Show the current diagnostic-logs configuration of the app (or slot)."""
    operation = 'get_diagnostic_logs_configuration'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable auto swap on a slot (default target 'production') or disable it."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        site_config.auto_swap_slot_name = ''
    else:
        site_config.auto_swap_slot_name = auto_swap_slot or 'production'
    return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
    """List the deployment slots of a web app, with trimmed names and the plan name attached."""
    client = web_client_factory(cmd.cli_ctx)
    slots = list(client.web_apps.list_slots(resource_group_name, webapp))
    for item in slots:
        # 'app/slot' -> 'slot'
        item.name = item.name.split('/')[-1]
        setattr(item, 'app_service_plan', parse_resource_id(item.server_farm_id)['name'])
        del item.server_farm_id
    return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
    """Swap, preview-swap, or reset a deployment slot against the target (default 'production')."""
    client = web_client_factory(cmd.cli_ctx)
    if action == 'swap':
        return client.web_apps.swap_slot_slot(resource_group_name, webapp,
                                              slot, (target_slot or 'production'), True)
    if action == 'preview':
        if target_slot is None:
            return client.web_apps.apply_slot_config_to_production(resource_group_name,
                                                                   webapp, slot, True)
        return client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
                                                             slot, target_slot, True)
    # reset: we will reset both source slot and target slot
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    """Delete a deployment slot of the given web app."""
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    web_client_factory(cmd.cli_ctx).web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
    """Route a percentage of production traffic to slots; *distribution* holds 'slot=percentage' entries."""
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # 'myapp.azurewebsites.net' -> ('myapp', '.azurewebsites.net')
    host_name_val, domain_part = site.default_host_name.split('.', 1)
    host_name_suffix = '.' + domain_part
    rules = []
    for entry in distribution:
        slot, percentage = entry.split('=')
        rules.append(RampUpRule(action_host_name=host_name_val + "-" + slot + host_name_suffix,
                                reroute_percentage=float(percentage),
                                name=slot))
    configs.experiments.ramp_up_rules = rules
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
    return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
    """Show the slot traffic-routing (ramp-up) rules of the app."""
    site_configs = get_site_configs(cmd, resource_group_name, name)
    return site_configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
    """Remove all slot traffic-routing rules by setting an empty distribution."""
    empty_distribution = []
    set_traffic_routing(cmd, resource_group_name, name, empty_distribution)
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Append *allowed_origins* to the app's CORS allowed-origin list."""
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    existing = configs.cors.allowed_origins or []
    configs.cors.allowed_origins = existing + allowed_origins
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Remove *allowed_origins* from the CORS list; an empty list clears every origin."""
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if allowed_origins:
            remaining = [o for o in (configs.cors.allowed_origins or []) if o not in allowed_origins]
        else:
            remaining = []
        configs.cors.allowed_origins = remaining
        configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
    """Show the CORS settings of the app (or slot)."""
    site_configs = get_site_configs(cmd, resource_group_name, name, slot)
    return site_configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    """Stream live logs from the Kudu logstream endpoint until interrupted with ctrl+c."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url += ('/' + provider.lstrip('/'))
    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    worker = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    worker.daemon = True
    worker.start()
    while True:
        time.sleep(100)  # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
    """Download the app's historical logs (Kudu '/dump' endpoint) into *log_file*."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    dump_url = scm_url.rstrip('/') + '/dump'
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    _get_log(dump_url, user_name, password, log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
    """Return the (user name, password) publishing credentials of the app/slot."""
    poller = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
    creds = poller.result()
    return creds.publishing_user_name, creds.publishing_password
def _get_log(url, user_name, password, log_file=None):
    """Fetch logs from the Kudu endpoint at *url* with basic auth.

    Downloads into *log_file* when given; otherwise streams chunks to stdout.
    Raises CLIError when the endpoint does not answer with HTTP 200.
    """
    import certifi
    import urllib3
    try:
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    # bug fix: release the connection on every path — the old code leaked it when
    # raising CLIError or when an exception interrupted the download/stream loop
    try:
        if r.status != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                url, r.status, r.reason))
        if log_file:  # download logs
            with open(log_file, 'wb') as f:
                while True:
                    data = r.read(1024)
                    if not data:
                        break
                    f.write(data)
        else:  # streaming
            std_encoding = sys.stdout.encoding
            for chunk in r.stream():
                if chunk:
                    # Extra encode() and decode for stdout which does not surpport 'utf-8'
                    print(chunk.decode(encoding='utf-8', errors='replace')
                          .encode(std_encoding, errors='replace')
                          .decode(std_encoding, errors='replace'), end='')  # each line of log has CRLF.
    finally:
        r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file):
    """Upload a .pfx certificate and register it next to the app's plan.

    The certificate resource name is derived from the pfx thumbprint, the app's
    hosting environment, its location and the resource group.
    """
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get')
    # bug fix: use a context manager — the old code opened the file and never closed it
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    thumb_print = _get_cert(certificate_password, certificate_file)
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    ''' Decrypts the .pfx file and returns the certificate's sha1 thumbprint. '''
    # bug fix: use a context manager — the old code opened the file and never closed it
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    # digest() yields 'AB:CD:...'; strip the separators
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(cmd, resource_group_name):
    """List the SSL certificates uploaded to the given resource group."""
    return web_client_factory(cmd.cli_ctx).certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    """Delete the certificate with the given thumbprint; CLIError when not found."""
    client = web_client_factory(cmd.cli_ctx)
    webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
    match = next((c for c in webapp_certs if c.thumbprint == certificate_thumbprint), None)
    if match is None:
        raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
    return client.certificates.delete(resource_group_name, match.name)
def _update_host_name_ssl_state(cli_ctx, resource_group_name, webapp_name, location,
                                host_name, ssl_state, thumbprint, slot=None):
    """Flip the SSL binding state of a single host name on the given app/slot."""
    ssl_binding = HostNameSslState(name=host_name,
                                   ssl_state=ssl_state,
                                   thumbprint=thumbprint,
                                   to_update=True)
    site_patch = Site(host_name_ssl_states=[ssl_binding], location=location)
    return _generic_site_operation(cli_ctx, resource_group_name, webapp_name, 'create_or_update',
                                   slot, site_patch)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind or unbind the certificate matching *certificate_thumbprint* on the app's host names.

    The certificate is looked up in the resource group of the app's plan, which
    may differ from the app's own group. Wildcard certificates are matched
    against the app's host names. Raises CLIError when no certificate matches.
    """
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    # certificates are stored alongside the app service plan, not necessarily the app
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
    for webapp_cert in webapp_certs:
        if webapp_cert.thumbprint == certificate_thumbprint:
            # single non-wildcard host: bind it directly
            if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
                return _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
                                                   webapp_cert.host_names[0], ssl_type,
                                                   certificate_thumbprint, slot)
            # wildcard or multi-host certificate: bind every app host name it covers
            query_result = list_hostnames(cmd, resource_group_name, name, slot)
            hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
            to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
            for h in to_update:
                _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
                                            h, ssl_type, certificate_thumbprint, slot)
            return show_webapp(cmd, resource_group_name, name, slot)
    raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind an uploaded certificate to the app using SNI or IP-based SSL."""
    state = SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, state, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    """Disable the SSL binding of the certificate with the given thumbprint."""
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
                               SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# help class handles runtime stack in format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):
    """Lazily loads the runtime stacks available for Windows or Linux web apps
    and resolves a display name (e.g. 'node|6.1') to its configuration."""
    def __init__(self, client, linux=False):
        self._client = client
        self._linux = linux
        self._stacks = []  # populated on first use by _load_stacks()
    def resolve(self, display_name):
        """Return the stack dict whose displayName matches (case-insensitive), or None."""
        self._load_stacks()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)
    @property
    def stacks(self):
        """All known stacks, loaded on first access."""
        self._load_stacks()
        return self._stacks
    @staticmethod
    def update_site_config(stack, site_config):
        """Apply the stack's settings as attributes of *site_config*."""
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config
    @staticmethod
    def update_site_appsettings(stack, site_config):
        """Apply the stack's settings as app settings (used for node stacks)."""
        if site_config.app_settings is None:
            site_config.app_settings = []
        site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
        return site_config
    def _load_stacks(self):
        """Fetch the available stacks from the ARM provider endpoint and cache them."""
        if self._stacks:
            return
        os_type = ('Linux' if self._linux else 'Windows')
        raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
        bytes_value = raw_stacks._get_next().content  # pylint: disable=protected-access
        json_value = bytes_value.decode('utf8')
        json_stacks = json.loads(json_value)
        stacks = json_stacks['value']
        result = []
        if self._linux:
            # Linux: one entry per major version, preferring the default minor version
            for properties in [(s['properties']) for s in stacks]:
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': (default_minor['runtimeVersion']
                                        if default_minor else major['runtimeVersion'])
                    })
        else:  # Windows stacks
            # maps a stack name to the site-config field (or app setting) that selects it
            config_mappings = {
                'node': 'WEBSITE_NODE_DEFAULT_VERSION',
                'python': 'python_version',
                'php': 'php_version',
                'aspnet': 'net_framework_version'
            }
            # get all stack version except 'java'
            for stack in stacks:
                if stack['name'] not in config_mappings:
                    continue
                name, properties = stack['name'], stack['properties']
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': name + '|' + major['displayVersion'],
                        'configs': {
                            config_mappings[name]: (default_minor['runtimeVersion']
                                                    if default_minor else major['runtimeVersion'])
                        }
                    })
            # deal with java, which pairs with java container version
            java_stack = next((s for s in stacks if s['name'] == 'java'))
            java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
            for java_version in java_stack['properties']['majorVersions']:
                for fx in java_container_stack['properties']['frameworks']:
                    for fx_version in fx['majorVersions']:
                        result.append({
                            'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                                  fx['display'],
                                                                  fx_version['displayVersion']),
                            'configs': {
                                'java_version': java_version['runtimeVersion'],
                                'java_container': fx['name'],
                                'java_container_version': fx_version['runtimeVersion']
                            }
                        })
        # node stacks are applied through app settings; everything else via site config
        for r in result:
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
    """Return the instrumentation key of an Application Insights component."""
    appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    component = appinsights_client.components.get(resource_group, name)
    if component is None or component.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
    return component.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
                                        number_of_workers=None, max_burst=None, location=None, tags=None):
    """Create an App Service plan suitable for hosting function apps."""
    sku = _normalize_sku(sku)
    tier = get_sku_name(sku)
    if max_burst is not None:
        # bursting beyond the minimum only exists on Elastic Premium
        if tier.lower() != "elasticpremium":
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
                                                       number_of_workers, min_val=0, max_val=20)
    client = web_client_factory(cmd.cli_ctx)
    plan_location = location or _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location=plan_location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
                              hyper_v=None, name=name)
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(plan_info):
    """Return True when *plan_info* is an App Service plan on the consumption (Dynamic) tier."""
    if not isinstance(plan_info, AppServicePlan):
        return False
    sku = plan_info.sku
    return isinstance(sku, SkuDescription) and sku.tier.lower() == 'dynamic'
def is_plan_elastic_premium(plan_info):
    """Return True when *plan_info* is an App Service plan on the Elastic Premium tier."""
    if isinstance(plan_info, AppServicePlan):
        if isinstance(plan_info.sku, SkuDescription):
            # consistency fix: compare case-insensitively, matching is_plan_consumption
            # and the 'elasticpremium' check in create_functionapp_app_service_plan
            return plan_info.sku.tier.lower() == 'elasticpremium'
    return False
def validate_and_convert_to_int(flag, val):
    """Convert *val* to int, raising a CLIError that names *flag* on failure."""
    try:
        return int(val)
    except ValueError:
        raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
    """Parse *value* as int and ensure it lies within [min_val, max_val]; return the int."""
    value = validate_and_convert_to_int(flag_name, value)
    if not min_val <= value <= max_val:
        raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
                                                                                                max_val))
    return value
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
                    os_type=None, runtime=None, consumption_plan_location=None,
                    app_insights=None, app_insights_key=None, deployment_source_url=None,
                    deployment_source_branch='master', deployment_local_git=None,
                    deployment_container_image_name=None, tags=None):
    # pylint: disable=too-many-statements, too-many-branches
    """Create a function app on a consumption plan or an existing App Service plan.

    Exactly one of --plan / --consumption-plan-location must be supplied, and
    --deployment-source-url and --deployment-local-git are mutually exclusive.
    Returns the created site object.

    Fix versus the original: the Linux-consumption warning message concatenated
    its string literals without separating spaces, producing
    "successfullycreated" and "usingAzure Portal" in the output.
    """
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    if bool(plan) == bool(consumption_plan_location):
        raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")

    site_config = SiteConfig(app_settings=[])
    functionapp_def = Site(location=None, site_config=site_config, tags=tags)
    client = web_client_factory(cmd.cli_ctx)
    plan_info = None

    if consumption_plan_location:
        locations = list_consumption_locations(cmd)
        location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
        # if os_type is None, the os type is windows
        is_linux = os_type and os_type.lower() == 'linux'
    else:  # apps with SKU based plan
        if is_valid_resource_id(plan):
            parse_result = parse_resource_id(plan)
            plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
        else:
            plan_info = client.app_service_plans.get(resource_group_name, plan)
        if not plan_info:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        location = plan_info.location
        is_linux = plan_info.reserved
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location

    if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise CLIError(
            "usage error: --runtime RUNTIME required for linux functions apps without custom image.")

    if runtime:
        if is_linux and runtime not in LINUX_RUNTIMES:
            raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}."
                           .format(', '.join(LINUX_RUNTIMES)))
        elif not is_linux and runtime not in WINDOWS_RUNTIMES:
            raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}."
                           .format(', '.join(WINDOWS_RUNTIMES)))
        site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))

    con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)

    if is_linux:
        functionapp_def.kind = 'functionapp,linux'
        functionapp_def.reserved = True
        if consumption_plan_location:
            site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
        else:
            site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                functionapp_def.kind = 'functionapp,linux,container'
                site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
                                                              value=deployment_container_image_name))
                site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='false'))
                site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            else:
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='true'))
                if runtime.lower() not in RUNTIME_TO_IMAGE:
                    raise CLIError("An appropriate linux image for runtime:'{}' was not found".format(runtime))
                site_config.linux_fx_version = _format_fx_version(RUNTIME_TO_IMAGE[runtime.lower()])
    else:
        functionapp_def.kind = 'functionapp'
        site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))

    # adding appsetting to site to make it a function
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
    site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='10.14.1'))

    # If plan is not consumption or elastic premium, we need to set always on
    if consumption_plan_location is None and not is_plan_elastic_premium(plan_info):
        site_config.always_on = True

    # If plan is elastic premium or windows consumption, we need these app settings
    is_windows_consumption = consumption_plan_location is not None and not is_linux
    if is_plan_elastic_premium(plan_info) or is_windows_consumption:
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      value=con_string))
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))

    if app_insights_key is not None:
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=app_insights_key))
    elif app_insights is not None:
        instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=instrumentation_key))

    poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation(cmd.cli_ctx)(poller)

    if consumption_plan_location and is_linux:
        # BUG FIX: the original literals lacked separating spaces, yielding
        # "successfullycreated" / "usingAzure Portal" in the warning text.
        logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
                       "created but is not active until content is published using "
                       "Azure Portal or the Functions Core Tools.", name)
    else:
        _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
                                 deployment_source_branch, deployment_local_git)

    return functionapp
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Wire the web app to a remote git repository and/or enable local-git deploy.

    A failure to link the remote repository is reported as a warning, not an
    error, so app creation still succeeds.
    """
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as ex:  # pylint: disable=broad-except
            ex = ex_handler_factory(no_throw=True)(ex)
            logger.warning("Link to git repository failed due to error '%s'", ex)

    if deployment_local_git:
        local_git_info = enable_local_git(cmd, resource_group_name, name)
        logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
        webapp.deploymentLocalGitUrl = local_git_info['url']
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
    """Validate a storage account for function-app use and build its connection string.

    The account must expose blob, queue and table endpoints and use an allowed
    SKU. Raises CLIError listing every violation found.

    Fix versus the original: each missing endpoint overwrote the previous error
    message (plain ``=`` in the loop) and the SKU message was appended with no
    separator; violations are now accumulated and joined.
    """
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        sa_resource_group = parse_resource_id(storage_account)['resource_group']
        storage_account = parse_resource_id(storage_account)['name']
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    errors = []
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name
    allowed_storage_types = ['Standard_GRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']

    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            errors.append("Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e))  # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        errors.append('Storage type {} is not allowed'.format(sku))
    if errors:
        raise CLIError('. '.join(errors))

    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions have a slightly different structure
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member

    endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
    connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        endpoint_suffix,
        storage_account,
        keys[0])  # pylint: disable=no-member
    return connection_string
def list_consumption_locations(cmd):
    """Return normalized names of regions that support dynamic (consumption) hosting."""
    regions = web_client_factory(cmd.cli_ctx).list_geo_regions(sku='Dynamic')
    return [{'name': region.name.lower().replace(' ', '')} for region in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
    """List the regions where plans of the given SKU tier can be created."""
    client = web_client_factory(cmd.cli_ctx)
    return client.list_geo_regions(get_sku_name(sku), linux_workers_enabled)
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
    """Poll the Kudu deployment-status endpoint until the zip deploy finishes.

    Kudu status codes observed here: 3 = failed, 4 = succeeded. Raises CLIError
    on failure or when the polling budget runs out; returns the final status
    dict on success.
    """
    import requests
    # default budget: 450 trials; with a timeout, one trial per ~2 seconds
    total_trials = (int(timeout) // 2) if timeout else 450
    num_trials = 0
    while num_trials < total_trials:
        time.sleep(2)
        response = requests.get(deployment_status_url, headers=authorization)
        # NOTE(review): a second sleep per iteration looks accidental (doubles
        # the effective poll interval) — confirm the intended cadence.
        time.sleep(2)
        res_dict = response.json()
        num_trials = num_trials + 1
        if res_dict.get('status', 0) == 3:
            _configure_default_logging(cmd, rg_name, name)
            raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log tail
                           -n {} -g {}""".format(res_dict, name, rg_name))
        elif res_dict.get('status', 0) == 4:
            break
        if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # show only in debug mode, customers seem to find this confusing
    # if the deployment is taking longer than expected
    if res_dict.get('status', 0) != 4:
        _configure_default_logging(cmd, rg_name, name)
        raise CLIError("""Timeout reached by the command, however, the deployment operation
                       is still on-going. Navigate to your scm site to check the deployment status""")
    return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
    """List all continuous webjobs of a site (or one of its deployment slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Start a continuous webjob and return its refreshed description."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
        return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
    web_apps.start_continuous_web_job(resource_group_name, name, webjob_name, slot)
    return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Stop a continuous webjob and return its refreshed description."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
        return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
    web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name, slot)
    return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a continuous webjob from a site (or one of its slots)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name, slot)
    return web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
    """List all triggered webjobs of a site (or one of its deployment slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Trigger a webjob run and return its refreshed description."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
        return web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
    web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered webjob from a site (or one of its slots)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name, slot)
    return web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Return the run history of a triggered webjob."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None,  # pylint: disable=too-many-statements, too-many-branches
              location=None, sku=None, dryrun=False, logs=False, launch_browser=False):
    """Create (if needed) a resource group, plan and web app for the code in the
    current directory, then zip-deploy it.

    Returns a dict describing what was created (plus ``app_url``), or a
    streaming-log handle when ``logs`` is requested.

    Fixes versus the original: the redundant ``False if ... else True`` form was
    replaced for ``do_deployment``/``is_linux`` and a no-op ``sku = sku``
    assignment was dropped; behavior is unchanged.
    """
    import os
    from azure.cli.core._profile import Profile
    client = web_client_factory(cmd.cli_ctx)
    # the code to deploy is expected to be the current directory the command is running from
    src_dir = os.getcwd()
    user = Profile().get_current_account_user()
    user = user.split('@', 1)[0]
    if len(user.split('#', 1)) > 1:  # on cloudShell user is in format live.com#user@domain.com
        user = user.split('#', 1)[1]
    logger.info("UserPrefix to use '%s'", user)
    # if dir is empty, show a message in dry run
    do_deployment = bool(os.listdir(src_dir))
    _create_new_rg = True
    _create_new_asp = True
    _create_new_app = True
    _set_build_app_setting = False

    # determine the details for app to be created from src contents
    lang_details = get_lang_from_content(src_dir)
    # we support E2E create and deploy for selected stacks, any other stack, set defaults for os & runtime
    # and skip deployment
    language = lang_details.get('language')
    if not language:
        do_deployment = False
        sku = sku or 'F1'
        os_val = OS_DEFAULT
        detected_version = '-'
        runtime_version = '-'
    else:
        # update SKU to user set value
        if sku is None:
            sku = lang_details.get("default_sku")
        else:
            logger.info("Found sku argument, skipping use default sku")
        is_skip_build = language.lower() == STATIC_RUNTIME_NAME
        os_val = "Linux" if language.lower() == NODE_RUNTIME_NAME \
            or language.lower() == PYTHON_RUNTIME_NAME else OS_DEFAULT
        # detect the version
        data = get_runtime_version_details(lang_details.get('file_loc'), language)
        version_used_create = data.get('to_create')
        detected_version = data.get('detected')
        runtime_version = "{}|{}".format(language, version_used_create) if \
            version_used_create != "-" else version_used_create

    full_sku = get_sku_name(sku)
    location = set_location(cmd, sku, location)
    loc_name = location.replace(" ", "").lower()
    is_linux = os_val == 'Linux'

    if resource_group_name is None:
        logger.info('Using default ResourceGroup value')
        rg_name = "{}_rg_{}_{}".format(user, os_val, loc_name)
    else:
        logger.info("Found user input for ResourceGroup %s", resource_group_name)
        rg_name = resource_group_name

    if plan is None:
        logger.info('Using default appserviceplan value')
        asp = "{}_asp_{}_{}_0".format(user, os_val, loc_name)
        _asp_generic = asp[:-len(asp.split("_")[4])]  # used to determine if a new ASP needs to be created
    else:
        asp = plan
        _asp_generic = asp

    _create_new_rg = should_create_new_rg(cmd, rg_name, is_linux)
    logger.info("Should create new RG %s", _create_new_rg)
    src_path = "{}".format(src_dir.replace("\\", "\\\\"))
    rg_str = "{}".format(rg_name)

    dry_run_str = r""" {
            "name" : "%s",
            "appserviceplan" : "%s",
            "resourcegroup" : "%s",
            "sku": "%s",
            "os": "%s",
            "location" : "%s",
            "src_path" : "%s",
            "version_detected": "%s",
            "runtime_version": "%s"
            }
            """ % (name, asp, rg_str, full_sku, os_val, location, src_path,
                   detected_version, runtime_version)
    create_json = json.loads(dry_run_str)

    if dryrun:
        logger.warning("Web app will be created with the below configuration,re-run command "
                       "without the --dryrun flag to create & deploy a new app")
        return create_json

    # create RG if the RG doesn't already exist
    if _create_new_rg:
        logger.warning("Creating Resource group '%s' ...", rg_name)
        create_resource_group(cmd, rg_name, location)
        logger.warning("Resource group creation complete")
        _create_new_asp = True
    else:
        logger.warning("Resource group '%s' already exists.", rg_name)
        # get all asp in the RG
        logger.warning("Verifying if the plan with the same sku exists or should create a new plan")
        data = (list(filter(lambda x: _asp_generic in x.name,
                            client.app_service_plans.list_by_resource_group(rg_name))))
        data_sorted = (sorted(data, key=lambda x: x.name))
        num_asps = len(data)
        # check if any of these matches the SKU & location to be used
        # and get FirstOrDefault
        selected_asp = next((a for a in data if isinstance(a.sku, SkuDescription) and
                             a.sku.tier.lower() == full_sku.lower() and
                             (a.location.replace(" ", "").lower() == location.lower() or a.location == location)), None)
        if selected_asp is not None:
            asp = selected_asp.name
            _create_new_asp = False
        elif selected_asp is None and num_asps > 0:
            # from the sorted data pick the last one & check if a new ASP needs to be created
            # based on SKU or not
            _plan_info = data_sorted[num_asps - 1]
            if plan is None:
                _asp_num = int(_plan_info.name.split('_')[4]) + 1
                asp = "{}_asp_{}_{}_{}".format(user, os_val, loc_name, _asp_num)
            else:
                asp = plan

    # create new ASP if an existing one cannot be used
    if _create_new_asp:
        logger.warning("Creating App service plan '%s' ...", asp)
        create_app_service_plan(cmd, rg_name, asp, is_linux, None, sku, 1 if is_linux else None, location)
        logger.warning("App service plan creation complete")
        create_json['appserviceplan'] = asp
        _create_new_app = True
        _show_too_many_apps_warn = False
    else:
        logger.warning("App service plan '%s' already exists.", asp)
        _show_too_many_apps_warn = get_num_apps_in_asp(cmd, rg_name, asp) > 5
        _create_new_app = should_create_new_app(cmd, rg_name, name)

    # create the app
    if _create_new_app:
        logger.warning("Creating app '%s' ...", name)
        create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None, tags={"cli": 'webapp_up'})
        logger.warning("Webapp creation complete")
        create_json['name'] = name
        _set_build_app_setting = True
        # Update appSettings for netcore apps
        if language == 'dotnetcore':
            update_app_settings(cmd, rg_name, name, ['ANCM_ADDITIONAL_ERROR_PAGE_LINK=' +
                                                     'https://{}.scm.azurewebsites.net/detectors'.format(name)])
        # Configure default logging
        _configure_default_logging(cmd, rg_name, name)
        if _show_too_many_apps_warn:
            logger.warning("There are sites that have been deployed to the same hosting "
                           "VM of this region, to prevent performance impact please "
                           "delete existing site(s) or switch to a different default resource group "
                           "using 'az configure' command")
    else:
        logger.warning("App '%s' already exists", name)
        # for an existing app check if the runtime version needs to be updated
        # Get site config to check the runtime version
        site_config = client.web_apps.get_configuration(rg_name, name)
        if os_val == 'Linux' and site_config.linux_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.linux_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
        elif os_val == 'Windows' and site_config.windows_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.windows_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
        create_json['runtime_version'] = runtime_version

    # is_skip_build is only assigned when a language was detected; when it was
    # not, do_deployment is False and short-circuits this check
    if do_deployment and not is_skip_build:
        _set_build_app_setting = True
        # app settings causes an app recycle so we avoid if not needed
        application_settings = client.web_apps.list_application_settings(rg_name, name)
        _app_settings = application_settings.properties
        for key, value in _app_settings.items():
            if key.upper() == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
                is_skip_build = value.upper() == "FALSE"
                # if the value is already set just honor it
                _set_build_app_setting = False
                break

    # update create_json to include the app_url
    if _set_build_app_setting:
        # setting to build after deployment
        logger.warning("Updating app settings to enable build after deployment")
        update_app_settings(cmd, rg_name, name, ["SCM_DO_BUILD_DURING_DEPLOYMENT=true"])
        # wait for all the settings to completed
        time.sleep(30)

    if do_deployment:
        logger.warning("Creating zip with contents of dir %s ...", src_dir)
        # zip contents & deploy
        zip_file_path = zip_contents_from_dir(src_dir, language)
        logger.warning("Preparing to deploy %s contents to app.", '' if is_skip_build else 'and build')
        enable_zip_deploy(cmd, rg_name, name, zip_file_path)
        # Remove the file after deployment, handling exception if user removed the file manually
        try:
            os.remove(zip_file_path)
        except OSError:
            pass

    logger.warning("All done.")
    with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
        cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
        cmd.cli_ctx.config.set_value('defaults', 'sku', full_sku)
        cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', asp)
        cmd.cli_ctx.config.set_value('defaults', 'location', location)
        cmd.cli_ctx.config.set_value('defaults', 'web', name)

    if launch_browser:
        logger.warning("Launching app using default browser")
        view_in_browser(cmd, rg_name, name, None, logs)
    else:
        _url = _get_url(cmd, rg_name, name)
        logger.warning("You can launch the app at %s", _url)
        create_json.update({'app_url': _url})
    if logs:
        _configure_default_logging(cmd, rg_name, name)
        return get_streaming_log(cmd, rg_name, name)
    return create_json
def _ping_scm_site(cmd, resource_group, name):
    """Warm up the Kudu (SCM) site by issuing an authenticated GET against it."""
    import requests
    import urllib3
    from azure.cli.core.util import should_disable_connection_verify

    # work around until the timeout limits issue for linux is investigated & fixed
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    scm_url = _get_scm_url(cmd, resource_group, name)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
    requests.get(scm_url + '/api/settings', headers=headers,
                 verify=not should_disable_connection_verify())
def is_webapp_up(tunnel_server):
    """Ask the tunnel server whether the remote web app is currently reachable."""
    return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None):
    """Build a TunnelServer for a Linux web app and wait until the app answers."""
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if not webapp.reserved:
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")

    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    user_name = next(p['userName'] for p in profiles)
    user_password = next(p['userPWD'] for p in profiles)

    if port is None:
        logger.info('No port defined, creating on random free port')
        port = 0  # TunnelServer will auto-select a free port from 1024-65535

    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    tunnel_server = TunnelServer('', port, scm_url, user_name, user_password)
    _ping_scm_site(cmd, resource_group_name, name)
    _wait_for_webapp(tunnel_server)
    return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
    """Open a local TCP tunnel to the app's container and keep it alive.

    Runs the tunnel in a daemon thread; blocks for *timeout* seconds when
    given, otherwise until the tunnel thread exits (Ctrl+C).

    Fix versus the original: ``Thread.isAlive()`` was removed in Python 3.9 —
    use ``is_alive()``.
    """
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()
    logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
    else:
        ssh_user_name = 'root'
        ssh_user_password = 'Docker!'
        logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
    logger.warning('Ctrl + C to close')
    if timeout:
        time.sleep(int(timeout))
    else:
        while t.is_alive():
            time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
    """Open a tunnel to the app's container and attach an SSH session over it.

    Both the tunnel and the SSH session run in daemon threads; blocks for
    *timeout* seconds when given, otherwise while both threads are alive.

    Fix versus the original: ``Thread.isAlive()`` was removed in Python 3.9 —
    use ``is_alive()``.
    """
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()

    ssh_user_name = 'root'
    ssh_user_password = 'Docker!'

    s = threading.Thread(target=_start_ssh_session,
                         args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
    s.daemon = True
    s.start()

    if timeout:
        time.sleep(int(timeout))
    else:
        while s.is_alive() and t.is_alive():
            time.sleep(5)
def _wait_for_webapp(tunnel_server):
    """Poll the tunnel server once per second until the web app responds.

    Raises CLIError after ~60 unsuccessful attempts.
    """
    attempts = 0
    while not is_webapp_up(tunnel_server):
        if attempts == 0:
            logger.warning('Connection is not ready yet, please wait')
        if attempts == 60:
            raise CLIError("Timeout Error, Unable to establish a connection")
        attempts += 1
        logger.warning('.')
        time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
    """Open an interactive SSH login shell over the local tunnel endpoint.

    Retries the connection once per second (up to ~60 tries) until the SSH
    server behind the tunnel answers, then prints the MOTD and attaches a
    login shell; the connection is always closed on exit.
    """
    tries = 0
    while True:
        try:
            c = Connection(host=hostname,
                           port=port,
                           user=username,
                           # connect_timeout=60*10,
                           connect_kwargs={"password": password})
            break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            if tries == 0:
                logger.warning('Connection is not ready yet, please wait')
            if tries == 60:
                raise CLIError("Timeout Error, Unable to establish a connection")
            tries = tries + 1
            logger.warning('.')
            time.sleep(1)
    try:
        c.run('cat /etc/motd', pty=True)
        # replace the raw channel with a proper login shell
        c.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex:  # pylint: disable=broad-except
        logger.info(ex)
    finally:
        c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None):  # pylint: disable=too-many-statements
    """Open an interactive SSH session into a Linux web app container.

    Refuses to run when remote debugging is enabled or on Windows clients.
    """
    import platform

    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        raise CLIError('remote debugging is enabled, please disable')
    if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on linux and mac')
    create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout)
def create_devops_build(
        cmd,
        functionapp_name=None,
        organization_name=None,
        project_name=None,
        repository_name=None,
        overwrite_yaml=None,
        allow_force_push=None,
        github_pat=None,
        github_repository=None
):
    """Interactively provision an Azure DevOps build pipeline for a function app."""
    from .azure_devops_build_interactive import AzureDevopsBuildInteractive
    interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
                                              organization_name, project_name, repository_name,
                                              overwrite_yaml, allow_force_push,
                                              github_pat, github_repository)
    return interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
    """Enable filesystem application, web-server and container logging for the app."""
    logger.warning("Configuring default logging for the app, if not already enabled")
    return config_diagnostics(cmd, rg_name, name,
                              application_logging=True,
                              web_server_logging='filesystem',
                              docker_container_logging='true')
|
trex_subscriber.py | #!/router/bin/python
import json
import threading
import time
import datetime
import zmq
import re
import random
import os
import signal
import traceback
import sys
from .trex_types import RC_OK, RC_ERR
#from .trex_stats import *
from ..utils.text_opts import format_num
from ..utils.zipmsg import ZippedMsg
# basic async stats class
class CTRexAsyncStats(object):
    """Holds the latest async stats snapshot and a baseline for relative reads.

    Fix versus the original: ``self.ref_point == None`` replaced with the
    ``is None`` identity test (PEP 8; ``==`` can be hijacked by ``__eq__``).
    """

    def __init__ (self):
        self.ref_point = None                       # baseline snapshot for get_rel()
        self.current = {}                           # most recent snapshot
        self.last_update_ts = datetime.datetime.now()

    def update (self, snapshot):
        """Store a new snapshot; the first snapshot also becomes the baseline."""
        self.last_update_ts = datetime.datetime.now()
        self.current = snapshot

        if self.ref_point is None:
            self.ref_point = self.current

    def clear(self):
        """Re-baseline: relative reads are now measured from the current snapshot."""
        self.ref_point = self.current

    def get(self, field, format=False, suffix=""):
        """Return the absolute value of *field*, or "N/A" when absent."""
        if field not in self.current:
            return "N/A"

        if not format:
            return self.current[field]
        else:
            return format_num(self.current[field], suffix)

    def get_rel (self, field, format=False, suffix=""):
        """Return *field* relative to the baseline, or "N/A" when absent."""
        if field not in self.current:
            return "N/A"

        if not format:
            return (self.current[field] - self.ref_point[field])
        else:
            return format_num(self.current[field] - self.ref_point[field], suffix)

    # return true if new data has arrived in the past 2 seconds
    def is_online (self):
        delta_ms = (datetime.datetime.now() - self.last_update_ts).total_seconds() * 1000
        return (delta_ms < 2000)
# describes the general stats provided by TRex
class CTRexAsyncStatsGeneral(CTRexAsyncStats):
    """Global (non-port) TRex statistics; behavior comes from the base class."""

    def __init__ (self):
        super(CTRexAsyncStatsGeneral, self).__init__()
# per port stats
class CTRexAsyncStatsPort(CTRexAsyncStats):
    """Per-port TRex statistics."""

    def __init__ (self):
        super(CTRexAsyncStatsPort, self).__init__()

    def get_stream_stats (self, stream_id):
        """Per-stream statistics are not tracked; always returns None."""
        return None
# stats manager
class CTRexAsyncStatsManager():
    """Routes an incoming TRex stats snapshot into general and per-port buckets.

    Fix versus the original: the port-matching pattern was a non-raw string
    containing the invalid escape sequence ``\\-`` (a DeprecationWarning that
    becomes an error in future Python versions); it is now a raw string, and
    the unnecessary backslash before ``-`` is gone.
    """

    def __init__ (self):
        self.general_stats = CTRexAsyncStatsGeneral()
        self.port_stats = {}    # port-id (str) -> CTRexAsyncStatsPort

    def get_general_stats(self):
        return self.general_stats

    def get_port_stats (self, port_id):
        """Return stats for *port_id*, or None when the port was never seen."""
        if str(port_id) not in self.port_stats:
            return None

        return self.port_stats[str(port_id)]

    def update(self, data):
        self.__handle_snapshot(data)

    def __handle_snapshot(self, snapshot):
        general_stats = {}
        port_stats = {}

        # filter the values per port and general
        for key, value in snapshot.items():
            # keys of the form "<field>-<port-digit>" belong to a port
            m = re.search(r'(.*)-([0-8])', key)
            if m:
                port_id = m.group(2)
                field_name = m.group(1)

                if port_id not in port_stats:
                    port_stats[port_id] = {}

                port_stats[port_id][field_name] = value
            else:
                # no port match - general stats
                general_stats[key] = value

        # update the general object with the snapshot
        self.general_stats.update(general_stats)

        # update all ports
        for port_id, data in port_stats.items():
            if port_id not in self.port_stats:
                self.port_stats[port_id] = CTRexAsyncStatsPort()

            self.port_stats[port_id].update(data)
class ServerEventsIDs(object):
    """
    server event IDs
    (in sync with the server IDs)

    Numeric values must stay in sync with the TRex server; do not renumber.
    """

    # per-port events (IDs 0-8)
    EVENT_PORT_STARTED = 0
    EVENT_PORT_STOPPED = 1
    EVENT_PORT_PAUSED = 2
    EVENT_PORT_RESUMED = 3
    EVENT_PORT_JOB_DONE = 4
    EVENT_PORT_ACQUIRED = 5
    EVENT_PORT_RELEASED = 6
    EVENT_PORT_ERROR = 7
    EVENT_PORT_ATTR_CHG = 8

    # per-profile events (IDs 10-17)
    EVENT_PROFILE_STARTED = 10
    EVENT_PROFILE_STOPPED = 11
    EVENT_PROFILE_PAUSED = 12
    EVENT_PROFILE_RESUMED = 13
    EVENT_PROFILE_FINISHED_TX = 14
    EVENT_PROFILE_ERROR = 17

    # ASTF-mode state-change events
    EVENT_ASTF_STATE_CHG = 50
    EVENT_ASTF_PROFILE_STATE_CHG = 60
    EVENT_ASTF_PROFILE_CLEARED = 61

    # server-wide events
    EVENT_SERVER_STOPPED = 100
class TRexSubscriber():
THREAD_STATE_ACTIVE = 1
THREAD_STATE_ZOMBIE = 2
THREAD_STATE_DEAD = 3
    def __init__ (self, ctx, rpc):
        """Bind the subscriber to a client context and its RPC channel."""
        self.ctx = ctx

        self.port = ctx.async_port          # server-side ZMQ PUB port
        self.server = ctx.server
        self.rpc = rpc

        self.event_handler = ctx.event_handler

        self.raw_snapshot = {}              # last raw payload per message name

        self.stats = CTRexAsyncStatsManager()

        self.last_data_recv_ts = 0
        self.async_barrier = None

        self.monitor = AsyncUtil()

        self.connected = False

        self.zipped = ZippedMsg()

        self.t_state = self.THREAD_STATE_DEAD

        # receive timeout: honor the configured async timeout, but never be
        # shorter than the sync timeout
        self.timeout_sec = 5
        if self.ctx.async_timeout:
            self.timeout_sec = max(self.ctx.async_timeout,self.ctx.sync_timeout)
# connects the async channel
def connect (self):
if self.connected:
self.disconnect()
self.tr = "tcp://{0}:{1}".format(self.server, self.port)
# Socket to talk to server
self.context = zmq.Context()
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt(zmq.HEARTBEAT_IVL, 5000)
self.socket.setsockopt(zmq.HEARTBEAT_TIMEOUT, 60000)
self.socket.setsockopt(zmq.RECONNECT_IVL, 20)
self.socket.setsockopt(zmq.RECONNECT_IVL_MAX, 500)
# before running the thread - mark as active
self.t_state = self.THREAD_STATE_ACTIVE
self.t = threading.Thread(target = self._run_safe)
# kill this thread on exit and don't add it to the join list
self.t.setDaemon(True)
self.t.start()
self.connected = True
# first barrier - make sure async thread is up
rc = self.barrier()
if not rc:
self.disconnect()
return rc
return RC_OK()
# disconnect
def disconnect (self):
if not self.connected:
return
# mark for join
self.t_state = self.THREAD_STATE_DEAD
self.context.term()
self.t.join()
# done
self.connected = False
# set the thread as a zombie (in case of server death)
def set_as_zombie (self):
self.last_data_recv_ts = None
self.t_state = self.THREAD_STATE_ZOMBIE
# return the timeout in seconds for the ZMQ subscriber thread
def get_timeout_sec(self):
return self.timeout_sec
def get_timeout_msec(self):
return int(self.get_timeout_sec() * 1000)
def set_timeout_sec(self, timeout_sec):
self.timeout_sec = timeout_sec
def _run_safe (self):
# socket must be created on the same thread
self.socket.setsockopt(zmq.SUBSCRIBE, b'')
self.socket.setsockopt(zmq.RCVTIMEO, self.get_timeout_msec())
self.socket.connect(self.tr)
try:
self._run()
except Exception as e:
self.ctx.event_handler.on_event("subscriber crashed", e)
finally:
# closing of socket must be from the same thread
self.socket.close(linger = 0)
# thread function
def _run (self):
got_data = False
self.monitor.reset()
while self.t_state != self.THREAD_STATE_DEAD:
try:
with self.monitor:
line = self.socket.recv()
# last data recv.
self.last_data_recv_ts = time.time()
# if thread was marked as zombie - it does nothing besides fetching messages
if self.t_state == self.THREAD_STATE_ZOMBIE:
continue
self.monitor.on_recv_msg(line)
# try to decomrpess
unzipped = self.zipped.decompress(line)
if unzipped:
line = unzipped
line = line.decode()
# signal once
if not got_data:
self.ctx.event_handler.on_event("subscriber resumed")
got_data = True
# got a timeout - mark as not alive and retry
except zmq.Again:
# signal once
if got_data:
self.ctx.event_handler.on_event("subscriber timeout", self.get_timeout_sec())
got_data = False
continue
except zmq.ContextTerminated:
# outside thread signaled us to exit
assert(self.t_state != self.THREAD_STATE_ACTIVE)
break
msg = json.loads(line)
name = msg['name']
data = msg['data']
msg_type = msg['type']
baseline = msg.get('baseline', False)
self.raw_snapshot[name] = data
self.__dispatch(name, msg_type, data, baseline)
def get_stats (self):
return self.stats
def get_raw_snapshot (self):
return self.raw_snapshot
# dispatch the message to the right place
def __dispatch (self, name, type, data, baseline):
# stats
if name == "trex-global":
self.handle_global_stats_update(data, baseline)
elif name == "flow_stats":
self.handle_flow_stats_update(data, baseline)
elif name == "latency_stats":
self.handle_latency_stats_update(data, baseline)
# events
elif name == "trex-event":
self.handle_event(type, data)
# barriers
elif name == "trex-barrier":
self.handle_async_barrier(type, data)
else:
pass
def handle_global_stats_update (self, data, baseline):
self.ctx.event_handler.on_event("global stats update", data, baseline)
def handle_flow_stats_update (self, data, baseline):
self.ctx.event_handler.on_event("flow stats update", data, baseline)
def handle_flow_stats_update (self, data, baseline):
self.ctx.event_handler.on_event("latency stats update", data, baseline)
def handle_event (self, event_id, data):
if event_id == ServerEventsIDs.EVENT_PORT_STARTED:
port_id = int(data['port_id'])
self.ctx.event_handler.on_event("port started", port_id)
# port stopped
elif event_id == ServerEventsIDs.EVENT_PORT_STOPPED:
port_id = int(data['port_id'])
self.ctx.event_handler.on_event("port stopped", port_id)
# port paused
elif event_id == ServerEventsIDs.EVENT_PORT_PAUSED:
port_id = int(data['port_id'])
self.ctx.event_handler.on_event("port paused", port_id)
# port resumed
elif event_id == ServerEventsIDs.EVENT_PORT_RESUMED:
port_id = int(data['port_id'])
self.ctx.event_handler.on_event("port resumed", port_id)
# port finished traffic
elif event_id == ServerEventsIDs.EVENT_PORT_JOB_DONE:
port_id = int(data['port_id'])
self.ctx.event_handler.on_event("port job done", port_id)
# port was acquired - maybe stolen...
elif event_id == ServerEventsIDs.EVENT_PORT_ACQUIRED:
session_id = data['session_id']
port_id = int(data['port_id'])
who = data['who']
force = data['force']
self.ctx.event_handler.on_event("port acquired", port_id, who, session_id, force)
# port was released
elif event_id == ServerEventsIDs.EVENT_PORT_RELEASED:
port_id = int(data['port_id'])
who = data['who']
session_id = data['session_id']
self.ctx.event_handler.on_event("port released", port_id, who, session_id)
# port error
elif event_id == ServerEventsIDs.EVENT_PORT_ERROR:
port_id = int(data['port_id'])
self.ctx.event_handler.on_event("port error", port_id)
# port attr changed
elif event_id == ServerEventsIDs.EVENT_PORT_ATTR_CHG:
port_id = int(data['port_id'])
attr = data['attr']
self.ctx.event_handler.on_event("port attr chg", port_id, attr)
# profile started
elif event_id == ServerEventsIDs.EVENT_PROFILE_STARTED:
port_id = int(data['port_id'])
profile_id = str(data['profile_id'])
self.ctx.event_handler.on_event("profile started", port_id, profile_id)
# profile stopeed
elif event_id == ServerEventsIDs.EVENT_PROFILE_STOPPED:
port_id = int(data['port_id'])
profile_id = str(data['profile_id'])
self.ctx.event_handler.on_event("profile stopped", port_id, profile_id)
# profile paused
elif event_id == ServerEventsIDs.EVENT_PROFILE_PAUSED:
port_id = int(data['port_id'])
profile_id = str(data['profile_id'])
self.ctx.event_handler.on_event("profile paused", port_id, profile_id)
# profile resumed
elif event_id == ServerEventsIDs.EVENT_PROFILE_RESUMED:
port_id = int(data['port_id'])
profile_id = str(data['profile_id'])
self.ctx.event_handler.on_event("profile resumed", port_id, profile_id)
# profile finised tx
elif event_id == ServerEventsIDs.EVENT_PROFILE_FINISHED_TX:
port_id = int(data['port_id'])
profile_id = str(data['profile_id'])
self.ctx.event_handler.on_event("profile finished tx", port_id, profile_id)
# profile error
elif event_id == ServerEventsIDs.EVENT_PROFILE_ERROR:
port_id = int(data['port_id'])
profile_id = str(data['profile_id'])
self.ctx.event_handler.on_event("profile error", port_id, profile_id)
# ASTF state changed
elif event_id == ServerEventsIDs.EVENT_ASTF_STATE_CHG:
state = data['state']
error = data.get('error', '')
epoch = data.get('epoch')
self.ctx.event_handler.on_event('astf state changed', state, error, epoch)
# ASTF profile state changed
elif event_id == ServerEventsIDs.EVENT_ASTF_PROFILE_STATE_CHG:
profile_id = data['profile_id']
state = data['state']
error = data.get('error', '')
epoch = data.get('epoch')
self.ctx.event_handler.on_event('astf profile state changed', profile_id, state, error, epoch)
# ASTF profile state changed
elif event_id == ServerEventsIDs.EVENT_ASTF_PROFILE_CLEARED:
profile_id = data['profile_id']
error = data.get('error', '')
epoch = data.get('epoch')
self.ctx.event_handler.on_event('astf profile cleared', profile_id, error, epoch)
# server stopped
elif event_id == ServerEventsIDs.EVENT_SERVER_STOPPED:
cause = data['cause']
self.ctx.event_handler.on_event("server stopped", cause)
# unhandled
else:
print('Unhandled event %d' % event_id)
# async barrier handling routine
def handle_async_barrier (self, type, data):
if self.async_barrier['key'] == type:
self.async_barrier['ack'] = True
# block on barrier for async channel
def barrier(self, timeout = 5, baseline = False):
# set a random key
key = random.getrandbits(32)
self.async_barrier = {'key': key, 'ack': False}
# expr time
expr = time.time() + timeout
while not self.async_barrier['ack']:
# inject
rc = self.rpc.transmit("publish_now", params = {'key' : key, 'baseline': baseline})
if not rc:
return rc
# fast loop
for i in range(0, 100):
if self.async_barrier['ack']:
break
time.sleep(0.001)
if time.time() > expr:
return RC_ERR("*** [subscriber] - timeout - no data flow from server at : " + self.tr)
return RC_OK()
# a class to measure util. of async subscriber thread
class AsyncUtil(object):

    # thread is blocked waiting for data (inside the 'with' block / recv)
    STATE_SLEEP = 1
    # thread is processing a message
    STATE_AWAKE = 2

    def __init__ (self):
        self.reset()

    def reset (self):
        # start awake; callers wrap the blocking recv in 'with self'
        self.state = self.STATE_AWAKE
        self.clock = time.time()

        # reset the current interval
        self.interval = {'ts': time.time(), 'total_sleep': 0, 'total_bits': 0}

        # global counters (exponentially-smoothed, see _tick)
        self.cpu_util = 0
        self.bps = 0

    def on_recv_msg (self, message):
        # account the message size in bits toward the bps estimate
        self.interval['total_bits'] += len(message) * 8.0

        self._tick()

    # context-manager protocol: time spent inside 'with' counts as sleep
    def __enter__ (self):
        assert(self.state == self.STATE_AWAKE)
        self.state = self.STATE_SLEEP
        self.sleep_start_ts = time.time()

    def __exit__(self, exc_type, exc_val, exc_tb):
        assert(self.state == self.STATE_SLEEP)
        self.state = self.STATE_AWAKE

        # measure total sleep time for interval
        self.interval['total_sleep'] += time.time() - self.sleep_start_ts

        self._tick()

    def _tick (self):
        # how much time did the current interval lasted
        ts = time.time() - self.interval['ts']
        # intervals shorter than one second are accumulated, not folded in
        if ts < 1:
            return

        # if tick is in the middle of sleep - add the interval and reset
        if self.state == self.STATE_SLEEP:
            self.interval['total_sleep'] += time.time() - self.sleep_start_ts
            self.sleep_start_ts = time.time()

        # add the interval
        if self.interval['total_sleep'] > 0:
            # calculate: exponential moving average, 0.75 decay per interval
            self.cpu_util = self.cpu_util * 0.75 + (float(ts - self.interval['total_sleep']) / ts) * 0.25
            self.interval['total_sleep'] = 0

        if self.interval['total_bits'] > 0:
            # calculate: same smoothing for bits-per-second
            self.bps = self.bps * 0.75 + ( self.interval['total_bits'] / ts ) * 0.25
            self.interval['total_bits'] = 0

        # reset the interval's clock
        self.interval['ts'] = time.time()

    def get_cpu_util (self):
        # returns percentage (0-100)
        self._tick()
        return (self.cpu_util * 100)

    def get_bps (self):
        self._tick()
        return (self.bps)
|
PC_Miner.py | #!/usr/bin/env python3
"""
Duino-Coin Official PC Miner 2.73 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2021
"""
from time import time, sleep, strptime, ctime
from hashlib import sha1
from socket import socket
from multiprocessing import Lock as thread_lock
from multiprocessing import cpu_count, current_process
from multiprocessing import Process, Manager
from threading import Thread
from datetime import datetime
from random import randint
from os import execl, mkdir, _exit
from subprocess import DEVNULL, Popen, check_call
import pip
import sys
import os
import json
import requests
from pathlib import Path
from re import sub
from random import choice
from platform import machine as osprocessor
from signal import SIGINT, signal
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from configparser import ConfigParser
configparser = ConfigParser()
def handler(signal_received, frame):
    """Handle CTRL+C (SIGINT): print a goodbye note and terminate."""
    # only the main process prints; worker processes exit quietly
    is_main = current_process().name == "MainProcess"
    if is_main:
        farewell = (get_string("sigint_detected")
                    + Style.NORMAL
                    + Fore.RESET
                    + get_string("goodbye"))
        pretty_print(farewell, "warning")
    _exit(0)
def install(package):
    """Install *package* with pip, then restart the miner process."""
    try:
        # older pip releases expose a main() entry point
        pip.main(["install", package])
    except AttributeError:
        # modern pip removed main(); run it as a module instead
        check_call([sys.executable, "-m", "pip", "install", package])
    # re-exec the interpreter so the freshly installed package is importable
    execl(sys.executable, sys.executable, *sys.argv)
try:
from xxhash import xxh64
xxhash_en = True
except ModuleNotFoundError:
print("Xxhash is not installed - this mining algorithm will be disabled")
xxhash_en = False
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
import cpuinfo
except ModuleNotFoundError:
print("Cpuinfo is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install py-cpuinfo")
install("py-cpuinfo")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
class Settings:
    """
    Class containing default miner and server settings
    """
    ENCODING = "UTF8"      # text encoding used on sockets and files
    SEPARATOR = ","        # field separator of the pool protocol
    VER = 2.73             # miner version, compared against the pool's
    DATA_DIR = "Duino-Coin PC Miner " + str(VER)
    # remote JSON containing the UI translations
    TRANSLATIONS = ("https://raw.githubusercontent.com/"
                    + "revoxhere/"
                    + "duino-coin/master/Resources/"
                    + "PC_Miner_langs.json")
    TRANSLATIONS_FILE = "/Translations.json"
    SETTINGS_FILE = "/Settings.cfg"
    SOC_TIMEOUT = 15       # socket timeout, seconds
    REPORT_TIME = 50       # seconds between periodic mining reports
    DONATE_LVL = 0         # default donation level
    BLOCK = " ‖ "          # banner separator glyph
    PICK = ""              # pickaxe glyph shown next to shares
    COG = " @"             # cog glyph shown next to the difficulty
    if os.name != "nt":
        # Windows' cmd does not support emojis, shame!
        PICK = " ⛏"
        COG = " ⚙"
class Algorithms:
    """
    Class containing algorithms used by the miner
    For more info about the implementation refer to the Duino whitepaper:
    https://github.com/revoxhere/duino-coin/blob/gh-pages/assets/whitepaper.pdf
    """

    def DUCOS1(last_h: str, exp_h: str, diff: int, eff: int):
        """Brute-force the DUCO-S1 (sha1) job.

        Tries nonces 0..100*diff, hashing last_h + nonce, and returns
        [nonce, hashrate] when the digest equals exp_h, else [0, 0].
        NOTE(review): `eff` (efficiency %) is accepted but unused here.
        """
        time_start = time()
        # hash the job prefix once and only re-hash the nonce suffix
        base_hash = sha1(last_h.encode('ascii'))
        for nonce in range(100 * diff + 1):
            temp_h = base_hash.copy()
            temp_h.update(str(nonce).encode('ascii'))
            d_res = temp_h.hexdigest()
            if d_res == exp_h:
                time_elapsed = time() - time_start
                # guard against a zero-length interval on coarse clocks
                # (small nonce found instantly -> division by zero)
                hashrate = nonce / time_elapsed if time_elapsed > 0 else 0
                return [nonce, hashrate]
        return [0, 0]

    def XXHASH(last_h: str, exp_h: str, diff: int, eff: int):
        """Same contract as DUCOS1 but using the xxh64 digest (seed 2811)."""
        time_start = time()
        for nonce in range(100 * diff + 1):
            d_res = xxh64(last_h + str(nonce),
                          seed=2811).hexdigest()
            if d_res == exp_h:
                time_elapsed = time() - time_start
                # same zero-interval guard as DUCOS1
                hashrate = nonce / time_elapsed if time_elapsed > 0 else 0
                return [nonce, hashrate]
        return [0, 0]
class Client:
    """
    Class helping to organize socket connections
    """

    def connect(pool: tuple):
        """Open the module-global socket `s` to (ip, port)."""
        global s
        s = socket()
        s.settimeout(Settings.SOC_TIMEOUT)
        s.connect(pool)

    def send(msg: str):
        """Send *msg* on the global socket; returns True on success."""
        # sendall() returns None on success and raises on error,
        # so capturing its return value (as before) was meaningless
        s.sendall(str(msg).encode(Settings.ENCODING))
        return True

    def recv(limit: int = 128):
        """Receive up to *limit* bytes, decoded, trailing newline stripped."""
        data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
        return data

    def fetch_pool():
        """
        Fetches best pool from the /getPool API endpoint
        """
        while True:
            pretty_print(" " + get_string("connection_search"),
                         "warning", "net0")
            try:
                response = requests.get(
                    "https://server.duinocoin.com/getPool").json()

                if response["success"] == True:
                    NODE_ADDRESS = response["ip"]
                    NODE_PORT = response["port"]
                    return (NODE_ADDRESS, NODE_PORT)

                elif "message" in response:
                    pretty_print(f"Warning: {response['message']}"
                                 + ", retrying in 15s", "warning", "net0")
                    # was sleep(10), which contradicted the printed message
                    sleep(15)

                else:
                    raise Exception(
                        "no response - IP ban or connection error")
            except Exception as e:
                pretty_print(f"Error fetching mining node: {e}"
                             + ", retrying in 15s", "error", "net0")
                sleep(15)
class Donate:
    """Optional donation: fetch and run an external donation miner."""

    def load(donation_level):
        # Download the platform-specific donation executable into
        # DATA_DIR once; does nothing when donations are disabled.
        if donation_level > 0:
            if os.name == 'nt':
                if not Path(
                        f"{Settings.DATA_DIR}/Donate.exe").is_file():
                    url = ('https://server.duinocoin.com/'
                           + 'donations/DonateExecutableWindows.exe')
                    r = requests.get(url)
                    with open(f"{Settings.DATA_DIR}/Donate.exe",
                              'wb') as f:
                        f.write(r.content)
            elif os.name == "posix":
                # pick the binary matching the CPU architecture
                if osprocessor() == "aarch64":
                    url = ('https://server.duinocoin.com/'
                           + 'donations/DonateExecutableAARCH64')
                elif osprocessor() == "armv7l":
                    url = ('https://server.duinocoin.com/'
                           + 'donations/DonateExecutableAARCH32')
                else:
                    url = ('https://server.duinocoin.com/'
                           + 'donations/DonateExecutableLinux')
                if not Path(
                        f"{Settings.DATA_DIR}/Donate").is_file():
                    r = requests.get(url)
                    with open(f"{Settings.DATA_DIR}/Donate",
                              "wb") as f:
                        f.write(r.content)

    def start(donation_level):
        # Build the shell command for the donation miner; the -e flag
        # scales effort with the chosen donation level.
        if os.name == 'nt':
            cmd = (f'cd "{Settings.DATA_DIR}" & Donate.exe '
                   + '-o stratum+tcp://xmg.minerclaim.net:3333 '
                   + f'-u revox.donate -p x -s 4 -e {donation_level*10}')
        elif os.name == 'posix':
            cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
                   + '&& nice -20 ./Donate -o '
                   + 'stratum+tcp://xmg.minerclaim.net:3333 '
                   + f'-u revox.donate -p x -s 4 -e {donation_level*10}')

        if donation_level <= 0:
            # nag screen shown when donations are off
            pretty_print(
                Fore.YELLOW + get_string('free_network_warning').lstrip()
                + get_string('donate_warning').replace("\n", "\n\t\t")
                + Fore.GREEN + 'https://duinocoin.com/donate'
                + Fore.YELLOW + get_string('learn_more_donate'),
                'warning', 'sys0')
            sleep(5)

        if donation_level > 0:
            # fire-and-forget; stderr silenced
            donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
            pretty_print(get_string('thanks_donation').replace("\n", "\n\t\t"),
                         'error', 'sys0')
def get_prefix(symbol: str,
               val: float,
               accuracy: int):
    """
    H/s, 1000 => 1 kH/s

    Formats *val* with a metric prefix (k/M/G/T) rounded to *accuracy*
    decimal places, followed by *symbol*.
    """
    if val >= 1_000_000_000_000:  # Really?
        val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
    elif val >= 1_000_000_000:
        val = str(round((val / 1_000_000_000), accuracy)) + " G"
    elif val >= 1_000_000:
        val = str(round((val / 1_000_000), accuracy)) + " M"
    elif val >= 1_000:
        # BUGFIX: the accuracy argument was dropped only in this branch,
        # so kH/s values were rounded to whole units unlike T/G/M
        val = str(round((val / 1_000), accuracy)) + " k"
    else:
        # sub-1000 values are shown as whole units by design
        val = str(round(val)) + " "
    return val + symbol
def periodic_report(start_time, end_time,
                    shares, hashrate, uptime):
    """
    Displays nicely formatted uptime stats
    """
    seconds = round(end_time - start_time)
    # assemble the message first, then hand it to the printer
    report = (get_string("periodic_mining_report")
              + Fore.RESET + Style.NORMAL
              + get_string("report_period")
              + str(seconds) + get_string("report_time")
              + get_string("report_body1")
              + str(shares) + get_string("report_body2")
              + str(round(shares / seconds, 1))
              + get_string("report_body3")
              + get_string("report_body4")
              + str(get_prefix("H/s", hashrate, 2))
              + get_string("report_body5")
              + str(int(hashrate * seconds))
              + get_string("report_body6")
              + get_string("total_mining_time")
              + str(uptime))
    pretty_print(report, "success")
def calculate_uptime(start_time):
    """
    Returns seconds, minutes or hours passed since timestamp

    BUGFIX: the original checked `uptime >= 60` before the hour branches,
    so the `uptime_hour(s)` strings were unreachable; units are now
    checked largest-first, with singular forms for exactly one unit.
    """
    uptime = time() - start_time
    if uptime >= 7200:
        return str(round(uptime // 3600)) + get_string("uptime_hours")
    elif uptime >= 3600:
        return str(round(uptime // 3600)) + get_string("uptime_hour")
    elif uptime >= 120:
        return str(round(uptime // 60)) + get_string("uptime_minutes")
    elif uptime >= 60:
        return str(round(uptime // 60)) + get_string("uptime_minute")
    return str(round(uptime)) + get_string("uptime_seconds")
# Shared lock so output lines do not interleave. The original code did
# `with thread_lock():`, creating a BRAND NEW multiprocessing.Lock on every
# call - locking a fresh lock excludes nobody, so it protected nothing.
# NOTE(review): with spawn-based multiprocessing each child re-imports the
# module and gets its own lock, so exclusion is per-process - confirm
# whether cross-process serialization is required here.
_PRINT_LOCK = thread_lock()


def pretty_print(msg: str = None,
                 state: str = "success",
                 sender: str = "sys0"):
    """
    Produces nicely formatted CLI output for messages:
    HH:MM:S |sender| msg
    """
    if sender.startswith("net"):
        bg_color = Back.BLUE
    elif sender.startswith("cpu"):
        bg_color = Back.YELLOW
    else:
        # covers "sys*" and any unknown prefix; the original left
        # bg_color unbound for senders matching none of the prefixes
        bg_color = Back.GREEN

    if state == "success":
        fg_color = Fore.GREEN
    elif state == "error":
        fg_color = Fore.RED
    else:
        fg_color = Fore.YELLOW

    with _PRINT_LOCK:
        print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
              + Style.BRIGHT + bg_color + " " + sender + " "
              + Back.RESET + " " + fg_color + msg.strip())
# Shared lock for share lines; the original created a new Lock per call
# (`with thread_lock():`), which provided no mutual exclusion at all.
_SHARE_PRINT_LOCK = thread_lock()


def share_print(id, type,
                accept, reject,
                hashrate, total_hashrate,
                computetime, diff, ping,
                back_color):
    """
    Produces nicely formatted CLI output for shares:
    HH:MM:S |cpuN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms
    """
    total_hashrate = get_prefix("H/s", total_hashrate, 2)
    diff = get_prefix("", int(diff), 0)

    if type == "accept":
        share_str = get_string("accepted")
        fg_color = Fore.GREEN
    elif type == "block":
        share_str = get_string("block_found")
        fg_color = Fore.YELLOW
    else:
        share_str = get_string("rejected")
        fg_color = Fore.RED

    # NOTE(review): assumes accept + reject >= 1 (callers invoke this only
    # after a share result arrived) - division below would fail otherwise
    with _SHARE_PRINT_LOCK:
        print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
              + Fore.WHITE + Style.BRIGHT + back_color + Fore.RESET
              + " cpu" + str(id) + " " + Back.RESET
              + fg_color + Settings.PICK + share_str + Fore.RESET
              + str(accept) + "/" + str(accept + reject) + Fore.YELLOW
              + " (" + str(round(accept / (accept + reject) * 100)) + "%)"
              + Style.NORMAL + Fore.RESET
              + " ∙ " + str("%04.1f" % float(computetime)) + "s"
              + Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
              + str(total_hashrate) + Fore.RESET + Style.NORMAL
              + Settings.COG + f" diff {diff} ∙ " + Fore.CYAN
              + f"ping {(int(ping))}ms")
def get_string(string_name):
    """
    Gets a string from the language file

    Looks up *string_name* in the active language table, then falls
    back to English, then to a visible placeholder.
    """
    for table in (lang_file[lang], lang_file["english"]):
        if string_name in table:
            return table[string_name]
    return "String not found: " + string_name
class Miner:
    """Miner entry points: banner, config bootstrap, connection, mining loop."""

    def greeting():
        """Print the startup banner summarizing the user's settings."""
        diff_str = get_string("net_diff_short")
        if user_settings["start_diff"] == "LOW":
            diff_str = get_string("low_diff_short")
        elif user_settings["start_diff"] == "MEDIUM":
            diff_str = get_string("medium_diff_short")

        current_hour = strptime(ctime(time())).tm_hour
        greeting = get_string("greeting_back")
        if current_hour < 12:
            greeting = get_string("greeting_morning")
        elif current_hour == 12:
            greeting = get_string("greeting_noon")
        elif current_hour > 12 and current_hour < 18:
            greeting = get_string("greeting_afternoon")
        elif current_hour >= 18:
            greeting = get_string("greeting_evening")

        print("\n" + Style.DIM + Fore.YELLOW + Settings.BLOCK + Fore.YELLOW
              + Style.BRIGHT + get_string("banner") + Style.RESET_ALL
              + Fore.MAGENTA + " (" + str(Settings.VER) + ") "
              + Fore.RESET + "2019-2021")
        print(Style.DIM + Fore.YELLOW + Settings.BLOCK + Style.NORMAL
              + Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
        if lang != "english":
            print(Style.DIM + Fore.YELLOW + Settings.BLOCK
                  + Style.NORMAL + Fore.RESET + lang.capitalize()
                  + " translation: " + Fore.YELLOW
                  + get_string("translation_autor"))
        try:
            print(Style.DIM + Fore.YELLOW + Settings.BLOCK
                  + Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
                  + Fore.YELLOW + str(user_settings["threads"])
                  + "x " + str(cpu["brand_raw"]))
        except Exception:
            # cpuinfo may not expose "brand_raw" on every platform
            print(Style.DIM + Fore.YELLOW + Settings.BLOCK
                  + Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
                  + Fore.YELLOW + str(user_settings["threads"])
                  + "x threads")
        if os.name == "nt" or os.name == "posix":
            print(Style.DIM + Fore.YELLOW
                  + Settings.BLOCK + Style.NORMAL + Fore.RESET
                  + get_string("donation_level") + Style.BRIGHT
                  + Fore.YELLOW + str(user_settings["donate"]))
        print(Style.DIM + Fore.YELLOW + Settings.BLOCK
              + Style.NORMAL + Fore.RESET + get_string("algorithm")
              + Style.BRIGHT + Fore.YELLOW + user_settings["algorithm"]
              + Settings.COG + " " + diff_str)
        if user_settings["identifier"] != "None":
            print(Style.DIM + Fore.YELLOW + Settings.BLOCK
                  + Style.NORMAL + Fore.RESET + get_string("rig_identifier")
                  + Style.BRIGHT + Fore.YELLOW + user_settings["identifier"])
        print(Style.DIM + Fore.YELLOW + Settings.BLOCK
              + Style.NORMAL + Fore.RESET + str(greeting)
              + ", " + Style.BRIGHT + Fore.YELLOW
              + str(user_settings["username"]) + "!\n")

    def preload():
        """
        Creates needed directories and files for the miner
        """
        global lang_file
        global lang
        if not Path(Settings.DATA_DIR).is_dir():
            mkdir(Settings.DATA_DIR)
        if not Path(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE).is_file():
            with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE,
                      "wb") as f:
                f.write(requests.get(Settings.TRANSLATIONS).content)
        with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE, "r",
                  encoding=Settings.ENCODING) as file:
            lang_file = json.load(file)

        try:
            if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
                # first run: guess the language from the system locale
                locale = getdefaultlocale()[0]
                if locale.startswith("es"):
                    lang = "spanish"
                elif locale.startswith("pl"):
                    lang = "polish"
                elif locale.startswith("fr"):
                    lang = "french"
                elif locale.startswith("mt"):
                    lang = "maltese"
                elif locale.startswith("ru"):
                    lang = "russian"
                elif locale.startswith("de"):
                    lang = "german"
                elif locale.startswith("tr"):
                    lang = "turkish"
                elif locale.startswith("pr"):
                    lang = "portugese"
                elif locale.startswith("it"):
                    lang = "italian"
                elif locale.startswith("zh"):
                    lang = "chinese_simplified"
                elif locale.startswith("th"):
                    lang = "thai"
                else:
                    lang = "english"
            else:
                try:
                    configparser.read(Settings.DATA_DIR
                                      + Settings.SETTINGS_FILE)
                    lang = configparser["PC Miner"]["language"]
                except Exception:
                    lang = "english"
        except Exception as e:
            print("Error with lang file, falling back to english: " + str(e))
            lang = "english"

    def load_cfg():
        """
        Loads miner settings file or starts the config tool
        """
        if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
            print(get_string("basic_config_tool")
                  + Settings.DATA_DIR
                  + get_string("edit_config_file_warning")
                  + "\n"
                  + get_string("dont_have_account")
                  + Fore.YELLOW
                  + get_string("wallet")
                  + Fore.RESET
                  + get_string("register_warning"))

            username = input(get_string("ask_username") + Style.BRIGHT)
            if not username:
                # anonymous users mine for one of the devs
                username = choice(["revox", "Bilaboz", "JoyBed", "Connor2"])

            algorithm = "DUCO-S1"
            if xxhash_en:
                print(Style.BRIGHT
                      + "1" + Style.NORMAL + " - DUCO-S1 ("
                      + get_string("recommended")
                      + ")\n" + Style.BRIGHT
                      + "2" + Style.NORMAL + " - XXHASH")
                prompt = sub(r"\D", "",
                             input(get_string("ask_algorithm")
                                   + Style.BRIGHT))
                if prompt == "2":
                    algorithm = "XXHASH"

            # intensity prompt is currently disabled; fixed at 100%
            intensity = 100  # None

            threads = sub(r"\D", "",
                          input(Style.NORMAL + get_string("ask_threads")
                                + str(cpu_count()) + "): " + Style.BRIGHT))
            if not threads:
                threads = cpu_count()
            if int(threads) > 8:
                threads = 8
                pretty_print(
                    Style.BRIGHT
                    + get_string("max_threads_notice"))
            elif int(threads) < 1:
                threads = 1

            print(Style.BRIGHT
                  + "1" + Style.NORMAL + " - " + get_string("low_diff")
                  + "\n" + Style.BRIGHT
                  + "2" + Style.NORMAL + " - " + get_string("medium_diff")
                  + "\n" + Style.BRIGHT
                  + "3" + Style.NORMAL + " - " + get_string("net_diff"))
            start_diff = sub(r"\D", "",
                             input(Style.NORMAL + get_string("ask_difficulty")
                                   + Style.BRIGHT))
            if start_diff == "1":
                start_diff = "LOW"
            elif start_diff == "3":
                start_diff = "NET"
            else:
                start_diff = "MEDIUM"

            rig_id = input(Style.NORMAL + get_string("ask_rig_identifier")
                           + Style.BRIGHT)
            if rig_id.lower() == "y":
                rig_id = str(input(Style.NORMAL + get_string("ask_rig_name")
                                   + Style.BRIGHT))
            else:
                rig_id = "None"

            donation_level = '0'
            if os.name == 'nt' or os.name == 'posix':
                donation_level = input(Style.NORMAL
                                       + get_string('ask_donation_level')
                                       + Style.BRIGHT)
                donation_level = sub(r'\D', '', donation_level)
                if donation_level == '':
                    donation_level = 1
                if float(donation_level) > int(5):
                    donation_level = 5
                if float(donation_level) < int(0):
                    donation_level = 0

            configparser["PC Miner"] = {
                "username": username,
                "intensity": intensity,
                "threads": threads,
                "start_diff": start_diff,
                "donate": int(donation_level),
                "identifier": rig_id,
                "algorithm": algorithm,
                "language": lang,
                "soc_timeout": Settings.SOC_TIMEOUT,
                "report_sec": Settings.REPORT_TIME,
                "discord_rp": "y"}
            with open(Settings.DATA_DIR + Settings.SETTINGS_FILE,
                      "w") as configfile:
                configparser.write(configfile)
            print(Style.RESET_ALL + get_string("config_saved"))

        configparser.read(Settings.DATA_DIR
                          + Settings.SETTINGS_FILE)
        return configparser["PC Miner"]

    def m_connect(id, pool):
        """Connect to *pool*, retrying and re-fetching a node when needed."""
        retry_count = 0
        while True:
            try:
                if retry_count > 3:
                    # current node keeps failing - ask the API for another
                    pool = Client.fetch_pool()
                    retry_count = 0

                Client.connect(pool)
                POOL_VER = Client.recv(5)

                if id == 0:
                    Client.send("MOTD")
                    motd = Client.recv(512).replace("\n", "\n\t\t")
                    pretty_print("MOTD: " + Fore.RESET + Style.NORMAL
                                 + str(motd), "success", "net" + str(id))

                if float(POOL_VER) <= Settings.VER:
                    pretty_print(get_string("connected") + Fore.RESET
                                 + Style.NORMAL +
                                 get_string("connected_server")
                                 + str(POOL_VER) + ", " + pool[0] + ":"
                                 + str(pool[1]) + ")", "success",
                                 "net" + str(id))
                else:
                    pretty_print(get_string("outdated_miner")
                                 + str(Settings.VER) + ") -"
                                 + get_string("server_is_on_version")
                                 + str(POOL_VER) + Style.NORMAL
                                 + Fore.RESET +
                                 get_string("update_warning"),
                                 "warning", "net" + str(id))
                    sleep(5)
                break
            # BUGFIX: was a bare `except:` that formatted an undefined `e`
            # and incremented a nonexistent `retry_counter`, so the error
            # path itself crashed with NameError
            except Exception as e:
                pretty_print(get_string('connecting_error')
                             + Style.NORMAL + f' (connection err: {e})',
                             'error', 'net0')
                retry_count += 1
                sleep(10)

    def mine(id: int, user_settings: list,
             pool: tuple,
             accept: int, reject: int,
             hashrate: list,
             single_miner_id: str):
        """
        Main section that executes the functionalities from the sections above.

        NOTE(review): despite the annotations, user_settings is a
        configparser section and hashrate is a Manager dict - confirm
        before tightening the hints.
        """
        using_algo = get_string("using_algo")
        if user_settings["algorithm"] == "XXHASH":
            using_algo = get_string("using_algo_xxh")

        pretty_print(get_string("mining_thread") + str(id)
                     + get_string("mining_thread_starting")
                     + Style.NORMAL + Fore.RESET + using_algo + Fore.YELLOW
                     + str(user_settings["intensity"])
                     + "% " + get_string("efficiency"),
                     "success", "sys"+str(id))

        last_report = time()
        r_shares, last_shares = 0, 0
        while True:
            try:
                Miner.m_connect(id, pool)
                while True:
                    try:
                        # request jobs until a well-formed one arrives
                        while True:
                            job_req = "JOB"
                            if user_settings["algorithm"] == "XXHASH":
                                job_req = "JOBXX"
                            Client.send(job_req
                                        + Settings.SEPARATOR
                                        + str(user_settings["username"])
                                        + Settings.SEPARATOR
                                        + str(user_settings["start_diff"]))
                            job = Client.recv().split(Settings.SEPARATOR)
                            if len(job) == 3:
                                break
                            else:
                                pretty_print(
                                    "Node message: " + str(job[1]),
                                    "warning")
                                sleep(3)

                        while True:
                            time_start = time()
                            if user_settings["algorithm"] == "XXHASH":
                                back_color = Back.CYAN
                                result = Algorithms.XXHASH(
                                    job[0], job[1], int(job[2]),
                                    user_settings["intensity"])
                            else:
                                back_color = Back.YELLOW
                                result = Algorithms.DUCOS1(
                                    job[0], job[1], int(job[2]),
                                    user_settings["intensity"])
                            computetime = time() - time_start
                            hashrate[id] = result[1]
                            total_hashrate = sum(hashrate.values())

                            while True:
                                # submit the share and read the verdict
                                Client.send(f"{result[0]}"
                                            + Settings.SEPARATOR
                                            + f"{result[1]}"
                                            + Settings.SEPARATOR
                                            + "Official PC Miner"
                                            + f" {Settings.VER}"
                                            + Settings.SEPARATOR
                                            + f"{user_settings['identifier']}"
                                            + Settings.SEPARATOR
                                            + Settings.SEPARATOR
                                            + f"{single_miner_id}")
                                time_start = time()
                                feedback = Client.recv(
                                ).split(Settings.SEPARATOR)
                                ping = (time() - time_start) * 1000

                                if feedback[0] == "GOOD":
                                    accept.value += 1
                                    share_print(id, "accept",
                                                accept.value, reject.value,
                                                result[1], total_hashrate,
                                                computetime, job[2], ping,
                                                back_color)
                                elif feedback[0] == "BLOCK":
                                    reject.value += 1
                                    share_print(id, "block",
                                                accept.value, reject.value,
                                                result[1], total_hashrate,
                                                computetime, job[2], ping,
                                                back_color)
                                elif feedback[0] == "BAD":
                                    reject.value += 1
                                    share_print(id, "reject",
                                                accept.value, reject.value,
                                                result[1], total_hashrate,
                                                computetime, job[2], ping,
                                                back_color)

                                # thread 0 owns the periodic report
                                if id == 0:
                                    end_time = time()
                                    elapsed_time = end_time - last_report
                                    if elapsed_time >= Settings.REPORT_TIME:
                                        r_shares = accept.value - last_shares
                                        uptime = calculate_uptime(
                                            mining_start_time)
                                        periodic_report(last_report, end_time,
                                                        r_shares,
                                                        sum(hashrate.values()),
                                                        uptime)
                                        last_report = time()
                                        last_shares = accept.value
                                break
                            break
                    except Exception as e:
                        pretty_print(get_string("error_while_mining")
                                     + " " + str(e), "error", "net" + str(id))
                        sleep(5)
                        break
            except Exception as e:
                # deliberate best-effort: fall through and reconnect
                pass
class Discord_rp:
    """Best-effort Discord Rich Presence updater; all failures are ignored."""

    def connect():
        global RPC
        try:
            # presumably the miner's Discord application id - TODO confirm
            RPC = Presence(808045598447632384)
            RPC.connect()
            Thread(target=Discord_rp.update).start()
        except Exception as e:
            #print("Error launching Discord RPC thread: " + str(e))
            pass

    def update():
        # refresh the presence every 15 s with hashrate and share totals
        while True:
            try:
                total_hashrate = get_prefix("H/s", sum(hashrate.values()), 2)
                RPC.update(details="Hashrate: " + str(total_hashrate),
                           start=mining_start_time,
                           state=str(accept.value) + "/"
                           + str(reject.value + accept.value)
                           + " accepted shares",
                           large_image="ducol",
                           large_text="Duino-Coin, "
                           + "a coin that can be mined with almost everything"
                           + ", including AVR boards",
                           buttons=[{"label": "Visit duinocoin.com",
                                     "url": "https://duinocoin.com"},
                                    {"label": "Join the Discord",
                                     "url": "https://discord.gg/k48Ht5y"}])
            except Exception as e:
                #print("Error updating Discord RPC thread: " + str(e))
                pass
            sleep(15)
# Runs at import time in every process (including spawned miner workers):
# loads translations / language and initializes shared module state.
Miner.preload()
p_list = []
mining_start_time = time()
if __name__ == "__main__":
    # required for frozen (PyInstaller-style) Windows executables
    from multiprocessing import freeze_support
    freeze_support()
    cpu = cpuinfo.get_cpu_info()
    # cross-process shared counters and per-thread hashrate map
    accept = Manager().Value("i", 0)
    reject = Manager().Value("i", 0)
    hashrate = Manager().dict()
    signal(SIGINT, handler)

    user_settings = Miner.load_cfg()
    Miner.greeting()
    fastest_pool = Client.fetch_pool()
    Donate.load(int(user_settings["donate"]))
    Donate.start(int(user_settings["donate"]))

    """
    Generate a random number that's used only to
    make the wallets display one miner with many threads
    instead of many separate miners clogging it up
    (like it was before release 2.7.3)
    """
    single_miner_id = randint(0, 2811)

    # cap at 8 worker processes
    threads = int(user_settings["threads"])
    if threads > 8:
        threads = 8
        pretty_print(Style.BRIGHT
                     + get_string("max_threads_notice"))

    for i in range(threads):
        p = Process(target=Miner.mine,
                    args=[i, user_settings,
                          fastest_pool, accept, reject,
                          hashrate, single_miner_id])
        p_list.append(p)
        p.start()
        sleep(0.05)

    Discord_rp.connect()
    # block until every miner process exits
    for p in p_list:
        p.join()
|
caching.py | """
CherryPy implements a simple caching system as a pluggable Tool. This tool
tries to be an (in-process) HTTP/1.1-compliant cache. It's not quite there
yet, but it's probably good enough for most sites.
In general, GET responses are cached (along with selecting headers) and, if
another request arrives for the same resource, the caching Tool will return 304
Not Modified if possible, or serve the cached response otherwise. It also sets
request.cached to True if serving a cached representation, and sets
request.cacheable to False (so it doesn't get cached again).
If POST, PUT, or DELETE requests are made for a cached resource, they
invalidate (delete) any cached response.
Usage
=====
Configuration file example::
[/]
tools.caching.on = True
tools.caching.delay = 3600
You may use a class other than the default
:class:`MemoryCache<cherrypy.lib.caching.MemoryCache>` by supplying the config
entry ``cache_class``; supply the full dotted name of the replacement class
as the config value. It must implement the basic methods ``get``, ``put``,
``delete``, and ``clear``.
You may set any attribute, including overriding methods, on the cache
instance by providing them in config. The above sets the
:attr:`delay<cherrypy.lib.caching.MemoryCache.delay>` attribute, for example.
"""
import datetime
import sys
import threading
import time
import cherrypy
from cherrypy.lib import cptools, httputil
class Cache(object):
    """Base class for Cache implementations.

    Implementations operate on the "current" resource, i.e. the request and
    response found on ``cherrypy.serving``; none of these methods take an
    explicit URI argument.
    """

    def get(self):
        """Return the current variant if in the cache, else None."""
        raise NotImplementedError

    def put(self, obj, size):
        """Store the current variant in the cache.

        obj is the response data to store and size its length in bytes,
        used by implementations that enforce size limits.
        """
        raise NotImplementedError

    def delete(self):
        """Remove ALL cached variants of the current resource."""
        raise NotImplementedError

    def clear(self):
        """Reset the cache to its initial, empty state."""
        raise NotImplementedError
# ------------------------------ Memory Cache ------------------------------- #
class AntiStampedeCache(dict):
    """A storage system for cached items which reduces stampede collisions.

    A plain dict, except that a ``threading.Event`` stored under a key acts
    as a sentinel meaning "another thread is computing this value right now".
    """

    def wait(self, key, timeout=5, debug=False):
        """Return the cached value for the given key, or None.

        If timeout is not None, and the value is already
        being calculated by another thread, wait until the given timeout has
        elapsed. If the value is available before the timeout expires, it is
        returned. If not, None is returned, and a sentinel placed in the cache
        to signal other threads to wait.

        If timeout is None, no waiting is performed nor sentinels used.
        """
        value = self.get(key)
        if isinstance(value, threading.Event):
            # Sentinel found: another thread is computing this value.
            if timeout is None:
                # Ignore the other thread and recalc it ourselves.
                if debug:
                    cherrypy.log('No timeout', 'TOOLS.CACHING')
                return None

            # Wait until it's done or times out.
            if debug:
                cherrypy.log('Waiting up to %s seconds' %
                             timeout, 'TOOLS.CACHING')
            value.wait(timeout)
            if value.result is not None:
                # The other thread finished its calculation. Use it.
                if debug:
                    cherrypy.log('Result!', 'TOOLS.CACHING')
                return value.result

            # Timed out. Stick an Event in the slot so other threads wait
            # on this one to finish calculating the value.
            if debug:
                cherrypy.log('Timed out', 'TOOLS.CACHING')
            e = threading.Event()
            e.result = None
            dict.__setitem__(self, key, e)

            return None
        elif value is None:
            # Plain cache miss: stick an Event in the slot so other threads
            # wait on this one to finish calculating the value.
            # NOTE(review): the 'Timed out' log message below is copied from
            # the timeout branch above; this branch is a miss, not a timeout,
            # so the message is misleading (message text left unchanged here).
            if debug:
                cherrypy.log('Timed out', 'TOOLS.CACHING')
            e = threading.Event()
            e.result = None
            dict.__setitem__(self, key, e)
        # value is None on a miss, or the real cached value on a hit.
        return value

    def __setitem__(self, key, value):
        """Set the cached value for the given key."""
        existing = self.get(key)
        dict.__setitem__(self, key, value)
        if isinstance(existing, threading.Event):
            # Set Event.result so other threads waiting on it have
            # immediate access without needing to poll the cache again.
            existing.result = value
            existing.set()
class MemoryCache(Cache):
    """An in-memory cache for varying response content.

    Each key in self.store is a URI, and each value is an AntiStampedeCache.
    The response for any given URI may vary based on the values of
    "selecting request headers"; that is, those named in the Vary
    response header. We assume the list of header names to be constant
    for each URI throughout the lifetime of the application, and store
    that list in ``self.store[uri].selecting_headers``.

    The items contained in ``self.store[uri]`` have keys which are tuples of
    request header values (in the same order as the names in its
    selecting_headers), and values which are the actual responses.
    """

    maxobjects = 1000
    """The maximum number of cached objects; defaults to 1000."""

    maxobj_size = 100000
    """The maximum size of each cached object in bytes; defaults to 100 KB."""

    maxsize = 10000000
    """The maximum size of the entire cache in bytes; defaults to 10 MB."""

    delay = 600
    """Seconds until the cached content expires; defaults to 600 (10 minutes).
    """

    antistampede_timeout = 5
    """Seconds to wait for other threads to release a cache lock."""

    expire_freq = 0.1
    """Seconds to sleep between cache expiration sweeps."""

    debug = False

    def __init__(self):
        self.clear()

        # Run self.expire_cache in a separate daemon thread.
        t = threading.Thread(target=self.expire_cache, name='expire_cache')
        self.expiration_thread = t
        t.daemon = True
        t.start()

    def clear(self):
        """Reset the cache to its initial, empty state."""
        # uri -> AntiStampedeCache of variants
        self.store = {}
        # expiration timestamp -> list of (size, uri, selecting header info)
        self.expirations = {}
        # statistics counters
        self.tot_puts = 0
        self.tot_gets = 0
        self.tot_hist = 0
        self.tot_expires = 0
        self.tot_non_modified = 0
        # current total size of cached bodies, in bytes
        self.cursize = 0

    def expire_cache(self):
        """Continuously examine cached objects, expiring stale ones.

        This function is designed to be run in its own daemon thread,
        referenced at ``self.expiration_thread``.
        """
        # It's possible that "time" will be set to None
        # arbitrarily, so we check "while time" to avoid exceptions.
        # See tickets #99 and #180 for more information.
        while time:
            now = time.time()
            # Must make a copy of expirations so it doesn't change size
            # during iteration
            for expiration_time, objects in self.expirations.copy().items():
                if expiration_time <= now:
                    for obj_size, uri, sel_header_values in objects:
                        try:
                            # NOTE(review): put() records selecting header
                            # *names* here, while the variant keys in the
                            # store are sorted header *values* — when a Vary
                            # header is present these will not match and the
                            # KeyError below silently prevents expiration.
                            # Confirm against upstream before changing.
                            del self.store[uri][tuple(sel_header_values)]
                            self.tot_expires += 1
                            self.cursize -= obj_size
                        except KeyError:
                            # the key may have been deleted elsewhere
                            pass
                    del self.expirations[expiration_time]
            time.sleep(self.expire_freq)

    def get(self):
        """Return the current variant if in the cache, else None."""
        request = cherrypy.serving.request
        self.tot_gets += 1

        uri = cherrypy.url(qs=request.query_string)
        uricache = self.store.get(uri)
        if uricache is None:
            return None

        # Build the variant key from the values of the selecting headers.
        header_values = [request.headers.get(h, '')
                         for h in uricache.selecting_headers]
        variant = uricache.wait(key=tuple(sorted(header_values)),
                                timeout=self.antistampede_timeout,
                                debug=self.debug)
        if variant is not None:
            self.tot_hist += 1
        return variant

    def put(self, variant, size):
        """Store the current variant in the cache."""
        request = cherrypy.serving.request
        response = cherrypy.serving.response

        uri = cherrypy.url(qs=request.query_string)
        uricache = self.store.get(uri)
        if uricache is None:
            uricache = AntiStampedeCache()
            # Which request headers select the variant is fixed by the
            # response's Vary header the first time this URI is cached.
            uricache.selecting_headers = [
                e.value for e in response.headers.elements('Vary')]
            self.store[uri] = uricache

        if len(self.store) < self.maxobjects:
            total_size = self.cursize + size

            # checks if there's space for the object
            if (size < self.maxobj_size and total_size < self.maxsize):
                # add to the expirations list
                expiration_time = response.time + self.delay
                bucket = self.expirations.setdefault(expiration_time, [])
                bucket.append((size, uri, uricache.selecting_headers))

                # add to the cache
                header_values = [request.headers.get(h, '')
                                 for h in uricache.selecting_headers]
                uricache[tuple(sorted(header_values))] = variant
                self.tot_puts += 1
                self.cursize = total_size

    def delete(self):
        """Remove ALL cached variants of the current resource."""
        uri = cherrypy.url(qs=cherrypy.serving.request.query_string)
        self.store.pop(uri, None)
def get(invalid_methods=('POST', 'PUT', 'DELETE'), debug=False, **kwargs):
    """Try to obtain cached output. If fresh enough, raise HTTPError(304).

    If POST, PUT, or DELETE:
        * invalidates (deletes) any cached response for this resource
        * sets request.cached = False
        * sets request.cacheable = False

    else if a cached copy exists:
        * sets request.cached = True
        * sets request.cacheable = False
        * sets response.headers to the cached values
        * checks the cached Last-Modified response header against the
          current If-(Un)Modified-Since request headers; raises 304
          if necessary.
        * sets response.status and response.body to the cached values
        * returns True

    otherwise:
        * sets request.cached = False
        * sets request.cacheable = True
        * returns False
    """
    request = cherrypy.serving.request
    response = cherrypy.serving.response

    if not hasattr(cherrypy, '_cache'):
        # Make a process-wide Cache object.
        cherrypy._cache = kwargs.pop('cache_class', MemoryCache)()

        # Take all remaining kwargs and set them on the Cache object.
        for k, v in kwargs.items():
            setattr(cherrypy._cache, k, v)
        cherrypy._cache.debug = debug

    # POST, PUT, DELETE should invalidate (delete) the cached copy.
    # See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
    if request.method in invalid_methods:
        if debug:
            cherrypy.log('request.method %r in invalid_methods %r' %
                         (request.method, invalid_methods), 'TOOLS.CACHING')
        cherrypy._cache.delete()
        request.cached = False
        request.cacheable = False
        return False

    # Honor the client's HTTP/1.0-style Pragma: no-cache bypass.
    if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]:
        request.cached = False
        request.cacheable = True
        return False

    cache_data = cherrypy._cache.get()
    request.cached = bool(cache_data)
    request.cacheable = not request.cached
    if request.cached:
        # Serve the cached copy.
        max_age = cherrypy._cache.delay
        # A Cache-Control request header may tighten (max-age) or forbid
        # (no-cache) use of the cached copy.
        for v in [e.value for e in request.headers.elements('Cache-Control')]:
            atoms = v.split('=', 1)
            directive = atoms.pop(0)
            if directive == 'max-age':
                if len(atoms) != 1 or not atoms[0].isdigit():
                    raise cherrypy.HTTPError(
                        400, 'Invalid Cache-Control header')
                max_age = int(atoms[0])
                break
            elif directive == 'no-cache':
                if debug:
                    cherrypy.log(
                        'Ignoring cache due to Cache-Control: no-cache',
                        'TOOLS.CACHING')
                request.cached = False
                request.cacheable = True
                return False

        if debug:
            cherrypy.log('Reading response from cache', 'TOOLS.CACHING')
        # cache_data is the (status, headers, body, time) tuple stored by
        # tee_output().
        s, h, b, create_time = cache_data
        age = int(response.time - create_time)
        if (age > max_age):
            # Entry is older than the client (or cache) allows; recalculate.
            if debug:
                cherrypy.log('Ignoring cache due to age > %d' % max_age,
                             'TOOLS.CACHING')
            request.cached = False
            request.cacheable = True
            return False

        # Copy the response headers. See
        # https://github.com/cherrypy/cherrypy/issues/721.
        # dict.__setitem__ bypasses HeaderMap's own __setitem__, preserving
        # the cached keys exactly as stored.
        response.headers = rh = httputil.HeaderMap()
        for k in h:
            dict.__setitem__(rh, k, dict.__getitem__(h, k))

        # Add the required Age header
        response.headers['Age'] = str(age)

        try:
            # Note that validate_since depends on a Last-Modified header;
            # this was put into the cached copy, and should have been
            # resurrected just above (response.headers = cache_data[1]).
            cptools.validate_since()
        except cherrypy.HTTPRedirect:
            # validate_since signals 304 Not Modified via HTTPRedirect;
            # count it, then let the redirect propagate to the client.
            x = sys.exc_info()[1]
            if x.status == 304:
                cherrypy._cache.tot_non_modified += 1
            raise

        # serve it & get out from the request
        response.status = s
        response.body = b
    else:
        if debug:
            cherrypy.log('request is not cached', 'TOOLS.CACHING')
    return request.cached
def tee_output():
    """Tee response output to cache storage. Internal."""
    # Used by CachingTool by attaching to request.hooks

    request = cherrypy.serving.request
    if 'no-store' in request.headers.values('Cache-Control'):
        # Client forbids storing this exchange; leave the body untouched.
        return

    def tee(body):
        """Tee response.body into a list."""
        # 'response' is the enclosing function's local, assigned below; that
        # assignment runs before this generator body is first iterated.
        if ('no-cache' in response.headers.values('Pragma') or
                'no-store' in response.headers.values('Cache-Control')):
            # Response says it must not be cached: pass chunks through as-is.
            for chunk in body:
                yield chunk
            return

        output = []
        for chunk in body:
            output.append(chunk)
            yield chunk

        # Save the cache data, but only if the body isn't empty.
        # e.g. a 304 Not Modified on a static file response will
        # have an empty body.
        # If the body is empty, delete the cache because it
        # contains a stale Threading._Event object that will
        # stall all consecutive requests until the _Event times
        # out
        body = b''.join(output)
        if not body:
            cherrypy._cache.delete()
        else:
            cherrypy._cache.put((response.status, response.headers or {},
                                 body, response.time), len(body))

    response = cherrypy.serving.response
    response.body = tee(response.body)
def expires(secs=0, force=False, debug=False):
    """Tool for influencing cache mechanisms using the 'Expires' header.

    secs
        Must be either an int or a datetime.timedelta, and indicates the
        number of seconds between response.time and when the response should
        expire. The 'Expires' header will be set to response.time + secs.
        If secs is zero, the 'Expires' header is set one year in the past, and
        the following "cache prevention" headers are also set:

            * Pragma: no-cache
            * Cache-Control': no-cache, must-revalidate

    force
        If False, the following headers are checked:

            * Etag
            * Last-Modified
            * Age
            * Expires

        If any are already present, none of the above response headers are set.
    """
    response = cherrypy.serving.response
    headers = response.headers

    # The response is considered cacheable when any of these freshness
    # indicators is already present (only relevant when not forcing).
    has_indicator = any(
        name in headers
        for name in ('Etag', 'Last-Modified', 'Age', 'Expires'))
    cacheable = not force and has_indicator

    if not force and not cacheable:
        # Nothing indicates cacheability and we were not told to force
        # headers on: leave the response alone.
        if debug:
            cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')
        return

    if debug:
        cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')
    if isinstance(secs, datetime.timedelta):
        secs = secs.days * 86400 + secs.seconds

    if secs == 0:
        # Actively prevent caching.
        if force or ('Pragma' not in headers):
            headers['Pragma'] = 'no-cache'
        if cherrypy.serving.request.protocol >= (1, 1):
            if force or 'Cache-Control' not in headers:
                headers['Cache-Control'] = 'no-cache, must-revalidate'
        # Set an explicit Expires date in the past.
        expiry = httputil.HTTPDate(1169942400.0)
    else:
        expiry = httputil.HTTPDate(response.time + secs)
    if force or 'Expires' not in headers:
        headers['Expires'] = expiry
|
vad_test.py | #!/usr/bin/env python3
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: vad_test.py
# Authors: Chris Lovett
#
# Requires: Python 3.x, numpy, tkinter, matplotlib
#
###################################################################################################
import argparse
import json
import os
import sys
from threading import Thread, Lock, get_ident
import tkinter as tk
from tkinter import BOTH, RIGHT, TOP, X, END
from tkinter import Text
from tkinter.ttk import Frame, LabelFrame, Button, Label, Entry
import numpy as np
import matplotlib
# Embedding matplotlib plots in tkinter views requires using the "TkAgg" backend
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as pyplot
import matplotlib.animation as animation
import featurizer
import wav_reader
import microphone
import vad
class VadTest(Frame):
    """ A demo class that provides simple GUI for testing voice activity detection on microphone or wav file input. """

    def __init__(self, featurizer_path, input_device, wav_file, sample_rate, auto_scale):
        """ Initialize the VadTest object:
        featurizer_path - path to the ELL featurizer to use
        input_device - id of the microphone to use
        wav_file - optional wav_file to use when you click play
        sample_rate - the sample rate to resample the incoming audio
        auto_scale - auto scale audio input to the range [-1, 1]
        """
        super().__init__()
        # keys used to persist UI state in the settings json file
        self.FEATURIZER_PATH_KEY = "featurizer_path"
        self.WAV_FILE_KEY = "wav_file"
        self.main_thread = get_ident()
        self.output_clear_time = 5000  # ms before the oldest output line is purged
        self.channels = 1
        self.init_ui()
        self.auto_scale = auto_scale
        self.get_settings_file_name()
        self.load_settings()

        # sliding-spectrogram display state
        self.max_spectrogram_width = 120
        self.spectrogram_image = None
        self.spectrogram_image_data = None
        self.show_spectrogram = True
        self.colormap_name = "inferno"
        self.min_value = 0.0
        self.max_value = 1.0
        self.update_minmax = True
        # per-run VAD traces, plotted when playback/recording stops
        self.levels = []
        self.signals = []
        self.featurizer_path = None
        self.featurizer = None
        self.reading_input = False

        # Threads
        self.read_input_thread = None
        self.lock = Lock()
        # NOTE(review): main_thread was already assigned above; this second
        # assignment is redundant.
        self.main_thread = get_ident()
        self.message_queue = []
        self.animation = None

        # featurizer: command-line path wins, else fall back to saved settings
        if featurizer_path:
            self.featurizer_path = featurizer_path
            self.settings[self.FEATURIZER_PATH_KEY] = featurizer_path
        elif self.FEATURIZER_PATH_KEY in self.settings:
            self.featurizer_path = self.settings[self.FEATURIZER_PATH_KEY]

        self.sample_rate = sample_rate
        self.input_device = input_device
        self.wav_filename = None
        self.wav_file = None
        # wav file: command-line path wins, else fall back to saved settings
        if wav_file:
            self.wav_filename = wav_file
            self.settings[self.WAV_FILE_KEY] = wav_file
        if self.wav_filename is None and self.WAV_FILE_KEY in self.settings:
            self.wav_filename = self.settings[self.WAV_FILE_KEY]
        self.wav_file_list = None
        self.speaker = None
        self.microphone = None
        self.save_settings()  # in case we just changed it.

        if self.featurizer_path:
            self.load_featurizer_model(os.path.abspath(self.featurizer_path))
        else:
            self.show_output("Please specify and load a feature model")
        self.update_ui()

    def init_ui(self):
        """ Build the tkinter UI: VAD parameter entries, input controls, the
        feature/spectrogram figure, and the scrolling output window. """
        self.master.title("VAD Test")
        self.pack(side="top", fill=BOTH, expand=True)

        # VAD Controls section for controlling these VAD settings:
        controls_frame = LabelFrame(self, text="Controls", height=30)
        Label(controls_frame, text="tau_up:").grid(row=0, column=0)
        self.tau_up = Entry(controls_frame, width=15)
        self.tau_up.grid(row=1, column=0)
        Label(controls_frame, text="tau_down:").grid(row=0, column=1)
        self.tau_down = Entry(controls_frame, width=15)
        self.tau_down.grid(row=1, column=1)
        Label(controls_frame, text="threshold_up:").grid(row=0, column=2)
        self.threshold_up = Entry(controls_frame, width=15)
        self.threshold_up.grid(row=1, column=2)
        Label(controls_frame, text="threshold_down:").grid(row=0, column=3)
        self.threshold_down = Entry(controls_frame, width=15)
        self.threshold_down.grid(row=1, column=3)
        Label(controls_frame, text="large_input:").grid(row=0, column=4)
        self.large_input = Entry(controls_frame, width=15)
        self.large_input.grid(row=1, column=4)
        Label(controls_frame, text="gain_att:").grid(row=0, column=5)
        self.gain_att = Entry(controls_frame, width=15)
        self.gain_att.grid(row=1, column=5)
        Label(controls_frame, text="level_threshold:").grid(row=0, column=6)
        self.level_threshold = Entry(controls_frame, width=15)
        self.level_threshold.grid(row=1, column=6)
        controls_frame.pack(side=TOP)

        # Input section: wav filename entry plus Play/Rec buttons.
        input_frame = LabelFrame(self, text="Input")
        # +/- step through wav files in the same folder (also bound on the
        # root window by main()).
        input_frame.bind("-", self.on_minus_key)
        input_frame.bind("+", self.on_plus_key)
        input_frame.pack(fill=X)
        self.play_button = Button(input_frame, text="Play", command=self.on_play_button_click)
        self.play_button.pack(side=RIGHT, padx=4)
        self.rec_button = Button(input_frame, text="Rec", command=self.on_rec_button_click)
        self.rec_button.pack(side=RIGHT, padx=4)
        self.wav_filename_entry = Entry(input_frame, width=24)
        self.wav_filename_entry.pack(fill=X)
        self.wav_filename_entry.delete(0, END)

        # Feature section: featurizer model path, Load button, and the
        # matplotlib figure holding the spectrogram and VAD plots.
        features_frame = LabelFrame(self, text="Features")
        features_frame.pack(fill=X)
        features_control_frame = Frame(features_frame)
        features_control_frame.pack(fill=X)
        load_features_button = Button(features_control_frame, text="Load", command=self.on_load_featurizer_model)
        load_features_button.pack(side=RIGHT)
        self.features_entry = Entry(features_control_frame, width=8)
        self.features_entry.pack(fill=X)
        self.features_entry.delete(0, END)

        viz_frame = Frame(features_frame)
        # NOTE(review): "%w" is not a valid tkinter event sequence, so this
        # bind likely never fires; resizing is already handled by the
        # <Configure> binding below — confirm intent.
        viz_frame.bind("%w", self.on_resized)
        viz_frame.pack(fill=X)
        self.features_figure = Figure(figsize=(5, 4), dpi=96)
        self.subplot = self.features_figure.add_subplot(211)
        self.subplot2 = self.features_figure.add_subplot(212)
        self.canvas = FigureCanvasTkAgg(self.features_figure, master=viz_frame)
        self.canvas.draw()
        # NOTE(review): FigureCanvasTkAgg.show() is deprecated and removed in
        # newer matplotlib; draw() above should suffice — confirm the pinned
        # matplotlib version before relying on this call.
        self.canvas.show()
        self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=True)

        # Output section: scrolling text log.
        output_frame = LabelFrame(self, text="Output")
        output_frame.pack(fill=BOTH, expand=True)
        self.bind("<Configure>", self.on_resized)
        self.output_text = Text(output_frame)
        self.output_text.pack(fill=BOTH, padx=4, expand=True)

    def on_resized(self, event):
        """ Recompute the spectrogram width so it tracks the window width.

        NOTE(review): assumes self.spectrogram_image is not None; this handler
        is bound to <Configure> and could fire before a featurizer is loaded —
        confirm a guard isn't needed.
        """
        window_size = event.width
        box = self.spectrogram_image.get_window_extent()
        scale = (box.x1 - box.x0) / self.max_spectrogram_width
        self.max_spectrogram_width = int((window_size * 0.8) / scale)
        self.setup_spectrogram_image()

    def load_featurizer_model(self, featurizer_path):
        """ load the given compiled ELL featurizer for use in processing subsequent audio input """
        if featurizer_path:
            # 40 is presumably the featurizer's feature dimension — TODO
            # confirm against the AudioTransform API.
            self.featurizer = featurizer.AudioTransform(featurizer_path, 40)
            self.setup_spectrogram_image()
            # a VAD instance sized to match the featurizer's output
            self.vad = vad.VoiceActivityDetector(self.sample_rate, self.featurizer.output_size)
            self.show_output("Feature input size: {}, output size: {}".format(
                self.featurizer.input_size,
                self.featurizer.output_size))
        self.init_data()

    def setup_spectrogram_image(self):
        """ this need to be called if you load a new feature model, because the featurizer output size might have
        changed. """
        if self.featurizer:
            dim = (self.featurizer.output_size, self.max_spectrogram_width)
            self.spectrogram_image_data = np.zeros(dim, dtype=float)
            self.subplot.clear()
            self.spectrogram_image = self.subplot.imshow(self.spectrogram_image_data, vmin=self.min_value,
                                                         vmax=self.max_value, origin="lower", animated=True,
                                                         cmap=pyplot.get_cmap(self.colormap_name))

    def accumulate_spectrogram_image(self, feature_data):
        """ accumulate the feature data into the spectrogram image """
        # Append the new feature column on the right and drop the oldest
        # column on the left, keeping the image width constant.
        image_data = self.spectrogram_image_data
        feature_data = np.reshape(feature_data, [-1, 1])
        new_image = np.hstack((image_data, feature_data))[:, -image_data.shape[1]:]
        image_data[:, :] = new_image

    def set_spectrogram_image(self):
        """ update the spectrogram image and the min/max values """
        # spectrogram_image_data is also written by the background reader
        # thread (accumulate_spectrogram_image), hence the lock.
        self.lock.acquire()  # protect access to the shared state
        if self.update_minmax and self.show_spectrogram:
            min_value = np.min(self.spectrogram_image_data)
            max_value = np.max(self.spectrogram_image_data)
            if np.isfinite(min_value) and np.isfinite(max_value):
                self.min_value = min_value
                self.max_value = max_value
                eps = 0.1
                # keep a minimum dynamic range so the colormap stays stable
                if self.max_value - self.min_value < eps:
                    self.max_value = self.min_value + eps
            self.spectrogram_image.set_clim(self.min_value, self.max_value)
        self.spectrogram_image.set_data(self.spectrogram_image_data)
        self.lock.release()

    def on_load_featurizer_model(self):
        """ called when user clicks the Load button for the feature model """
        filename = self.features_entry.get()
        filename = filename.strip('"')
        self.featurizer_path = filename
        # NOTE(review): get_sample_rate() is not defined anywhere in this
        # class — this call will raise AttributeError unless it is provided
        # by Frame or a monkey-patch; confirm.
        self.get_sample_rate()
        self.settings[self.FEATURIZER_PATH_KEY] = filename
        self.save_settings()
        self.stop()
        self.load_featurizer_model(filename)

    def set_entry(self, e, value):
        """ Set the text of an Entry widget, only touching it if it changed. """
        s = str(value)
        if e.get() != s:
            e.delete(0, END)
            e.insert(0, s)

    def get_entry(self, e):
        """ Read an Entry widget's text as a float (raises ValueError if the
        field does not contain a number). """
        v = e.get()
        return float(v)

    def update_ui(self):
        """ Push current state and the vad module's default parameters into
        the UI entry fields. """
        self.set_entry(self.wav_filename_entry, self.wav_filename)
        self.set_entry(self.features_entry, self.featurizer_path)
        self.set_entry(self.tau_up, vad.DEFAULT_TAU_UP)
        self.set_entry(self.tau_down, vad.DEFAULT_TAU_DOWN)
        self.set_entry(self.threshold_up, vad.DEFAULT_THRESHOLD_UP)
        self.set_entry(self.threshold_down, vad.DEFAULT_THRESHOLD_DOWN)
        self.set_entry(self.large_input, vad.DEFAULT_LARGE_INPUT)
        self.set_entry(self.gain_att, vad.DEFAULT_GAIN_ATT)
        self.set_entry(self.level_threshold, vad.DEFAULT_LEVEL_THRESHOLD)

    def read_ui_settings(self):
        """ Configure the VAD instance from the current UI entry values. """
        self.vad.configure(
            self.get_entry(self.tau_up),
            self.get_entry(self.tau_down),
            self.get_entry(self.threshold_up),
            self.get_entry(self.threshold_down),
            self.get_entry(self.large_input),
            self.get_entry(self.gain_att),
            self.get_entry(self.level_threshold)
        )

    def init_data(self):
        """ initialize the spectrogram_image_data based on the newly loaded model info """
        if self.featurizer:
            dim = (self.featurizer.output_size, self.max_spectrogram_width)
            self.spectrogram_image_data = np.zeros(dim, dtype=float)
            if self.spectrogram_image is not None:
                self.spectrogram_image.set_data(self.spectrogram_image_data)

    def get_settings_file_name(self):
        """ this app stores the various UI field values in a settings file in your temp folder
        so you don't always have to specify the full command line options """
        import tempfile
        temp = tempfile.gettempdir()
        self.settings_file_name = os.path.join(temp, "ELL", "Audio", "vad_test.json")

    def load_settings(self):
        """ load the previously saved settings from disk, if any """
        self.settings = {}
        try:
            if os.path.isfile(self.settings_file_name):
                with open(self.settings_file_name, "r") as f:
                    self.settings = json.load(f)
        except:
            # deliberately broad: any unreadable/corrupt settings file just
            # falls back to empty defaults
            self.show_output("error loading settings: {}".format(self.settings_file_name))
            self.settings = {}

    def save_settings(self):
        """ save the current settings to disk """
        settings_dir = os.path.dirname(self.settings_file_name)
        if not os.path.isdir(settings_dir):
            os.makedirs(settings_dir)
        with open(self.settings_file_name, "w") as f:
            json.dump(self.settings, f, indent=2)

    def on_rec_button_click(self):
        """ called when user clicks the record button, same button is used to "stop" recording. """
        if self.rec_button["text"] == "Rec":
            self.rec_button["text"] = "Stop"
            self.play_button["text"] = "Play"
            self.start_recording()
        else:
            self.rec_button["text"] = "Rec"
            self.on_stopped()

    def on_play_button_click(self):
        """ called when user clicks the record button, same button is used to "stop" playback """
        if self.play_button["text"] == "Play":
            self.play_button["text"] = "Stop"
            self.rec_button["text"] = "Rec"
            self.on_play()
        else:
            self.play_button["text"] = "Play"
            self.on_stopped()

    def on_play(self):
        """ called when user clicks the Play button """
        filename = self.wav_filename_entry.get()
        filename = filename.strip('"')
        self.wav_filename = filename
        self.settings[self.WAV_FILE_KEY] = filename
        self.save_settings()
        self.start_playing(filename)

    def on_stop(self):
        """ called when user clicks the Stop button """
        self.reading_input = False
        if self.wav_file:
            self.wav_file.close()
            self.wav_file = None
        if self.read_input_thread:
            # wait for the background reader thread to exit cleanly
            self.read_input_thread.join()
            self.read_input_thread = None
        self.stop()

    def on_stopped(self):
        """ called when we reach the end of the wav file playback """
        self.play_button["text"] = "Play"
        self.on_stop()
        self.subplot2.clear()
        if (len(self.levels) > 0):
            # plot the normalized energy level and the VAD signal traces
            levels = np.array(self.levels)
            levels /= np.max(levels)
            signals = np.array(self.signals)
            self.subplot2.plot(levels)
            self.subplot2.plot(signals)
            self.vad.reset()
            self.canvas.draw()
            # NOTE(review): deprecated canvas.show(), see init_ui.
            self.canvas.show()
            self.levels = []
            self.signals = []

    def stop(self):
        """ called when user clicks the stop button, or we reach the end of a wav file input """
        # close streams
        if self.animation:
            self.animation.event_source.stop()
            self.animation = None
        if self.microphone:
            self.microphone.close()
        if self.speaker:
            self.speaker.close()
        if self.wav_file:
            self.wav_file.close()
            self.wav_file = None
        self.reading_input = False

    def get_wav_list(self):
        """ Return (and lazily build) the sorted list of .wav files located in
        the same folder as the current wav file. """
        if self.wav_filename and os.path.isfile(self.wav_filename):
            full_path = os.path.abspath(self.wav_filename)
            dir_name = os.path.dirname(full_path)
            if not self.wav_file_list:
                print("wav file name: {}".format(full_path))
                print("looking for wav files in: {}".format(dir_name))
                self.wav_file_list = [x for x in os.listdir(dir_name) if os.path.splitext(x)[1] == ".wav"]
                self.wav_file_list.sort()
        return self.wav_file_list

    def select_wav_file(self, filename):
        """ Make the given wav file current, reflect it in the UI, and play it. """
        self.wav_filename = filename
        # show the file in the UI
        self.wav_filename_entry.delete(0, END)
        if self.wav_filename:
            self.wav_filename_entry.insert(0, self.wav_filename)
        # and automatically play the file.
        self.on_play()

    def on_minus_key(self, event):
        """ When user presses the minus key we step back to the previous wav file in the current folder.
        This way you can easily step through all the training wav files """
        if self.get_wav_list():
            i = self.wav_file_list.index(os.path.basename(self.wav_filename))
            if i - 1 >= 0:
                next_wav_file = self.wav_file_list[i - 1]
                dir_name = os.path.dirname(self.wav_filename)
                self.select_wav_file(os.path.join(dir_name, next_wav_file))

    def on_plus_key(self, event):
        """ When user presses the plus button we advance to the next wav file in the current folder.
        This way you can easily step through all the training wav files """
        if self.get_wav_list():
            i = self.wav_file_list.index(os.path.basename(self.wav_filename))
            if i + 1 < len(self.wav_file_list):
                next_wav_file = self.wav_file_list[i + 1]
                dir_name = os.path.dirname(self.wav_filename)
                self.select_wav_file(os.path.join(dir_name, next_wav_file))

    def clear_output(self):
        """ remove the oldest output line; scheduled by show_output via a
        timeout callback """
        self.output_text.delete(1.0, 2.0)

    def process_output(self):
        """ show output that was queued by background thread """
        # swap the queue out under the lock, then render on the UI thread
        self.lock.acquire()
        messages = self.message_queue
        self.message_queue = []
        self.lock.release()
        for msg in messages:
            self.show_output(msg)

    def show_output(self, message):
        """ show output message, or queue it if we are on a background thread """
        if self.main_thread != get_ident():
            # tkinter is not safe to call from other threads; queue the
            # message and let process_output render it on the UI thread.
            self.message_queue += [message]
            return

        for line in str(message).split('\n'):
            self.output_text.insert(END, "{}\n".format(line))

        self.output_text.see("end")  # scroll to end
        self.after(self.output_clear_time, self.clear_output)

    def start_playing(self, filename):
        """
        Play a wav file, and classify the audio. Note we use a background thread to read the
        wav file and we setup a UI animation function to draw the sliding spectrogram image, this way
        the UI update doesn't interfere with the smoothness of the audio playback
        """
        self.stop()
        self.read_ui_settings()
        self.reading_input = False
        self.wav_file = wav_reader.WavReader(self.sample_rate, self.channels, auto_scale=self.auto_scale)
        self.wav_file.open(filename, self.featurizer.input_size, self.speaker)

        self.setup_spectrogram_image()

        def update_func(frame_index):
            # animation callback: pump queued log messages and refresh the
            # spectrogram; schedule on_stopped once the reader thread is done
            self.process_output()
            if not self.reading_input:
                self.after(1, self.on_stopped)
            self.set_spectrogram_image()
            return (self.spectrogram_image,)

        if self.animation:
            self.animation.event_source.stop()
        self.reading_input = True

        # Start animation timer for updating the UI (e.g. spectrogram image) (30 fps is usually fine)
        self.animation = animation.FuncAnimation(self.features_figure, update_func, interval=33, blit=True)

        # start background thread to read and classify the audio.
        self.featurizer.open(self.wav_file)
        self.read_input_thread = Thread(target=self.on_read_features, args=())
        self.read_input_thread.daemon = True
        self.read_input_thread.start()

    def start_recording(self):
        """ Start recording audio from the microphone nd classify the audio. Note we use a background thread to
        process the audio and we setup a UI animation function to draw the sliding spectrogram image, this way
        the UI update doesn't interfere with the smoothness of the microphone readings """
        if self.microphone is None:
            self.microphone = microphone.Microphone(True, False)
        self.stop()
        self.read_ui_settings()
        num_channels = 1
        self.microphone.open(self.featurizer.input_size, self.sample_rate, num_channels, self.input_device)

        def update_func(frame_index):
            # this is an animation callback to update the UI every 33 milliseconds.
            self.process_output()
            self.set_spectrogram_image()
            if not self.reading_input:
                self.after(1, self.on_stopped)
            return (self.spectrogram_image,)

        if self.animation:
            self.animation.event_source.stop()
        self.reading_input = True

        # Start animation timer for updating the UI (e.g. spectrogram image) (30 fps is usually fine)
        self.animation = animation.FuncAnimation(self.features_figure, update_func, interval=33, blit=True)

        # start background thread to read and classify the recorded audio.
        self.featurizer.open(self.microphone)
        self.read_input_thread = Thread(target=self.on_read_features, args=())
        self.read_input_thread.daemon = True
        self.read_input_thread.start()

    def on_read_features(self):
        """ this is the background thread entry point. So we read the feature data in a loop """
        try:
            while self.reading_input and self.featurizer:
                feature_data = self.featurizer.read()
                if feature_data is None:
                    break  # eof
                else:
                    signal = self.vad.process(feature_data)
                    self.levels += [self.vad.level]
                    self.signals += [signal]
                    self.lock.acquire()
                    if self.show_spectrogram:
                        self.accumulate_spectrogram_image(feature_data)
                    self.lock.release()
        except:
            # deliberately broad: report any failure instead of letting the
            # background thread die silently
            errorType, value, traceback = sys.exc_info()
            print("### Exception reading input: " + str(errorType) + ": " + str(value) + " " + str(traceback))
            while traceback:
                print(traceback.tb_frame.f_code)
                traceback = traceback.tb_next

        self.reading_input = False
def main(featurizer, input_device, wav_file, sample_rate, auto_scale):
    """Create the Tk root window, attach a VadTest frame, and run the UI loop.

    The mainloop call is retried on UnicodeDecodeError — presumably working
    around a tkinter event-decoding glitch; TODO confirm which platform/event
    triggers it.
    """
    window = tk.Tk()
    window.geometry("800x800")
    demo = VadTest(featurizer, input_device, wav_file, sample_rate, auto_scale)
    # +/- step through wav files from anywhere in the window
    window.bind("+", demo.on_plus_key)
    window.bind("-", demo.on_minus_key)

    finished = False
    while not finished:
        try:
            window.mainloop()
            finished = True
        except UnicodeDecodeError:
            pass
if __name__ == "__main__":
    # Command-line entry point: parse options and launch the GUI, or just
    # list the available audio input devices.
    arg_parser = argparse.ArgumentParser(description="Test a feature model and optional classifier in a handy GUI app")

    # options
    arg_parser.add_argument("--featurizer", "-m", help="Compiled ELL model to use for generating features",
                            default=None)
    arg_parser.add_argument("--input_device", "-d", help="Index of input device (see --list_devices)",
                            default=1, type=int)
    arg_parser.add_argument("--list_devices", help="List available input devices", action="store_true")
    arg_parser.add_argument("--wav_file", help="Provide an input wav file to test", default=None)
    # fix: help text typo "autio" -> "audio"
    arg_parser.add_argument("--auto_scale", help="Auto-scale audio input to range [-1,1]", action="store_true")
    arg_parser.add_argument("--sample_rate", type=int, help="The sample rate that featurizer is setup to use",
                            default=16000)
    args = arg_parser.parse_args()

    if args.list_devices:
        microphone.list_devices()
    else:
        main(args.featurizer, args.input_device, args.wav_file, args.sample_rate, args.auto_scale)
|
all_ip_banner.py | #!/usr/bin/env/ python
# coding=utf-8
__author__ = 'Achelics'
__Date__ = '2017/05/15'
import json as _json
import multiprocessing
import os
import sys
# IPs whose first `flag_num` flag columns are all 1 (filled by get_all_ip).
IP_LIST = list()
def get_all_ip(raw_file_name, result_file_name, flag_num=4):
    """Collect IPs whose first `flag_num` flag columns are all set.

    Each line of `raw_file_name` looks like "ip,f1,f2,...": an IP address
    followed by 0/1 flag columns.  An IP qualifies when its first `flag_num`
    flags sum to `flag_num` (i.e. all are 1).  Qualifying IPs are written to
    `result_file_name` (one per line) and appended to the global IP_LIST.
    """
    # Use `with` for the output file too, so it is closed even if a line is
    # malformed (the original left it open on exceptions).  The redundant
    # f.close() inside the input's `with` block is dropped.
    with open(raw_file_name, 'r') as raw_file, \
            open(result_file_name, 'w') as result_file:
        for line in raw_file:
            data = line.strip('\n').strip(',').split(',')
            ip = data[0]
            count_num = 0
            for i in range(0, flag_num):
                count_num += int(data[i + 1])
            if count_num == flag_num:
                result_file.write(ip + '\n')
                IP_LIST.append(ip)
def get_ip_json(banner_file_name, result_file_name, ip_list):
    """Copy banner records whose 'ip' field is in `ip_list`.

    `banner_file_name` holds one JSON object per line with at least an "ip"
    key; matching lines are copied verbatim to `result_file_name`.
    """
    # FIX: `print ip_list` was a Python-2-only print statement (a syntax
    # error under Python 3); use the function form, which works on both.
    print(ip_list)
    sys.stdout.flush()
    # A set makes the per-line membership test O(1) instead of O(len(ip_list)).
    wanted = set(ip_list)
    # `with` on the output file guarantees it is closed on error paths.
    with open(banner_file_name, 'r') as banner_file, \
            open(result_file_name, 'w') as result_file:
        for line in banner_file:
            data = _json.loads(line.strip('\n'))
            if data['ip'] in wanted:
                result_file.write(line)
if __name__ == '__main__':
    # Step 1: build the list of IPs whose five protocol flags are all set.
    ip_raw_file_name = r'F:\mutil_result\five_protocol\ip_maeked_compare.txt'
    ip_all_result_name = r'F:\mutil_result\five_protocol\five_protocol_all\ip_list.txt'
    get_all_ip(ip_raw_file_name, ip_all_result_name, 5)
    # Step 2: filter each per-port banner file down to those IPs, spawning one
    # subprocess per file.  IP_LIST is passed via args, so each child receives
    # a (pickled) copy of the list built above.
    raw_dir = r'F:\mutil_result\five_protocol'
    result_dir = r'F:\mutil_result\five_protocol\five_protocol_all'
    banner_name = ['banner21.json', 'banner22.json', 'banner23.json', 'banner80.json', 'banner554.json']
    for banner in banner_name:
        banner_file_name = os.path.join(raw_dir, banner)
        result_file_name = os.path.join(result_dir, banner)
        # NOTE(review): the processes are started but never joined -- looks
        # intentionally fire-and-forget; confirm if completion order matters.
        process = multiprocessing.Process(target=get_ip_json, args=(banner_file_name, result_file_name, IP_LIST))
        process.start()
|
parallel.py | import multiprocessing
from utils.type import is_lambda
def _worker(delegate, queue, rqueue):
while True:
i, datum = queue.get()
if i < 0:
break
rv = delegate(datum)
rqueue.put((i, rv))
def parallel(data, delegate, spawn=2):
    """Apply `delegate` to every item of `data` in `spawn` worker processes
    and lazily yield the results in input order.

    :param data: any iterable of work items
    :param delegate: picklable callable applied to each item (no lambdas --
        they cannot be pickled for multiprocessing)
    :param spawn: number of worker processes, must be >= 2
    :raises ValueError: for non-iterable data, spawn < 2, or a lambda delegate
    :raises RuntimeError: when a worker process dies with a non-zero exit code
    """
    if not hasattr(data, "__iter__"):
        raise ValueError("Type %s is not iterable" % type(data))
    if spawn < 2:
        raise ValueError("Illegal spawn: %d" % spawn)
    if is_lambda(delegate):
        raise ValueError("Illegal type for delegate: lambda expression")
    task_queue, result_queue = multiprocessing.Queue(), multiprocessing.Queue()
    # BUG FIX: the original built a single worker regardless of `spawn`;
    # create one process per requested worker.
    ps = [multiprocessing.Process(target=_worker,
                                  args=(delegate, task_queue, result_queue))
          for _ in range(spawn)]
    for p in ps:
        p.daemon = True
        p.start()
    n = 0
    for i, datum in enumerate(data):
        task_queue.put((i, datum))
        n += 1
    # One stop sentinel per worker so they exit cleanly instead of relying on
    # daemon teardown at interpreter shutdown.
    for _ in ps:
        task_queue.put((-1, None))
    j = 0
    buffer = []
    while j < n:
        # exitcode is None while a process is still alive; guard before
        # comparing (None > 0 raises TypeError on Python 3).  Any non-zero
        # exit (exception or signal) means results may never arrive.
        if any(p.exitcode is not None and p.exitcode != 0 for p in ps):
            raise RuntimeError("worker process failed")
        added = False
        while result_queue.qsize() > 0:
            buffer.append(result_queue.get())
            added = True
        if added:
            # Results can arrive out of order; emit the next contiguous run.
            buffer.sort(key=lambda x: x[0])
            while buffer and buffer[0][0] == j:
                _, result = buffer[0]
                yield result
                j += 1
                buffer = buffer[1:]
|
application_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import gzip
import json
import numbers
import os
import shutil
import socket
import tempfile
import threading
from six import BytesIO
from six.moves import http_client
from six.moves import xrange # pylint: disable=redefined-builtin
from werkzeug import serving
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer as writer_lib
from tensorflow.tensorboard import tensorboard
from tensorflow.tensorboard.backend import application
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins import base_plugin
class FakePlugin(base_plugin.TBPlugin):
  """A stub TensorBoard plugin with no real functionality, used by tests."""

  def __init__(self, plugin_name, is_active_value, routes_mapping):
    """Stores the fake plugin's name, activity flag and route table.

    Args:
      plugin_name: The name of this plugin.
      is_active_value: Whether the plugin reports itself as active.
      routes_mapping: Dict mapping a route (string URL path) to the handler
        invoked when a user requests that route.
    """
    self._routes_mapping = routes_mapping
    self._is_active_value = is_active_value
    self.plugin_name = plugin_name

  def get_plugin_apps(self, multiplexer, logdir):
    """Returns the route-to-handler dict supplied at construction time.

    Args:
      multiplexer: The event multiplexer (unused by this fake).
      logdir: Path to the directory containing logs (unused by this fake).

    Returns:
      The routes_mapping dict passed to __init__.
    """
    return self._routes_mapping

  def is_active(self):
    """Returns the is_active_value flag passed to __init__."""
    return self._is_active_value
class TensorboardServerTest(test.TestCase):
  _only_use_meta_graph = False  # Server data contains only a GraphDef
  # Number of scalar-containing events to make.
  _SCALAR_COUNT = 99
  def setUp(self):
    # Generate event data on disk, wrap it in a real TensorBoard WSGI app,
    # serve it from a daemon thread on an OS-chosen port, and keep an HTTP
    # connection to it for the tests below.
    self.temp_dir = self._GenerateTestData()
    multiplexer = event_multiplexer.EventMultiplexer(
        size_guidance=application.DEFAULT_SIZE_GUIDANCE,
        purge_orphaned_data=True)
    plugins = [
        FakePlugin(plugin_name='foo', is_active_value=True, routes_mapping={}),
        FakePlugin(plugin_name='bar', is_active_value=False, routes_mapping={})
    ]
    app = application.TensorBoardWSGIApp(
        self.temp_dir, plugins, multiplexer, reload_interval=0)
    try:
      self._server = serving.BaseWSGIServer('localhost', 0, app)
      # 0 to pick an unused port.
    except IOError:
      # BaseWSGIServer has a preference for IPv4. If that didn't work, try again
      # with an explicit IPv6 address.
      self._server = serving.BaseWSGIServer('::1', 0, app)
    self._server_thread = threading.Thread(target=self._server.serve_forever)
    self._server_thread.daemon = True
    self._server_thread.start()
    self._connection = http_client.HTTPConnection(
        'localhost', self._server.server_address[1])
  def tearDown(self):
    # Close the client connection first, then stop and dispose of the server.
    self._connection.close()
    self._server.shutdown()
    self._server.server_close()
  def _get(self, path, headers=None):
    """Perform a GET request for the given path."""
    if headers is None:
      headers = {}
    self._connection.request('GET', path, None, headers)
    return self._connection.getresponse()
  def _getJson(self, path):
    """Perform a GET request and decode the result as JSON."""
    self._connection.request('GET', path)
    response = self._connection.getresponse()
    self.assertEqual(response.status, 200)
    data = response.read()
    # Transparently un-gzip responses the server chose to compress.
    if response.getheader('Content-Encoding') == 'gzip':
      data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()
    return json.loads(data.decode('utf-8'))
  def testBasicStartup(self):
    """Start the server up and then shut it down immediately."""
    pass
  def testRequestMainPage(self):
    """Navigate to the main page and verify that it returns a 200."""
    response = self._get('/')
    self.assertEqual(response.status, 200)
  def testRequestNonexistentPage(self):
    """Request a page that doesn't exist; it should 404."""
    response = self._get('/asdf')
    self.assertEqual(response.status, 404)
  def testDirectoryTraversal(self):
    """Attempt a directory traversal attack."""
    response = self._get('/..' * 30 + '/etc/passwd')
    self.assertEqual(response.status, 400)
  def testLogdir(self):
    """Test the format of the data/logdir endpoint."""
    parsed_object = self._getJson('/data/logdir')
    self.assertEqual(parsed_object, {'logdir': self.temp_dir})
  def testPluginsListing(self):
    """Test the format of the data/plugins_listing endpoint."""
    parsed_object = self._getJson('/data/plugins_listing')
    # Plugin foo is active. Plugin bar is not.
    self.assertEqual(parsed_object, {'foo': True, 'bar': False})
  def testRuns(self):
    """Test the format of the /data/runs endpoint."""
    run_json = self._getJson('/data/runs')
    # Don't check the actual timestamp since it's time-dependent.
    self.assertTrue(
        isinstance(run_json['run1']['firstEventTimestamp'], numbers.Number))
    del run_json['run1']['firstEventTimestamp']
    self.assertEqual(
        run_json,
        {
            'run1': {
                'compressedHistograms': ['histogram'],
                'scalars': ['simple_values'],
                'histograms': ['histogram'],
                'images': ['image'],
                'audio': ['audio'],
                # if only_use_meta_graph, the graph is from the metagraph
                'graph': True,
                'meta_graph': self._only_use_meta_graph,
                'run_metadata': ['test run'],
                'tensors': [],
            }
        })
  def testApplicationPaths_getCached(self):
    """Test that application paths are served with a private cache header."""
    for path in ('/',):  # TODO(jart): '/app.js' in open source
      connection = http_client.HTTPConnection('localhost',
                                              self._server.server_address[1])
      connection.request('GET', path)
      response = connection.getresponse()
      self.assertEqual(response.status, 200, msg=path)
      self.assertEqual(
          response.getheader('Cache-Control'),
          'private, max-age=3600',
          msg=path)
      connection.close()
  def testDataPaths_disableAllCaching(self):
    """Test that data paths disable HTTP caching (Expires: 0)."""
    for path in ('/data/runs', '/data/logdir',
                 '/data/scalars?run=run1&tag=simple_values',
                 '/data/scalars?run=run1&tag=simple_values&format=csv',
                 '/data/images?run=run1&tag=image',
                 '/data/individualImage?run=run1&tag=image&index=0',
                 '/data/audio?run=run1&tag=audio',
                 '/data/run_metadata?run=run1&tag=test%20run'):
      connection = http_client.HTTPConnection('localhost',
                                              self._server.server_address[1])
      connection.request('GET', path)
      response = connection.getresponse()
      self.assertEqual(response.status, 200, msg=path)
      self.assertEqual(response.getheader('Expires'), '0', msg=path)
      # Drain the body so the connection can be reused/closed cleanly.
      response.read()
      connection.close()
  def testScalars(self):
    """Test the format of /data/scalars."""
    data = self._getJson('/data/scalars?run=run1&tag=simple_values')
    self.assertEqual(len(data),self._SCALAR_COUNT)
  def testScalarsCsv(self):
    """Test the csv format of /data/scalars."""
    data = self._get('/data/scalars?run=run1&tag=simple_values&format=csv').read()
    line_count = data.count('\n')
    self.assertEqual(line_count,self._SCALAR_COUNT + 1)  # include 1 more line for header
  def testHistograms(self):
    """Test the format of /data/histograms."""
    self.assertEqual(
        self._getJson('/data/histograms?tag=histogram&run=run1'),
        [[0, 0, [0, 2.0, 3.0, 6.0, 5.0, [0.0, 1.0, 2.0], [1.0, 1.0, 1.0]]]])
  def testImages(self):
    """Test listing images and retrieving an individual image."""
    image_json = self._getJson('/data/images?tag=image&run=run1')
    image_query = image_json[0]['query']
    # We don't care about the format of the image query.
    del image_json[0]['query']
    self.assertEqual(image_json, [{
        'wall_time': 0,
        'step': 0,
        'height': 1,
        'width': 1
    }])
    response = self._get('/data/individualImage?%s' % image_query)
    self.assertEqual(response.status, 200)
  def testAudio(self):
    """Test listing audio and retrieving an individual audio clip."""
    audio_json = self._getJson('/data/audio?tag=audio&run=run1')
    audio_query = audio_json[0]['query']
    # We don't care about the format of the audio query.
    del audio_json[0]['query']
    self.assertEqual(audio_json, [{
        'wall_time': 0,
        'step': 0,
        'content_type': 'audio/wav'
    }])
    response = self._get('/data/individualAudio?%s' % audio_query)
    self.assertEqual(response.status, 200)
  def testGraph(self):
    """Test retrieving the graph definition."""
    response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                         '&large_attrs_key=_very_large_attrs')
    self.assertEqual(response.status, 200)
    graph_pbtxt = response.read()
    # Parse the graph from pbtxt into a graph message.
    graph = graph_pb2.GraphDef()
    graph = text_format.Parse(graph_pbtxt, graph)
    self.assertEqual(len(graph.node), 2)
    self.assertEqual(graph.node[0].name, 'a')
    self.assertEqual(graph.node[1].name, 'b')
    # Make sure the second node has an attribute that was filtered out because
    # it was too large and was added to the "too large" attributes list.
    self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])
    self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,
                     [b'very_large_attr'])
  def testAcceptGzip_compressesResponse(self):
    # When the client advertises gzip, the graph response is gzip-encoded.
    response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                         '&large_attrs_key=_very_large_attrs',
                         {'Accept-Encoding': 'gzip'})
    self.assertEqual(response.status, 200)
    self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
    pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
    graph = text_format.Parse(pbtxt, graph_pb2.GraphDef())
    self.assertEqual(len(graph.node), 2)
  def testAcceptAnyEncoding_compressesResponse(self):
    # 'Accept-Encoding: *' should also yield a gzip-encoded response.
    response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                         '&large_attrs_key=_very_large_attrs',
                         {'Accept-Encoding': '*'})
    self.assertEqual(response.status, 200)
    self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
    pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
    graph = text_format.Parse(pbtxt, graph_pb2.GraphDef())
    self.assertEqual(len(graph.node), 2)
  def testAcceptDoodleEncoding_doesNotCompressResponse(self):
    # An unknown encoding must fall back to an uncompressed response.
    response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                         '&large_attrs_key=_very_large_attrs',
                         {'Accept-Encoding': 'doodle'})
    self.assertEqual(response.status, 200)
    self.assertIsNone(response.getheader('Content-Encoding'))
    graph = text_format.Parse(response.read(), graph_pb2.GraphDef())
    self.assertEqual(len(graph.node), 2)
  def testAcceptGzip_doesNotCompressImage(self):
    # Images are already compressed; the server must not re-gzip them.
    response = self._get('/data/individualImage?run=run1&tag=image&index=0',
                         {'Accept-Encoding': 'gzip'})
    self.assertEqual(response.status, 200)
    self.assertEqual(response.getheader('Content-Encoding'), None)
  def testRunMetadata(self):
    """Test retrieving the run metadata information."""
    response = self._get('/data/run_metadata?run=run1&tag=test%20run')
    self.assertEqual(response.status, 200)
    run_metadata_pbtxt = response.read()
    # Parse from pbtxt into a message.
    run_metadata = config_pb2.RunMetadata()
    text_format.Parse(run_metadata_pbtxt, run_metadata)
    self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
    self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')
  def _GenerateTestData(self):
    """Generates the test data directory.
    The test data has a single run named run1 which contains:
     - a histogram
     - an image at timestamp and step 0
     - scalar events containing the value i at step 10 * i and wall time
         100 * i, for i in [1, _SCALAR_COUNT] (inclusive)
     - a graph definition
    Returns:
      temp_dir: The directory the test data is generated under.
    """
    temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
    self.addCleanup(shutil.rmtree, temp_dir)
    run1_path = os.path.join(temp_dir, 'run1')
    os.makedirs(run1_path)
    writer = writer_lib.FileWriter(run1_path)
    histogram_value = summary_pb2.HistogramProto(
        min=0,
        max=2,
        num=3,
        sum=6,
        sum_squares=5,
        bucket_limit=[0, 1, 2],
        bucket=[1, 1, 1])
    # Add a simple graph event.
    graph_def = graph_pb2.GraphDef()
    node1 = graph_def.node.add()
    node1.name = 'a'
    node2 = graph_def.node.add()
    node2.name = 'b'
    node2.attr['very_large_attr'].s = b'a' * 2048  # 2 KB attribute
    meta_graph_def = meta_graph_pb2.MetaGraphDef(graph_def=graph_def)
    if self._only_use_meta_graph:
      writer.add_meta_graph(meta_graph_def)
    else:
      writer.add_graph(graph_def)
    # Add a simple run metadata event.
    run_metadata = config_pb2.RunMetadata()
    device_stats = run_metadata.step_stats.dev_stats.add()
    device_stats.device = 'test device'
    writer.add_run_metadata(run_metadata, 'test run')
    # 1x1 transparent GIF.
    encoded_image = base64.b64decode(
        'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
    image_value = summary_pb2.Summary.Image(
        height=1, width=1, colorspace=1, encoded_image_string=encoded_image)
    audio_value = summary_pb2.Summary.Audio(
        sample_rate=44100,
        length_frames=22050,
        num_channels=2,
        encoded_audio_string=b'',
        content_type='audio/wav')
    writer.add_event(
        event_pb2.Event(
            wall_time=0,
            step=0,
            summary=summary_pb2.Summary(value=[
                summary_pb2.Summary.Value(
                    tag='histogram', histo=histogram_value),
                summary_pb2.Summary.Value(
                    tag='image', image=image_value), summary_pb2.Summary.Value(
                        tag='audio', audio=audio_value)
            ])))
    # Write _SCALAR_COUNT simple scalar values.
    for i in xrange(1, self._SCALAR_COUNT + 1):
      writer.add_event(
          event_pb2.Event(
              # We use different values for wall time, step, and the value so we
              # can tell them apart.
              wall_time=100 * i,
              step=10 * i,
              summary=summary_pb2.Summary(value=[
                  summary_pb2.Summary.Value(
                      tag='simple_values', simple_value=i)
              ])))
    writer.flush()
    writer.close()
    return temp_dir
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
  # Tests new ability to use only the MetaGraphDef; re-runs every test in the
  # parent class with the flag below flipped.
  _only_use_meta_graph = True  # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(test.TestCase):
  """Tests for application.parse_event_files_spec's logdir-string parsing."""
  def testRunName(self):
    # "name:path" assigns a run name to the path.
    logdir = 'lol:/cat'
    expected = {'/cat': 'lol'}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
  def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
    logdir = '/lol:/cat'
    expected = {'/lol:/cat': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
  def testMultipleDirectories(self):
    # Comma separates multiple logdirs.
    logdir = '/a,/b'
    expected = {'/a': None, '/b': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
  def testNormalizesPaths(self):
    logdir = '/lol/.//cat/../cat'
    expected = {'/lol/cat': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
  def testAbsolutifies(self):
    # Relative local paths are resolved to absolute ones.
    logdir = 'lol/cat'
    expected = {os.path.realpath('lol/cat'): None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
  def testRespectsGCSPath(self):
    logdir = 'gs://foo/path'
    expected = {'gs://foo/path': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
  def testRespectsHDFSPath(self):
    logdir = 'hdfs://foo/path'
    expected = {'hdfs://foo/path': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
  def testDoesNotExpandUserInGCSPath(self):
    logdir = 'gs://~/foo/path'
    expected = {'gs://~/foo/path': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
  def testDoesNotNormalizeGCSPath(self):
    # Remote (GCS) paths must be passed through untouched.
    logdir = 'gs://foo/./path//..'
    expected = {'gs://foo/./path//..': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
  def testRunNameWithGCSPath(self):
    logdir = 'lol:gs://foo/path'
    expected = {'gs://foo/path': 'lol'}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
class TensorBoardAssetsTest(test.TestCase):
  def testTagFound(self):
    # A non-empty build tag must exist and be exposed on the WSGI app.
    tag = application.get_tensorboard_tag()
    self.assertTrue(tag)
    app = application.standard_tensorboard_wsgi('', True, 60, [])
    self.assertEqual(app.tag, tag)
class TensorBoardPluginsTest(test.TestCase):
  def testPluginsAdded(self):
    # The handlers only need identity for the assertion below; they are never
    # actually invoked.
    def foo_handler():
      pass
    def bar_handler():
      pass
    plugins = [
        FakePlugin(
            plugin_name='foo',
            is_active_value=True,
            routes_mapping={'/foo_route': foo_handler}),
        FakePlugin(
            plugin_name='bar',
            is_active_value=True,
            routes_mapping={'/bar_route': bar_handler}),
    ]
    # The application should have added routes for both plugins.
    app = application.standard_tensorboard_wsgi('', True, 60, plugins)
    # The routes are prefixed with /data/plugin/[plugin name].
    self.assertDictContainsSubset({
        '/data/plugin/foo/foo_route': foo_handler,
        '/data/plugin/bar/bar_route': bar_handler,
    }, app.data_applications)
class TensorboardSimpleServerConstructionTest(test.TestCase):
  """Tests that the default HTTP server is constructed without error.
  Mostly useful for IPv4/IPv6 testing. This test should run with only IPv4, only
  IPv6, and both IPv4 and IPv6 enabled.
  """
  class _StubApplication(object):
    # Minimal stand-in for the WSGI app: only the tag attribute is read.
    tag = ''
  def testMakeServerBlankHost(self):
    # Test that we can bind to all interfaces without throwing an error
    server, url = tensorboard.make_simple_server(
        self._StubApplication(),
        host='',
        port=0)  # Grab any available port
    self.assertTrue(server)
    self.assertTrue(url)
  def testSpecifiedHost(self):
    # At least one of IPv4/IPv6 must be bindable on this machine.
    one_passed = False
    try:
      _, url = tensorboard.make_simple_server(
          self._StubApplication(),
          host='127.0.0.1',
          port=0)
      self.assertStartsWith(actual=url, expected_start='http://127.0.0.1:')
      one_passed = True
    except socket.error:
      # IPv4 is not supported
      pass
    try:
      _, url = tensorboard.make_simple_server(
          self._StubApplication(),
          host='::1',
          port=0)
      self.assertStartsWith(actual=url, expected_start='http://[::1]:')
      one_passed = True
    except socket.error:
      # IPv6 is not supported
      pass
    self.assertTrue(one_passed)  # We expect either IPv4 or IPv6 to be supported
class TensorBoardApplcationConstructionTest(test.TestCase):
  # NOTE(review): class name is missing the "i" in "Application"; renaming
  # would change the externally visible test name, so it is left as-is.
  def testExceptions(self):
    logdir = '/fake/foo'
    multiplexer = event_multiplexer.EventMultiplexer()
    # Fails if there is an unnamed plugin
    with self.assertRaises(ValueError):
      # This plugin lacks a name.
      plugins = [
          FakePlugin(plugin_name=None, is_active_value=True, routes_mapping={})
      ]
      application.TensorBoardWSGIApp(logdir, plugins, multiplexer, 0)
    # Fails if there are two plugins with same name
    with self.assertRaises(ValueError):
      plugins = [
          FakePlugin(
              plugin_name='foo', is_active_value=True, routes_mapping={}),
          FakePlugin(
              plugin_name='foo', is_active_value=True, routes_mapping={}),
      ]
      application.TensorBoardWSGIApp(logdir, plugins, multiplexer, 0)
if __name__ == '__main__':
  test.main()  # Run all TestCase classes defined in this module.
|
client.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import getpass
import importlib
import json
import time
from urllib.parse import urlencode
import requests
from .common import *
class ZhihuClient:
    """Client that logs in to zhihu.com and hands out API object wrappers.

    Handles the encrypted web login flow (captcha, HMAC signature, JS form
    encryption), cookie persistence, and optional proxy / proxy-pool
    configuration for the underlying requests session.

    BUG FIX applied throughout: several methods referenced ``self._session``
    while ``__init__`` only ever creates ``self.session``, so set_proxy,
    set_proxy_pool, remove_proxy_pool, me and the __getattr__ factories all
    raised AttributeError.  They now use ``self.session`` consistently.
    """
    def __init__(self, username: str = None, password: str = None):
        self.username = username
        self.password = password
        # Fixed OAuth form fields for the web login endpoint; username,
        # password, lang, captcha, timestamp and signature are filled in at
        # login time.
        self.login_data = {
            'client_id': 'c3cef7c66a1843f8b3a9e6a1e3160e20',
            'grant_type': 'password',
            'source': 'com.zhihu.web',
            'username': '',
            'password': '',
            'lang': 'en',
            'ref_source': 'homepage',
            'utm_source': ''
        }
        self.session = requests.session()
        self.session.headers = {
            'accept-encoding': 'gzip, deflate, br',
            'Host': 'www.zhihu.com',
            'Referer': 'https://www.zhihu.com/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                          '(KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
        }
        # Cookies persisted to disk so later runs can skip the login flow.
        self.session.cookies = cookiejar.LWPCookieJar(filename='./cookies.txt')
        self.proxies = None
    # ===== login staff =====
    def login(self, captcha_lang: str = 'en', load_cookies: bool = True):
        """Log in to Zhihu, optionally reusing previously saved cookies.

        :param captcha_lang: captcha language, 'en' or 'cn'
        :param load_cookies: whether to load cookies saved by a previous run
        :return: bool -- True when login succeeded

        NOTE: when the Chinese click-captcha is used under PyCharm, disable
        Settings / Tools / Python Scientific / Show Plots in Toolwindow so
        the matplotlib window is clickable.
        """
        if load_cookies and self.load_cookies():
            print('读取 Cookies 文件')
            if self.check_login():
                print('登录成功')
                return True
            print('Cookies 已过期')
        self._check_user_pass()
        self.login_data.update({
            'username': self.username,
            'password': self.password,
            'lang': captcha_lang
        })
        timestamp = int(time.time() * 1000)
        self.login_data.update({
            'captcha': self._get_captcha(self.login_data['lang']),
            'timestamp': timestamp,
            'signature': self._get_signature(timestamp)
        })
        headers = self.session.headers.copy()
        headers.update({
            'content-type': 'application/x-www-form-urlencoded',
            'x-zse-83': '3_1.1',
            'x-xsrftoken': self._get_xsrf()
        })
        data = self._encrypt(self.login_data)
        login_api = 'https://www.zhihu.com/api/v3/oauth/sign_in'
        resp = self.session.post(login_api, data=data, headers=headers)
        if 'error' in resp.text:
            print(json.loads(resp.text)['error'])
        if self.check_login():
            print('登录成功')
            return True
        print('登录失败')
        return False
    def load_cookies(self):
        """Load the saved cookies file into the session.

        :return: bool -- False when the cookies file does not exist
        """
        try:
            self.session.cookies.load(ignore_discard=True)
            return True
        except FileNotFoundError:
            return False
    def check_login(self):
        """Check login state and save cookies when logged in.

        Requests the signup page without following redirects: a 302 means we
        are already logged in.
        :return: bool
        """
        login_url = 'https://www.zhihu.com/signup'
        resp = self.session.get(login_url, allow_redirects=False)
        if resp.status_code == 302:
            self.session.cookies.save()
            return True
        return False
    def _get_xsrf(self):
        """Fetch the _xsrf token cookie from the home page.

        :return: str
        :raises AssertionError: when no _xsrf cookie is set
        """
        self.session.get('https://www.zhihu.com/', allow_redirects=False)
        for c in self.session.cookies:
            if c.name == '_xsrf':
                return c.value
        raise AssertionError('获取 xsrf 失败')
    def _get_captcha(self, lang: str):
        """Request the captcha API (required even when no captcha is shown).

        When a captcha is needed the API returns a base64 image; the user
        solves it manually (clicking inverted characters for 'cn', typing
        the text for 'en') and the answer is POSTed back before login.
        :param lang: captcha language ('en' or 'cn')
        :return: the captcha POST parameter ('' when no captcha was required)
        """
        if lang == 'cn':
            api = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=cn'
        else:
            api = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=en'
        resp = self.session.get(api)
        show_captcha = re.search(r'true', resp.text)
        if show_captcha:
            put_resp = self.session.put(api)
            json_data = json.loads(put_resp.text)
            img_base64 = json_data['img_base64'].replace(r'\n', '')
            with open('./captcha.jpg', 'wb') as f:
                f.write(base64.b64decode(img_base64))
            img = Image.open('./captcha.jpg')
            if lang == 'cn':
                import matplotlib.pyplot as plt
                plt.imshow(img)
                print('点击所有倒立的汉字,在命令行中按回车提交')
                points = plt.ginput(7)
                capt = json.dumps({'img_size': [200, 44],
                                   'input_points': [[i[0] / 2, i[1] / 2] for i in points]})
            else:
                img_thread = threading.Thread(target=img.show, daemon=True)
                img_thread.start()
                capt = input('请输入图片里的验证码:')
            # The answer must be POSTed to the captcha API before logging in.
            self.session.post(api, data={'input_text': capt})
            return capt
        return ''
    def _get_signature(self, timestamp: int or str):
        """Compute the login signature via HMAC-SHA1.

        The signed message is a few fixed form fields plus the timestamp.
        :param timestamp: millisecond timestamp used in the login form
        :return: hex digest string
        """
        ha = hmac.new(b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)
        grant_type = self.login_data['grant_type']
        client_id = self.login_data['client_id']
        source = self.login_data['source']
        ha.update(bytes((grant_type + client_id + source + str(timestamp)), 'utf-8'))
        return ha.hexdigest()
    def _check_user_pass(self):
        """Prompt for username/password when they were not provided.

        Bare digit usernames get the '+86' phone prefix prepended.
        """
        if not self.username:
            self.username = input('请输入手机号:')
        if self.username.isdigit() and '+86' not in self.username:
            self.username = '+86' + self.username
        if not self.password:
            self.password = input('请输入密码:')
    @staticmethod
    def _encrypt(form_data: dict):
        # Zhihu encrypts the login form in JavaScript; run that code verbatim.
        with open('./encrypt.js') as f:
            js = execjs.compile(f.read())
        return js.call('Q', urlencode(form_data))
    # ===== network staff =====
    def set_proxy(self, proxy):
        """Set an HTTP proxy on the shared session.

        :param str proxy: in the form "http://example.com:port"
        :rtype: None

        All objects created from this client share one session, so they all
        pick up the proxy as well.
        """
        self.session.proxies.update({'http': proxy})
    def set_proxy_pool(self, proxies, auth=None, https=True):
        """Install a proxy pool: each GET/POST picks a random proxy.

        :param proxies: proxy list like ``["ip1:port1", "ip2:port2"]``
        :param auth: optional proxy credentials, e.g.
            ``HTTPProxyAuth('laike9m', '123')``
        :param https: when True (default) also proxy https traffic
        """
        from random import choice
        if https:
            self.proxies = [{'http': p, 'https': p} for p in proxies]
        else:
            self.proxies = [{'http': p} for p in proxies]
        def get_with_random_proxy(url, **kwargs):
            proxy = choice(self.proxies)
            kwargs['proxies'] = proxy
            if auth:
                kwargs['auth'] = auth
            return self.session.original_get(url, **kwargs)
        def post_with_random_proxy(url, *args, **kwargs):
            proxy = choice(self.proxies)
            kwargs['proxies'] = proxy
            if auth:
                kwargs['auth'] = auth
            return self.session.original_post(url, *args, **kwargs)
        # Keep the originals so remove_proxy_pool() can restore them.
        self.session.original_get = self.session.get
        self.session.get = get_with_random_proxy
        self.session.original_post = self.session.post
        self.session.post = post_with_random_proxy
    def remove_proxy_pool(self):
        """Remove the proxy pool and restore the plain get/post methods."""
        self.proxies = None
        self.session.get = self.session.original_get
        self.session.post = self.session.original_post
        del self.session.original_get
        del self.session.original_post
    # ===== getter staff ======
    def me(self):
        """Return a Me instance bound to this client's cookies.

        :return: the Me object for the logged-in account
        :rtype: Me
        """
        from .me import Me
        headers = dict(Default_Header)
        headers['Host'] = 'zhuanlan.zhihu.com'
        res = self.session.get(Get_Me_Info_Url, headers=headers)
        json_data = res.json()
        url = json_data['profileUrl']
        name = json_data['name']
        motto = json_data['bio']
        photo = json_data['avatar']['template'].format(
            id=json_data['avatar']['id'], size='r')
        return Me(url, name, motto, photo, session=self.session)
    def __getattr__(self, item: str):
        """Return a factory for the page-wrapper classes.

        Supported attributes:
            answer, author, collection, column, post, question, topic
        Each returned callable takes the page url and builds the matching
        class instance, sharing this client's session.
        """
        def getter(url):
            return getattr(module, item.capitalize())(url,
                                                      session=self.session)
        attr_list = ['answer', 'author', 'collection',
                     'column', 'post', 'question', 'topic']
        if item.lower() in attr_list:
            module = importlib.import_module('.'+item.lower(), 'zhihu')
            return getter
|
Thread01.py | # Python Thread example 1
import threading
import time
def execute(number):
    """Sleep for `number` seconds, then print the current thread's name and
    the number.  Intended as the target of a worker Thread.
    """
    time.sleep(number)
    # current_thread()/.name replace the camelCase aliases
    # currentThread()/.getName(), which are deprecated since Python 3.10.
    print(threading.current_thread().name, number)
def execute_noThread(number):
    """Sleep for `number` seconds, then print the current thread's name and
    the number.  Same body as execute(), but called directly (no Thread) so
    the main thread's name is printed.
    """
    time.sleep(number)
    # Deprecated currentThread().getName() replaced by current_thread().name.
    print(threading.current_thread().name, number)
if __name__ == '__main__':
    # Launch 35 worker threads; thread i sleeps i seconds before printing.
    # NOTE(review): the original comment claimed "1 ~ 7" but the range below
    # actually runs 1 ~ 35.
    for i in range(1,36): # run 1 ~ 35 on worker threads
        my_thread = threading.Thread(target=execute, args=(i,))
        my_thread.start()
    # Then call the plain function sequentially for 1 ~ 7 (no threading),
    # for comparison against the threaded version.
    for i in range(1,8): # run 1 ~ 7 without a thread
        no_thread = execute_noThread(i)
|
lisp.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library . install_aliases ( )
from builtins import hex
from builtins import str
from builtins import int
from builtins import range
from builtins import object
from past . utils import old_div
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import traceback
from Crypto . Cipher import AES
import ecdsa
import json
import copy
import chacha
import poly1305
import geopy
import curve25519
from subprocess import getoutput
import queue
import distro
import pprint
#
# Module-level state for the lispers.net LISP implementation (RFC 6830).
# The obfuscator's dead "if N - N: ..." filler lines (conditions are always
# zero, so the bodies never execute) have been removed; every assignment
# below is preserved in its original order and value.
#

# Debug knob: dump the RLOC-probe list when set.
lisp_print_rloc_probe_list = False

# Process identity: hostname/version/uptime strings plus the role flags
# set by lisp_i_am() ("core", "itr", "etr", "rtr", "mr", "ms", "ddt").
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True

# Control-plane tables (populated elsewhere in this file).
lisp_map_notify_queue = {}
lisp_map_servers_list = {}
lisp_ddt_map_requestQ = {}
lisp_db_list = []
lisp_group_mapping_list = {}
lisp_map_resolvers_list = {}
lisp_rtr_list = {}
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}

# Local interface state.
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []

# Periodic timers.
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None

# Registration / Info-source state.
lisp_registered_count = 0
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}

# LISP-crypto key tables and data-plane security knobs.
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
lisp_data_plane_security = False
lisp_search_decap_keys = True

# Logging knobs (see dprint()/lprint()).
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False

lisp_crypto_ephem_port = None

# Feature flags.
lisp_pitr = False
lisp_l2_overlay = False
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
lisp_register_all_rtrs = True
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
lisp_nat_traversal = False
lisp_program_hardware = False

# Map-cache checkpointing.
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"

# IPC to an external data-plane process.
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
lisp_ipc_lock = None

# Instance-ID defaults.
lisp_default_iid = 0
lisp_default_secondary_iid = 0

# RTR list used by the map-server side.
lisp_ms_rtr_list = []

# NAT-traversal state.
lisp_nat_state_info = {}

# Map-Request rate-limiting state.
lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time.time()

# ICMP "too big" rate-limiting state.
lisp_last_icmp_too_big_sent = 0

# Flow-log ring buffer.
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []

# Policy table.
lisp_policies = {}

# Load-splitting knob and EID hash buckets.
lisp_load_split_pings = False
lisp_eid_hashes = []

# Reassembly queue for fragmented encapsulated packets.
lisp_reassembly_queue = {}

# Pubsub subscription cache.
lisp_pubsub_cache = {}

# LISP-Decent configuration.
lisp_decent_push_configured = False
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None

# IPC socket handle.
lisp_ipc_socket = None

# Map-server key tables.
lisp_ms_encryption_keys = {}
lisp_ms_json_keys = {}

# RTR NAT-trace cache.
lisp_rtr_nat_trace_cache = {}

# Gleaned mapping/group state.
lisp_glean_mappings = []
lisp_gleaned_groups = {}

# Optional raw socket used to originate ICMP "too big" messages. Only
# created when the LISP_SEND_ICMP_TOO_BIG environment variable is set;
# opening a raw socket typically requires elevated privileges.
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
    lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_ICMP)
    lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)

# Honor or ignore the IPv4 DF-bit, driven by an environment variable.
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#
# LISP protocol constants (dead obfuscation filler removed; every value is
# unchanged from the original source).
#

# Well-known UDP ports (LISP data/control per RFC 6830, VXLAN per RFC 7348,
# plus the lispers.net trace port).
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434

# Control-message type codes.
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9

# Map-Reply action codes.
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
LISP_SEND_PUBSUB_ACTION = 6

# Printable names indexed by the action codes above.
lisp_map_reply_action_string = ["no-action", "native-forward",
    "send-map-request", "drop-action", "policy-denied",
    "auth-failure", "send-subscribe"]

# Authentication algorithm IDs and HMAC digest lengths in bytes.
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32

# LCAF (LISP Canonical Address Format) type codes.
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16

# Record TTLs. Map-record TTLs are expressed in minutes per RFC 6830
# (LISP_MR_TTL is 24 hours).
LISP_MR_TTL = (24 * 60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240

# Timer intervals and rate limits.
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60
LISP_TEST_MR_INTERVAL = 60
LISP_MAP_NOTIFY_INTERVAL = 2
LISP_DDT_MAP_REQUEST_INTERVAL = 2
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15
LISP_MAP_REQUEST_RATE_LIMIT = .5
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1
LISP_RLOC_PROBE_TTL = 128
LISP_RLOC_PROBE_INTERVAL = 10
LISP_RLOC_PROBE_REPLY_WAIT = 15
LISP_DEFAULT_DYN_EID_TIMEOUT = 15
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180

# LISP-crypto cipher suites: suite IDs plus Diffie-Hellman generator (G)
# and prime modulus (P) values for the DH-based suites.
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF

LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF

LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3

LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF

LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6

# Truncation masks for 32/64/128-bit arithmetic.
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF

# Cipher-selection overrides from the environment.
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
def lisp_record_traceback(*args):
    """
    Uncaught-exception hook (installed by lisp_set_exception): append the
    most recent traceback to ./logs/lisp-traceback.log and also echo it to
    stdout. The signature matches sys.excepthook(type, value, tb) but the
    arguments themselves are not used; traceback.print_last() reads the
    last exception from the interpreter instead.
    """
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]

    # "with" guarantees the logfile is closed even if a write raises
    # (the original leaked the handle on that path).
    with open("./logs/lisp-traceback.log", "a") as fd:
        fd.write("---------- Exception occurred: {} ----------\n".format(ts))
        try:
            traceback.print_last(file=fd)
        except Exception:
            fd.write("traceback.print_last(file=fd) failed")

    # Best-effort echo to stdout; print_last() can itself raise when no
    # exception has been recorded yet.
    try:
        traceback.print_last()
    except Exception:
        print("traceback.print_last() failed")

    return
def lisp_set_exception():
    """Install lisp_record_traceback as the interpreter-wide hook for
    uncaught exceptions, so crashes are appended to the traceback log."""
    sys.excepthook = lisp_record_traceback
    return
def lisp_is_raspbian():
    """Return True when running Raspbian: a Debian distribution on 32-bit
    ARM hardware."""
    on_debian = (distro.linux_distribution()[0] == "debian")
    on_arm = (platform.machine() in ["armv6l", "armv7l"])
    return (on_debian and on_arm)
def lisp_is_ubuntu():
    """Return True when the host Linux distribution is Ubuntu."""
    distro_name = distro.linux_distribution()[0]
    return (distro_name == "Ubuntu")
def lisp_is_fedora():
    """Return True when the host Linux distribution is Fedora."""
    distro_name = distro.linux_distribution()[0]
    return (distro_name == "fedora")
def lisp_is_centos():
    """Return True when the host Linux distribution is CentOS."""
    distro_name = distro.linux_distribution()[0]
    return (distro_name == "centos")
def lisp_is_debian():
    """Return True when the host Linux distribution is Debian."""
    distro_name = distro.linux_distribution()[0]
    return (distro_name == "debian")
def lisp_is_debian_kali():
    """Return True when the host Linux distribution is Kali (Debian-based)."""
    distro_name = distro.linux_distribution()[0]
    return (distro_name == "Kali")
def lisp_is_macos():
    """Return True when the host operating system is macOS (Darwin kernel)."""
    kernel_name = platform.uname()[0]
    return (kernel_name == "Darwin")
def lisp_is_alpine():
    """Return True when the host is Alpine Linux, detected via its
    release marker file."""
    marker = "/etc/alpine-release"
    return os.path.exists(marker)
def lisp_is_x86():
    """Return True when the CPU architecture is Intel/AMD x86 (32- or 64-bit)."""
    return platform.machine() in ("x86", "i686", "x86_64")
def lisp_is_linux():
    """Return True when the host operating system kernel is Linux."""
    kernel_name = platform.uname()[0]
    return (kernel_name == "Linux")
def lisp_is_python2():
    """Return True when the running interpreter is Python 2.7."""
    version = sys.version.split()[0]
    return version.startswith("2.7")
def lisp_is_python3():
    """Return True when the running interpreter is any Python 3.x."""
    version = sys.version.split()[0]
    return version.startswith("3.")
def lisp_on_aws():
    """Return True when running on an AWS EC2 instance, detected from the
    DMI BIOS vendor string. Logs a note when dmidecode is unavailable
    inside a docker container (the check then reports False)."""
    bios_vendor = getoutput("sudo dmidecode -s bios-vendor")
    if ("command not found" in bios_vendor and lisp_on_docker()):
        banner = bold("AWS check", False)
        lprint("{} - dmidecode not installed in docker container".format(banner))

    return ("amazon" in bios_vendor.lower())
def lisp_on_gcp():
    """Return True when running on Google Cloud Platform, detected from
    the DMI BIOS version string."""
    bios_version = getoutput("sudo dmidecode -s bios-version")
    return ("google" in bios_version.lower())
def lisp_on_docker():
    """Return True when running inside a Docker container (Docker creates
    /.dockerenv at the container's filesystem root)."""
    marker = "/.dockerenv"
    return os.path.exists(marker)
def lisp_process_logfile():
    """
    Re-point sys.stdout at ./logs/lisp-<component>.log when the current
    logfile no longer exists (i.e. it was rotated away). No-op while the
    file is still present. Emits a banner line after reopening.
    """
    logfile = "./logs/lisp-{}.log".format(lisp_log_id)
    if os.path.exists(logfile): return

    # Logfile was rotated out from under us: reopen stdout onto a new one.
    sys.stdout.close()
    sys.stdout = open(logfile, "a")

    lisp_print_banner(bold("logfile rotation", False))
    return
def lisp_i_am(name):
    """
    Record which LISP component this process runs as ("itr", "etr", "rtr",
    "mr", "ms", "ddt", or "core"): use the name as the logging ID, raise
    the matching lisp_i_am_* flag, and cache the short hostname.
    """
    global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
    global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
    global lisp_hostname

    lisp_log_id = name

    # Dispatch table replaces the original if-chain; unknown names set
    # no flag, exactly as before.
    role_flags = {
        "itr": "lisp_i_am_itr", "etr": "lisp_i_am_etr",
        "rtr": "lisp_i_am_rtr", "mr": "lisp_i_am_mr",
        "ms": "lisp_i_am_ms", "ddt": "lisp_i_am_ddt",
        "core": "lisp_i_am_core",
    }
    if name in role_flags: globals()[role_flags[name]] = True

    # Keep only the short hostname (everything before the first dot) for
    # use in log output.
    lisp_hostname = socket.gethostname().split(".")[0]
    return
def lprint(*args):
    """
    Write one timestamped, component-tagged log line to stdout. Suppressed
    when lisp_debug_logging is False unless the sentinel string "force"
    appears among the arguments; the sentinel itself is never printed.
    """
    forced = ("force" in args)
    if not (lisp_debug_logging or forced): return

    lisp_process_logfile()
    timestamp = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
    print("{}: {}:".format(timestamp, lisp_log_id), end=" ")

    for arg in args:
        if (arg != "force"): print(arg, end=" ")
    print()

    # Flushing can fail once stdout has been swapped/closed; ignore.
    try:
        sys.stdout.flush()
    except:
        pass
    return
def fprint(*args):
    """Force-log: append the "force" sentinel so lprint() emits the line
    even when lisp_debug_logging is False."""
    lprint(*(args + ("force",)))
    return
def dprint(*args):
    """Data-plane log: forward to lprint() only when data-plane logging
    is enabled."""
    if not lisp_data_plane_logging: return
    lprint(*args)
    return
def cprint(instance):
    """Debug aid: print an instance header line, then pretty-print its
    attribute dictionary."""
    print("{}:".format(instance))
    pprint.pprint(instance.__dict__)
if 100 - 100: I1IiiI / o0oOOo0O0Ooo * iII111i . O0 / OOooOOo
if 83 - 83: I1Ii111
if 48 - 48: II111iiii * OOooOOo * I1Ii111
if 50 - 50: IiII % i1IIi
if 21 - 21: OoooooooOO - iIii1I11I1II1
if 93 - 93: oO0o - o0oOOo0O0Ooo % OoOoOO00 . OoOoOO00 - ooOoO0o
if 90 - 90: ooOoO0o + II111iiii * I1ii11iIi11i / Ii1I . o0oOOo0O0Ooo + o0oOOo0O0Ooo
if 40 - 40: ooOoO0o / OoOoOO00 % i11iIiiIii % I1ii11iIi11i / I1IiiI
def debug(*args):
    """Print a timestamped debug line bracketed by red ">>>"/"<<<" markers."""
    lisp_process_logfile()

    stamp = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    stamp = stamp[:-3]

    print(red(">>>", False), end=" ")
    print("{}:".format(stamp), end=" ")
    for arg in args:
        print(arg, end=" ")
    print(red("<<<\n", False))

    # Best-effort flush; stdout may already be closed.
    try:
        sys.stdout.flush()
    except:
        pass
    return
if 28 - 28: I1ii11iIi11i . i1IIi
if 10 - 10: OoO0O00 / Oo0Ooo
if 15 - 15: iII111i . OoOoOO00 / iII111i * I11i - I1IiiI % I1ii11iIi11i
if 57 - 57: O0 % OoOoOO00 % oO0o
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
if 39 - 39: iIii1I11I1II1 - OoooooooOO
def lisp_print_caller():
    """Debug aid: force-log the most recent traceback.

    NOTE(review): traceback.print_last() writes to stderr and returns
    None, so the logged value is "None" -- confirm this is intentional.
    """
    fprint(traceback.print_last())
if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
if 23 - 23: II111iiii / oO0o
if 28 - 28: Oo0Ooo * ooOoO0o - OoO0O00
if 19 - 19: I11i
if 67 - 67: O0 % iIii1I11I1II1 / IiII . i11iIiiIii - Ii1I + O0
if 27 - 27: OOooOOo
if 89 - 89: II111iiii / oO0o
def lisp_print_banner(string):
    """Log the lispers.net startup banner tagged with 'string'.

    Lazily loads the version from lisp-version.txt on first use and
    caches it in the lisp_version global.
    """
    global lisp_version, lisp_hostname

    if (lisp_version == ""):
        lisp_version = getoutput("cat lisp-version.txt")

    host = bold(lisp_hostname, False)
    banner = "lispers.net LISP {} {}, version {}, hostname {}".format(
        string, datetime.datetime.now(), lisp_version, host)
    lprint(banner)
    return
if 35 - 35: I1Ii111 . OoOoOO00 * i11iIiiIii
if 44 - 44: i11iIiiIii / Oo0Ooo
if 42 - 42: OoooooooOO + Oo0Ooo % II111iiii + OoO0O00
if 24 - 24: iII111i * II111iiii % iII111i % IiII + OoooooooOO
if 29 - 29: II111iiii - OoooooooOO - i11iIiiIii . o0oOOo0O0Ooo
if 19 - 19: II111iiii
if 72 - 72: OoooooooOO / I1IiiI + Ii1I / OoOoOO00 * Ii1I
def green(string, html):
    """Return 'string' rendered green: HTML markup when html is True,
    otherwise a bolded ANSI escape sequence."""
    if (html):
        return ('<font color="green"><b>{}</b></font>'.format(string))
    return (bold("\033[92m" + string + "\033[0m", html))
if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if 32 - 32: i11iIiiIii - I1Ii111
if 53 - 53: OoooooooOO - IiII
if 87 - 87: oO0o . I1IiiI
if 17 - 17: Ii1I . i11iIiiIii
if 5 - 5: I1ii11iIi11i + O0 + O0 . I1Ii111 - ooOoO0o
def green_last_sec(string):
    """HTML-green rendering used for 'seen within the last second' entries."""
    return green(string, True)
if 63 - 63: oO0o
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if 36 - 36: IiII
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
if 74 - 74: I1Ii111 % I1ii11iIi11i
if 7 - 7: II111iiii
if 27 - 27: oO0o . OoooooooOO + i11iIiiIii
def green_last_min(string):
    """Lighter-green HTML rendering used for 'seen within the last minute'
    entries."""
    markup = '<font color="#58D68D"><b>{}</b></font>'
    return markup.format(string)
if 86 - 86: I11i / o0oOOo0O0Ooo - o0oOOo0O0Ooo + I1ii11iIi11i + oO0o
if 33 - 33: o0oOOo0O0Ooo . iII111i . IiII . i1IIi
if 49 - 49: I1ii11iIi11i
if 84 - 84: I11i - Oo0Ooo / O0 - I1Ii111
if 21 - 21: O0 * O0 % I1ii11iIi11i
if 94 - 94: I11i + II111iiii % i11iIiiIii
if 8 - 8: ooOoO0o * O0
def red(string, html):
    """Return 'string' rendered red: HTML markup when html is True,
    otherwise a bolded ANSI escape sequence."""
    if (html):
        return ('<font color="red"><b>{}</b></font>'.format(string))
    return (bold("\033[91m" + string + "\033[0m", html))
if 73 - 73: o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
if 11 - 11: OoOoOO00 + IiII - OoooooooOO / OoO0O00
if 34 - 34: ooOoO0o
if 45 - 45: ooOoO0o / Oo0Ooo / Ii1I
if 44 - 44: I1ii11iIi11i - Ii1I / II111iiii * OoO0O00 * Oo0Ooo
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
def blue(string, html):
    """Return 'string' rendered blue: HTML markup when html is True,
    otherwise a bolded ANSI escape sequence."""
    if (html):
        return ('<font color="blue"><b>{}</b></font>'.format(string))
    return (bold("\033[94m" + string + "\033[0m", html))
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
if 87 - 87: oO0o - i11iIiiIii
if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
if 23 - 23: I11i
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
if 14 - 14: I1ii11iIi11i
def bold(string, html):
    """Return 'string' in bold: <b> markup for HTML, ANSI bold otherwise."""
    if (html):
        return ("<b>{}</b>".format(string))
    return ("\033[1m" + string + "\033[0m")
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
if 6 - 6: IiII * i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + o0oOOo0O0Ooo / i1IIi
if 53 - 53: I11i + iIii1I11I1II1
if 70 - 70: I1ii11iIi11i
def convert_font ( string ) :
    # Convert ANSI color/bold escape runs embedded in 'string' to the
    # equivalent HTML markup. Each table entry maps an ANSI introducer to
    # the helper that renders the same style in HTML; "II" is the ANSI
    # reset suffix.
    oo0O = [ [ "[91m" , red ] , [ "[92m" , green ] , [ "[94m" , blue ] , [ "[1m" , bold ] ]
    II = "[0m"
    if 28 - 28: IiII - IiII . i1IIi - ooOoO0o + I1IiiI . IiII
    # Find the first introducer present in the string and remember its
    # HTML helper (O0oOoo0OoO0O) and its length (oo00).
    for oO0ooOOO in oo0O :
        iIi1I1 = oO0ooOOO [ 0 ]
        O0oOoo0OoO0O = oO0ooOOO [ 1 ]
        oo00 = len ( iIi1I1 )
        OOOooo0OooOoO = string . find ( iIi1I1 )
        if ( OOOooo0OooOoO != - 1 ) : break
    if 33 - 33: iIii1I11I1II1 / iII111i - I1IiiI * I11i
    if 53 - 53: ooOoO0o
    # Replace each "<intro>text<reset>" run with the HTML rendering of
    # "text" until no introducer remains.
    # NOTE(review): the slice offsets reuse the introducer length (oo00)
    # when skipping past the reset marker, and the extracted text runs up
    # to the reset's position relative to the introducer -- confirm these
    # offsets against the exact strings emitted by red()/green()/blue()/
    # bold() before changing them.
    while ( OOOooo0OooOoO != - 1 ) :
        o0oO0oo0000OO = string [ OOOooo0OooOoO : : ] . find ( II )
        I1i1ii1IiIii = string [ OOOooo0OooOoO + oo00 : OOOooo0OooOoO + o0oO0oo0000OO ]
        string = string [ : OOOooo0OooOoO ] + O0oOoo0OoO0O ( I1i1ii1IiIii , True ) + string [ OOOooo0OooOoO + o0oO0oo0000OO + oo00 : : ]
        if 69 - 69: OoOoOO00 % oO0o - I11i
        OOOooo0OooOoO = string . find ( iIi1I1 )
    if 38 - 38: iIii1I11I1II1 + i11iIiiIii / i11iIiiIii % OoO0O00 / ooOoO0o % Ii1I
    if 7 - 7: IiII * I1IiiI + i1IIi + i11iIiiIii + Oo0Ooo % I1IiiI
    if 62 - 62: o0oOOo0O0Ooo - Ii1I * OoOoOO00 - i11iIiiIii % ooOoO0o
    if 52 - 52: I1ii11iIi11i % oO0o - i11iIiiIii
    if 30 - 30: iII111i / OoO0O00 + oO0o
    # Bold may wrap an already-converted color run -- recurse to convert
    # any remaining bold introducers.
    if ( string . find ( "[1m" ) != - 1 ) : string = convert_font ( string )
    return ( string )
if 6 - 6: iII111i . I11i + Ii1I . I1Ii111
if 70 - 70: OoO0O00
if 46 - 46: I11i - i1IIi
if 46 - 46: I1Ii111 % Ii1I
if 72 - 72: iIii1I11I1II1
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
def lisp_space(num):
    """Return a string of 'num' HTML non-breaking spaces.

    Idiom fix: string repetition replaces the manual accumulation loop;
    num <= 0 yields "" exactly as the loop did.
    """
    return ("&#160;" * num)
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
if 84 - 84: i11iIiiIii * OoO0O00
if 18 - 18: OOooOOo - Ii1I - OoOoOO00 / I1Ii111 - O0
if 30 - 30: O0 + I1ii11iIi11i + II111iiii
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
def lisp_button(string, url):
    """Return HTML for a rounded transparent dashboard button labeled
    'string'.

    With a URL the button is wrapped in an anchor and padded with two
    HTML spaces on each side; with url == None the bare button markup is
    returned.
    """
    opener = ('<button style="background-color:transparent;border-radius:10px; ' +
              'type="button">')

    if (url == None):
        return (opener + string + "</button>")

    anchor = '<a href="{}">'.format(url)
    pad = lisp_space(2)
    return (pad + anchor + opener + string + "</button></a>" + pad)
if 32 - 32: OoO0O00
if 50 - 50: ooOoO0o + i1IIi
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if 47 - 47: o0oOOo0O0Ooo
if 66 - 66: I1IiiI - IiII
def lisp_print_cour(string):
    """Wrap 'string' in Courier New font markup for HTML output."""
    return ('<font face="Courier New">{}</font>'.format(string))
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
def lisp_print_sans(string):
    """Wrap 'string' in Sans-Serif font markup for HTML output."""
    return ('<font face="Sans-Serif">{}</font>'.format(string))
if 47 - 47: iII111i
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
if 47 - 47: oO0o % iIii1I11I1II1
if 11 - 11: I1IiiI % Ii1I - OoO0O00 - oO0o + o0oOOo0O0Ooo
if 98 - 98: iII111i + Ii1I - OoO0O00
if 79 - 79: OOooOOo / I1Ii111 . OoOoOO00 - I1ii11iIi11i
def lisp_span(string, hover_string):
    """Wrap 'string' in a <span> whose hover tooltip is 'hover_string'."""
    return ('<span title="{}">{}</span>'.format(hover_string, string))
if 47 - 47: OoooooooOO % O0 * iII111i . Ii1I
if 38 - 38: O0 - IiII % I1Ii111
if 64 - 64: iIii1I11I1II1
if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
if 31 - 31: ooOoO0o + O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / o0oOOo0O0Ooo
if 6 - 6: Oo0Ooo % IiII * I11i / I1IiiI + Oo0Ooo
if 39 - 39: OoOoOO00 - Oo0Ooo / iII111i * OoooooooOO
def lisp_eid_help_hover ( output ) :
    # Attach a hover tooltip to 'output' documenting the accepted unicast
    # and multicast EID input formats for lookup forms.
    Ooo = '''Unicast EID format:
    For longest match lookups:
      <address> or [<iid>]<address>
    For exact match lookups:
      <prefix> or [<iid>]<prefix>
Multicast EID format:
    For longest match lookups:
      <address>-><group> or
      [<iid>]<address>->[<iid>]<group>'''
    if 73 - 73: ooOoO0o + oO0o . OoO0O00
    if 46 - 46: OoO0O00 - o0oOOo0O0Ooo / OoOoOO00 - OoooooooOO + oO0o
    OOOO = lisp_span ( output , Ooo )
    return ( OOOO )
if 37 - 37: I11i - OoOoOO00 . iIii1I11I1II1 % ooOoO0o % Ii1I * OoOoOO00
if 8 - 8: OoOoOO00 . ooOoO0o % oO0o . I1IiiI % I1IiiI . Ii1I
if 47 - 47: I11i + ooOoO0o + II111iiii % i11iIiiIii
if 93 - 93: I1ii11iIi11i % OoOoOO00 . O0 / iII111i * oO0o
if 29 - 29: o0oOOo0O0Ooo
if 86 - 86: II111iiii . IiII
if 2 - 2: OoooooooOO
def lisp_geo_help_hover ( output ) :
    # Attach a hover tooltip to 'output' documenting the accepted EID,
    # geo-point, and geo-prefix input formats.
    Ooo = '''EID format:
    <address> or [<iid>]<address>
    '<name>' or [<iid>]'<name>'
Geo-Point format:
    d-m-s-<N|S>-d-m-s-<W|E> or
    [<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
    d-m-s-<N|S>-d-m-s-<W|E>/<km> or
    [<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
    if 60 - 60: OoO0O00
    if 81 - 81: OoOoOO00 % Ii1I
    OOOO = lisp_span ( output , Ooo )
    return ( OOOO )
if 87 - 87: iIii1I11I1II1 . OoooooooOO * OoOoOO00
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
def space(num):
    """Return a string of 'num' HTML non-breaking spaces.

    Idiom fix: string repetition replaces the manual accumulation loop;
    num <= 0 yields "" exactly as the loop did. (Same contract as
    lisp_space(); both are kept for source compatibility.)
    """
    return ("&#160;" * num)
if 29 - 29: O0 . I1Ii111
if 66 - 66: oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - ooOoO0o - IiII
if 70 - 70: I1Ii111 + oO0o
if 93 - 93: I1Ii111 + Ii1I
if 33 - 33: O0
if 78 - 78: O0 / II111iiii * OoO0O00
if 50 - 50: OoooooooOO - iIii1I11I1II1 + i1IIi % I1Ii111 - iIii1I11I1II1 % O0
if 58 - 58: IiII + iIii1I11I1II1
def lisp_get_ephemeral_port():
    """Return a random port from the ephemeral range.

    randrange() excludes the upper bound, so values are 32768..65534.
    """
    return (random.randrange(32768, 65535))
if 65 - 65: II111iiii - I1Ii111 % o0oOOo0O0Ooo - OoOoOO00 * iII111i + Ii1I
if 79 - 79: ooOoO0o . OoOoOO00 % I1Ii111 - Oo0Ooo
if 69 - 69: ooOoO0o - o0oOOo0O0Ooo . ooOoO0o
if 9 - 9: oO0o % i11iIiiIii / Oo0Ooo
if 20 - 20: oO0o * O0 + I11i - OoooooooOO . I11i
if 60 - 60: o0oOOo0O0Ooo . o0oOOo0O0Ooo / iII111i
if 45 - 45: O0 . i11iIiiIii % iII111i . OoOoOO00 % IiII % iIii1I11I1II1
def lisp_get_data_nonce():
    """Return a random 24-bit nonce for LISP data-plane headers."""
    return (random.randint(0, 0xffffff))
if 58 - 58: iIii1I11I1II1 . OoOoOO00 - i11iIiiIii * iIii1I11I1II1 % i11iIiiIii / I1IiiI
if 80 - 80: I1ii11iIi11i / iIii1I11I1II1 % OoOoOO00
if 80 - 80: OoO0O00 % iII111i
if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
if 13 - 13: OoO0O00
if 70 - 70: I1Ii111 + O0 . oO0o * Ii1I
if 2 - 2: OoooooooOO . OOooOOo . IiII
def lisp_get_control_nonce():
    """Return a random 64-bit nonce for LISP control messages."""
    return (random.randint(0, (2 ** 64) - 1))
if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
if 19 - 19: oO0o * I1IiiI % i11iIiiIii
if 24 - 24: o0oOOo0O0Ooo
if 10 - 10: o0oOOo0O0Ooo % Ii1I / OOooOOo
if 28 - 28: OOooOOo % ooOoO0o
if 48 - 48: i11iIiiIii % oO0o
if 29 - 29: iII111i + i11iIiiIii % I11i
if 93 - 93: OoOoOO00 % iIii1I11I1II1
if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
def lisp_hex_string(integer_value):
    """Return the hex digits of 'integer_value' without the '0x' prefix.

    A trailing 'L' (python2 long literal, legacy) is stripped if present.
    """
    digits = hex(integer_value)[2::]
    if (digits.endswith("L")): digits = digits[:-1]
    return digits
if 15 - 15: Oo0Ooo + I11i . ooOoO0o - iIii1I11I1II1 / O0 % iIii1I11I1II1
if 86 - 86: I1IiiI / oO0o * Ii1I
if 64 - 64: ooOoO0o / O0 * OoOoOO00 * ooOoO0o
if 60 - 60: I11i / i1IIi % I1ii11iIi11i / I1ii11iIi11i * I1ii11iIi11i . i11iIiiIii
if 99 - 99: OoOoOO00
if 77 - 77: o0oOOo0O0Ooo
if 48 - 48: OoOoOO00 % I1ii11iIi11i / I11i . iIii1I11I1II1 * II111iiii
def lisp_get_timestamp():
    """Return the current time as a Unix timestamp (float seconds)."""
    return (time.time())
if 65 - 65: OoOoOO00
lisp_uptime = lisp_get_timestamp ( )
if 31 - 31: I11i * OoOoOO00 . IiII % Ii1I + Oo0Ooo
if 47 - 47: O0 * I1IiiI * OoO0O00 . II111iiii
if 95 - 95: Ii1I % IiII . O0 % I1Ii111
if 68 - 68: Oo0Ooo . Oo0Ooo - I1ii11iIi11i / I11i . ooOoO0o / i1IIi
if 12 - 12: I1ii11iIi11i * i1IIi * I11i
if 23 - 23: OOooOOo / O0 / I1IiiI
def lisp_set_timestamp(seconds):
    """Return a future Unix timestamp 'seconds' from now."""
    return (time.time() + seconds)
if 49 - 49: I11i . o0oOOo0O0Ooo % oO0o / Ii1I
if 95 - 95: O0 * OoOoOO00 * IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
if 77 - 77: Oo0Ooo
def lisp_print_elapsed(ts):
    """Return time elapsed since timestamp 'ts' as 'h:mm:ss', or "never"
    when ts is 0 or None."""
    if (ts == 0 or ts == None): return ("never")
    elapsed = round(time.time() - ts, 0)
    return (str(datetime.timedelta(seconds=elapsed)))
if 19 - 19: I1IiiI . Oo0Ooo + OoooooooOO - I1IiiI
if 93 - 93: iIii1I11I1II1 + I1IiiI + i11iIiiIii
if 74 - 74: I11i / II111iiii + ooOoO0o * iIii1I11I1II1 - I1Ii111 - OoO0O00
if 69 - 69: iIii1I11I1II1 * I1IiiI - iII111i + O0 + O0
if 65 - 65: I1Ii111 / i11iIiiIii / OoO0O00 - OOooOOo
if 9 - 9: I1IiiI / I1Ii111 - Oo0Ooo * iIii1I11I1II1
if 86 - 86: II111iiii + ooOoO0o + IiII
def lisp_print_future(ts):
    """Return time remaining until timestamp 'ts' as 'h:mm:ss'.

    Returns "never" for ts == 0 and "expired" once ts has passed.
    """
    if (ts == 0): return ("never")
    remaining = ts - time.time()
    if (remaining < 0): return ("expired")
    return (str(datetime.timedelta(seconds=round(remaining, 0))))
if 90 - 90: I1ii11iIi11i
if 9 - 9: IiII + ooOoO0o
if 7 - 7: O0 % I1Ii111 + I1ii11iIi11i + Ii1I % OoooooooOO . Oo0Ooo
if 56 - 56: iII111i
if 84 - 84: OoOoOO00 - i11iIiiIii
if 1 - 1: iII111i * OoOoOO00
if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
if 87 - 87: OOooOOo + o0oOOo0O0Ooo . iII111i - OoooooooOO
if 6 - 6: iIii1I11I1II1 * OoooooooOO
if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
if 52 - 52: O0 / o0oOOo0O0Ooo % iII111i * I1IiiI % OOooOOo
if 69 - 69: I1ii11iIi11i
if 83 - 83: o0oOOo0O0Ooo
def lisp_print_eid_tuple ( eid , group ) :
    # Return a display string for an (eid, group) pair: the unicast
    # prefix when group is null, "[<iid>](*, <group>)" for a (*,G) entry,
    # otherwise the (S,G) rendering from eid.print_sg().
    i1iiii = eid . print_prefix ( )
    if ( group . is_null ( ) ) : return ( i1iiii )
    if 90 - 90: o0oOOo0O0Ooo % I1ii11iIi11i - iIii1I11I1II1 % OoOoOO00
    IIiI11I1I1i1i = group . print_prefix ( )
    oooo = group . instance_id
    if 70 - 70: Ii1I . i11iIiiIii % Ii1I . O0 - iIii1I11I1II1
    # (*, G) case: no source EID, or the EID exactly covers the group
    # prefix. Strip the "[<iid>]" prefix from the group string (everything
    # through the first "]") and re-emit the iid explicitly.
    if ( eid . is_null ( ) or eid . is_exact_match ( group ) ) :
        OOOooo0OooOoO = IIiI11I1I1i1i . find ( "]" ) + 1
        return ( "[{}](*, {})" . format ( oooo , IIiI11I1I1i1i [ OOOooo0OooOoO : : ] ) )
    if 26 - 26: OOooOOo
    if 76 - 76: i1IIi * OoooooooOO * O0 + I1Ii111 * I1Ii111
    # (S, G) case.
    i1iIiIii = eid . print_sg ( group )
    return ( i1iIiIii )
if 20 - 20: o0oOOo0O0Ooo * ooOoO0o
if 10 - 10: I11i - Oo0Ooo
if 59 - 59: OoooooooOO * Oo0Ooo + i1IIi
if 23 - 23: ooOoO0o
if 13 - 13: iIii1I11I1II1
if 77 - 77: i11iIiiIii - iIii1I11I1II1 / oO0o / ooOoO0o / OoO0O00
if 56 - 56: OoooooooOO * O0
if 85 - 85: OoooooooOO % OoOoOO00 * iIii1I11I1II1
def lisp_convert_6to4(addr_str):
    """Return the embedded IPv4 address from an IPv4-mapped IPv6 string
    ("::ffff:a.b.c.d"); any other string is returned unchanged."""
    if ("::ffff:" not in addr_str): return (addr_str)
    return (addr_str.split(":")[-1])
if 60 - 60: I1Ii111
if 98 - 98: ooOoO0o
if 34 - 34: iIii1I11I1II1 * I11i * I11i / I1ii11iIi11i
if 28 - 28: OoO0O00 - oO0o + OoOoOO00 + Ii1I / iIii1I11I1II1
if 26 - 26: iIii1I11I1II1 - O0 . O0
if 68 - 68: OOooOOo + oO0o . O0 . Ii1I % i1IIi % OOooOOo
if 50 - 50: IiII + o0oOOo0O0Ooo
if 96 - 96: OoO0O00
if 92 - 92: Oo0Ooo / i11iIiiIii + I1ii11iIi11i
if 87 - 87: OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
def lisp_convert_4to6(addr_str):
    """Return 'addr_str' as an IPv6 lisp_address, converting an IPv4
    literal to its IPv4-mapped "::ffff:" form first."""
    address = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    if (address.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
    address.store_address(addr_str)
    return (address)
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
if 21 - 21: OoOoOO00 - OoooooooOO % i11iIiiIii
def lisp_gethostbyname ( string ) :
    # Resolve 'string' to an address string. Literal IPv4 dotted-quads
    # and IPv6 colon addresses are returned unchanged; otherwise a DNS
    # lookup is attempted. Returns "" on resolution failure.
    Oo00O0OO = string . split ( "." )
    oOOOoo0o = string . split ( ":" )
    iiiI1IiIIii = string . split ( "-" )
    if 25 - 25: I1ii11iIi11i + oO0o + OoooooooOO . II111iiii . iII111i
    # Four all-digit dot-separated pieces => already an IPv4 address.
    if ( len ( Oo00O0OO ) == 4 ) :
        if ( Oo00O0OO [ 0 ] . isdigit ( ) and Oo00O0OO [ 1 ] . isdigit ( ) and Oo00O0OO [ 2 ] . isdigit ( ) and
            Oo00O0OO [ 3 ] . isdigit ( ) ) : return ( string )
    if 66 - 66: ooOoO0o * OoOoOO00
    # A leading hex piece before ":" => treat as a literal IPv6 address.
    if ( len ( oOOOoo0o ) > 1 ) :
        try :
            int ( oOOOoo0o [ 0 ] , 16 )
            return ( string )
        except :
            pass
    if 2 - 2: oO0o . I1Ii111 * Oo0Ooo + O0 - I11i * iIii1I11I1II1
    if 12 - 12: o0oOOo0O0Ooo * I1Ii111 % II111iiii * i1IIi * iIii1I11I1II1
    if 81 - 81: Oo0Ooo - I11i
    if 24 - 24: OoooooooOO . OoO0O00 * II111iiii
    if 59 - 59: I1Ii111 + OoO0O00 / OOooOOo
    if 97 - 97: Oo0Ooo * iII111i % ooOoO0o . iII111i - I1Ii111 - OOooOOo
    if 79 - 79: I1IiiI - ooOoO0o
    # Probe a dash-separated name for three hex pieces (MAC-style).
    # NOTE(review): the loop result is unused -- control falls through to
    # the DNS lookup regardless of the outcome; confirm whether a
    # return ( string ) was intended when all three pieces parse as hex.
    if ( len ( iiiI1IiIIii ) == 3 ) :
        for iIi1iIIIiIiI in range ( 3 ) :
            try : int ( iiiI1IiIIii [ iIi1iIIIiIiI ] , 16 )
            except : break
    if 37 - 37: IiII . Oo0Ooo * Oo0Ooo * II111iiii * O0
    if 83 - 83: IiII / I1Ii111
    if 64 - 64: OoO0O00 % IiII . I1Ii111 % OoO0O00 + I11i * IiII
    # Standard DNS lookup. On failure, non-Alpine systems give up with "".
    try :
        IiI = socket . gethostbyname ( string )
        return ( IiI )
    except :
        if ( lisp_is_alpine ( ) == False ) : return ( "" )
    if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
    if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
    if 52 - 52: Ii1I % OOooOOo * I1IiiI % I11i + OOooOOo / iII111i
    if 80 - 80: OoooooooOO + IiII
    if 95 - 95: I1Ii111 / oO0o * I1Ii111 - OoooooooOO * OoooooooOO % OoO0O00
    # Alpine fallback: use getaddrinfo() and accept the answer only when
    # the canonical name echoes the queried name.
    try :
        IiI = socket . getaddrinfo ( string , 0 ) [ 0 ]
        if ( IiI [ 3 ] != string ) : return ( "" )
        IiI = IiI [ 4 ] [ 0 ]
    except :
        IiI = ""
    if 43 - 43: Oo0Ooo . I1Ii111
    return ( IiI )
if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
if 29 - 29: IiII . ooOoO0o - II111iiii
if 68 - 68: iIii1I11I1II1 + II111iiii / oO0o
if 91 - 91: OoOoOO00 % iIii1I11I1II1 . I1IiiI
if 70 - 70: I11i % II111iiii % O0 . i1IIi / I1Ii111
if 100 - 100: I1ii11iIi11i * i11iIiiIii % oO0o / Oo0Ooo / ooOoO0o + I1ii11iIi11i
if 59 - 59: I1Ii111 - IiII
if 14 - 14: iIii1I11I1II1 - iIii1I11I1II1
def lisp_ip_checksum(data, hdrlen=20):
    """Compute the IPv4 header checksum and return 'data' with the result
    stored in the checksum field (header bytes 10-12).

    One's-complement sum of the header's 16-bit words (RFC 1071); the
    packet is returned unmodified if shorter than 'hdrlen' bytes.
    """
    if (len(data) < hdrlen):
        lprint("IPv4 packet too short, length {}".format(len(data)))
        return (data)

    hexed = binascii.hexlify(data)

    # Sum 16-bit words (4 hex digits each) over the header only.
    csum = 0
    for offset in range(0, hdrlen * 2, 4):
        csum += int(hexed[offset:offset + 4], 16)

    # Fold carries, complement, and convert to network byte order.
    csum = (csum >> 16) + (csum & 0xffff)
    csum += csum >> 16
    csum = socket.htons(~csum & 0xffff)

    # Splice the checksum into the header and return the packet.
    packed = struct.pack("H", csum)
    return (data[0:10] + packed + data[12::])
if 26 - 26: o0oOOo0O0Ooo
if 12 - 12: OoooooooOO / O0 + II111iiii * I1ii11iIi11i
if 46 - 46: II111iiii - IiII * OoooooooOO / oO0o % IiII
if 11 - 11: iIii1I11I1II1 . OoOoOO00 / IiII % ooOoO0o
if 61 - 61: ooOoO0o - OOooOOo + OOooOOo
if 40 - 40: i11iIiiIii . iIii1I11I1II1
if 2 - 2: i1IIi * oO0o - oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
if 3 - 3: OoooooooOO
def lisp_icmp_checksum(data):
    """Compute the ICMP checksum over the first 18 bytes of 'data' and
    return 'data' with the result stored in bytes 2-4.

    One's-complement sum of 16-bit words (RFC 1071); the packet is
    returned unmodified if shorter than 36 bytes.
    """
    if (len(data) < 36):
        lprint("ICMP packet too short, length {}".format(len(data)))
        return (data)

    hexed = binascii.hexlify(data)

    # Sum 16-bit words (4 hex digits each) over the first 36 hex digits.
    csum = 0
    for offset in range(0, 36, 4):
        csum += int(hexed[offset:offset + 4], 16)

    # Fold carries, complement, and convert to network byte order.
    csum = (csum >> 16) + (csum & 0xffff)
    csum += csum >> 16
    csum = socket.htons(~csum & 0xffff)

    # Splice the checksum into the ICMP header and return the packet.
    packed = struct.pack("H", csum)
    return (data[0:2] + packed + data[4::])
if 43 - 43: OoO0O00 % OoO0O00
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if 45 - 45: Ii1I - OOooOOo
if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
if 10 - 10: Ii1I - i11iIiiIii . I1ii11iIi11i % i1IIi
if 78 - 78: iIii1I11I1II1 * Oo0Ooo . Oo0Ooo - OOooOOo . iIii1I11I1II1
if 30 - 30: ooOoO0o + ooOoO0o % IiII - o0oOOo0O0Ooo - I1ii11iIi11i
if 36 - 36: I11i % OOooOOo
if 72 - 72: I1IiiI / iII111i - O0 + I11i
if 83 - 83: O0
if 89 - 89: Oo0Ooo + I1ii11iIi11i - o0oOOo0O0Ooo
if 40 - 40: OoO0O00 + OoO0O00
if 94 - 94: iII111i * iIii1I11I1II1 . I11i
if 13 - 13: iIii1I11I1II1 * OoOoOO00 / I1Ii111 % ooOoO0o + oO0o
if 41 - 41: I1ii11iIi11i
if 5 - 5: Oo0Ooo
if 100 - 100: Ii1I + iIii1I11I1II1
if 59 - 59: IiII
if 89 - 89: OoOoOO00 % iIii1I11I1II1
if 35 - 35: I1ii11iIi11i + I1Ii111 - OoOoOO00 % oO0o % o0oOOo0O0Ooo % OoOoOO00
if 45 - 45: I1IiiI * OOooOOo % OoO0O00
if 24 - 24: ooOoO0o - I11i * oO0o
if 87 - 87: Ii1I - I1ii11iIi11i % I1ii11iIi11i . oO0o / I1ii11iIi11i
if 6 - 6: OoOoOO00 / iIii1I11I1II1 * OoooooooOO * i11iIiiIii
if 79 - 79: IiII % OoO0O00
if 81 - 81: i11iIiiIii + i11iIiiIii * OoO0O00 + IiII
if 32 - 32: O0 . OoooooooOO
if 15 - 15: I1IiiI . OoO0O00
if 17 - 17: i11iIiiIii / Oo0Ooo . OoO0O00 / I1IiiI
if 38 - 38: i1IIi . I1ii11iIi11i % Ii1I + iIii1I11I1II1 + O0
if 47 - 47: OoO0O00 + IiII / II111iiii
if 97 - 97: I1ii11iIi11i / I1IiiI % O0 + i1IIi - ooOoO0o
if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii
def lisp_udp_checksum(source, dest, data):
    """Compute the IPv6 UDP checksum for packet 'data' and return 'data'
    with the checksum stored in the UDP header (bytes 6-8).

    Builds the IPv6 pseudo-header (source address, destination address,
    upper-layer length, next-header), then takes the one's-complement sum
    of 16-bit words over pseudo-header + packet (RFC 1071 / RFC 8200).
    """
    #
    # Pseudo-header: packed source/dest addresses followed by the packet
    # length and the UDP protocol number in network byte order.
    #
    src = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
    dst = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
    udp_length = socket.htonl(len(data))
    protocol = socket.htonl(LISP_UDP_PROTOCOL)
    pheader = src.pack_address()
    pheader += dst.pack_address()
    pheader += struct.pack("II", udp_length, protocol)

    #
    # Hexlify and zero-pad to a multiple of 4 hex digits so an odd
    # trailing byte is checksummed as a zero-padded 16-bit word.
    # Bug fix: binascii.hexlify() returns bytes, so the pad must be b"0";
    # the old code appended str "0" and raised TypeError on odd-length
    # payloads.
    #
    hexed = binascii.hexlify(pheader + data)
    remainder = len(hexed) % 4
    if (remainder != 0): hexed += b"0" * remainder

    #
    # One's-complement sum of 16-bit words (4 hex digits each).
    #
    csum = 0
    for offset in range(0, len(hexed), 4):
        csum += int(hexed[offset:offset + 4], 16)

    #
    # Fold carries, complement, and convert to network byte order.
    #
    csum = (csum >> 16) + (csum & 0xffff)
    csum += csum >> 16
    csum = socket.htons(~csum & 0xffff)

    #
    # Store the checksum into the UDP header and return the packet.
    #
    packed = struct.pack("H", csum)
    return (data[0:6] + packed + data[8::])
if 12 - 12: I1ii11iIi11i / Ii1I
if 5 - 5: OoooooooOO
if 18 - 18: I1IiiI % OoooooooOO - iII111i . i11iIiiIii * Oo0Ooo % Ii1I
if 12 - 12: i1IIi / OOooOOo % ooOoO0o * IiII * O0 * iIii1I11I1II1
if 93 - 93: Oo0Ooo / I1ii11iIi11i + i1IIi * oO0o . OoooooooOO
if 54 - 54: O0 / IiII % ooOoO0o * i1IIi * O0
if 48 - 48: o0oOOo0O0Ooo . oO0o % OoOoOO00 - OoOoOO00
if 33 - 33: I11i % II111iiii + OoO0O00
def lisp_igmp_checksum(igmp):
    """Compute the IGMP checksum over the first 12 bytes of 'igmp' and
    return 'igmp' with the result stored in bytes 2-4.

    One's-complement sum of 16-bit words (RFC 1071).
    """
    hexed = binascii.hexlify(igmp)

    # Sum 16-bit words (4 hex digits each) over the first 24 hex digits.
    csum = 0
    for offset in range(0, 24, 4):
        csum += int(hexed[offset:offset + 4], 16)

    # Fold carries, complement, and convert to network byte order.
    csum = (csum >> 16) + (csum & 0xffff)
    csum += csum >> 16
    csum = socket.htons(~csum & 0xffff)

    # Splice the checksum into the IGMP header and return the packet.
    packed = struct.pack("H", csum)
    return (igmp[0:2] + packed + igmp[4::])
if 19 - 19: I1ii11iIi11i
if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
if 66 - 66: O0
if 52 - 52: OoO0O00 * OoooooooOO
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
if 28 - 28: iIii1I11I1II1
def lisp_get_interface_address(device):
    """Return the first IPv4 address configured on 'device' as a
    lisp_address, or None when the device is unknown or has no IPv4
    address."""
    #
    # Unknown device name.
    #
    if (device not in netifaces.interfaces()): return (None)

    #
    # No IPv4 addresses configured on this device.
    #
    addresses = netifaces.ifaddresses(device)
    if (netifaces.AF_INET not in addresses): return (None)

    #
    # Return the first address found.
    #
    result = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    for entry in addresses[netifaces.AF_INET]:
        result.store_address(entry["addr"])
        return (result)

    return (None)
if 61 - 61: oO0o + I1ii11iIi11i / i1IIi * oO0o
if 90 - 90: Ii1I % oO0o
if 6 - 6: OoooooooOO / i11iIiiIii / I1Ii111
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
if 34 - 34: I1Ii111 - OOooOOo
if 25 - 25: oO0o % I1IiiI + i11iIiiIii + O0 * OoooooooOO
if 64 - 64: i1IIi
if 10 - 10: I1Ii111 % O0 / I1IiiI % I11i
if 25 - 25: II111iiii / OoO0O00
if 64 - 64: O0 % ooOoO0o
if 40 - 40: o0oOOo0O0Ooo + I11i
if 77 - 77: i11iIiiIii % IiII + I1Ii111 % OoooooooOO - I11i
def lisp_get_input_interface ( packet ) :
    # From the first 12 bytes of a MAC frame (destination MAC followed by
    # source MAC), return ([device-names], source-mac, dest-mac,
    # my-mac-flag) using the lisp_mymacs table built by
    # lisp_get_local_macs(). lisp_format_packet() renders the bytes as
    # hex, and the spaces it inserts are stripped before slicing.
    iIIiiIi = lisp_format_packet ( packet [ 0 : 12 ] )
    i1I111II = iIIiiIi . replace ( " " , "" )
    Oo0OOo = i1I111II [ 0 : 12 ]
    i1II11I11ii1 = i1I111II [ 12 : : ]
    if 64 - 64: oO0o % OoOoOO00 / II111iiii % ooOoO0o - iII111i
    # my-mac flag: True when the frame's source MAC is one of ours.
    try : I1II1IiI1 = ( i1II11I11ii1 in lisp_mymacs )
    except : I1II1IiI1 = False
    if 26 - 26: OOooOOo * Oo0Ooo
    # Prefer the interface owning the destination MAC, fall back to the
    # source MAC's interface, else report unknown ("?").
    if ( Oo0OOo in lisp_mymacs ) : return ( lisp_mymacs [ Oo0OOo ] , i1II11I11ii1 , Oo0OOo , I1II1IiI1 )
    if ( I1II1IiI1 ) : return ( lisp_mymacs [ i1II11I11ii1 ] , i1II11I11ii1 , Oo0OOo , I1II1IiI1 )
    return ( [ "?" ] , i1II11I11ii1 , Oo0OOo , I1II1IiI1 )
if 31 - 31: I11i * oO0o . Ii1I
if 35 - 35: I11i
if 94 - 94: ooOoO0o / i11iIiiIii % O0
if 70 - 70: I11i - Oo0Ooo / OoooooooOO % OoooooooOO
if 95 - 95: OoooooooOO % OoooooooOO . Ii1I
if 26 - 26: oO0o + IiII - II111iiii . II111iiii + I1ii11iIi11i + OoOoOO00
if 68 - 68: O0
if 76 - 76: I1ii11iIi11i
def lisp_get_local_interfaces():
    """Register a lisp_interface entry for every local network device."""
    for device_name in netifaces.interfaces():
        entry = lisp_interface(device_name)
        entry.add_interface()
    return
if 19 - 19: iII111i * Oo0Ooo . iII111i . OoO0O00 / OoO0O00 - oO0o
if 9 - 9: I1Ii111 * IiII * I1Ii111
if 74 - 74: iIii1I11I1II1 / o0oOOo0O0Ooo
if 58 - 58: iIii1I11I1II1 - I1IiiI % o0oOOo0O0Ooo % OoooooooOO * iIii1I11I1II1 + OOooOOo
if 25 - 25: OOooOOo % O0
if 44 - 44: I1Ii111 . Ii1I * II111iiii / IiII + iIii1I11I1II1
if 14 - 14: O0 % IiII % Ii1I * oO0o
def lisp_get_loopback_address():
    """Return the first loopback IPv4 address that is not 127.0.0.1,
    or None when no such address is configured."""
    for entry in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
        peer = entry["peer"]
        if (peer != "127.0.0.1"): return (peer)
    return (None)
if 86 - 86: iIii1I11I1II1 / O0 . I1Ii111 % iIii1I11I1II1 % Oo0Ooo
if 86 - 86: i11iIiiIii - o0oOOo0O0Ooo . ooOoO0o * Oo0Ooo / Ii1I % o0oOOo0O0Ooo
if 61 - 61: o0oOOo0O0Ooo + OoOoOO00
if 15 - 15: OoOoOO00 * oO0o + OOooOOo . I11i % I1IiiI - ooOoO0o
if 13 - 13: OoOoOO00 % OoOoOO00 % Oo0Ooo % I1IiiI * i1IIi % I11i
if 82 - 82: IiII . OoOoOO00 / ooOoO0o + iII111i - ooOoO0o
if 55 - 55: ooOoO0o % Oo0Ooo % o0oOOo0O0Ooo
if 29 - 29: IiII / iIii1I11I1II1 + I1ii11iIi11i % iII111i % I11i
def lisp_is_mac_string(mac_str):
    """Return True when 'mac_str' looks like a MAC in xxxx-xxxx-xxxx form,
    optionally followed by a single "/<prefix-len>" suffix."""
    pieces = mac_str.split("/")
    candidate = pieces[0] if len(pieces) == 2 else mac_str
    return (len(candidate) == 14 and candidate.count("-") == 2)
if 46 - 46: iIii1I11I1II1
if 70 - 70: i1IIi . I11i
if 74 - 74: I11i
if 58 - 58: iIii1I11I1II1 * OoO0O00 * I1Ii111 * ooOoO0o . OoooooooOO
if 6 - 6: I1ii11iIi11i - oO0o * i11iIiiIii + OoOoOO00 / ooOoO0o % OOooOOo
if 38 - 38: OOooOOo % IiII % II111iiii - Oo0Ooo - iIii1I11I1II1
if 9 - 9: o0oOOo0O0Ooo % I1ii11iIi11i . I1ii11iIi11i
if 28 - 28: OoooooooOO % oO0o + I1ii11iIi11i + O0 . I1Ii111
def lisp_get_local_macs():

    #
    # Populate the lisp_mymacs table: a map from MAC address strings
    # (hex digits only, ":" separators removed) to the list of
    # interface names owning that MAC.  Interfaces whose names are not
    # alphanumeric once ":" and "-" are stripped are skipped, as are
    # interfaces with no link-layer address or a truncated one.
    #
    for device in netifaces.interfaces():

        #
        # Normalize the device name before the alphanumeric test.  Bug
        # fix: the second replace() previously restarted from the raw
        # device name, discarding the ":" removal, so alias interfaces
        # such as "eth0:1" were wrongly rejected here.
        #
        name = device.replace(":", "")
        name = name.replace("-", "")
        if (name.isalnum() == False): continue

        #
        # Some interfaces return no address info at all; skip them.
        #
        try:
            addresses = netifaces.ifaddresses(device)
        except:
            continue

        if (netifaces.AF_LINK not in addresses): continue
        mac = addresses[netifaces.AF_LINK][0]["addr"]
        mac = mac.replace(":", "")

        #
        # Some virtual/tunnel devices report addresses shorter than the
        # 12 hex digits of an Ethernet MAC; ignore those.
        #
        if (len(mac) < 12): continue

        if (mac not in lisp_mymacs): lisp_mymacs[mac] = []
        lisp_mymacs[mac].append(device)

    lprint("Local MACs are: {}".format(lisp_mymacs))
    return
if 95 - 95: O0 / I11i . I1Ii111
if 17 - 17: I11i
if 56 - 56: ooOoO0o * o0oOOo0O0Ooo + I11i
if 48 - 48: IiII * OoO0O00 % I1Ii111 - I11i
if 72 - 72: i1IIi % ooOoO0o % IiII % oO0o - oO0o
if 97 - 97: o0oOOo0O0Ooo * O0 / o0oOOo0O0Ooo * OoO0O00 * Oo0Ooo
if 38 - 38: I1Ii111
if 25 - 25: iIii1I11I1II1 % II111iiii / I11i / I1ii11iIi11i
def lisp_get_local_rloc ( ) :
#
# Find a local IPv4 RLOC to use.  Ask the kernel which device carries
# the default route ("netstat -rn"), then read the first "inet" address
# on that device -- via ifconfig on macOS, "ip addr show" elsewhere,
# falling back to a global address on the loopback device.  Returns a
# lisp_address; an empty/zero IPv4 lisp_address is returned when
# nothing can be determined.
#
 iI1iIIIIIiIi1 = getoutput ( "netstat -rn | egrep 'default|0.0.0.0'" )
 if ( iI1iIIIIIiIi1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
 if 19 - 19: OoOoOO00 . o0oOOo0O0Ooo . OoooooooOO
 if 13 - 13: OOooOOo . Oo0Ooo / II111iiii
 if 43 - 43: iIii1I11I1II1 % OoO0O00
 if 84 - 84: Oo0Ooo
# Only the first default-route line is considered; the device name is
# the last whitespace-separated column of that line.
 iI1iIIIIIiIi1 = iI1iIIIIIiIi1 . split ( "\n" ) [ 0 ]
 ooO000OO = iI1iIIIIIiIi1 . split ( ) [ - 1 ]
 if 44 - 44: OoooooooOO * i11iIiiIii / Oo0Ooo
 IiI = ""
 OoO = lisp_is_macos ( )
 if ( OoO ) :
 iI1iIIIIIiIi1 = getoutput ( "ifconfig {} | egrep 'inet '" . format ( ooO000OO ) )
 if ( iI1iIIIIIiIi1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
 else :
 oO00o00 = 'ip addr show | egrep "inet " | egrep "{}"' . format ( ooO000OO )
 iI1iIIIIIiIi1 = getoutput ( oO00o00 )
 if ( iI1iIIIIIiIi1 == "" ) :
# No address on the default-route device itself; accept a global
# address configured on the loopback device instead.
 oO00o00 = 'ip addr show | egrep "inet " | egrep "global lo"'
 iI1iIIIIIiIi1 = getoutput ( oO00o00 )
 if 51 - 51: Oo0Ooo * iIii1I11I1II1 . OoooooooOO . Ii1I - OOooOOo / I1IiiI
 if ( iI1iIIIIIiIi1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
 if 98 - 98: II111iiii + Ii1I + OoooooooOO / i1IIi - Ii1I
 if 87 - 87: iII111i / I11i / I11i % OoooooooOO - I1ii11iIi11i * oO0o
 if 23 - 23: i11iIiiIii
 if 100 - 100: oO0o + O0 . I1IiiI + i1IIi - OoOoOO00 + o0oOOo0O0Ooo
 if 65 - 65: II111iiii / Oo0Ooo
 if 42 - 42: i11iIiiIii . O0
 IiI = ""
 iI1iIIIIIiIi1 = iI1iIIIIIiIi1 . split ( "\n" )
 if 75 - 75: I1Ii111 + iIii1I11I1II1
# NOTE: this loop returns on its first iteration -- only the first
# "inet" line is ever used.  On Linux the address carries a "/prefix"
# suffix which is stripped; ifconfig output (macOS) does not.
 for IiiiI1 in iI1iIIIIIiIi1 :
 OO0O00o0 = IiiiI1 . split ( ) [ 1 ]
 if ( OoO == False ) : OO0O00o0 = OO0O00o0 . split ( "/" ) [ 0 ]
 I1IIIi = lisp_address ( LISP_AFI_IPV4 , OO0O00o0 , 32 , 0 )
 return ( I1IIIi )
 if 39 - 39: I11i . I1ii11iIi11i . OOooOOo * I11i / O0 * o0oOOo0O0Ooo
 return ( lisp_address ( LISP_AFI_IPV4 , IiI , 32 , 0 ) )
if 35 - 35: i1IIi * i11iIiiIii % I1ii11iIi11i / IiII / IiII
if 91 - 91: OoO0O00 * I1Ii111 % OoO0O00 . o0oOOo0O0Ooo * I1ii11iIi11i . OOooOOo
if 13 - 13: I1ii11iIi11i
if 80 - 80: Oo0Ooo % IiII % OoooooooOO * Oo0Ooo % Ii1I
if 41 - 41: OoooooooOO / i1IIi
if 70 - 70: OoOoOO00 % o0oOOo0O0Ooo % i1IIi / I1ii11iIi11i % i11iIiiIii / i1IIi
if 4 - 4: IiII
if 93 - 93: oO0o % i1IIi
if 83 - 83: I1IiiI . Oo0Ooo - I11i . o0oOOo0O0Ooo
if 73 - 73: I1IiiI - iII111i . iII111i
if 22 - 22: ooOoO0o / ooOoO0o - Ii1I % I11i . OOooOOo + IiII
def lisp_get_local_addresses ( ) :
#
# Select the local IPv4 and IPv6 RLOC addresses and store them, with
# the chosen device name, in global lisp_myrlocs as the 3-element list
# [ipv4-lisp_address, ipv6-lisp_address, device-name].  The env var
# LISP_ADDR_SELECT ("device", "device:N", or "N") may pin the device
# and/or select the N-th qualifying address.  Returns True when an
# IPv4 address was found.
#
 global lisp_myrlocs
 if 64 - 64: i1IIi % I1ii11iIi11i / Ii1I % OoooooooOO
 if 24 - 24: I1Ii111 + OoooooooOO . IiII / OoOoOO00 / I11i
 if 65 - 65: OoooooooOO
 if 18 - 18: O0 - i1IIi . I1Ii111
 if 98 - 98: o0oOOo0O0Ooo
 if 73 - 73: Oo0Ooo - iII111i . oO0o % i1IIi . O0
 if 15 - 15: ooOoO0o . iIii1I11I1II1 * I1IiiI % I11i
 if 21 - 21: OoO0O00 - I1IiiI . OoooooooOO
 if 6 - 6: iIii1I11I1II1 - iIii1I11I1II1 % o0oOOo0O0Ooo / iIii1I11I1II1 * I1Ii111
 if 3 - 3: OOooOOo . IiII / Oo0Ooo
# Parse LISP_ADDR_SELECT into an optional device name and an optional
# 1-based address index.
 OooIIi111 = None
 OOOooo0OooOoO = 1
 oO0o0o0O = os . getenv ( "LISP_ADDR_SELECT" )
 if ( oO0o0o0O != None and oO0o0o0O != "" ) :
 oO0o0o0O = oO0o0o0O . split ( ":" )
 if ( len ( oO0o0o0O ) == 2 ) :
 OooIIi111 = oO0o0o0O [ 0 ]
 OOOooo0OooOoO = oO0o0o0O [ 1 ]
 else :
 if ( oO0o0o0O [ 0 ] . isdigit ( ) ) :
 OOOooo0OooOoO = oO0o0o0O [ 0 ]
 else :
 OooIIi111 = oO0o0o0O [ 0 ]
 if 11 - 11: I1Ii111 - I11i % i11iIiiIii . iIii1I11I1II1 * I1IiiI - Oo0Ooo
 if 73 - 73: O0 + ooOoO0o - O0 / OoooooooOO * Oo0Ooo
 OOOooo0OooOoO = 1 if ( OOOooo0OooOoO == "" ) else int ( OOOooo0OooOoO )
 if 32 - 32: OoO0O00 % I1IiiI % iII111i
 if 66 - 66: OoOoOO00 + o0oOOo0O0Ooo
# Result slots: [0]=IPv4 lisp_address, [1]=IPv6 lisp_address,
# [2]=device name.
 OOOO00 = [ None , None , None ]
 o0 = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
 I1iI111ii111i = lisp_address ( LISP_AFI_IPV6 , "" , 128 , 0 )
 o00 = None
 if 38 - 38: ooOoO0o
 for ooO000OO in netifaces . interfaces ( ) :
 if ( OooIIi111 != None and OooIIi111 != ooO000OO ) : continue
 ooo0o0 = netifaces . ifaddresses ( ooO000OO )
 if ( ooo0o0 == { } ) : continue
 if 38 - 38: O0 - IiII * Oo0Ooo . O0 . I1ii11iIi11i
 if 82 - 82: OoooooooOO
 if 75 - 75: II111iiii % I1IiiI + OOooOOo % OoooooooOO / IiII
 if 4 - 4: i11iIiiIii - OOooOOo % I1ii11iIi11i * I1Ii111 % o0oOOo0O0Ooo
 o00 = lisp_get_interface_instance_id ( ooO000OO , None )
 if 71 - 71: ooOoO0o . ooOoO0o - iIii1I11I1II1
 if 22 - 22: OoooooooOO / I1ii11iIi11i % iII111i * OoOoOO00
 if 32 - 32: OoooooooOO % oO0o % iIii1I11I1II1 / O0
 if 61 - 61: II111iiii . O0 - Ii1I - I1ii11iIi11i / i11iIiiIii - II111iiii
# Pick the N-th usable IPv4 address on this device, skipping loopback,
# link-local, zero, and (unless the user pinned a device) addresses
# that are already EIDs in the local mapping database.
 if ( netifaces . AF_INET in ooo0o0 ) :
 Oo00O0OO = ooo0o0 [ netifaces . AF_INET ]
 O0oo0oOo = 0
 for IiI in Oo00O0OO :
 o0 . store_address ( IiI [ "addr" ] )
 if ( o0 . is_ipv4_loopback ( ) ) : continue
 if ( o0 . is_ipv4_link_local ( ) ) : continue
 if ( o0 . address == 0 ) : continue
 O0oo0oOo += 1
 o0 . instance_id = o00
 if ( OooIIi111 == None and
 lisp_db_for_lookups . lookup_cache ( o0 , False ) ) : continue
 OOOO00 [ 0 ] = o0
 if ( O0oo0oOo == OOOooo0OooOoO ) : break
 if 40 - 40: I11i % ooOoO0o
 if 71 - 71: OoO0O00
# Same selection for IPv6, skipping link-local and loopback.
 if ( netifaces . AF_INET6 in ooo0o0 ) :
 oOOOoo0o = ooo0o0 [ netifaces . AF_INET6 ]
 O0oo0oOo = 0
 for IiI in oOOOoo0o :
 O0O0 = IiI [ "addr" ]
 I1iI111ii111i . store_address ( O0O0 )
 if ( I1iI111ii111i . is_ipv6_string_link_local ( O0O0 ) ) : continue
 if ( I1iI111ii111i . is_ipv6_loopback ( ) ) : continue
 O0oo0oOo += 1
 I1iI111ii111i . instance_id = o00
 if ( OooIIi111 == None and
 lisp_db_for_lookups . lookup_cache ( I1iI111ii111i , False ) ) : continue
 OOOO00 [ 1 ] = I1iI111ii111i
 if ( O0oo0oOo == OOOooo0OooOoO ) : break
 if 75 - 75: iII111i
 if 16 - 16: I1ii11iIi11i + II111iiii * OoOoOO00 . IiII
 if 10 - 10: iII111i * Ii1I - ooOoO0o . I11i - OOooOOo
 if 94 - 94: I1IiiI % IiII + OoO0O00
 if 90 - 90: i1IIi + O0 - oO0o . iII111i + iIii1I11I1II1
 if 88 - 88: Ii1I * O0 . I1Ii111 / OoooooooOO
# A device only qualifies when it yielded an IPv4 address.
 if ( OOOO00 [ 0 ] == None ) : continue
 if 29 - 29: OoooooooOO . II111iiii % OoOoOO00
 OOOO00 [ 2 ] = ooO000OO
 break
 if 26 - 26: iIii1I11I1II1 - I1ii11iIi11i . IiII . IiII + iIii1I11I1II1 * Oo0Ooo
 if 85 - 85: OOooOOo + II111iiii - OOooOOo * oO0o - i1IIi % iII111i
 IiIiI = OOOO00 [ 0 ] . print_address_no_iid ( ) if OOOO00 [ 0 ] else "none"
 iI1Ii11 = OOOO00 [ 1 ] . print_address_no_iid ( ) if OOOO00 [ 1 ] else "none"
 ooO000OO = OOOO00 [ 2 ] if OOOO00 [ 2 ] else "none"
 if 93 - 93: I1IiiI / ooOoO0o / I11i + II111iiii + i11iIiiIii
 OooIIi111 = " (user selected)" if OooIIi111 != None else ""
 if 16 - 16: I1IiiI - oO0o . Oo0Ooo
 IiIiI = red ( IiIiI , False )
 iI1Ii11 = red ( iI1Ii11 , False )
 ooO000OO = bold ( ooO000OO , False )
 lprint ( "Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}" . format ( IiIiI , iI1Ii11 , ooO000OO , OooIIi111 , o00 ) )
 if 94 - 94: OoOoOO00 + IiII . ooOoO0o
 if 69 - 69: O0 - O0
 lisp_myrlocs = OOOO00
 return ( ( OOOO00 [ 0 ] != None ) )
if 41 - 41: IiII % o0oOOo0O0Ooo
if 67 - 67: O0 % I1Ii111
if 35 - 35: I1IiiI . OoOoOO00 + OoooooooOO % Oo0Ooo % OOooOOo
if 39 - 39: Ii1I
if 60 - 60: OOooOOo
if 62 - 62: I1Ii111 * I11i
if 74 - 74: OoOoOO00 . iIii1I11I1II1
if 87 - 87: ooOoO0o
if 41 - 41: OoOoOO00 . iIii1I11I1II1 % ooOoO0o + O0
def lisp_get_all_addresses():

    #
    # Return a flat list of every IPv4 and IPv6 address assigned to
    # this system, excluding IPv4/IPv6 loopback and IPv6 link-local
    # (fe80::) addresses.
    #
    address_list = []
    for device in netifaces.interfaces():
        try:
            entries = netifaces.ifaddresses(device)
        except:
            continue

        for ipv4_entry in entries.get(netifaces.AF_INET, []):
            address = ipv4_entry["addr"]
            if (address.find("127.0.0.1") != -1): continue
            address_list.append(address)

        for ipv6_entry in entries.get(netifaces.AF_INET6, []):
            address = ipv6_entry["addr"]
            if (address == "::1"): continue
            if (address[0:5] == "fe80:"): continue
            address_list.append(address)

    return address_list
if 92 - 92: I11i % i11iIiiIii % Oo0Ooo
if 23 - 23: II111iiii * iII111i
if 80 - 80: I1Ii111 / i11iIiiIii + OoooooooOO
if 38 - 38: I1ii11iIi11i % ooOoO0o + i1IIi * OoooooooOO * oO0o
if 83 - 83: iIii1I11I1II1 - ooOoO0o - I1Ii111 / OoO0O00 - O0
if 81 - 81: Ii1I - oO0o * I1ii11iIi11i / I1Ii111
if 21 - 21: OoO0O00
if 63 - 63: I11i . O0 * I11i + iIii1I11I1II1
def lisp_get_all_multicast_rles():

    #
    # Grep ./lisp.config for "rle-address =" lines and return the list
    # of RLE addresses whose first octet lies in the IPv4 multicast
    # range 224-239.  Commented-out lines (leading "#") are ignored.
    #
    rles = []
    grep_output = getoutput('egrep "rle-address =" ./lisp.config')
    if (grep_output == ""): return (rles)

    for config_line in grep_output.split("\n"):
        if (config_line[0] == "#"): continue
        rle = config_line.split("rle-address = ")[1]
        high_octet = int(rle.split(".")[0])
        if (224 <= high_octet < 240): rles.append(rle)

    return (rles)
if 93 - 93: i11iIiiIii . o0oOOo0O0Ooo
if 16 - 16: i1IIi . i1IIi / I1Ii111 % OoOoOO00 / I1IiiI * I1ii11iIi11i
if 30 - 30: o0oOOo0O0Ooo + OoooooooOO + OOooOOo / II111iiii * Oo0Ooo
if 59 - 59: Ii1I / OoOoOO00 * OoO0O00 * iII111i % oO0o
if 61 - 61: Oo0Ooo - O0 - OoooooooOO
if 4 - 4: II111iiii - oO0o % Oo0Ooo * i11iIiiIii
if 18 - 18: Oo0Ooo % O0
if 66 - 66: iIii1I11I1II1 % i11iIiiIii / I1IiiI
class lisp_packet ( object ) :
def __init__ ( self , packet ) :
#
# A lisp_packet holds one data-plane packet buffer ("packet") together
# with the parsed/derived fields of its outer IP+UDP+LISP encapsulation
# and of the inner user packet.  All address fields start as empty
# lisp_address instances and are filled in during decode/encode.
#
 self . outer_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
 self . outer_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
 self . outer_tos = 0
 self . outer_ttl = 0
 self . udp_sport = 0
 self . udp_dport = 0
 self . udp_length = 0
 self . udp_checksum = 0
 self . inner_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
 self . inner_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
 self . inner_tos = 0
 self . inner_ttl = 0
 self . inner_protocol = 0
 self . inner_sport = 0
 self . inner_dport = 0
 self . lisp_header = lisp_data_header ( )
 self . packet = packet
# IP version numbers (4 or 6) of the inner and outer headers; 0 until
# known.
 self . inner_version = 0
 self . outer_version = 0
# Destination UDP port used for encapsulation; defaults to the LISP
# data port.
 self . encap_port = LISP_DATA_PORT
 self . inner_is_fragment = False
# Set to a short error string (e.g. "ICV-error") when processing fails.
 self . packet_error = ""
 self . gleaned_dest = False
if 47 - 47: I1ii11iIi11i * oO0o + iIii1I11I1II1 - oO0o / IiII
if 86 - 86: IiII
def encode ( self , nonce ) :
#
# Build the full encapsulation around self.packet: LISP data header
# (nonce/instance-id/key-id), UDP header, and outer IPv4 or IPv6
# header, optionally encrypting the payload first.  Returns self, or
# None when no outer source is set or the outer version is unknown.
#
 if 43 - 43: I1IiiI / iII111i / ooOoO0o + iIii1I11I1II1 + OoooooooOO
 if 33 - 33: II111iiii - IiII - ooOoO0o
 if 92 - 92: OoO0O00 * IiII
 if 92 - 92: oO0o
 if 7 - 7: iII111i
 if ( self . outer_source . is_null ( ) ) : return ( None )
 if 73 - 73: OoO0O00 % I1ii11iIi11i
 if 32 - 32: OOooOOo + iII111i + iIii1I11I1II1 * Oo0Ooo
 if 62 - 62: i11iIiiIii
 if 2 - 2: I1IiiI
 if 69 - 69: OoooooooOO / Oo0Ooo * I1Ii111
 if 99 - 99: II111iiii * iIii1I11I1II1 % O0 * oO0o / II111iiii % OoooooooOO
# Choose the nonce: generate one, echo a request-nonce, or use the
# caller-supplied value.
 if ( nonce == None ) :
 self . lisp_header . nonce ( lisp_get_data_nonce ( ) )
 elif ( self . lisp_header . is_request_nonce ( nonce ) ) :
 self . lisp_header . request_nonce ( nonce )
 else :
 self . lisp_header . nonce ( nonce )
 if 14 - 14: IiII . IiII % ooOoO0o
 self . lisp_header . instance_id ( self . inner_dest . instance_id )
 if 42 - 42: o0oOOo0O0Ooo . OOooOOo - ooOoO0o
 if 33 - 33: II111iiii / O0 / IiII - I11i - i1IIi
 if 8 - 8: i11iIiiIii . iII111i / iIii1I11I1II1 / I1ii11iIi11i / IiII - Ii1I
 if 32 - 32: o0oOOo0O0Ooo . i1IIi * Oo0Ooo
 if 98 - 98: Ii1I - II111iiii / I1IiiI . oO0o * IiII . I11i
 if 25 - 25: i11iIiiIii / OoOoOO00 - I1Ii111 / OoO0O00 . o0oOOo0O0Ooo . o0oOOo0O0Ooo
# Encrypt the payload when data-plane security is on, a key exists for
# this RLOC:port, and the instance-id is not the control value
# 0xffffff.  encrypt() sets the key-id in the LISP header.
 self . lisp_header . key_id ( 0 )
 iI1 = ( self . lisp_header . get_instance_id ( ) == 0xffffff )
 if ( lisp_data_plane_security and iI1 == False ) :
 O0O0 = self . outer_dest . print_address_no_iid ( ) + ":" + str ( self . encap_port )
 if 43 - 43: I1ii11iIi11i + o0oOOo0O0Ooo
 if ( O0O0 in lisp_crypto_keys_by_rloc_encap ) :
 iI1iiiiiii = lisp_crypto_keys_by_rloc_encap [ O0O0 ]
 if ( iI1iiiiiii [ 1 ] ) :
 iI1iiiiiii [ 1 ] . use_count += 1
 Oo00oo , oO0oO = self . encrypt ( iI1iiiiiii [ 1 ] , O0O0 )
 if ( oO0oO ) : self . packet = Oo00oo
 if 71 - 71: I1Ii111 / I1IiiI / O0
 if 19 - 19: i11iIiiIii . I1IiiI + II111iiii / OOooOOo . I1ii11iIi11i * ooOoO0o
 if 59 - 59: iIii1I11I1II1 / I1ii11iIi11i % ooOoO0o
 if 84 - 84: iIii1I11I1II1 / I1IiiI . OoOoOO00 % I11i
 if 99 - 99: Oo0Ooo + i11iIiiIii
 if 36 - 36: Ii1I * I1Ii111 * iIii1I11I1II1 - I11i % i11iIiiIii
 if 98 - 98: iIii1I11I1II1 - i1IIi + ooOoO0o % I11i + ooOoO0o / oO0o
 if 97 - 97: IiII % ooOoO0o + II111iiii - IiII % OoO0O00 + ooOoO0o
# Pick the UDP source port: the LISP data port for gleaned
# destinations, a crypto ephemeral port when configured, otherwise a
# hash of the inner packet (for RLOC-probing/ECMP entropy).
 self . udp_checksum = 0
 if ( self . encap_port == LISP_DATA_PORT ) :
 if ( lisp_crypto_ephem_port == None ) :
 if ( self . gleaned_dest ) :
 self . udp_sport = LISP_DATA_PORT
 else :
 self . hash_packet ( )
 if 31 - 31: o0oOOo0O0Ooo
 else :
 self . udp_sport = lisp_crypto_ephem_port
 if 35 - 35: OoOoOO00 + Ii1I * ooOoO0o / OoOoOO00
 else :
 self . udp_sport = LISP_DATA_PORT
 if 69 - 69: ooOoO0o . OOooOOo - I1IiiI
 self . udp_dport = self . encap_port
 self . udp_length = len ( self . packet ) + 16
 if 29 - 29: i11iIiiIii . I1ii11iIi11i / I1IiiI . OOooOOo + i11iIiiIii
 if 26 - 26: IiII / Ii1I - OoooooooOO
 if 9 - 9: OoooooooOO * I1ii11iIi11i
 if 9 - 9: Oo0Ooo + iII111i
# Pack the UDP header in network byte order.
 oooooO0oO0ooO = socket . htons ( self . udp_sport )
 iIII1IiI = socket . htons ( self . udp_dport )
 IIIIIiI1I1 = socket . htons ( self . udp_length )
 O0I1II1 = struct . pack ( "HHHH" , oooooO0oO0ooO , iIII1IiI , IIIIIiI1I1 , self . udp_checksum )
 if 62 - 62: o0oOOo0O0Ooo / iIii1I11I1II1
 if 55 - 55: Ii1I / OoO0O00 + iII111i . IiII
 if 47 - 47: O0
 if 83 - 83: O0 + OoOoOO00 / O0 / I11i
 OoIi11ii1 = self . lisp_header . encode ( )
 if 1 - 1: iIii1I11I1II1 % oO0o . iIii1I11I1II1
 if 10 - 10: iII111i + OoO0O00
 if 6 - 6: OoO0O00
 if 99 - 99: o0oOOo0O0Ooo * OOooOOo % oO0o * oO0o + OoooooooOO
 if 82 - 82: I11i / OoOoOO00 - OOooOOo / ooOoO0o
# Build the outer IP header.  IPv4 is built here (DF bit 0x4000 set,
# checksum computed); for IPv6 an empty header is used because the
# kernel fills it in on send.
 if ( self . outer_version == 4 ) :
 I1iIIi = socket . htons ( self . udp_length + 20 )
 Ii = socket . htons ( 0x4000 )
 Oo00O0o0O = struct . pack ( "BBHHHBBH" , 0x45 , self . outer_tos , I1iIIi , 0xdfdf ,
 Ii , self . outer_ttl , 17 , 0 )
 Oo00O0o0O += self . outer_source . pack_address ( )
 Oo00O0o0O += self . outer_dest . pack_address ( )
 Oo00O0o0O = lisp_ip_checksum ( Oo00O0o0O )
 elif ( self . outer_version == 6 ) :
 Oo00O0o0O = b""
 if 86 - 86: I11i + O0 + Oo0Ooo - I11i
 if 34 - 34: II111iiii % I1IiiI % I1Ii111 + Oo0Ooo - OoOoOO00
 if 66 - 66: Ii1I * iIii1I11I1II1 - ooOoO0o / I1IiiI
 if 62 - 62: IiII . O0 . iIii1I11I1II1
 if 94 - 94: ooOoO0o % I11i % i1IIi
 if 90 - 90: Ii1I * OoO0O00
 if 7 - 7: iII111i . Ii1I . iII111i - I1Ii111
 else :
 return ( None )
 if 33 - 33: ooOoO0o + OoooooooOO - OoO0O00 / i1IIi / OoooooooOO
 if 82 - 82: I1ii11iIi11i / OOooOOo - iII111i / Oo0Ooo * OoO0O00
 self . packet = Oo00O0o0O + O0I1II1 + OoIi11ii1 + self . packet
 return ( self )
if 55 - 55: OoooooooOO
if 73 - 73: OoOoOO00 - I1ii11iIi11i % Oo0Ooo + I1ii11iIi11i - O0 . OoO0O00
def cipher_pad ( self , packet ) :
i1 = len ( packet )
if ( ( i1 % 16 ) != 0 ) :
iIii = ( old_div ( i1 , 16 ) + 1 ) * 16
packet = packet . ljust ( iIii )
if 95 - 95: I11i / IiII . O0 * IiII - o0oOOo0O0Ooo * Oo0Ooo
return ( packet )
if 6 - 6: OoOoOO00 . II111iiii * I1IiiI . I1IiiI / Ii1I
if 14 - 14: I1Ii111 % IiII - O0 / I1Ii111
def encrypt ( self , key , addr_str ) :
#
# Encrypt self.packet with the negotiated per-RLOC key using the key's
# cipher suite (chacha20, AES-GCM, or AES-CBC), append the computed
# ICV, and return [iv + ciphertext (+ gcm-tag) + icv, True].  On any
# failure returns [self.packet, False] with the packet unmodified.
# "addr_str" is only used for the debug log line.
#
 if ( key == None or key . shared_key == None ) :
 return ( [ self . packet , False ] )
 if 91 - 91: i11iIiiIii % I1Ii111 * oO0o - I1ii11iIi11i . I1Ii111
 if 28 - 28: i11iIiiIii
 if 51 - 51: I1IiiI + ooOoO0o * O0 . Ii1I
 if 82 - 82: OOooOOo * I1ii11iIi11i % Ii1I . OOooOOo
 if 43 - 43: OoO0O00 . ooOoO0o * Oo0Ooo
# Pad plaintext to the cipher block size and get a fresh IV.
 Oo00oo = self . cipher_pad ( self . packet )
 ii = key . get_iv ( )
 if 59 - 59: IiII % Ii1I
 Oo0OO0000oooo = lisp_get_timestamp ( )
 O0ooo = None
 IiIIiII1I = False
 if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
 o00oOOo0Oo = chacha . ChaCha ( key . encrypt_key , ii ) . encrypt
 IiIIiII1I = True
 elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
 Oooo0o0oO = binascii . unhexlify ( key . encrypt_key )
 try :
 o0OOoOooO0ooO = AES . new ( Oooo0o0oO , AES . MODE_GCM , ii )
 o00oOOo0Oo = o0OOoOooO0ooO . encrypt
 O0ooo = o0OOoOooO0ooO . digest
 except :
 lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
 return ( [ self . packet , False ] )
 if 50 - 50: i11iIiiIii + OoooooooOO / O0 + o0oOOo0O0Ooo / i11iIiiIii + oO0o
 else :
 Oooo0o0oO = binascii . unhexlify ( key . encrypt_key )
 o00oOOo0Oo = AES . new ( Oooo0o0oO , AES . MODE_CBC , ii ) . encrypt
 if 90 - 90: iII111i * Ii1I - iII111i + OoO0O00 + I11i % O0
 if 11 - 11: OOooOOo % I1Ii111 * OoOoOO00
 OoO00oo0 = o00oOOo0Oo ( Oo00oo )
 if 96 - 96: i1IIi
 if ( OoO00oo0 == None ) : return ( [ self . packet , False ] )
# Encryption elapsed time in usec (microsecond digits of the float
# delta), reported in the debug line below.
 Oo0OO0000oooo = int ( str ( time . time ( ) - Oo0OO0000oooo ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
 if 55 - 55: oO0o + OOooOOo + Ii1I
 if 82 - 82: I1ii11iIi11i . II111iiii / OoOoOO00 / OoO0O00
 if 47 - 47: iII111i + O0 / II111iiii * I1IiiI - OoooooooOO . Ii1I
 if 28 - 28: oO0o . oO0o . iIii1I11I1II1 . OOooOOo . I1ii11iIi11i * i11iIiiIii
 if 72 - 72: I11i
 if 26 - 26: IiII % Oo0Ooo
# chacha returns a str; convert to bytes without altering byte values.
 if ( IiIIiII1I ) :
 OoO00oo0 = OoO00oo0 . encode ( "raw_unicode_escape" )
 if 72 - 72: O0 + o0oOOo0O0Ooo + I1IiiI / Oo0Ooo
 if 83 - 83: IiII - I1IiiI . Ii1I
 if 34 - 34: OoOoOO00 - oO0o * OoooooooOO
 if 5 - 5: i11iIiiIii * iII111i - Ii1I - I1ii11iIi11i - i1IIi + iII111i
 if 4 - 4: ooOoO0o + O0 . i1IIi * I1ii11iIi11i - o0oOOo0O0Ooo
 if 42 - 42: o0oOOo0O0Ooo * OoOoOO00 . OoO0O00 - iII111i / II111iiii
 if 25 - 25: Oo0Ooo % OoOoOO00
# Append the GCM authentication tag when in GCM mode.
 if ( O0ooo != None ) : OoO00oo0 += O0ooo ( )
 if 75 - 75: i1IIi
 if 74 - 74: Oo0Ooo + I1Ii111 - oO0o - OoO0O00 + iII111i - iIii1I11I1II1
 if 54 - 54: I1ii11iIi11i + II111iiii . I1IiiI / OoO0O00 . ooOoO0o
 if 58 - 58: IiII % i11iIiiIii * II111iiii . I1ii11iIi11i
 if 94 - 94: i11iIiiIii . OOooOOo + iIii1I11I1II1 * I1Ii111 * I1Ii111
# Compute the ICV over lisp-header + IV + ciphertext, then log.
 self . lisp_header . key_id ( key . key_id )
 OoIi11ii1 = self . lisp_header . encode ( )
 if 36 - 36: I11i - IiII . IiII
 Oo0OOOO0oOoo0 = key . do_icv ( OoIi11ii1 + ii + OoO00oo0 , ii )
 if 92 - 92: IiII . Oo0Ooo - Oo0Ooo - o0oOOo0O0Ooo + I1Ii111 - O0
 i1I1Iiii = 4 if ( key . do_poly ) else 8
 if 15 - 15: ooOoO0o % o0oOOo0O0Ooo / oO0o - II111iiii . iIii1I11I1II1
 ii1111Iii11i = bold ( "Encrypt" , False )
 O0o0oo0O = bold ( key . cipher_suite_string , False )
 addr_str = "RLOC: " + red ( addr_str , False )
 Ooo00OOo000 = "poly" if key . do_poly else "sha256"
 Ooo00OOo000 = bold ( Ooo00OOo000 , False )
 i1ooOO00o0 = "ICV({}): 0x{}...{}" . format ( Ooo00OOo000 , Oo0OOOO0oOoo0 [ 0 : i1I1Iiii ] , Oo0OOOO0oOoo0 [ - i1I1Iiii : : ] )
 dprint ( "{} for key-id: {}, {}, {}, {}-time: {} usec" . format ( ii1111Iii11i , key . key_id , addr_str , i1ooOO00o0 , O0o0oo0O , Oo0OO0000oooo ) )
 if 44 - 44: I1IiiI % OOooOOo * i11iIiiIii * i11iIiiIii - Oo0Ooo . I1Ii111
 if 68 - 68: iII111i . I11i
# Pack the hex ICV string into wire format: 16 bytes for poly1305,
# 20 bytes for sha256-based ICVs, with 64-bit halves byte-swapped.
 Oo0OOOO0oOoo0 = int ( Oo0OOOO0oOoo0 , 16 )
 if ( key . do_poly ) :
 i111iiIiiIiI = byte_swap_64 ( ( Oo0OOOO0oOoo0 >> 64 ) & LISP_8_64_MASK )
 OOooooO = byte_swap_64 ( Oo0OOOO0oOoo0 & LISP_8_64_MASK )
 Oo0OOOO0oOoo0 = struct . pack ( "QQ" , i111iiIiiIiI , OOooooO )
 else :
 i111iiIiiIiI = byte_swap_64 ( ( Oo0OOOO0oOoo0 >> 96 ) & LISP_8_64_MASK )
 OOooooO = byte_swap_64 ( ( Oo0OOOO0oOoo0 >> 32 ) & LISP_8_64_MASK )
 oOoo00 = socket . htonl ( Oo0OOOO0oOoo0 & 0xffffffff )
 Oo0OOOO0oOoo0 = struct . pack ( "QQI" , i111iiIiiIiI , OOooooO , oOoo00 )
 if 29 - 29: OOooOOo / OoOoOO00 . iIii1I11I1II1 / I11i % OoOoOO00 % iII111i
 if 49 - 49: II111iiii / IiII - Ii1I
 return ( [ ii + OoO00oo0 + Oo0OOOO0oOoo0 , True ] )
if 7 - 7: I1IiiI / OoO0O00 + I1Ii111 + I11i / I1IiiI
if 82 - 82: I1ii11iIi11i + OoooooooOO
def decrypt ( self , packet , header_length , key , addr_str ) :
#
# Verify the trailing ICV of an encrypted LISP data packet and decrypt
# the payload with the key's cipher suite.  "packet" is IV + ciphertext
# + ICV (the outer/UDP/LISP headers already stripped); "header_length"
# is how much of self.packet to keep as the header prefix.  Returns
# [plaintext, True] on success or [None, False] on ICV or decrypt
# failure (self.packet_error is set accordingly).
#
 if 21 - 21: oO0o * oO0o / I11i . iII111i
 if 10 - 10: Ii1I * OOooOOo - Oo0Ooo - OoooooooOO / o0oOOo0O0Ooo
 if 86 - 86: I1Ii111 % I1IiiI
 if 22 - 22: i11iIiiIii * I1Ii111 . Oo0Ooo . OoooooooOO + I1IiiI
 if 24 - 24: II111iiii / Ii1I . iIii1I11I1II1 - II111iiii % O0
 if 8 - 8: OoO0O00 % iII111i . OoooooooOO - Ii1I % OoooooooOO
# Strip and normalize the trailing ICV: 16 bytes for poly1305,
# 20 bytes for sha256-based ICVs; rebuild it as a zero-filled hex
# string for comparison.
 if ( key . do_poly ) :
 i111iiIiiIiI , OOooooO = struct . unpack ( "QQ" , packet [ - 16 : : ] )
 oOooo = byte_swap_64 ( i111iiIiiIiI ) << 64
 oOooo |= byte_swap_64 ( OOooooO )
 oOooo = lisp_hex_string ( oOooo ) . zfill ( 32 )
 packet = packet [ 0 : - 16 ]
 i1I1Iiii = 4
 Iii1II1 = bold ( "poly" , False )
 else :
 i111iiIiiIiI , OOooooO , oOoo00 = struct . unpack ( "QQI" , packet [ - 20 : : ] )
 oOooo = byte_swap_64 ( i111iiIiiIiI ) << 96
 oOooo |= byte_swap_64 ( OOooooO ) << 32
 oOooo |= socket . htonl ( oOoo00 )
 oOooo = lisp_hex_string ( oOooo ) . zfill ( 40 )
 packet = packet [ 0 : - 20 ]
 i1I1Iiii = 8
 Iii1II1 = bold ( "sha" , False )
 if 54 - 54: OoOoOO00 . Oo0Ooo
 OoIi11ii1 = self . lisp_header . encode ( )
 if 38 - 38: i1IIi . Oo0Ooo * Oo0Ooo / I1ii11iIi11i
 if 65 - 65: ooOoO0o % O0
 if 17 - 17: i1IIi + oO0o . I11i + i1IIi - II111iiii % I1IiiI
 if 34 - 34: I1IiiI
# IV length depends on the cipher suite.
 if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
 o0OoOo0O00 = 8
 O0o0oo0O = bold ( "chacha" , False )
 elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
 o0OoOo0O00 = 12
 O0o0oo0O = bold ( "aes-gcm" , False )
 else :
 o0OoOo0O00 = 16
 O0o0oo0O = bold ( "aes-cbc" , False )
 if 9 - 9: OOooOOo
 ii = packet [ 0 : o0OoOo0O00 ]
 if 38 - 38: I11i . OoO0O00 . i11iIiiIii * OoooooooOO + iII111i
 if 49 - 49: Oo0Ooo - OoO0O00 / I1Ii111 / o0oOOo0O0Ooo % oO0o
 if 38 - 38: o0oOOo0O0Ooo . oO0o / o0oOOo0O0Ooo % II111iiii
 if 47 - 47: I11i * iIii1I11I1II1 * iII111i - OoO0O00 . O0 . ooOoO0o
# Recompute the ICV over lisp-header + IV + ciphertext and compare
# with the ICV carried in the packet.
 iIiiIiIIiI = key . do_icv ( OoIi11ii1 + packet , ii )
 if 93 - 93: IiII % I1ii11iIi11i
 IiIIii = "0x{}...{}" . format ( oOooo [ 0 : i1I1Iiii ] , oOooo [ - i1I1Iiii : : ] )
 oo0O0 = "0x{}...{}" . format ( iIiiIiIIiI [ 0 : i1I1Iiii ] , iIiiIiIIiI [ - i1I1Iiii : : ] )
 if 34 - 34: II111iiii - IiII % OoOoOO00 % Ii1I / ooOoO0o
 if ( iIiiIiIIiI != oOooo ) :
 self . packet_error = "ICV-error"
 Ii1II = O0o0oo0O + "/" + Iii1II1
 IIiII = bold ( "ICV failed ({})" . format ( Ii1II ) , False )
 i1ooOO00o0 = "packet-ICV {} != computed-ICV {}" . format ( IiIIii , oo0O0 )
 dprint ( ( "{} from RLOC {}, receive-port: {}, key-id: {}, " + "packet dropped, {}" ) . format ( IIiII , red ( addr_str , False ) ,
 # I1IiiI + o0oOOo0O0Ooo - IiII
 self . udp_sport , key . key_id , i1ooOO00o0 ) )
 dprint ( "{}" . format ( key . print_keys ( ) ) )
 if 85 - 85: iII111i * iII111i % OoOoOO00 - OOooOOo % OoO0O00 - I1IiiI
 if 3 - 3: OOooOOo + i1IIi % I1ii11iIi11i
 if 100 - 100: OoooooooOO + i11iIiiIii % o0oOOo0O0Ooo + I1IiiI . Oo0Ooo . II111iiii
 if 93 - 93: II111iiii . i11iIiiIii + II111iiii % oO0o
 if 98 - 98: I1Ii111 * oO0o * OoOoOO00 + Ii1I * iII111i
 if 4 - 4: IiII
# On mismatch, give other stored keys for this RLOC a chance before
# dropping the packet.
 lisp_retry_decap_keys ( addr_str , OoIi11ii1 + packet , ii , oOooo )
 return ( [ None , False ] )
 if 16 - 16: iIii1I11I1II1 * iII111i + oO0o . O0 . o0oOOo0O0Ooo
 if 99 - 99: i11iIiiIii - iII111i
 if 85 - 85: I1Ii111 % I1ii11iIi11i
 if 95 - 95: OoO0O00 * OOooOOo * iII111i . o0oOOo0O0Ooo
 if 73 - 73: OoO0O00
# ICV good -- strip the IV and decrypt the remaining ciphertext.
 packet = packet [ o0OoOo0O00 : : ]
 if 28 - 28: OoooooooOO - I11i
 if 84 - 84: II111iiii
 if 36 - 36: OOooOOo - OoOoOO00 - iIii1I11I1II1
 if 10 - 10: I1ii11iIi11i / Ii1I * i1IIi % O0 + I11i
 Oo0OO0000oooo = lisp_get_timestamp ( )
 if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
 I1i1ii1ii = chacha . ChaCha ( key . encrypt_key , ii ) . decrypt
 elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
 Oooo0o0oO = binascii . unhexlify ( key . encrypt_key )
 try :
 I1i1ii1ii = AES . new ( Oooo0o0oO , AES . MODE_GCM , ii ) . decrypt
 except :
 self . packet_error = "no-decrypt-key"
 lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
 return ( [ None , False ] )
 if 32 - 32: IiII / OoooooooOO
 else :
 if ( ( len ( packet ) % 16 ) != 0 ) :
 dprint ( "Ciphertext not multiple of 16 bytes, packet dropped" )
 return ( [ None , False ] )
 if 30 - 30: OoOoOO00 / I1IiiI - OoO0O00 - iII111i - i11iIiiIii
 Oooo0o0oO = binascii . unhexlify ( key . encrypt_key )
 I1i1ii1ii = AES . new ( Oooo0o0oO , AES . MODE_CBC , ii ) . decrypt
 if 84 - 84: i1IIi - I1IiiI % iII111i
 if 80 - 80: o0oOOo0O0Ooo % iII111i
 ooOooOooOOO = I1i1ii1ii ( packet )
# Decryption elapsed time in usec, for the debug line below.
 Oo0OO0000oooo = int ( str ( time . time ( ) - Oo0OO0000oooo ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
 if 59 - 59: I11i
 if 63 - 63: OoO0O00 . oO0o + I1Ii111 . OoOoOO00 / i11iIiiIii / iII111i
 if 46 - 46: Oo0Ooo + II111iiii * I1IiiI + OOooOOo
 if 31 - 31: Ii1I * o0oOOo0O0Ooo * Ii1I + OoO0O00 * o0oOOo0O0Ooo . I1Ii111
 ii1111Iii11i = bold ( "Decrypt" , False )
 addr_str = "RLOC: " + red ( addr_str , False )
 Ooo00OOo000 = "poly" if key . do_poly else "sha256"
 Ooo00OOo000 = bold ( Ooo00OOo000 , False )
 i1ooOO00o0 = "ICV({}): {}" . format ( Ooo00OOo000 , IiIIii )
 dprint ( "{} for key-id: {}, {}, {} (good), {}-time: {} usec" . format ( ii1111Iii11i , key . key_id , addr_str , i1ooOO00o0 , O0o0oo0O , Oo0OO0000oooo ) )
 if 89 - 89: OoooooooOO * Ii1I * I1IiiI . ooOoO0o * Ii1I / iII111i
 if 46 - 46: i11iIiiIii
 if 15 - 15: O0 / i1IIi / i1IIi . iII111i % OoOoOO00 + I1IiiI
 if 48 - 48: I1Ii111 % iII111i % Ii1I % iIii1I11I1II1 . Ii1I
 if 14 - 14: iII111i * OoO0O00 % O0 + I11i + I1ii11iIi11i
 if 23 - 23: Oo0Ooo % iII111i + Ii1I - I1Ii111
 if 65 - 65: OoooooooOO
# Keep only the header prefix in self.packet; return the plaintext.
 self . packet = self . packet [ 0 : header_length ]
 return ( [ ooOooOooOOO , True ] )
if 22 - 22: OOooOOo + II111iiii + Oo0Ooo
if 83 - 83: ooOoO0o
def fragment_outer ( self , outer_hdr , inner_packet ) :

#
# Fragment an encapsulated packet at the outer IPv4 header.  The inner
# packet is carved into 1000-byte pieces; each piece is prepended with
# a copy of outer_hdr whose fragment-offset/more-fragments field,
# total-length field, and IP checksum are rewritten.  Returns the list
# of ready-to-send fragment packets.
#
 fragment_size = 1000

 #
 # Carve the inner packet into fragment_size chunks.
 #
 chunks = [ ]
 offset = 0
 inner_length = len ( inner_packet )
 while ( offset < inner_length ) :
  chunk = inner_packet [ offset : : ]
  if ( len ( chunk ) > fragment_size ) : chunk = chunk [ 0 : fragment_size ]
  chunks . append ( chunk )
  offset += len ( chunk )

 #
 # Prepend a rewritten outer header to each chunk.  The offset field is
 # in 8-byte units; bit 0x2000 is the more-fragments flag, set on every
 # fragment except the last.
 #
 fragments = [ ]
 offset = 0
 for chunk in chunks :
  frag_field = offset if ( chunk == chunks [ - 1 ] ) else 0x2000 + offset
  frag_field = socket . htons ( frag_field )
  outer_hdr = outer_hdr [ 0 : 6 ] + struct . pack ( "H" , frag_field ) + outer_hdr [ 8 : : ]

  total_length = socket . htons ( len ( chunk ) + 20 )
  outer_hdr = outer_hdr [ 0 : 2 ] + struct . pack ( "H" , total_length ) + outer_hdr [ 4 : : ]
  outer_hdr = lisp_ip_checksum ( outer_hdr )
  fragments . append ( outer_hdr + chunk )

  #
  # Bug fix: must be integer division.  Under Python 3, "/ 8" yields a
  # float, making the next fragment's offset a float, which then makes
  # socket.htons()/struct.pack("H", ...) raise on the second fragment.
  #
  offset += len ( chunk ) // 8

 return ( fragments )
if 14 - 14: I1ii11iIi11i * I1Ii111 % i1IIi / I1ii11iIi11i
if 48 - 48: Oo0Ooo
def send_icmp_too_big ( self , inner_packet ) :
#
# Build and send an ICMP type-3 code-4 (Fragmentation Needed /
# "Too-Big") message, advertising MTU 1400, back to the inner source
# of a packet whose DF bit prevented fragmentation.  Rate-limited via
# the global lisp_last_icmp_too_big_sent timestamp.  Returns True when
# the ICMP message was sent, False when rate-limited or on send error.
#
 global lisp_last_icmp_too_big_sent
 global lisp_icmp_raw_socket
 if 75 - 75: I1ii11iIi11i - IiII * Oo0Ooo . OoooooooOO * I1Ii111 * I1IiiI
 i1i111Iiiiiii = time . time ( ) - lisp_last_icmp_too_big_sent
 if ( i1i111Iiiiiii < LISP_ICMP_TOO_BIG_RATE_LIMIT ) :
 lprint ( "Rate limit sending ICMP Too-Big to {}" . format ( self . inner_source . print_address_no_iid ( ) ) )
 if 30 - 30: OoOoOO00 / oO0o / Ii1I * o0oOOo0O0Ooo * oO0o . I1IiiI
 return ( False )
 if 93 - 93: OoOoOO00
 if 97 - 97: i11iIiiIii
 if 68 - 68: IiII * OoO0O00 . I11i / Ii1I . o0oOOo0O0Ooo - i11iIiiIii
 if 49 - 49: Oo0Ooo / Ii1I % I11i + oO0o - OoO0O00
 if 13 - 13: II111iiii
 if 83 - 83: OoooooooOO . I1IiiI + Ii1I * O0 / oO0o
 if 8 - 8: i1IIi + II111iiii / Ii1I + I1ii11iIi11i % Ii1I - iIii1I11I1II1
 if 29 - 29: Oo0Ooo + II111iiii
 if 95 - 95: oO0o
 if 48 - 48: I11i / iIii1I11I1II1 % II111iiii
 if 39 - 39: i1IIi . I1ii11iIi11i / I11i / I11i
 if 100 - 100: OoooooooOO - OoooooooOO + IiII
 if 32 - 32: OoOoOO00 * o0oOOo0O0Ooo / OoooooooOO
 if 90 - 90: I1Ii111
 if 35 - 35: II111iiii / Ii1I
# ICMP payload: type 3, code 4, zero checksum placeholder, next-hop
# MTU 1400, followed by the offending packet's IP header + 8 bytes.
 OO0000 = socket . htons ( 1400 )
 IIii1III = struct . pack ( "BBHHH" , 3 , 4 , 0 , 0 , OO0000 )
 IIii1III += inner_packet [ 0 : 20 + 8 ]
 IIii1III = lisp_icmp_checksum ( IIii1III )
 if 79 - 79: i1IIi / Oo0Ooo - I1IiiI . O0
 if 56 - 56: IiII % O0 * i1IIi - II111iiii
 if 74 - 74: i1IIi - OoOoOO00 % oO0o . O0 - OoooooooOO
 if 84 - 84: I1Ii111
 if 53 - 53: i1IIi
 if 59 - 59: o0oOOo0O0Ooo + I1IiiI % OoooooooOO - iIii1I11I1II1
 if 9 - 9: i1IIi - OoOoOO00
# Send the ICMP back to the inner source; the IP source of the ICMP is
# this system's outer RLOC.  The original sender's address bytes come
# straight out of the inner IP header (offset 12-16).
 Oo00o0OOo0OO = inner_packet [ 12 : 16 ]
 I1i1iiIi = self . inner_source . print_address_no_iid ( )
 IIi1IiiIi1III = self . outer_source . pack_address ( )
 if 19 - 19: i1IIi % I1IiiI - iIii1I11I1II1 - oO0o / I1ii11iIi11i
 if 16 - 16: Ii1I
 if 79 - 79: OoooooooOO - ooOoO0o * Ii1I - II111iiii % OoOoOO00 * IiII
 if 31 - 31: I1IiiI
 if 36 - 36: OoO0O00 + OoO0O00 + OoO0O00 % Oo0Ooo * iII111i
 if 98 - 98: I11i . I11i / Oo0Ooo / Ii1I / I1IiiI
 if 56 - 56: o0oOOo0O0Ooo / IiII
 if 11 - 11: OoOoOO00 / I11i
# Build the outer IPv4 header (20 bytes + 36-byte ICMP message, TTL 32,
# protocol 1 = ICMP) and send on the raw socket.
 I1iIIi = socket . htons ( 20 + 36 )
 O0O = struct . pack ( "BBHHHBBH" , 0x45 , 0 , I1iIIi , 0 , 0 , 32 , 1 , 0 ) + IIi1IiiIi1III + Oo00o0OOo0OO
 O0O = lisp_ip_checksum ( O0O )
 O0O = self . fix_outer_header ( O0O )
 O0O += IIii1III
 IIOoOOoOo = bold ( "Too-Big" , False )
 lprint ( "Send ICMP {} to {}, mtu 1400: {}" . format ( IIOoOOoOo , I1i1iiIi ,
 lisp_format_packet ( O0O ) ) )
 if 37 - 37: iIii1I11I1II1 . I1IiiI % OoO0O00 % OoooooooOO . OoooooooOO / O0
 try :
 lisp_icmp_raw_socket . sendto ( O0O , ( I1i1iiIi , 0 ) )
 except socket . error as oO0ooOOO :
 lprint ( "lisp_icmp_raw_socket.sendto() failed: {}" . format ( oO0ooOOO ) )
 return ( False )
 if 25 - 25: II111iiii % II111iiii - Ii1I . O0
 if 79 - 79: IiII / OoO0O00 * OoooooooOO * OoOoOO00 + I1IiiI
 if 68 - 68: I11i / iIii1I11I1II1 . Oo0Ooo + i11iIiiIii + o0oOOo0O0Ooo
 if 92 - 92: OoO0O00 . o0oOOo0O0Ooo . Ii1I % OoOoOO00
 if 58 - 58: I1ii11iIi11i % Ii1I * Ii1I - iII111i
 if 9 - 9: ooOoO0o - Ii1I % II111iiii + IiII + OOooOOo % O0
# Record send time for rate limiting.
 lisp_last_icmp_too_big_sent = lisp_get_timestamp ( )
 return ( True )
if 65 - 65: OOooOOo - OoO0O00 % i11iIiiIii
def fragment ( self ) :
    """Fragment self.packet (outer+UDP+LISP+inner) for a 1500-byte MTU.

    Returns a (packet-list, reason-string) tuple:
      ([packet], "Fragment-None")       -- fits in 1500 bytes, send as-is
      (list, "Fragment-Outer")          -- non-IPv4 inner: outer header fragmented
      (list, "Fragment-Inner")          -- IPv4 inner: inner header fragmented
      ([], None)                        -- dropped, ICMP Too-Big was sent
      ([], "Fragment-None-DF-bit")      -- dropped, inner DF-bit set
    """
    global lisp_icmp_raw_socket
    global lisp_ignore_df_bit
    if 58 - 58: iII111i
    Oo00oo = self . fix_outer_header ( self . packet )
    if 2 - 2: II111iiii + i1IIi
    if 68 - 68: OOooOOo + Ii1I
    if 58 - 58: IiII * Ii1I . i1IIi
    if 19 - 19: oO0o
    if 85 - 85: ooOoO0o - I1IiiI / i1IIi / OoO0O00 / II111iiii
    if 94 - 94: iIii1I11I1II1 + IiII
    # Fast path: no fragmentation needed when within the 1500-byte MTU.
    i1 = len ( Oo00oo )
    if ( i1 <= 1500 ) : return ( [ Oo00oo ] , "Fragment-None" )
    if 44 - 44: OoO0O00 + I11i % OoO0O00 + i1IIi + iII111i + O0
    Oo00oo = self . packet
    if 18 - 18: iIii1I11I1II1 % iIii1I11I1II1 % oO0o + I1IiiI % ooOoO0o / Ii1I
    if 36 - 36: OoOoOO00 . i11iIiiIii
    if 81 - 81: Oo0Ooo * iII111i * OoO0O00
    if 85 - 85: O0 * oO0o
    if 39 - 39: II111iiii * I1IiiI - iIii1I11I1II1
    # Non-IPv4 inner packet: fragment at the outer IPv4 header instead,
    # with a random IP identification value.
    if ( self . inner_version != 4 ) :
        Ii1o0OOOoo0000 = random . randint ( 0 , 0xffff )
        IiIIii1i1i11iII = Oo00oo [ 0 : 4 ] + struct . pack ( "H" , Ii1o0OOOoo0000 ) + Oo00oo [ 6 : 20 ]
        o0II1 = Oo00oo [ 20 : : ]
        o0o0O00oOo = self . fragment_outer ( IiIIii1i1i11iII , o0II1 )
        return ( o0o0O00oOo , "Fragment-Outer" )
    if 86 - 86: oO0o . I1IiiI - I1Ii111 + iIii1I11I1II1
    if 66 - 66: I11i - I11i + IiII
    if 20 - 20: I1Ii111 . i1IIi
    if 9 - 9: OoO0O00
    if 89 - 89: i1IIi
    # Split buffer into: outer headers (IPv6 outer: 40+8+8=56 bytes,
    # IPv4 outer: 20+8+8=36), inner 20-byte IPv4 header, and inner payload.
    I11II = 56 if ( self . outer_version == 6 ) else 36
    IiIIii1i1i11iII = Oo00oo [ 0 : I11II ]
    OOO = Oo00oo [ I11II : I11II + 20 ]
    o0II1 = Oo00oo [ I11II + 20 : : ]
    if 58 - 58: I1Ii111 . i11iIiiIii + OoooooooOO / i11iIiiIii . OoooooooOO % I1IiiI
    if 58 - 58: oO0o + I1ii11iIi11i % OoOoOO00
    if 22 - 22: iIii1I11I1II1 - Ii1I / I1IiiI * IiII
    if 26 - 26: o0oOOo0O0Ooo + OOooOOo - o0oOOo0O0Ooo + Oo0Ooo . oO0o
    if 97 - 97: i1IIi
    # Honor the inner DF-bit (0x4000): try ICMP Too-Big, else drop,
    # unless configured to ignore the DF-bit.
    ii1iI1i1 = struct . unpack ( "H" , OOO [ 6 : 8 ] ) [ 0 ]
    ii1iI1i1 = socket . ntohs ( ii1iI1i1 )
    if ( ii1iI1i1 & 0x4000 ) :
        if ( lisp_icmp_raw_socket != None ) :
            o0o0oo0OOo0O0 = Oo00oo [ I11II : : ]
            if ( self . send_icmp_too_big ( o0o0oo0OOo0O0 ) ) : return ( [ ] , None )
        if 37 - 37: o0oOOo0O0Ooo * Oo0Ooo
        if ( lisp_ignore_df_bit ) :
            ii1iI1i1 &= ~ 0x4000
        else :
            iI11i1I1i = bold ( "DF-bit set" , False )
            dprint ( "{} in inner header, packet discarded" . format ( iI11i1I1i ) )
            return ( [ ] , "Fragment-None-DF-bit" )
    if 96 - 96: I1Ii111 / IiII * iIii1I11I1II1 + i11iIiiIii * I1ii11iIi11i / I1IiiI
    if 93 - 93: O0 * iIii1I11I1II1 + Ii1I % iII111i
    if 96 - 96: oO0o % Oo0Ooo
    # Chop the inner payload into 1400-byte pieces.
    oo00 = 0
    i1 = len ( o0II1 )
    o0o0O00oOo = [ ]
    while ( oo00 < i1 ) :
        o0o0O00oOo . append ( o0II1 [ oo00 : oo00 + 1400 ] )
        oo00 += 1400
    if 20 - 20: ooOoO0o . IiII / I11i . OoooooooOO * OOooOOo + Ii1I
    if 2 - 2: I1IiiI
    if 11 - 11: OOooOOo + iIii1I11I1II1 / OoOoOO00 % O0
    if 98 - 98: II111iiii + Oo0Ooo * iIii1I11I1II1 * I1ii11iIi11i + OOooOOo * Ii1I
    if 76 - 76: ooOoO0o . oO0o
    # Build a full packet per piece; carry the original more-fragments
    # bit and fragment offset (units of 8 bytes) forward.
    iIiIiiIIIi1 = o0o0O00oOo
    o0o0O00oOo = [ ]
    oO00OO0o0ooO = True if ii1iI1i1 & 0x2000 else False
    ii1iI1i1 = ( ii1iI1i1 & 0x1fff ) * 8
    for Ii in iIiIiiIIIi1 :
        if 42 - 42: O0 * iII111i . OoOoOO00 / OOooOOo - Ii1I . I11i
        if 57 - 57: o0oOOo0O0Ooo + Oo0Ooo * I1ii11iIi11i - ooOoO0o % iIii1I11I1II1 - Ii1I
        if 37 - 37: OoO0O00 * I11i + Ii1I + I1ii11iIi11i * o0oOOo0O0Ooo
        if 95 - 95: Ii1I - i11iIiiIii % i11iIiiIii - O0 * I1Ii111
        # Set more-fragments on every piece but the last (or on all pieces
        # when the original packet was itself a non-final fragment).
        Oo0O0oOoO0o0 = old_div ( ii1iI1i1 , 8 )
        if ( oO00OO0o0ooO ) :
            Oo0O0oOoO0o0 |= 0x2000
        elif ( Ii != iIiIiiIIIi1 [ - 1 ] ) :
            Oo0O0oOoO0o0 |= 0x2000
        if 21 - 21: I1IiiI - I1IiiI + iII111i % I1IiiI * oO0o
        Oo0O0oOoO0o0 = socket . htons ( Oo0O0oOoO0o0 )
        OOO = OOO [ 0 : 6 ] + struct . pack ( "H" , Oo0O0oOoO0o0 ) + OOO [ 8 : : ]
        if 74 - 74: iII111i / I11i . I1IiiI - OoooooooOO + II111iiii + I11i
        if 36 - 36: Ii1I * I1IiiI * I1ii11iIi11i . I11i * I1ii11iIi11i
        if 76 - 76: OOooOOo + O0 / IiII - OoO0O00
        if 27 - 27: Oo0Ooo - iIii1I11I1II1 * iII111i * II111iiii * I1ii11iIi11i
        if 9 - 9: i11iIiiIii + OOooOOo - OoOoOO00 / ooOoO0o % i1IIi / oO0o
        if 22 - 22: i1IIi
        # Rewrite inner total-length, zero the checksum field, recompute it.
        i1 = len ( Ii )
        ii1iI1i1 += i1
        oOO0O0ooOOOo = socket . htons ( i1 + 20 )
        OOO = OOO [ 0 : 2 ] + struct . pack ( "H" , oOO0O0ooOOOo ) + OOO [ 4 : 10 ] + struct . pack ( "H" , 0 ) + OOO [ 12 : : ]
        if 3 - 3: OoO0O00 * I1ii11iIi11i - iII111i + I1ii11iIi11i
        OOO = lisp_ip_checksum ( OOO )
        O0000oO00oO0o = OOO + Ii
        if 86 - 86: o0oOOo0O0Ooo / ooOoO0o . o0oOOo0O0Ooo % I1IiiI + oO0o % I11i
        if 72 - 72: ooOoO0o - I1ii11iIi11i + oO0o . OoOoOO00
        if 44 - 44: I1ii11iIi11i / O0 - IiII + OOooOOo . I11i . I1ii11iIi11i
        if 95 - 95: OoOoOO00 % I1Ii111 % i1IIi * o0oOOo0O0Ooo + OOooOOo
        if 34 - 34: I1Ii111 * o0oOOo0O0Ooo . I1IiiI % i11iIiiIii
        # For an IPv4 outer header: fix its total length + checksum and
        # prepend it to the fragment.
        i1 = len ( O0000oO00oO0o )
        if ( self . outer_version == 4 ) :
            oOO0O0ooOOOo = i1 + I11II
            i1 += 16
            IiIIii1i1i11iII = IiIIii1i1i11iII [ 0 : 2 ] + struct . pack ( "H" , oOO0O0ooOOOo ) + IiIIii1i1i11iII [ 4 : : ]
            if 61 - 61: iIii1I11I1II1 + oO0o * I11i - i1IIi % oO0o
            IiIIii1i1i11iII = lisp_ip_checksum ( IiIIii1i1i11iII )
            O0000oO00oO0o = IiIIii1i1i11iII + O0000oO00oO0o
            O0000oO00oO0o = self . fix_outer_header ( O0000oO00oO0o )
        if 76 - 76: oO0o / OoOoOO00
        if 12 - 12: I1Ii111
        if 58 - 58: OoO0O00 + iIii1I11I1II1 % O0 + I11i + OoOoOO00 * OoooooooOO
        if 41 - 41: oO0o * I1IiiI
        if 76 - 76: oO0o . O0 * OoooooooOO + ooOoO0o
        # Patch the outer UDP length field (located 12 bytes before the
        # end of the outer headers).
        oo0O00 = I11II - 12
        oOO0O0ooOOOo = socket . htons ( i1 )
        O0000oO00oO0o = O0000oO00oO0o [ 0 : oo0O00 ] + struct . pack ( "H" , oOO0O0ooOOOo ) + O0000oO00oO0o [ oo0O00 + 2 : : ]
        if 19 - 19: i1IIi / IiII + I1ii11iIi11i * I1ii11iIi11i
        o0o0O00oOo . append ( O0000oO00oO0o )
    if 90 - 90: OoooooooOO * iII111i . i11iIiiIii . ooOoO0o - I1Ii111
    return ( o0o0O00oOo , "Fragment-Inner" )
if 81 - 81: I1IiiI / OoooooooOO
if 52 - 52: oO0o + I1Ii111 * I1Ii111 * Oo0Ooo - iIii1I11I1II1 + I1ii11iIi11i
def fix_outer_header(self, packet):
    """Byte-swap the outer IPv4 total-length field (and, on macOS, the
    fragment-offset field too) in 'packet' and return the result.

    Raw IP sockets on some platforms expect these fields in host byte
    order rather than network byte order; when neither the outer nor the
    inner header is IPv4 the packet is returned unchanged.
    """
    if (self.outer_version == 4 or self.inner_version == 4):
        swapped_length = packet[0:2] + packet[3:4] + packet[2:3]
        if (lisp_is_macos()):
            # macOS also wants the fragment-offset field (bytes 6-7) swapped.
            packet = swapped_length + packet[4:6] + packet[7:8] + \
                packet[6:7] + packet[8::]
        else:
            packet = swapped_length + packet[4::]
    return(packet)
if 87 - 87: IiII / II111iiii % OoO0O00 % OoO0O00
if 28 - 28: OoOoOO00 % oO0o - OOooOOo + OOooOOo + oO0o / iIii1I11I1II1
def send_packet(self, lisp_raw_socket, dest):
    """Fragment self.packet as needed and transmit each piece to 'dest'
    (an address object) over the supplied raw socket. Send failures are
    logged and otherwise ignored."""
    if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)

    address_string = dest.print_address_no_iid()
    frags, stage_tag = self.fragment()
    fragmented = (len(frags) != 1)

    for frag in frags:
        # When fragmentation happened, point self.packet at the piece so
        # print_packet() shows what actually goes on the wire.
        if (fragmented): self.packet = frag

        self.print_packet(stage_tag, True)

        try:
            lisp_raw_socket.sendto(frag, (address_string, 0))
        except socket.error as sock_err:
            lprint("socket.sendto() failed: {}".format(sock_err))
if 19 - 19: I1ii11iIi11i / I1Ii111
if 35 - 35: Oo0Ooo * oO0o / OoooooooOO + O0 / OoooooooOO / OOooOOo
if 44 - 44: i1IIi . I1ii11iIi11i - ooOoO0o . OOooOOo . o0oOOo0O0Ooo + oO0o
if 17 - 17: iIii1I11I1II1 + i1IIi . I1ii11iIi11i + Ii1I % i1IIi . oO0o
def send_l2_packet(self, l2_socket, mac_header):
    """Prepend 'mac_header' to self.packet and write the resulting frame
    to the layer-2 tap socket. The packet is dropped (with a log line)
    when the socket or the MAC header is missing."""
    if (l2_socket == None):
        lprint("No layer-2 socket, drop IPv6 packet")
        return
    if (mac_header == None):
        lprint("Could not build MAC header, drop IPv6 packet")
        return

    frame = mac_header + self.packet
    l2_socket.write(frame)
    return
if 62 - 62: I1ii11iIi11i
if 47 - 47: I1Ii111 % OOooOOo * OoO0O00 . iIii1I11I1II1 % Oo0Ooo + OoooooooOO
def bridge_l2_packet(self, eid, db):
    """Bridge self.packet out the interface registered for dynamic-EID
    'eid' in mapping-database entry 'db'. Best-effort: any lookup failure
    or missing bridge socket silently drops the packet; only a send
    failure is logged.

    Fix: the original bound the bridge socket to a local named 'socket',
    shadowing the socket module -- a send failure then made the
    'except socket.error' clause itself raise AttributeError. The local
    is renamed so the module lookup works.
    """
    try:
        dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
    except:
        return
    try:
        interface = lisp_myinterfaces[dyn_eid.interface]
    except:
        return
    try:
        bridge_socket = interface.get_bridge_socket()
        if (bridge_socket == None): return
    except:
        return

    try:
        bridge_socket.send(self.packet)
    except socket.error as sock_err:
        lprint("bridge_l2_packet(): socket.send() failed: {}".format(sock_err))
if 40 - 40: ooOoO0o
if 80 - 80: I1IiiI * I1Ii111 % oO0o . i11iIiiIii % IiII
if 42 - 42: OoooooooOO * II111iiii
def is_lisp_packet(self, packet):
    """Return True when 'packet' (assumed to start with an IPv4 header)
    carries UDP with the LISP data port as source or destination port."""
    protocol = struct.unpack("B", packet[9:10])[0]
    if (protocol != LISP_UDP_PROTOCOL): return(False)

    # Check UDP destination port first, then source port.
    dport = socket.ntohs(struct.unpack("H", packet[22:24])[0])
    if (dport == LISP_DATA_PORT): return(True)
    sport = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    return(sport == LISP_DATA_PORT)
if 74 - 74: Oo0Ooo
if 91 - 91: OOooOOo . I1IiiI % iII111i
def decode ( self , is_lisp_packet , lisp_ipc_socket , stats ) :
    """Decode self.packet into this object's outer/UDP/LISP/inner fields.

    When is_lisp_packet is True the outer IP header, UDP header, and
    LISP data header are parsed (and the payload decrypted when the
    LISP header K-bits are set) before the inner header is decoded.
    'stats' (when not None) is a dict of counters indexed by error-name
    strings; on any decode error the matching counter is incremented,
    self.packet_error is set, and None is returned. Returns self on
    success.
    """
    self . packet_error = ""
    Oo00oo = self . packet
    OO00OO = len ( Oo00oo )
    IiIiIi11iiIi1 = OoOoO0O00oo = True
    if 71 - 71: O0 % O0
    if 96 - 96: Ii1I
    if 24 - 24: O0
    if 33 - 33: OoooooooOO + oO0o * II111iiii / OOooOOo
    ooooI11iii1iIIIIi = 0
    oooo = self . lisp_header . get_instance_id ( )
    if ( is_lisp_packet ) :
        # Outer IP header: version nibble selects IPv4 vs IPv6 parsing.
        III1i1iiI1 = struct . unpack ( "B" , Oo00oo [ 0 : 1 ] ) [ 0 ]
        self . outer_version = III1i1iiI1 >> 4
        if ( self . outer_version == 4 ) :
            if 62 - 62: Ii1I . i11iIiiIii % O0 % I1Ii111 - Oo0Ooo
            if 69 - 69: II111iiii . OoOoOO00 * OoOoOO00 % Ii1I + I1IiiI
            if 100 - 100: i11iIiiIii - Oo0Ooo
            if 47 - 47: iII111i * OoOoOO00 * IiII
            if 46 - 46: Ii1I
            # Verify the IPv4 header checksum: recomputing over the header
            # must yield a zero checksum field.
            ii1 = struct . unpack ( "H" , Oo00oo [ 10 : 12 ] ) [ 0 ]
            Oo00oo = lisp_ip_checksum ( Oo00oo )
            ii1II1II = struct . unpack ( "H" , Oo00oo [ 10 : 12 ] ) [ 0 ]
            if ( ii1II1II != 0 ) :
                if ( ii1 != 0 or lisp_is_macos ( ) == False ) :
                    self . packet_error = "checksum-error"
                    if ( stats ) :
                        stats [ self . packet_error ] . increment ( OO00OO )
                    if 64 - 64: Ii1I . OoooooooOO - I1ii11iIi11i
                    if 19 - 19: Oo0Ooo
                    lprint ( "IPv4 header checksum failed for outer header" )
                    if ( lisp_flow_logging ) : self . log_flow ( False )
                    return ( None )
                if 15 - 15: Oo0Ooo . ooOoO0o / o0oOOo0O0Ooo
            if 23 - 23: OoO0O00 % OoooooooOO * ooOoO0o
            if 6 - 6: I1IiiI . II111iiii + I1Ii111 / OoO0O00 % I1IiiI . OoooooooOO
            Oooo000 = LISP_AFI_IPV4
            oo00 = 12
            self . outer_tos = struct . unpack ( "B" , Oo00oo [ 1 : 2 ] ) [ 0 ]
            self . outer_ttl = struct . unpack ( "B" , Oo00oo [ 8 : 9 ] ) [ 0 ]
            ooooI11iii1iIIIIi = 20
        elif ( self . outer_version == 6 ) :
            Oooo000 = LISP_AFI_IPV6
            oo00 = 8
            IIii1i1 = struct . unpack ( "H" , Oo00oo [ 0 : 2 ] ) [ 0 ]
            self . outer_tos = ( socket . ntohs ( IIii1i1 ) >> 4 ) & 0xff
            self . outer_ttl = struct . unpack ( "B" , Oo00oo [ 7 : 8 ] ) [ 0 ]
            ooooI11iii1iIIIIi = 40
        else :
            self . packet_error = "outer-header-error"
            if ( stats ) : stats [ self . packet_error ] . increment ( OO00OO )
            lprint ( "Cannot decode outer header" )
            return ( None )
        if 98 - 98: I1ii11iIi11i - OoooooooOO / I1IiiI . ooOoO0o - i1IIi
        if 60 - 60: OoOoOO00 % OoOoOO00
        # Extract outer source/dest RLOC addresses, then strip the outer
        # IP header from the working buffer.
        self . outer_source . afi = Oooo000
        self . outer_dest . afi = Oooo000
        I1Ii11iI11ii = self . outer_source . addr_length ( )
        if 85 - 85: i1IIi
        self . outer_source . unpack_address ( Oo00oo [ oo00 : oo00 + I1Ii11iI11ii ] )
        oo00 += I1Ii11iI11ii
        self . outer_dest . unpack_address ( Oo00oo [ oo00 : oo00 + I1Ii11iI11ii ] )
        Oo00oo = Oo00oo [ ooooI11iii1iIIIIi : : ]
        self . outer_source . mask_len = self . outer_source . host_mask_len ( )
        self . outer_dest . mask_len = self . outer_dest . host_mask_len ( )
        if 64 - 64: OoOoOO00 % iIii1I11I1II1
        if 28 - 28: oO0o * o0oOOo0O0Ooo
        if 83 - 83: I1ii11iIi11i * I11i . OoooooooOO % Ii1I
        if 29 - 29: iII111i + II111iiii . i11iIiiIii . Ii1I - O0
        # UDP header: ports, length, checksum; then strip its 8 bytes.
        III = struct . unpack ( "H" , Oo00oo [ 0 : 2 ] ) [ 0 ]
        self . udp_sport = socket . ntohs ( III )
        III = struct . unpack ( "H" , Oo00oo [ 2 : 4 ] ) [ 0 ]
        self . udp_dport = socket . ntohs ( III )
        III = struct . unpack ( "H" , Oo00oo [ 4 : 6 ] ) [ 0 ]
        self . udp_length = socket . ntohs ( III )
        III = struct . unpack ( "H" , Oo00oo [ 6 : 8 ] ) [ 0 ]
        self . udp_checksum = socket . ntohs ( III )
        Oo00oo = Oo00oo [ 8 : : ]
        if 60 - 60: II111iiii . I11i / OoooooooOO + ooOoO0o . iIii1I11I1II1
        if 87 - 87: I1IiiI + I1ii11iIi11i % oO0o - Oo0Ooo
        if 33 - 33: II111iiii . I1ii11iIi11i - O0 * iIii1I11I1II1 % O0 . OoooooooOO
        if 53 - 53: Ii1I / I1IiiI * Ii1I + o0oOOo0O0Ooo + oO0o - Oo0Ooo
        # Classify as L3 (LISP data port) or L2 (L2/VXLAN port) encap.
        IiIiIi11iiIi1 = ( self . udp_dport == LISP_DATA_PORT or
            self . udp_sport == LISP_DATA_PORT )
        OoOoO0O00oo = ( self . udp_dport in ( LISP_L2_DATA_PORT , LISP_VXLAN_DATA_PORT ) )
        if 16 - 16: OoO0O00 % I1Ii111 . i1IIi / I1ii11iIi11i - O0
        if 85 - 85: i1IIi . i1IIi
        if 16 - 16: I1IiiI - OOooOOo % Ii1I . OOooOOo + I1ii11iIi11i % i11iIiiIii
        if 59 - 59: i11iIiiIii - I11i
        if ( self . lisp_header . decode ( Oo00oo ) == False ) :
            self . packet_error = "lisp-header-error"
            if ( stats ) : stats [ self . packet_error ] . increment ( OO00OO )
            if 59 - 59: OoooooooOO * o0oOOo0O0Ooo / I1Ii111
            if ( lisp_flow_logging ) : self . log_flow ( False )
            lprint ( "Cannot decode LISP header" )
            return ( None )
        if 75 - 75: o0oOOo0O0Ooo - OoooooooOO
        # Strip the 8-byte LISP header; header_len now covers UDP + LISP.
        Oo00oo = Oo00oo [ 8 : : ]
        oooo = self . lisp_header . get_instance_id ( )
        ooooI11iii1iIIIIi += 16
    if 21 - 21: I1IiiI + iIii1I11I1II1 / i11iIiiIii / oO0o
    if ( oooo == 0xffffff ) : oooo = 0
    if 66 - 66: OoooooooOO + iII111i . IiII % i1IIi
    if 58 - 58: OOooOOo % iII111i * O0 + I1ii11iIi11i - IiII
    if 26 - 26: i1IIi / I1IiiI / I11i + I11i
    if 46 - 46: I1Ii111 % I1ii11iIi11i + Ii1I
    # Decrypt the payload when the LISP header K-bits select a key.
    Ooii = False
    i11iII1 = self . lisp_header . k_bits
    if ( i11iII1 ) :
        O0O0 = lisp_get_crypto_decap_lookup_key ( self . outer_source ,
            self . udp_sport )
        if ( O0O0 == None ) :
            self . packet_error = "no-decrypt-key"
            if ( stats ) : stats [ self . packet_error ] . increment ( OO00OO )
            if 75 - 75: OOooOOo / i11iIiiIii / iIii1I11I1II1
            self . print_packet ( "Receive" , is_lisp_packet )
            i11iI1111ii1I = bold ( "No key available" , False )
            dprint ( "{} for key-id {} to decrypt packet" . format ( i11iI1111ii1I , i11iII1 ) )
            if ( lisp_flow_logging ) : self . log_flow ( False )
            return ( None )
        if 89 - 89: i11iIiiIii / O0 - i1IIi % Oo0Ooo + i11iIiiIii
        if 44 - 44: i11iIiiIii / OOooOOo * ooOoO0o
        Ooo00o000o = lisp_crypto_keys_by_rloc_decap [ O0O0 ] [ i11iII1 ]
        if ( Ooo00o000o == None ) :
            self . packet_error = "no-decrypt-key"
            if ( stats ) : stats [ self . packet_error ] . increment ( OO00OO )
            if 57 - 57: I11i - I11i % II111iiii % Oo0Ooo . o0oOOo0O0Ooo % Oo0Ooo
            self . print_packet ( "Receive" , is_lisp_packet )
            i11iI1111ii1I = bold ( "No key available" , False )
            dprint ( "{} to decrypt packet from RLOC {}" . format ( i11iI1111ii1I ,
                red ( O0O0 , False ) ) )
            if ( lisp_flow_logging ) : self . log_flow ( False )
            return ( None )
        if 91 - 91: I1IiiI - OoO0O00 - Oo0Ooo - Ii1I * iIii1I11I1II1
        if 68 - 68: OoO0O00 % O0 * iIii1I11I1II1 / oO0o * o0oOOo0O0Ooo + OOooOOo
        if 89 - 89: ooOoO0o * I1IiiI . oO0o
        if 75 - 75: ooOoO0o - iII111i % iII111i + ooOoO0o * o0oOOo0O0Ooo - I1ii11iIi11i
        if 26 - 26: I11i * Ii1I % I1IiiI + iII111i
        Ooo00o000o . use_count += 1
        Oo00oo , Ooii = self . decrypt ( Oo00oo , ooooI11iii1iIIIIi , Ooo00o000o , O0O0 )
        if ( Ooii == False ) :
            # decrypt() already set self.packet_error.
            if ( stats ) : stats [ self . packet_error ] . increment ( OO00OO )
            if ( lisp_flow_logging ) : self . log_flow ( False )
            return ( None )
        if 38 - 38: iII111i - Oo0Ooo / Ii1I + oO0o . iII111i + IiII
        if 19 - 19: Ii1I
        if 51 - 51: iIii1I11I1II1
        if 8 - 8: OoO0O00 / o0oOOo0O0Ooo % iII111i . i11iIiiIii . OoooooooOO . Ii1I
        if 8 - 8: OoO0O00 * Oo0Ooo
        if 41 - 41: Oo0Ooo / OoO0O00 / OoOoOO00 - i11iIiiIii - OoOoOO00
        # chacha decrypt returns str; convert back to bytes for parsing.
        if ( Ooo00o000o . cipher_suite == LISP_CS_25519_CHACHA ) :
            Oo00oo = Oo00oo . encode ( "raw_unicode_escape" )
    if 4 - 4: I11i . IiII
    if 39 - 39: OOooOOo . Oo0Ooo - OoOoOO00 * i11iIiiIii
    if 4 - 4: OoOoOO00 * O0 - I11i
    if 72 - 72: I11i + ooOoO0o / I1IiiI . IiII % OoO0O00 / i11iIiiIii
    if 13 - 13: I1Ii111 % o0oOOo0O0Ooo + OOooOOo + I1Ii111 + i11iIiiIii - I1ii11iIi11i
    if 70 - 70: II111iiii * II111iiii . I1IiiI
    # Inner header: version nibble chooses IPv4 / IPv6 / MAC (L2) decode.
    III1i1iiI1 = struct . unpack ( "B" , Oo00oo [ 0 : 1 ] ) [ 0 ]
    self . inner_version = III1i1iiI1 >> 4
    if ( IiIiIi11iiIi1 and self . inner_version == 4 and III1i1iiI1 >= 0x45 ) :
        iiIi1111iiI1 = socket . ntohs ( struct . unpack ( "H" , Oo00oo [ 2 : 4 ] ) [ 0 ] )
        self . inner_tos = struct . unpack ( "B" , Oo00oo [ 1 : 2 ] ) [ 0 ]
        self . inner_ttl = struct . unpack ( "B" , Oo00oo [ 8 : 9 ] ) [ 0 ]
        self . inner_protocol = struct . unpack ( "B" , Oo00oo [ 9 : 10 ] ) [ 0 ]
        self . inner_source . afi = LISP_AFI_IPV4
        self . inner_dest . afi = LISP_AFI_IPV4
        self . inner_source . unpack_address ( Oo00oo [ 12 : 16 ] )
        self . inner_dest . unpack_address ( Oo00oo [ 16 : 20 ] )
        ii1iI1i1 = socket . ntohs ( struct . unpack ( "H" , Oo00oo [ 6 : 8 ] ) [ 0 ] )
        self . inner_is_fragment = ( ii1iI1i1 & 0x2000 or ii1iI1i1 != 0 )
        if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
            self . inner_sport = struct . unpack ( "H" , Oo00oo [ 20 : 22 ] ) [ 0 ]
            self . inner_sport = socket . ntohs ( self . inner_sport )
            self . inner_dport = struct . unpack ( "H" , Oo00oo [ 22 : 24 ] ) [ 0 ]
            self . inner_dport = socket . ntohs ( self . inner_dport )
        if 85 - 85: I11i + I1Ii111
    elif ( IiIiIi11iiIi1 and self . inner_version == 6 and III1i1iiI1 >= 0x60 ) :
        iiIi1111iiI1 = socket . ntohs ( struct . unpack ( "H" , Oo00oo [ 4 : 6 ] ) [ 0 ] ) + 40
        IIii1i1 = struct . unpack ( "H" , Oo00oo [ 0 : 2 ] ) [ 0 ]
        self . inner_tos = ( socket . ntohs ( IIii1i1 ) >> 4 ) & 0xff
        self . inner_ttl = struct . unpack ( "B" , Oo00oo [ 7 : 8 ] ) [ 0 ]
        self . inner_protocol = struct . unpack ( "B" , Oo00oo [ 6 : 7 ] ) [ 0 ]
        self . inner_source . afi = LISP_AFI_IPV6
        self . inner_dest . afi = LISP_AFI_IPV6
        self . inner_source . unpack_address ( Oo00oo [ 8 : 24 ] )
        self . inner_dest . unpack_address ( Oo00oo [ 24 : 40 ] )
        if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
            self . inner_sport = struct . unpack ( "H" , Oo00oo [ 40 : 42 ] ) [ 0 ]
            self . inner_sport = socket . ntohs ( self . inner_sport )
            self . inner_dport = struct . unpack ( "H" , Oo00oo [ 42 : 44 ] ) [ 0 ]
            self . inner_dport = socket . ntohs ( self . inner_dport )
        if 11 - 11: I11i
    elif ( OoOoO0O00oo ) :
        # L2 encapsulation: inner header is an Ethernet frame; note the
        # MAC addresses are stored byte-swapped via swap_mac().
        iiIi1111iiI1 = len ( Oo00oo )
        self . inner_tos = 0
        self . inner_ttl = 0
        self . inner_protocol = 0
        self . inner_source . afi = LISP_AFI_MAC
        self . inner_dest . afi = LISP_AFI_MAC
        self . inner_dest . unpack_address ( self . swap_mac ( Oo00oo [ 0 : 6 ] ) )
        self . inner_source . unpack_address ( self . swap_mac ( Oo00oo [ 6 : 12 ] ) )
    elif ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
        # Instance-id 0xffffff marks an encapsulated control message;
        # there is no inner IP header to decode.
        if ( lisp_flow_logging ) : self . log_flow ( False )
        return ( self )
    else :
        self . packet_error = "bad-inner-version"
        if ( stats ) : stats [ self . packet_error ] . increment ( OO00OO )
        if 95 - 95: Oo0Ooo + i11iIiiIii % OOooOOo - oO0o
        lprint ( "Cannot decode encapsulation, header version {}" . format ( hex ( III1i1iiI1 ) ) )
        if 11 - 11: I1ii11iIi11i / O0 + II111iiii
        Oo00oo = lisp_format_packet ( Oo00oo [ 0 : 20 ] )
        lprint ( "Packet header: {}" . format ( Oo00oo ) )
        if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
        return ( None )
    if 95 - 95: I1Ii111 + IiII * iIii1I11I1II1
    self . inner_source . mask_len = self . inner_source . host_mask_len ( )
    self . inner_dest . mask_len = self . inner_dest . host_mask_len ( )
    self . inner_source . instance_id = oooo
    self . inner_dest . instance_id = oooo
    if 17 - 17: OoO0O00 - Oo0Ooo * O0 / Ii1I
    if 19 - 19: i1IIi - iIii1I11I1II1 . I11i
    if 2 - 2: Ii1I
    if 12 - 12: i11iIiiIii - iIii1I11I1II1 * IiII * iII111i
    if 19 - 19: O0 + oO0o + o0oOOo0O0Ooo
    # Nonce-echo processing for this source RLOC when enabled.
    if ( lisp_nonce_echoing and is_lisp_packet ) :
        oO0 = lisp_get_echo_nonce ( self . outer_source , None )
        if ( oO0 == None ) :
            IIi11IiiiI11i = self . outer_source . print_address_no_iid ( )
            oO0 = lisp_echo_nonce ( IIi11IiiiI11i )
        if 68 - 68: oO0o + I11i * oO0o . IiII % Ii1I - OoooooooOO
        oOooo0oOOOO = self . lisp_header . get_nonce ( )
        if ( self . lisp_header . is_e_bit_set ( ) ) :
            oO0 . receive_request ( lisp_ipc_socket , oOooo0oOOOO )
        elif ( oO0 . request_nonce_sent ) :
            oO0 . receive_echo ( lisp_ipc_socket , oOooo0oOOOO )
    if 81 - 81: o0oOOo0O0Ooo / I1IiiI / o0oOOo0O0Ooo * IiII + OOooOOo % I1Ii111
    if 61 - 61: OoOoOO00 - OoOoOO00 . o0oOOo0O0Ooo + oO0o
    if 26 - 26: II111iiii / o0oOOo0O0Ooo
    if 32 - 32: I1ii11iIi11i * I1IiiI + o0oOOo0O0Ooo % II111iiii + OOooOOo + Ii1I
    if 90 - 90: Ii1I
    if 30 - 30: o0oOOo0O0Ooo + Ii1I / OoooooooOO - IiII % oO0o
    if 21 - 21: OoooooooOO % OoOoOO00 - OoOoOO00 / I1ii11iIi11i / o0oOOo0O0Ooo
    # On decryption, append the (inner-length-limited) plaintext back
    # onto self.packet, which still holds the outer headers.
    if ( Ooii ) : self . packet += Oo00oo [ : iiIi1111iiI1 ]
    if 15 - 15: ooOoO0o / ooOoO0o % OoooooooOO . I1Ii111
    if 93 - 93: I1ii11iIi11i * I1ii11iIi11i / OoooooooOO
    if 6 - 6: I1ii11iIi11i * Oo0Ooo + iIii1I11I1II1
    if 19 - 19: O0 % II111iiii * o0oOOo0O0Ooo
    if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
    return ( self )
if 27 - 27: OOooOOo * IiII / i11iIiiIii - oO0o + II111iiii
if 43 - 43: I1ii11iIi11i - II111iiii
def swap_mac(self, mac):
    """Return the 6-byte MAC address 'mac' with each byte pair swapped
    (bytes 0/1, 2/3, 4/5 exchanged).

    Fix: use one-byte slices instead of indexing. Callers pass bytes
    slices of the packet buffer, and in Python 3 indexing bytes yields
    ints, so 'mac[1] + mac[0] + ...' summed the byte values instead of
    concatenating them. Slicing preserves the input type (bytes or str).
    """
    return(mac[1:2] + mac[0:1] + mac[3:4] + mac[2:3] + mac[5:6] + mac[4:5])
if 56 - 56: I1ii11iIi11i . i1IIi / iII111i % oO0o / O0 * I11i
if 98 - 98: O0 + iII111i
def strip_outer_headers(self):
    """Strip the outer IP, UDP, and LISP headers off self.packet, leaving
    the inner packet, and return self."""
    udp_and_lisp = 16                # 8-byte UDP header + 8-byte LISP header
    outer_ip = 20 if (self.outer_version == 4) else 40
    self.packet = self.packet[udp_and_lisp + outer_ip::]
    return(self)
if 23 - 23: OoooooooOO . iIii1I11I1II1 / i1IIi
if 31 - 31: Oo0Ooo - iIii1I11I1II1 / I11i . OoO0O00
def hash_ports(self):
    """Fold the inner transport protocol number and port pair into a
    16-bit hash contribution. Non-TCP/UDP packets hash to 0; an IPv4
    fragment hashes to just the protocol number (ports may be absent)."""
    buf = self.packet
    folded = 0

    if (self.inner_version == 4):
        proto = struct.unpack("B", buf[9:10])[0]
        if (self.inner_is_fragment): return(proto)
        if (proto in [6, 17]):
            folded = proto + struct.unpack("I", buf[20:24])[0]
            folded = (folded >> 16) ^ (folded & 0xffff)

    if (self.inner_version == 6):
        proto = struct.unpack("B", buf[6:7])[0]
        if (proto in [6, 17]):
            folded = proto + struct.unpack("I", buf[40:44])[0]
            folded = (folded >> 16) ^ (folded & 0xffff)

    return(folded)
if 17 - 17: I1ii11iIi11i - iII111i % Oo0Ooo * O0 % O0 * OOooOOo
if 6 - 6: I1Ii111
def hash_packet(self):
    """Hash the inner source/dest addresses and ports into the low 12
    bits of self.udp_sport (high nibble forced to 0xf), so ECMP paths
    see per-flow entropy in the outer UDP source port."""
    hash_value = self.inner_source.address ^ self.inner_dest.address
    hash_value += self.hash_ports()

    # Fold down to 16 bits: one fold for IPv4, three for IPv6's 128 bits.
    fold_shifts = {4: (16,), 6: (64, 32, 16)}.get(self.inner_version, ())
    for shift in fold_shifts:
        mask = (1 << shift) - 1
        hash_value = (hash_value >> shift) ^ (hash_value & mask)

    self.udp_sport = 0xf000 | (hash_value & 0xfff)
if 23 - 23: i1IIi - O0
if 6 - 6: ooOoO0o % OoooooooOO * I1Ii111 - IiII
def print_packet ( self , s_or_r , is_lisp_packet ) :
    """Emit a one-line dprint() summary of this packet.

    s_or_r is a direction/stage tag ("Send", "Receive", "Replicate",
    "Fragment-...", ...). When is_lisp_packet is False only the inner
    header is summarized; otherwise outer RLOCs, UDP ports, and the
    LISP header are included.
    """
    if ( is_lisp_packet == False ) :
        I1ii = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
            self . inner_dest . print_address ( ) )
        dprint ( ( "{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..." ) . format ( bold ( s_or_r , False ) ,
            # iIii1I11I1II1 / Ii1I + OoooooooOO % i1IIi * i11iIiiIii
            green ( I1ii , False ) , self . inner_tos ,
            self . inner_ttl , len ( self . packet ) ,
            lisp_format_packet ( self . packet [ 0 : 60 ] ) ) )
        return
    if 86 - 86: i11iIiiIii - O0 - i11iIiiIii . iIii1I11I1II1 . IiII
    if 84 - 84: i1IIi / iIii1I11I1II1 / oO0o / Ii1I
    # Map the direction tag to "encap"/"decap" (suffix "-vxlan" on the
    # VXLAN data port) for the LISP-header line below.
    if ( s_or_r . find ( "Receive" ) != - 1 ) :
        iI = "decap"
        iI += "-vxlan" if self . udp_dport == LISP_VXLAN_DATA_PORT else ""
    else :
        iI = s_or_r
        if ( iI in [ "Send" , "Replicate" ] or iI . find ( "Fragment" ) != - 1 ) :
            iI = "encap"
    if 80 - 80: o0oOOo0O0Ooo + o0oOOo0O0Ooo + I1Ii111 * oO0o + I11i
    if 75 - 75: OoO0O00 - OoOoOO00 - i1IIi % Oo0Ooo - II111iiii
    oOoooO = "{} -> {}" . format ( self . outer_source . print_address_no_iid ( ) ,
        self . outer_dest . print_address_no_iid ( ) )
    if 64 - 64: IiII
    if 80 - 80: I1IiiI - i11iIiiIii / OoO0O00 / OoOoOO00 + OoOoOO00
    if 89 - 89: O0 + IiII * I1Ii111
    if 30 - 30: OoOoOO00
    if 39 - 39: I1ii11iIi11i + o0oOOo0O0Ooo + I1Ii111 + IiII
    # Instance-id 0xffffff marks an encapsulated control message -- it has
    # no inner EID header to summarize.
    if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
        IiiiI1 = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, " )
        if 48 - 48: I1Ii111 / ooOoO0o . iIii1I11I1II1
        IiiiI1 += bold ( "control-packet" , False ) + ": {} ..."
        if 72 - 72: i1IIi . o0oOOo0O0Ooo
        dprint ( IiiiI1 . format ( bold ( s_or_r , False ) , red ( oOoooO , False ) ,
            self . outer_tos , self . outer_ttl , self . udp_sport ,
            self . udp_dport , lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
        return
    else :
        IiiiI1 = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ..." )
    if 3 - 3: OoOoOO00 % II111iiii - O0
    if 52 - 52: OoO0O00
    if 49 - 49: Ii1I . I1ii11iIi11i % ooOoO0o . Oo0Ooo * OOooOOo
    if 44 - 44: iIii1I11I1II1 / O0 * Oo0Ooo + I1IiiI . ooOoO0o
    # K-bits set means the payload is encrypted; reflect that in the tag.
    if ( self . lisp_header . k_bits ) :
        if ( iI == "encap" ) : iI = "encrypt/encap"
        if ( iI == "decap" ) : iI = "decap/decrypt"
    if 20 - 20: iII111i + o0oOOo0O0Ooo . I1Ii111 / i11iIiiIii
    if 7 - 7: OoOoOO00 / OoOoOO00 . I1Ii111 * O0 + IiII + oO0o
    I1ii = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
        self . inner_dest . print_address ( ) )
    if 98 - 98: II111iiii * IiII - I1IiiI % o0oOOo0O0Ooo - iII111i % I1ii11iIi11i
    dprint ( IiiiI1 . format ( bold ( s_or_r , False ) , red ( oOoooO , False ) ,
        self . outer_tos , self . outer_ttl , self . udp_sport , self . udp_dport ,
        green ( I1ii , False ) , self . inner_tos , self . inner_ttl ,
        len ( self . packet ) , self . lisp_header . print_header ( iI ) ,
        lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
if 69 - 69: i1IIi % OoO0O00 % I1Ii111 / ooOoO0o / ooOoO0o
if 6 - 6: II111iiii % I1ii11iIi11i % i1IIi * ooOoO0o
def print_eid_tuple(self):
    """Return the printable form of this packet's inner source/dest EID
    pair, as produced by lisp_print_eid_tuple()."""
    source, dest = self.inner_source, self.inner_dest
    return(lisp_print_eid_tuple(source, dest))
if 47 - 47: O0
if 55 - 55: OoO0O00 % O0 / OoooooooOO
def get_raw_socket(self):
    """Return the device-bound raw socket for this packet's instance-id
    (multi-tenancy), or None when the default socket should be used.

    When the interface has no SO_BINDTODEVICE socket, the packet is
    forwarded on the default socket unless the LISP_ENFORCE_BINDTODEVICE
    environment variable demands a drop.
    """
    iid = str(self.lisp_header.get_instance_id())
    if (iid == "0"): return(None)
    if (iid not in lisp_iid_to_interface): return(None)

    interface = lisp_iid_to_interface[iid]
    device_socket = interface.get_socket()
    if (device_socket == None):
        bindtodevice = bold("SO_BINDTODEVICE", False)
        enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
        lprint("{} required for multi-tenancy support, {} packet".format(bindtodevice, "drop" if enforce else "forward"))

        if (enforce): return(None)

    iid = bold(iid, False)
    device = bold(interface.device, False)
    dprint("Send packet on instance-id {} interface {}".format(iid, device))
    return(device_socket)
if 62 - 62: OoooooooOO / ooOoO0o + I1ii11iIi11i . o0oOOo0O0Ooo - iII111i
if 29 - 29: oO0o
def log_flow ( self , encap ) :
    """Append this packet to the in-memory flow log.

    When the log is full (LISP_FLOW_LOG_SIZE entries) or a ./log-flows
    trigger file exists, hand the current log to a background thread to
    be written out and start a fresh one (dropping this entry).
    """
    global lisp_flow_log
    if 26 - 26: O0 % OOooOOo - IiII . OOooOOo
    OOo0O0 = os . path . exists ( "./log-flows" )
    if ( len ( lisp_flow_log ) == LISP_FLOW_LOG_SIZE or OOo0O0 ) :
        # Swap in an empty list before the writer thread consumes the
        # full one; remove the trigger file once honored.
        Iiiii = [ lisp_flow_log ]
        lisp_flow_log = [ ]
        threading . Thread ( target = lisp_write_flow_log , args = Iiiii ) . start ( )
        if ( OOo0O0 ) : os . system ( "rm ./log-flows" )
        return
    if 8 - 8: iIii1I11I1II1 . iIii1I11I1II1 + Ii1I . OOooOOo
    if 58 - 58: iIii1I11I1II1 + I1Ii111 - I1ii11iIi11i - i1IIi * OoOoOO00
    Oo0OO0000oooo = datetime . datetime . now ( )
    lisp_flow_log . append ( [ Oo0OO0000oooo , encap , self . packet , self ] )
if 4 - 4: OoooooooOO
if 7 - 7: IiII
def print_flow ( self , ts , encap , packet ) :
    """Format one flow-log entry as a newline-terminated string.

    ts is the datetime the packet was logged, encap is True for the
    encap direction (False for decap), and 'packet' is the raw bytes
    captured at log time.
    """
    ts = ts . strftime ( "%m/%d/%y %H:%M:%S.%f" ) [ : - 3 ]
    iII1iii = "{}: {}" . format ( ts , "encap" if encap else "decap" )
    if 97 - 97: I1Ii111 / OOooOOo - i11iIiiIii
    OO0o0o = red ( self . outer_source . print_address_no_iid ( ) , False )
    O0O0O00OoO0O = red ( self . outer_dest . print_address_no_iid ( ) , False )
    i1II11III = green ( self . inner_source . print_address ( ) , False )
    O0OO0oo = green ( self . inner_dest . print_address ( ) , False )
    if 41 - 41: OoOoOO00 % I1Ii111 * oO0o * i1IIi
    # Instance-id 0xffffff marks an encapsulated control message.
    if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
        iII1iii += " {}:{} -> {}:{}, LISP control message type {}\n"
        iII1iii = iII1iii . format ( OO0o0o , self . udp_sport , O0O0O00OoO0O , self . udp_dport ,
            self . inner_version )
        return ( iII1iii )
    if 32 - 32: I1IiiI + i11iIiiIii - I1Ii111 / II111iiii
    if 27 - 27: ooOoO0o . Oo0Ooo + ooOoO0o + iII111i
    # Outer header summary, only when an outer destination is present.
    if ( self . outer_dest . is_null ( ) == False ) :
        iII1iii += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
        iII1iii = iII1iii . format ( OO0o0o , self . udp_sport , O0O0O00OoO0O , self . udp_dport ,
            len ( packet ) , self . outer_tos , self . outer_ttl )
    if 28 - 28: OoO0O00 - ooOoO0o - oO0o % oO0o / O0
    if 99 - 99: II111iiii - iIii1I11I1II1
    if 24 - 24: I1IiiI - i1IIi - O0 % I1Ii111 - iIii1I11I1II1 . I11i
    if 26 - 26: OoO0O00 % i1IIi * O0 . I1Ii111
    if 31 - 31: O0 - IiII * i11iIiiIii * i1IIi
    # Encrypted payload -- the inner header cannot be decoded.
    if ( self . lisp_header . k_bits != 0 ) :
        O0oOo00Oo0oo0 = "\n"
        if ( self . packet_error != "" ) :
            O0oOo00Oo0oo0 = " ({})" . format ( self . packet_error ) + O0oOo00Oo0oo0
        if 36 - 36: I1Ii111 / I1Ii111 % oO0o
        iII1iii += ", encrypted" + O0oOo00Oo0oo0
        return ( iII1iii )
    if 97 - 97: OoooooooOO * o0oOOo0O0Ooo + OoooooooOO % Ii1I * Oo0Ooo
    if 35 - 35: iIii1I11I1II1 % iII111i - i1IIi
    if 20 - 20: I11i % ooOoO0o . OOooOOo / I1Ii111
    if 50 - 50: oO0o + i11iIiiIii / i11iIiiIii + ooOoO0o + I1Ii111
    if 65 - 65: ooOoO0o * O0 * iII111i
    # Skip outer IP + UDP + LISP headers to reach the inner header.
    if ( self . outer_dest . is_null ( ) == False ) :
        packet = packet [ 36 : : ] if self . outer_version == 4 else packet [ 56 : : ]
    if 60 - 60: iIii1I11I1II1 . ooOoO0o + I1IiiI % oO0o
    if 4 - 4: I1IiiI / II111iiii % O0 * ooOoO0o / II111iiii . Oo0Ooo
    III1I1I1iiIi = packet [ 9 : 10 ] if self . inner_version == 4 else packet [ 6 : 7 ]
    III1I1I1iiIi = struct . unpack ( "B" , III1I1I1iiIi ) [ 0 ]
    if 16 - 16: O0 + O0 - I1IiiI
    iII1iii += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
    iII1iii = iII1iii . format ( i1II11III , O0OO0oo , len ( packet ) , self . inner_tos ,
        self . inner_ttl , III1I1I1iiIi )
    if 30 - 30: ooOoO0o
    if 33 - 33: I1Ii111 * IiII - O0 + I1IiiI / IiII
    if 19 - 19: i1IIi % II111iiii
    if 85 - 85: IiII - o0oOOo0O0Ooo % OOooOOo - II111iiii
    # TCP/UDP (protocols 6/17): show ports; ICMP (1): show the sequence
    # number field.
    if ( III1I1I1iiIi in [ 6 , 17 ] ) :
        o0o0OOooo0Oo = packet [ 20 : 24 ] if self . inner_version == 4 else packet [ 40 : 44 ]
        if ( len ( o0o0OOooo0Oo ) == 4 ) :
            o0o0OOooo0Oo = socket . ntohl ( struct . unpack ( "I" , o0o0OOooo0Oo ) [ 0 ] )
            iII1iii += ", ports {} -> {}" . format ( o0o0OOooo0Oo >> 16 , o0o0OOooo0Oo & 0xffff )
        if 48 - 48: o0oOOo0O0Ooo + I1ii11iIi11i / I1ii11iIi11i
    elif ( III1I1I1iiIi == 1 ) :
        oOO0o0o0 = packet [ 26 : 28 ] if self . inner_version == 4 else packet [ 46 : 48 ]
        if ( len ( oOO0o0o0 ) == 2 ) :
            oOO0o0o0 = socket . ntohs ( struct . unpack ( "H" , oOO0o0o0 ) [ 0 ] )
            iII1iii += ", icmp-seq {}" . format ( oOO0o0o0 )
    if 87 - 87: i11iIiiIii * II111iiii - Ii1I % OoooooooOO
    if 55 - 55: i1IIi
    if ( self . packet_error != "" ) :
        iII1iii += " ({})" . format ( self . packet_error )
    if 67 - 67: I1IiiI - OoO0O00
    iII1iii += "\n"
    return ( iII1iii )
if 60 - 60: i1IIi / iIii1I11I1II1 * oO0o + ooOoO0o + OoooooooOO + II111iiii
if 13 - 13: iIii1I11I1II1 - OOooOOo
def is_trace(self):
    """Return True when the inner packet is a LISP-Trace message, i.e.
    an inner UDP packet with LISP_TRACE_PORT as either the source or
    the destination port."""
    if (self.inner_protocol != LISP_UDP_PROTOCOL): return (False)
    return (LISP_TRACE_PORT in (self.inner_sport, self.inner_dport))
if 14 - 14: ooOoO0o
if 75 - 75: iIii1I11I1II1 % ooOoO0o / OOooOOo - iII111i % i11iIiiIii
if 11 - 11: I11i . Ii1I
if 87 - 87: OOooOOo + OOooOOo
if 45 - 45: i1IIi - Oo0Ooo
if 87 - 87: OoOoOO00 - OoO0O00 * OoO0O00 / Ii1I . I11i * o0oOOo0O0Ooo
if 21 - 21: II111iiii
if 29 - 29: OoOoOO00 % Ii1I
if 7 - 7: i1IIi / IiII / iII111i
if 97 - 97: OoO0O00 + iIii1I11I1II1
if 79 - 79: ooOoO0o + oO0o - II111iiii . Oo0Ooo
if 26 - 26: IiII
if 52 - 52: O0 + ooOoO0o
if 11 - 11: i1IIi / I1Ii111 * I1ii11iIi11i * I1Ii111 * ooOoO0o - i11iIiiIii
if 96 - 96: I1ii11iIi11i % I1ii11iIi11i
if 1 - 1: I1IiiI . Ii1I
#
# Flag bits carried in the first 32-bit word of the LISP data-plane
# encapsulation header.  They are set and tested against
# lisp_data_header.first_long (see the lisp_data_header class below).
#
LISP_N_BIT = 0x80000000   # N-bit: a nonce is present in the low 24 bits
LISP_L_BIT = 0x40000000   # L-bit: locator-status-bits field is in use
LISP_E_BIT = 0x20000000   # E-bit: echo-nonce request (set with the N-bit)
LISP_V_BIT = 0x10000000   # V-bit: map-version number present
LISP_I_BIT = 0x08000000   # I-bit: instance-ID carried in the second word
LISP_P_BIT = 0x04000000   # P-bit: printed as flag "P"; usage not visible here
LISP_K_BITS = 0x03000000  # KK bits: 2-bit key-id of the encryption key
if 26 - 26: oO0o - ooOoO0o % Oo0Ooo - oO0o + IiII
class lisp_data_header(object):
    """The 8-byte LISP data-plane encapsulation header.

    The header is modeled as two host-byte-order 32-bit words:
    'first_long' holds the flag bits (LISP_N_BIT and friends) plus a
    24-bit nonce/map-version field, and 'second_long' holds the
    instance-ID and locator-status-bits.  'k_bits' caches the 2-bit
    key-id extracted from first_long.
    """

    def __init__(self):
        self.first_long = 0
        self.second_long = 0
        self.k_bits = 0

    def print_header(self, e_or_d):
        """Return a one-line human-readable summary of the header.
        'e_or_d' is a caller-supplied tag printed in front of it."""
        nonce = lisp_hex_string(self.first_long & 0xffffff)
        iid_lsb = lisp_hex_string(self.second_long).zfill(8)
        line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " +
            "iid/lsb: {}")
        flags = self.first_long
        return (line.format(bold(e_or_d, False),
            "N" if (flags & LISP_N_BIT) else "n",
            "L" if (flags & LISP_L_BIT) else "l",
            "E" if (flags & LISP_E_BIT) else "e",
            "V" if (flags & LISP_V_BIT) else "v",
            "I" if (flags & LISP_I_BIT) else "i",
            "P" if (flags & LISP_P_BIT) else "p",
            "K" if (self.k_bits in [2, 3]) else "k",
            "K" if (self.k_bits in [1, 3]) else "k",
            nonce, iid_lsb))

    def encode(self):
        """Pack both words in network byte order and return the 8-byte
        wire representation of the header."""
        header_format = "II"
        word1 = socket.htonl(self.first_long)
        word2 = socket.htonl(self.second_long)
        return (struct.pack(header_format, word1, word2))

    def decode(self, packet):
        """Parse the first 8 bytes of 'packet' into this header.
        Returns False when the buffer is too short, True on success."""
        header_format = "II"
        header_size = struct.calcsize(header_format)
        if (len(packet) < header_size): return (False)

        word1, word2 = struct.unpack(header_format, packet[:header_size])
        self.first_long = socket.ntohl(word1)
        self.second_long = socket.ntohl(word2)
        # Cache the 2-bit key-id field for quick access.
        self.k_bits = (self.first_long & LISP_K_BITS) >> 24
        return (True)

    def key_id(self, key_id):
        """Store a 2-bit key-id into the KK bits of the first word."""
        self.first_long &= ~(0x3 << 24)
        self.first_long |= ((key_id & 0x3) << 24)
        self.k_bits = key_id

    def nonce(self, nonce):
        """Set the N-bit and place 'nonce' in the low 24 bits."""
        self.first_long |= LISP_N_BIT
        self.first_long |= nonce

    def map_version(self, version):
        """Set the V-bit and place the map-version in the low bits."""
        self.first_long |= LISP_V_BIT
        self.first_long |= version

    def instance_id(self, iid):
        """Set the I-bit and store a 24-bit instance-ID in the upper
        bytes of the second word.  An IID of 0 leaves the header alone."""
        if (iid == 0): return
        self.first_long |= LISP_I_BIT
        self.second_long &= 0xff
        self.second_long |= (iid << 8)

    def get_instance_id(self):
        """Return the 24-bit instance-ID from the second word."""
        return ((self.second_long >> 8) & 0xffffff)

    def locator_status_bits(self, lsbs):
        """Set the L-bit and store 8 locator-status-bits in the low
        byte of the second word."""
        self.first_long |= LISP_L_BIT
        self.second_long &= 0xffffff00
        self.second_long |= (lsbs & 0xff)

    def is_request_nonce(self, nonce):
        """Nonzero (truthy) when 'nonce' has the request-nonce high bit."""
        return (nonce & 0x80000000)

    def request_nonce(self, nonce):
        """Set the E- and N-bits and store the low 24 bits of 'nonce',
        asking the other side to echo it back."""
        self.first_long |= LISP_E_BIT
        self.first_long |= LISP_N_BIT
        self.first_long |= (nonce & 0xffffff)

    def is_e_bit_set(self):
        """Nonzero (truthy) when the echo-nonce-request E-bit is set."""
        return (self.first_long & LISP_E_BIT)

    def get_nonce(self):
        """Return the 24-bit nonce field from the first word."""
        return (self.first_long & 0xffffff)
if 39 - 39: Ii1I % i11iIiiIii * OoO0O00
if 23 - 23: OOooOOo + ooOoO0o / i11iIiiIii * Oo0Ooo . OoO0O00
if 28 - 28: iII111i - o0oOOo0O0Ooo
class lisp_echo_nonce(object):
    """Echo-nonce state for a single RLOC.

    Tracks the nonces this node has requested to be echoed, the nonces
    it owes as echoes, and timestamps of each event, so the data plane
    can detect a unidirectionally-reachable RLOC.  Instances register
    themselves in the global lisp_nonce_echo_list keyed by RLOC string.
    """

    def __init__(self, rloc_str):
        self.rloc_str = rloc_str
        self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
        self.request_nonce_sent = None
        self.echo_nonce_sent = None
        self.last_request_nonce_sent = None
        self.last_new_request_nonce_sent = None
        self.last_echo_nonce_sent = None
        self.last_new_echo_nonce_sent = None
        self.request_nonce_rcvd = None
        self.echo_nonce_rcvd = None
        self.last_request_nonce_rcvd = None
        self.last_echo_nonce_rcvd = None
        self.last_good_echo_nonce_rcvd = None
        lisp_nonce_echo_list[rloc_str] = self

    def send_ipc(self, ipc_socket, ipc):
        """Forward an IPC message to the peer LISP process (ITR<->ETR)."""
        me = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
        peer = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
        ipc = lisp_command_ipc(ipc, me)
        lisp_ipc(ipc, ipc_socket, peer)

    def send_request_ipc(self, ipc_socket, nonce):
        """Tell the peer process we are in request-nonce mode for this
        RLOC with the given nonce."""
        nonce = lisp_hex_string(nonce)
        message = "nonce%R%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, message)

    def send_echo_ipc(self, ipc_socket, nonce):
        """Tell the peer process to echo the given nonce for this RLOC."""
        nonce = lisp_hex_string(nonce)
        message = "nonce%E%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, message)

    def receive_request(self, ipc_socket, nonce):
        """Record a request-nonce received from the remote RLOC and, on
        an ITR/ETR, relay a new nonce to the peer process."""
        previous = self.request_nonce_rcvd
        self.request_nonce_rcvd = nonce
        self.last_request_nonce_rcvd = lisp_get_timestamp()
        if (lisp_i_am_rtr): return
        if (previous != nonce): self.send_request_ipc(ipc_socket, nonce)

    def receive_echo(self, ipc_socket, nonce):
        """Record an echoed nonce, but only if it matches the one we
        asked for; relay a newly-seen echo to the peer process."""
        if (self.request_nonce_sent != nonce): return
        self.last_echo_nonce_rcvd = lisp_get_timestamp()
        if (self.echo_nonce_rcvd == nonce): return

        self.echo_nonce_rcvd = nonce
        if (lisp_i_am_rtr): return
        self.send_echo_ipc(ipc_socket, nonce)

    def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
        """Return the nonce to place in the next data packet: an echo
        of the remote side's nonce, or our own request-nonce with the
        high-order request bit set (or None to stop using nonces)."""

        #
        # Both sides requesting at once is a collision; the side with
        # the lower address stays in request-nonce mode.
        #
        if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
            my_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() else \
                lisp_myrlocs[1]

            if (remote_rloc.address > my_rloc.address):
                action = "exit"
                self.request_nonce_sent = None
            else:
                action = "stay in"
                self.echo_nonce_sent = None

            title = bold("collision", False)
            local_str = red(my_rloc.print_address_no_iid(), False)
            remote_str = red(remote_rloc.print_address_no_iid(), False)
            lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(title,
                local_str, remote_str, action))

        #
        # If we owe the other side an echo, send it (once).
        #
        if (self.echo_nonce_sent != None):
            nonce = self.echo_nonce_sent
            tag = bold("Echoing", False)
            lprint("{} nonce 0x{} to {}".format(tag,
                lisp_hex_string(nonce), red(self.rloc_str, False)))
            self.last_echo_nonce_sent = lisp_get_timestamp()
            self.echo_nonce_sent = None
            return (nonce)

        #
        # Stop requesting after LISP_NONCE_ECHO_INTERVAL of trying.
        #
        nonce = self.request_nonce_sent
        last_sent = self.last_request_nonce_sent
        if (nonce and last_sent != None):
            if (time.time() - last_sent >= LISP_NONCE_ECHO_INTERVAL):
                self.request_nonce_sent = None
                lprint("Stop request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(nonce)))
                return (None)

        #
        # Start a new request-nonce period, or continue the current one.
        #
        if (nonce == None):
            nonce = lisp_get_data_nonce()
            if (self.recently_requested()): return (nonce)

            self.request_nonce_sent = nonce
            lprint("Start request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(nonce)))
            self.last_new_request_nonce_sent = lisp_get_timestamp()

            #
            # An RTR sets the request bit itself; an ITR also informs
            # the companion ETR process via IPC.
            #
            if (lisp_i_am_itr == False): return (nonce | 0x80000000)
            self.send_request_ipc(ipc_socket, nonce)
        else:
            lprint("Continue request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(nonce)))

        self.last_request_nonce_sent = lisp_get_timestamp()
        return (nonce | 0x80000000)

    def request_nonce_timeout(self):
        """True when we have been requesting for a full interval without
        hearing any echo at all."""
        if (self.request_nonce_sent == None): return (False)
        if (self.request_nonce_sent == self.echo_nonce_rcvd): return (False)

        elapsed = time.time() - self.last_request_nonce_sent
        heard = self.last_echo_nonce_rcvd
        return (elapsed >= LISP_NONCE_ECHO_INTERVAL and heard == None)

    def recently_requested(self):
        """True when a request-nonce was sent within the echo interval."""
        last_sent = self.last_request_nonce_sent
        if (last_sent == None): return (False)

        elapsed = time.time() - last_sent
        return (elapsed <= LISP_NONCE_ECHO_INTERVAL)

    def recently_echoed(self):
        """True when the other side echoed recently, or when a fresh
        request-nonce period just started (grace period)."""
        if (self.request_nonce_sent == None): return (True)

        #
        # A good echo within the interval counts.
        #
        last_echo = self.last_good_echo_nonce_rcvd
        if (last_echo == None): last_echo = 0
        elapsed = time.time() - last_echo
        if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return (True)

        #
        # Otherwise allow a grace period after a brand-new request.
        #
        last_new = self.last_new_request_nonce_sent
        if (last_new == None): last_new = 0
        elapsed = time.time() - last_new
        return (elapsed <= LISP_NONCE_ECHO_INTERVAL)

    def change_state(self, rloc):
        """Move the RLOC between up and no-echoed-nonce states based on
        whether echoes have been heard recently."""
        if (rloc.up_state() and self.recently_echoed() == False):
            down = bold("down", False)
            elapsed = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
            lprint("Take {} {}, last good echo: {}".format(red(self.rloc_str, False), down, elapsed))

            rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
            rloc.last_state_change = lisp_get_timestamp()
            return

        if (rloc.no_echoed_nonce_state() == False): return

        if (self.recently_requested() == False):
            up = bold("up", False)
            lprint("Bring {} {}, retry request-nonce mode".format(red(self.rloc_str, False), up))

            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()

    def print_echo_nonce(self):
        """Return a multi-line display string of the echo-nonce state."""
        req_sent = lisp_print_elapsed(self.last_request_nonce_sent)
        echo_rcvd = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)

        echo_sent = lisp_print_elapsed(self.last_echo_nonce_sent)
        req_rcvd = lisp_print_elapsed(self.last_request_nonce_rcvd)
        indent = space(4)

        output = "Nonce-Echoing:\n"
        output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + "received: {}\n").format(indent, req_sent, indent, echo_rcvd)

        output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + "sent: {}").format(indent, req_rcvd, indent, echo_sent)

        return (output)
if 78 - 78: IiII + I11i - o0oOOo0O0Ooo + OoO0O00 / iIii1I11I1II1
if 47 - 47: OOooOOo
if 20 - 20: I1Ii111 % ooOoO0o - I1Ii111 * OoooooooOO / I1ii11iIi11i
if 57 - 57: IiII % I11i * OOooOOo % I1ii11iIi11i
if 65 - 65: i1IIi - OoooooooOO
if 66 - 66: I1ii11iIi11i / i1IIi * I1IiiI - OoOoOO00 + oO0o
if 74 - 74: iII111i / I1Ii111 / II111iiii - iII111i / oO0o % I11i
if 19 - 19: IiII % OoooooooOO + OoooooooOO
if 7 - 7: i1IIi
class lisp_keys(object):
    """Key material for one LISP-crypto security association.

    Holds the Diffie-Hellman (1024-bit MODP or curve25519/ECDH) keypair,
    the negotiated cipher suite, and the encryption/ICV keys derived
    from the shared secret.  Also knows how to encode/decode itself as
    a Security-type LCAF and to register itself in the global crypto
    key tables.
    """

    def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
        do_poly=use_poly):
        self.uptime = lisp_get_timestamp()
        self.last_rekey = None
        self.rekey_count = 0
        self.use_count = 0
        self.key_id = key_id
        # Defaults: 1024-bit MODP DH group.
        self.cipher_suite = LISP_CS_1024
        self.dh_g_value = LISP_CS_1024_G
        self.dh_p_value = LISP_CS_1024_P
        self.curve25519 = None
        self.cipher_suite_string = ""
        if (do_curve):
            # Pick the curve25519-based suite: chacha, aes-gcm (via env
            # var LISP_USE_AES_GCM), or aes-cbc.
            if (do_chacha):
                self.cipher_suite = LISP_CS_25519_CHACHA
                self.cipher_suite_string = "chacha"
            elif (os.getenv("LISP_USE_AES_GCM") != None):
                self.cipher_suite = LISP_CS_25519_GCM
                self.cipher_suite_string = "aes-gcm"
            else:
                self.cipher_suite = LISP_CS_25519_CBC
                self.cipher_suite_string = "aes-cbc"

            self.local_private_key = random.randint(0, 2 ** 128 - 1)
            private_hex = lisp_hex_string(self.local_private_key).zfill(32)
            self.curve25519 = curve25519.Private(private_hex.encode())
        else:
            self.local_private_key = random.randint(0, 0x1fff)

        self.local_public_key = self.compute_public_key()
        self.remote_public_key = None
        self.shared_key = None
        self.encrypt_key = None
        self.icv_key = None
        self.icv = poly1305 if do_poly else hashlib.sha256
        self.iv = None
        self.get_iv()
        self.do_poly = do_poly

    def copy_keypair(self, key):
        """Adopt the local keypair from another lisp_keys instance."""
        self.local_private_key = key.local_private_key
        self.local_public_key = key.local_public_key
        self.curve25519 = key.curve25519

    def get_iv(self):
        """Advance (or seed) the IV counter and return it packed for
        the current cipher suite: 8 bytes for chacha, 12 for aes-gcm,
        16 otherwise."""
        if (self.iv == None):
            self.iv = random.randint(0, LISP_16_128_MASK)
        else:
            self.iv += 1

        iv = self.iv
        if (self.cipher_suite == LISP_CS_25519_CHACHA):
            iv = struct.pack("Q", iv & LISP_8_64_MASK)
        elif (self.cipher_suite == LISP_CS_25519_GCM):
            high = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
            low = struct.pack("Q", iv & LISP_8_64_MASK)
            iv = high + low
        else:
            iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
        return (iv)

    def key_length(self, key):
        """Return the key length in bytes of a hex-encoded key."""
        if (isinstance(key, int)): key = self.normalize_pub_key(key)
        return (old_div(len(key), 2))

    def print_key(self, key):
        """Return an abbreviated ' 0xhead...tail(len)' form of a key."""
        normalized = self.normalize_pub_key(key)
        head = normalized[0:4].decode()
        tail = normalized[-4::].decode()
        return ("0x{}...{}({})".format(head, tail, self.key_length(normalized)))

    def normalize_pub_key(self, key):
        """Return the key as hex text: ints are zero-filled to 256
        hex digits, curve25519 byte-strings are hexlified."""
        if (isinstance(key, int)):
            key = lisp_hex_string(key).zfill(256)
            return (key)

        if (self.curve25519): return (binascii.hexlify(key))
        return (key)

    def print_keys(self, do_bold=True):
        """Return a display line with cipher suite and both public keys."""
        local = bold("local-key: ", False) if do_bold else "local-key: "
        if (self.local_public_key == None):
            local += "none"
        else:
            local += self.print_key(self.local_public_key)

        remote = bold("remote-key: ", False) if do_bold else "remote-key: "
        if (self.remote_public_key == None):
            remote += "none"
        else:
            remote += self.print_key(self.remote_public_key)

        exchange = "ECDH" if (self.curve25519) else "DH"
        suite = self.cipher_suite
        return ("{} cipher-suite: {}, {}, {}".format(exchange, suite, local, remote))

    def compare_keys(self, keys):
        """True when DH parameters and remote public key all match."""
        if (self.dh_g_value != keys.dh_g_value): return (False)
        if (self.dh_p_value != keys.dh_p_value): return (False)
        if (self.remote_public_key != keys.remote_public_key): return (False)
        return (True)

    def compute_public_key(self):
        """Derive the local public key from the private key (curve25519
        or classic g^x mod p)."""
        if (self.curve25519): return (self.curve25519.get_public().public)

        private = self.local_private_key
        g = self.dh_g_value
        p = self.dh_p_value
        return (int((g ** private) % p))

    def compute_shared_key(self, ed, print_shared=False):
        """Combine our private key with the peer's public key to form
        the shared secret, then derive the encrypt/ICV keys.  'ed' is a
        tag ("encap"/"decap" style) used only for logging."""
        private = self.local_private_key
        public = self.remote_public_key

        banner = bold("Compute {} shared-key".format(ed), False)
        lprint("{}, key-material: {}".format(banner, self.print_keys()))

        if (self.curve25519):
            peer = curve25519.Public(public)
            self.shared_key = self.curve25519.get_shared_key(peer)
        else:
            p = self.dh_p_value
            self.shared_key = (public ** private) % p

        # Shared secrets are sensitive; only print when explicitly asked.
        if (print_shared):
            shown = self.print_key(self.shared_key)
            lprint("Computed shared-key: {}".format(shown))

        self.compute_encrypt_icv_keys()

        self.rekey_count += 1
        self.last_rekey = lisp_get_timestamp()

    def compute_encrypt_icv_keys(self):
        """Derive the encryption and ICV keys from the shared secret
        via HMAC-SHA256 keyed with a context string (KDF)."""
        alg = hashlib.sha256
        if (self.curve25519):
            secret = self.shared_key
        else:
            secret = lisp_hex_string(self.shared_key)

        # Fold both public keys into the KDF context.
        local = self.local_public_key
        if (type(local) != int): local = int(binascii.hexlify(local), 16)
        remote = self.remote_public_key
        if (type(remote) != int): remote = int(binascii.hexlify(remote), 16)
        context = "0001" + "lisp-crypto" + lisp_hex_string(local ^ remote) + "0100"

        material = hmac.new(context.encode(), secret, alg).hexdigest()
        material = int(material, 16)

        # High 128 bits -> encrypt key, low 128 bits -> ICV key.
        encrypt_key = (material >> 128) & LISP_16_128_MASK
        icv_key = material & LISP_16_128_MASK
        encrypt_key = lisp_hex_string(encrypt_key).zfill(32)
        self.encrypt_key = encrypt_key.encode()
        icv_fill = 32 if self.do_poly else 40
        icv_key = lisp_hex_string(icv_key).zfill(icv_fill)
        self.icv_key = icv_key.encode()

    def do_icv(self, packet, nonce):
        """Return the integrity-check value (hex string) over 'packet'
        using poly1305-aes or truncated HMAC, per configuration."""
        if (self.icv_key == None): return ("")
        if (self.do_poly):
            poly = self.icv.poly1305aes
            hexlify = self.icv.binascii.hexlify
            nonce = hexlify(nonce)
            icv = poly(self.encrypt_key, self.icv_key, nonce, packet)
            if (lisp_is_python2()):
                icv = hexlify(icv.encode("raw_unicode_escape"))
            else:
                icv = hexlify(icv).decode()
        else:
            key = binascii.unhexlify(self.icv_key)
            icv = hmac.new(key, packet, self.icv).hexdigest()
            icv = icv[0:40]
        return (icv)

    def add_key_by_nonce(self, nonce):
        """Register this key under 'nonce' in the per-nonce key table
        (one slot per key-id)."""
        if (nonce not in lisp_crypto_keys_by_nonce):
            lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]

        lisp_crypto_keys_by_nonce[nonce][self.key_id] = self

    def delete_key_by_nonce(self, nonce):
        """Remove every key registered under 'nonce', if any."""
        if (nonce not in lisp_crypto_keys_by_nonce): return
        lisp_crypto_keys_by_nonce.pop(nonce)

    def add_key_by_rloc(self, addr_str, encap):
        """Register this key under an RLOC address string in the encap
        or decap key table; decap-side changes are pushed to the data
        plane via IPC."""
        table = lisp_crypto_keys_by_rloc_encap if encap else lisp_crypto_keys_by_rloc_decap

        if (addr_str not in table):
            table[addr_str] = [None, None, None, None]

        table[addr_str][self.key_id] = self

        if (encap == False):
            lisp_write_ipc_decap_key(addr_str, table[addr_str])

    def encode_lcaf(self, rloc_addr):
        """Encode the local public key (and optional trailing RLOC
        address) as a Security-type LCAF and return the byte string."""
        pub_key = self.normalize_pub_key(self.local_public_key)
        key_len = self.key_length(pub_key)
        lcaf_len = (6 + key_len + 2)
        if (rloc_addr != None): lcaf_len += rloc_addr.addr_length()

        buf = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
            LISP_LCAF_SECURITY_TYPE, 0, socket.htons(lcaf_len), 1, 0)

        buf += struct.pack("BBH", self.cipher_suite, 0, socket.htons(key_len))

        # Key material goes out 8 bytes at a time, byte-swapped.
        for offset in range(0, key_len * 2, 16):
            chunk = int(pub_key[offset:offset + 16], 16)
            buf += struct.pack("Q", byte_swap_64(chunk))

        if (rloc_addr):
            buf += struct.pack("H", socket.htons(rloc_addr.afi))
            buf += rloc_addr.pack_address()
        return (buf)

    def decode_lcaf(self, packet, lcaf_len):
        """Parse a Security-type LCAF from 'packet', storing the remote
        public key and cipher suite.  Returns the remaining packet bytes,
        or None when the buffer is malformed/truncated."""

        #
        # When the caller has not already consumed the LCAF header,
        # parse it here; a non-security LCAF is skipped wholesale.
        #
        if (lcaf_len == 0):
            hdr_format = "HHBBH"
            hdr_size = struct.calcsize(hdr_format)
            if (len(packet) < hdr_size): return (None)

            afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack(hdr_format, packet[:hdr_size])

            if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
                packet = packet[lcaf_len + 6::]
                return (packet)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[hdr_size::]

        lcaf_type = LISP_LCAF_SECURITY_TYPE
        body_format = "BBBBH"
        body_size = struct.calcsize(body_format)
        if (len(packet) < body_size): return (None)

        key_count, rsvd, suite, rsvd, key_len = struct.unpack(body_format,
            packet[:body_size])

        packet = packet[body_size::]
        key_len = socket.ntohs(key_len)
        if (len(packet) < key_len): return (None)

        supported = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
            LISP_CS_1024]
        if (suite not in supported):
            lprint("Cipher-suites {} supported, received {}".format(supported,
                suite))
            packet = packet[key_len::]
            return (packet)

        self.cipher_suite = suite

        # Reassemble the public key from byte-swapped 8-byte chunks.
        pub_key = 0
        for offset in range(0, key_len, 8):
            chunk = byte_swap_64(struct.unpack("Q", packet[offset:offset + 8])[0])
            pub_key <<= 64
            pub_key |= chunk

        self.remote_public_key = pub_key

        #
        # curve25519 wants the public key as raw bytes, not an int.
        #
        if (self.curve25519):
            key_hex = lisp_hex_string(self.remote_public_key)
            key_hex = key_hex.zfill(64)
            key_bytes = b""
            for offset in range(0, len(key_hex), 2):
                byte_value = int(key_hex[offset:offset + 2], 16)
                key_bytes += lisp_store_byte(byte_value)

            self.remote_public_key = key_bytes

        packet = packet[key_len::]
        return (packet)
if 55 - 55: i11iIiiIii + i1IIi % II111iiii + I11i % ooOoO0o
if 67 - 67: I1ii11iIi11i / Oo0Ooo * i11iIiiIii / OoOoOO00
if 38 - 38: I1IiiI . oO0o / O0 % Oo0Ooo / IiII / OoooooooOO
if 11 - 11: O0 / I1Ii111 / iIii1I11I1II1 % Ii1I
if 31 - 31: I11i . i11iIiiIii . OoO0O00 * Oo0Ooo % Ii1I . o0oOOo0O0Ooo
if 92 - 92: OoooooooOO / O0 * i1IIi + iIii1I11I1II1
if 93 - 93: ooOoO0o % I1Ii111
if 46 - 46: I1ii11iIi11i * OoOoOO00 * IiII * I1ii11iIi11i . I1ii11iIi11i
if 43 - 43: ooOoO0o . i1IIi
def lisp_store_byte_py2(byte):
    """Return the one-character str encoding of integer 'byte' (Python 2)."""
    return chr(byte)
if 68 - 68: IiII % Oo0Ooo . O0 - OoOoOO00 + I1ii11iIi11i . i11iIiiIii
def lisp_store_byte_py3(byte):
    """Return a one-byte bytes object for integer 'byte' (Python 3)."""
    return bytes([byte])
if 45 - 45: I1IiiI
if 17 - 17: OoooooooOO - ooOoO0o + Ii1I . OoooooooOO % Oo0Ooo
# Pick the byte-store helper that matches the running interpreter version.
lisp_store_byte = lisp_store_byte_py3 if lisp_is_python3() else lisp_store_byte_py2
if 92 - 92: I1Ii111 - OOooOOo % OoO0O00 - o0oOOo0O0Ooo % i1IIi
if 38 - 38: I1ii11iIi11i . I11i / OoOoOO00 % I11i
if 10 - 10: O0 . I1IiiI * o0oOOo0O0Ooo / iII111i
if 61 - 61: Oo0Ooo - I1Ii111
if 51 - 51: iII111i * ooOoO0o / O0 / O0
if 52 - 52: OoooooooOO % O0
class lisp_thread(object):
    """Per-thread bookkeeping for LISP packet-processing threads.

    Bundles the thread's name and number, counts of pcap and worker
    threads, an input queue with its statistics, and a reusable
    lisp_packet instance for this thread.
    """

    def __init__(self, name):
        self.thread_name = name
        self.thread_number = -1          # assigned later by the thread spawner
        self.number_of_pcap_threads = 0
        self.number_of_worker_threads = 0
        self.input_queue = queue.Queue()
        self.input_stats = lisp_stats()
        self.lisp_packet = lisp_packet(None)
if 56 - 56: oO0o - i1IIi * OoooooooOO - II111iiii
if 28 - 28: i1IIi / I11i . o0oOOo0O0Ooo
if 11 - 11: Oo0Ooo * OoooooooOO - i11iIiiIii
if 13 - 13: i11iIiiIii . O0 / OOooOOo * i1IIi
if 14 - 14: IiII + IiII . I11i / Ii1I . iIii1I11I1II1
if 10 - 10: II111iiii . OOooOOo / iII111i
if 35 - 35: iII111i / Oo0Ooo + O0 * iIii1I11I1II1 - O0
if 3 - 3: I1ii11iIi11i
if 42 - 42: I11i % Oo0Ooo + IiII - I11i . iIii1I11I1II1 - Ii1I
if 27 - 27: iII111i % Oo0Ooo . I1ii11iIi11i . i1IIi % OoOoOO00 . o0oOOo0O0Ooo
if 37 - 37: iII111i + I1Ii111 * Ii1I + IiII
if 39 - 39: O0 * Oo0Ooo - I1IiiI + Ii1I / II111iiii
if 66 - 66: ooOoO0o + oO0o % OoooooooOO
if 23 - 23: oO0o . OoOoOO00 + iIii1I11I1II1
if 17 - 17: IiII
if 12 - 12: i1IIi . OoO0O00
if 14 - 14: OOooOOo + II111iiii % OOooOOo . oO0o * ooOoO0o
if 54 - 54: ooOoO0o * I11i - I1Ii111
class lisp_control_header(object):
    """Parsed fixed header of a LISP control message.

    Holds the message type plus the per-type flag bits decoded from the
    leading bytes of a control packet (type nibble, flags, record count
    and 64-bit nonce).
    """

    def __init__(self):
        self.type = 0
        self.record_count = 0
        self.nonce = 0
        self.rloc_probe = False
        self.smr_bit = False
        self.smr_invoked_bit = False
        self.ddt_bit = False
        self.to_etr = False
        self.to_ms = False
        self.info_reply = False

    def decode(self, packet):
        """Decode the control header from 'packet'.

        Returns True on success, False when the buffer is shorter than
        the fixed header.
        """
        header_format = "BBBBQ"
        header_size = struct.calcsize(header_format)
        if len(packet) < header_size:
            return False

        byte1, byte2, _reserved, self.record_count, self.nonce = \
            struct.unpack(header_format, packet[:header_size])

        # High nibble of the first byte is the message type; which flag
        # bits are meaningful depends on that type.
        self.type = byte1 >> 4

        if self.type == LISP_MAP_REQUEST:
            self.smr_bit = bool(byte1 & 0x01)
            self.rloc_probe = bool(byte1 & 0x02)
            self.smr_invoked_bit = bool(byte2 & 0x40)

        if self.type == LISP_ECM:
            self.ddt_bit = bool(byte1 & 0x04)
            self.to_etr = bool(byte1 & 0x02)
            self.to_ms = bool(byte1 & 0x01)

        if self.type == LISP_NAT_INFO:
            self.info_reply = bool(byte1 & 0x08)

        return True

    def is_info_request(self):
        """True when this is a NAT Info-Request (not an Info-Reply)."""
        return self.type == LISP_NAT_INFO and not self.is_info_reply()

    def is_info_reply(self):
        return bool(self.info_reply)

    def is_rloc_probe(self):
        return bool(self.rloc_probe)

    def is_smr(self):
        return bool(self.smr_bit)

    def is_smr_invoked(self):
        return bool(self.smr_invoked_bit)

    def is_ddt(self):
        return bool(self.ddt_bit)

    def is_to_etr(self):
        return bool(self.to_etr)

    def is_to_ms(self):
        return bool(self.to_ms)
if 73 - 73: I1IiiI % ooOoO0o % IiII + i1IIi - OoooooooOO / oO0o
if 78 - 78: OoooooooOO % oO0o - i11iIiiIii
if 37 - 37: IiII % Ii1I % i1IIi
if 23 - 23: ooOoO0o - O0 + i11iIiiIii
if 98 - 98: OoooooooOO
if 61 - 61: o0oOOo0O0Ooo . IiII . O0 + OoooooooOO + O0
if 65 - 65: i1IIi * OOooOOo * OoooooooOO - IiII . iII111i - OoO0O00
if 71 - 71: Ii1I * OoOoOO00
if 33 - 33: i1IIi . i1IIi * OoooooooOO % I1Ii111 * o0oOOo0O0Ooo
if 64 - 64: ooOoO0o / ooOoO0o + I1ii11iIi11i * OOooOOo % OOooOOo
if 87 - 87: OoO0O00 * Oo0Ooo
if 83 - 83: i1IIi * I1Ii111 - IiII / Ii1I
if 48 - 48: oO0o . II111iiii - OoOoOO00 % i1IIi . OoOoOO00
if 32 - 32: Ii1I * I1IiiI - OOooOOo . Oo0Ooo / O0 + Ii1I
if 67 - 67: OoOoOO00 % Oo0Ooo
if 7 - 7: i11iIiiIii % I1ii11iIi11i / I1Ii111 % Oo0Ooo - OoO0O00
if 73 - 73: I1ii11iIi11i
if 92 - 92: i11iIiiIii + O0 * I11i
if 60 - 60: o0oOOo0O0Ooo / Oo0Ooo
if 19 - 19: iIii1I11I1II1 . OoO0O00 / OoooooooOO
if 2 - 2: O0 - O0 % I1Ii111 / I1ii11iIi11i
if 76 - 76: OoO0O00 * oO0o - OoO0O00
if 57 - 57: OoooooooOO / OoOoOO00 + oO0o . Ii1I
if 14 - 14: i11iIiiIii % OOooOOo * o0oOOo0O0Ooo * OoOoOO00
if 55 - 55: I1Ii111 * OOooOOo * I1Ii111
if 70 - 70: O0 . Ii1I
if 33 - 33: OOooOOo * Ii1I
if 64 - 64: i11iIiiIii . iIii1I11I1II1
if 7 - 7: OoOoOO00 % ooOoO0o + OoOoOO00 - OoOoOO00 * i11iIiiIii % OoO0O00
if 57 - 57: OOooOOo / OoO0O00 + I1ii11iIi11i
if 60 - 60: O0 * Oo0Ooo % OOooOOo + IiII . OoO0O00 . Oo0Ooo
if 70 - 70: I11i . I1ii11iIi11i * oO0o
if 97 - 97: oO0o . iIii1I11I1II1 - OOooOOo
if 23 - 23: I1ii11iIi11i % I11i
if 18 - 18: OoooooooOO . i1IIi + II111iiii
if 99 - 99: I1Ii111 - I1ii11iIi11i - I1IiiI - I1Ii111 + OoO0O00 + II111iiii
if 34 - 34: I1Ii111 * I11i
if 31 - 31: IiII . oO0o
if 40 - 40: Ii1I - I11i / II111iiii * i1IIi + IiII * II111iiii
if 53 - 53: I1ii11iIi11i - i11iIiiIii . OoO0O00 / OoOoOO00 - I1Ii111
if 99 - 99: Ii1I - IiII - i1IIi / i11iIiiIii . IiII
if 58 - 58: OOooOOo
if 12 - 12: I1IiiI . o0oOOo0O0Ooo * OoooooooOO
if 64 - 64: OoOoOO00 + IiII - i1IIi . II111iiii . OoO0O00
if 31 - 31: oO0o . iII111i - I11i . iIii1I11I1II1 + I11i . OoOoOO00
if 86 - 86: I1ii11iIi11i - I1ii11iIi11i / iII111i - I1ii11iIi11i * iII111i + I1Ii111
if 61 - 61: Oo0Ooo / II111iiii / Oo0Ooo / i1IIi . Oo0Ooo - IiII
if 30 - 30: OoooooooOO % OOooOOo
if 14 - 14: OoOoOO00 / OoO0O00 / i11iIiiIii - OoOoOO00 / o0oOOo0O0Ooo - OOooOOo
class lisp_map_register(object):
    """LISP Map-Register message (RFC 6833) encoder/decoder.

    Carries EID-to-RLOC registration state from an ETR to a Map-Server,
    optionally authenticated with SHA-1-96 or SHA-256-128, and optionally
    trailed by a 128-bit xtr-id and 64-bit site-id.
    """

    def __init__(self):
        self.proxy_reply_requested = False
        self.lisp_sec_present = False
        self.xtr_id_present = False
        self.map_notify_requested = False
        self.mobile_node = False
        self.merge_register_requested = False
        self.use_ttl_for_timeout = False
        self.map_register_refresh = False
        self.record_count = 0   # was initialized twice in the original
        self.nonce = 0
        self.alg_id = 0
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = 0
        self.xtr_id = 0
        self.site_id = 0
        self.sport = 0
        self.encrypt_bit = 0
        self.encryption_key_id = None

    def print_map_register(self):
        """Log a one-line summary of this Map-Register."""
        xtr_id = lisp_hex_string(self.xtr_id)

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
            "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
            "0x{}, site-id: {}")

        # NOTE(review): the "(sha1)"/"(sha2)" suffix is selected by
        # comparing key_id (not alg_id) against the ALG-ID constants; the
        # code elsewhere appears to keep both fields equal -- confirm
        # before changing.
        lprint(line.format(bold("Map-Register", False),
            "P" if self.proxy_reply_requested else "p",
            "S" if self.lisp_sec_present else "s",
            "I" if self.xtr_id_present else "i",
            "T" if self.use_ttl_for_timeout else "t",
            "R" if self.merge_register_requested else "r",
            "M" if self.mobile_node else "m",
            "N" if self.map_notify_requested else "n",
            "F" if self.map_register_refresh else "f",
            "E" if self.encrypt_bit else "e",
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id,
            " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) else
            (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else ""),
            self.auth_len, xtr_id, self.site_id))

    def encode(self):
        """Build the packed Map-Register header with a zeroed auth field.

        Sets self.auth_len from alg_id as a side effect.  Returns the
        header bytes; encode_auth() later splices in the real hash.
        """
        first_long = (LISP_MAP_REGISTER << 28) | self.record_count
        if self.proxy_reply_requested: first_long |= 0x08000000
        if self.lisp_sec_present: first_long |= 0x04000000
        if self.xtr_id_present: first_long |= 0x02000000
        if self.map_register_refresh: first_long |= 0x1000
        if self.use_ttl_for_timeout: first_long |= 0x800
        if self.merge_register_requested: first_long |= 0x400
        if self.mobile_node: first_long |= 0x200
        if self.map_notify_requested: first_long |= 0x100
        if self.encryption_key_id != None:
            first_long |= 0x2000
            first_long |= self.encryption_key_id << 14

        # Authentication-data length is fixed by the hash algorithm.
        if self.alg_id == LISP_NONE_ALG_ID:
            self.auth_len = 0
        else:
            if self.alg_id == LISP_SHA_1_96_ALG_ID:
                self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
            if self.alg_id == LISP_SHA_256_128_ALG_ID:
                self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        packet = self.zero_auth(packet)
        return packet

    def zero_auth(self, packet):
        """Return 'packet' with its authentication field zero-filled."""
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_data = b""
        auth_len = 0
        if self.alg_id == LISP_NONE_ALG_ID: return packet
        if self.alg_id == LISP_SHA_1_96_ALG_ID:
            auth_data = struct.pack("QQI", 0, 0, 0)
            auth_len = struct.calcsize("QQI")
        if self.alg_id == LISP_SHA_256_128_ALG_ID:
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
            auth_len = struct.calcsize("QQQQ")

        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return packet

    def encode_auth(self, packet):
        """Splice self.auth_data into the authentication field of 'packet'."""
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        auth_data = self.auth_data
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return packet

    def decode(self, packet):
        """Parse a Map-Register from 'packet'.

        Returns [original_packet_with_zeroed_auth, remaining_packet] on
        success, or [None, None] when the packet is malformed.
        """
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if len(packet) < format_size: return [None, None]

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if len(packet) < format_size: return [None, None]

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])

        self.nonce = byte_swap_64(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        self.proxy_reply_requested = bool(first_long & 0x08000000)
        self.lisp_sec_present = bool(first_long & 0x04000000)
        self.xtr_id_present = bool(first_long & 0x02000000)
        self.use_ttl_for_timeout = bool(first_long & 0x800)
        self.map_register_refresh = bool(first_long & 0x1000)
        self.merge_register_requested = bool(first_long & 0x400)
        self.mobile_node = bool(first_long & 0x200)
        self.map_notify_requested = bool(first_long & 0x100)
        self.record_count = first_long & 0xff

        self.encrypt_bit = bool(first_long & 0x2000)
        if self.encrypt_bit:
            self.encryption_key_id = (first_long >> 14) & 0x7

        # The xtr-id/site-id trailer sits at the very end of the original,
        # unconsumed packet.
        if self.xtr_id_present:
            if self.decode_xtr_id(orig_packet) == False: return [None, None]

        packet = packet[format_size::]

        if self.auth_len != 0:
            if len(packet) < self.auth_len: return [None, None]

            if self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
                LISP_SHA_256_128_ALG_ID):
                lprint("Invalid authentication alg-id: {}".format(self.alg_id))
                return [None, None]

            auth_len = self.auth_len
            if self.alg_id == LISP_SHA_1_96_ALG_ID:
                format_size = struct.calcsize("QQI")
                if auth_len < format_size:
                    lprint("Invalid sha1-96 authentication length")
                    return [None, None]

                auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
                auth4 = b""
            elif self.alg_id == LISP_SHA_256_128_ALG_ID:
                format_size = struct.calcsize("QQQQ")
                if auth_len < format_size:
                    lprint("Invalid sha2-256 authentication length")
                    return [None, None]

                auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                    packet[:auth_len])
            else:
                lprint("Unsupported authentication alg-id value {}".format(self.alg_id))
                return [None, None]

            self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
                auth3, auth4)
            orig_packet = self.zero_auth(orig_packet)
            packet = packet[self.auth_len::]

        return [orig_packet, packet]

    def encode_xtr_id(self, packet):
        """Append the 128-bit xtr-id and 64-bit site-id trailer to 'packet'."""
        upper = self.xtr_id >> 64
        lower = self.xtr_id & 0xffffffffffffffff
        upper = byte_swap_64(upper)
        lower = byte_swap_64(lower)
        site_id = byte_swap_64(self.site_id)
        packet += struct.pack("QQQ", upper, lower, site_id)
        return packet

    def decode_xtr_id(self, packet):
        """Parse the xtr-id/site-id trailer from the end of 'packet'.

        Returns True on success, False when the packet is too short.
        (Bug fix: the original returned [None, None] here, a truthy
        value the caller's '== False' test never caught, so truncated
        trailers were silently accepted.)
        """
        format_size = struct.calcsize("QQQ")
        if len(packet) < format_size: return False
        packet = packet[len(packet) - format_size::]
        upper, lower, site_id = struct.unpack("QQQ", packet[:format_size])
        upper = byte_swap_64(upper)
        lower = byte_swap_64(lower)
        self.xtr_id = (upper << 64) | lower
        self.site_id = byte_swap_64(site_id)
        return True
if 30 - 30: OoooooooOO % o0oOOo0O0Ooo + ooOoO0o * OoO0O00
if 57 - 57: I11i + iIii1I11I1II1 . OoO0O00 + oO0o
if 4 - 4: Ii1I
if 43 - 43: i1IIi . I1IiiI * iIii1I11I1II1 * i11iIiiIii - OOooOOo + ooOoO0o
if 56 - 56: Oo0Ooo % i11iIiiIii / Ii1I . I1Ii111 . OoO0O00 - OoOoOO00
if 32 - 32: I1Ii111 / oO0o / I1IiiI
if 22 - 22: OoO0O00 - OoOoOO00 . Oo0Ooo + o0oOOo0O0Ooo
if 69 - 69: oO0o - I1IiiI
if 10 - 10: i1IIi / iII111i . II111iiii * i1IIi % OoooooooOO
if 83 - 83: I11i . OOooOOo + I1Ii111 * I11i . I1Ii111 + oO0o
if 64 - 64: Ii1I . o0oOOo0O0Ooo - i1IIi
if 35 - 35: I1ii11iIi11i % OoooooooOO
if 59 - 59: I1IiiI % I11i
if 32 - 32: I1IiiI * O0 + O0
if 34 - 34: IiII
if 5 - 5: OoO0O00 . I1IiiI
if 48 - 48: Oo0Ooo - OoO0O00 . I11i - iIii1I11I1II1 % Ii1I
if 47 - 47: iII111i / OoooooooOO - II111iiii
if 91 - 91: OoOoOO00 + o0oOOo0O0Ooo
if 23 - 23: i1IIi
if 9 - 9: i1IIi % I1Ii111 - OoO0O00 * OoOoOO00 . o0oOOo0O0Ooo
if 18 - 18: Ii1I . OoOoOO00 + iII111i . I1IiiI + OoooooooOO . OoO0O00
if 31 - 31: I1Ii111 - I11i
if 49 - 49: iIii1I11I1II1 - iIii1I11I1II1 - OoOoOO00 + IiII / OoOoOO00
if 74 - 74: OoooooooOO + I1ii11iIi11i % O0
if 32 - 32: I1ii11iIi11i + I1ii11iIi11i
if 89 - 89: ooOoO0o + oO0o + Ii1I - OOooOOo
if 12 - 12: OoOoOO00 - o0oOOo0O0Ooo - I1Ii111 / I11i
if 17 - 17: OoO0O00 - I1Ii111 - II111iiii / I1Ii111 / Ii1I
if 30 - 30: OOooOOo * I1ii11iIi11i % I1ii11iIi11i + iII111i * IiII
if 33 - 33: o0oOOo0O0Ooo + I11i * O0 * OoO0O00 . I1ii11iIi11i
if 74 - 74: iII111i * iII111i * o0oOOo0O0Ooo / oO0o
if 91 - 91: i11iIiiIii . I1ii11iIi11i / II111iiii
class lisp_map_notify(object):
    """LISP Map-Notify / Map-Notify-Ack message encoder/decoder.

    Built by a Map-Server to confirm a registration (or by an xTR to
    acknowledge one); carries the same keyed-hash authentication scheme
    as Map-Register.
    """

    def __init__(self, lisp_sockets):
        self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.etr_port = 0
        self.retransmit_timer = None
        self.lisp_sockets = lisp_sockets
        self.retry_count = 0
        self.record_count = 0
        self.alg_id = LISP_NONE_ALG_ID
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = ""
        self.nonce = 0
        self.nonce_key = ""
        self.packet = None
        self.site = ""
        self.map_notify_ack = False
        self.eid_records = ""
        self.eid_list = []

    def print_notify(self):
        """Log a one-line summary of this Map-Notify or Map-Notify-Ack."""
        auth = binascii.hexlify(self.auth_data)
        # If the hex length does not match the algorithm's digest size,
        # show the raw field instead.
        if self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth) != 40:
            auth = self.auth_data
        elif self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth) != 64:
            auth = self.auth_data

        line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
            "{}{}{}, auth-len: {}, auth-data: {}")
        lprint(line.format(
            bold("Map-Notify-Ack", False) if self.map_notify_ack
            else bold("Map-Notify", False),
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id,
            " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) else
            (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else ""),
            self.auth_len, auth))

    def zero_auth(self, packet):
        """Append a zero-filled authentication field for this alg-id."""
        if self.alg_id == LISP_NONE_ALG_ID: return packet
        if self.alg_id == LISP_SHA_1_96_ALG_ID:
            auth = struct.pack("QQI", 0, 0, 0)

        if self.alg_id == LISP_SHA_256_128_ALG_ID:
            auth = struct.pack("QQQQ", 0, 0, 0, 0)

        packet += auth
        return packet

    def encode(self, eid_records, password):
        """Build the full Map-Notify(-Ack), hashing with 'password'.

        Stores the result in self.packet and returns it.
        """
        if self.map_notify_ack:
            first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
        else:
            first_long = (LISP_MAP_NOTIFY << 28) | self.record_count

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        if self.alg_id == LISP_NONE_ALG_ID:
            self.packet = packet + eid_records
            return self.packet

        # Hash over the header (zeroed auth) plus the EID records, then
        # splice the digest into the authentication field.
        packet = self.zero_auth(packet)
        packet += eid_records

        digest = lisp_hash_me(packet, self.alg_id, password, False)

        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        self.auth_data = digest
        packet = packet[0:offset] + digest + packet[offset + auth_len::]
        self.packet = packet
        return packet

    def decode(self, packet):
        """Parse a Map-Notify(-Ack) from 'packet'.

        Returns the packet with a zeroed auth field (for hash
        verification), the EID records alone when unauthenticated, or
        None when the packet is malformed.
        """
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if len(packet) < format_size: return None

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
        self.record_count = first_long & 0xff
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if len(packet) < format_size: return None

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])

        self.nonce_key = lisp_hex_string(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        packet = packet[format_size::]
        self.eid_records = packet[self.auth_len::]

        if self.auth_len == 0: return self.eid_records

        if len(packet) < self.auth_len: return None

        auth_len = self.auth_len
        if self.alg_id == LISP_SHA_1_96_ALG_ID:
            auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
            auth4 = ""

        if self.alg_id == LISP_SHA_256_128_ALG_ID:
            auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                packet[:auth_len])

        self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
            auth3, auth4)

        format_size = struct.calcsize("I") + struct.calcsize("QHH")
        packet = self.zero_auth(orig_packet[:format_size])
        format_size += auth_len
        packet += orig_packet[format_size::]
        return packet
if 74 - 74: ooOoO0o % I1ii11iIi11i * i1IIi
if 18 - 18: OoOoOO00
if 30 - 30: II111iiii
if 27 - 27: i1IIi - iIii1I11I1II1 + O0 % Oo0Ooo / OOooOOo + i1IIi
if 48 - 48: Oo0Ooo
if 70 - 70: OoooooooOO * i11iIiiIii
if 60 - 60: IiII / iIii1I11I1II1 + OoooooooOO - I1ii11iIi11i * i11iIiiIii
if 47 - 47: O0 . I1IiiI / ooOoO0o % i11iIiiIii
if 47 - 47: Ii1I . OoOoOO00 . iIii1I11I1II1 . o0oOOo0O0Ooo
if 39 - 39: o0oOOo0O0Ooo
if 89 - 89: OoooooooOO + iII111i . I1Ii111 / Ii1I
if 75 - 75: iIii1I11I1II1 * iII111i / OoOoOO00 * II111iiii . i1IIi
if 6 - 6: Ii1I % Ii1I / OoooooooOO * oO0o . I1IiiI . i1IIi
if 59 - 59: I11i . I11i * I1IiiI - Ii1I % OoOoOO00
if 19 - 19: OoooooooOO / Oo0Ooo - I1Ii111 . OoOoOO00
if 8 - 8: I11i % ooOoO0o . iIii1I11I1II1
if 95 - 95: o0oOOo0O0Ooo + i11iIiiIii . I1ii11iIi11i . ooOoO0o . o0oOOo0O0Ooo
if 93 - 93: iII111i
if 55 - 55: II111iiii % o0oOOo0O0Ooo - OoO0O00
if 48 - 48: ooOoO0o * iIii1I11I1II1 % OoOoOO00
if 100 - 100: II111iiii - i11iIiiIii + OoO0O00 % ooOoO0o - iIii1I11I1II1 * i11iIiiIii
if 30 - 30: OoO0O00 . OoO0O00 . Ii1I % Ii1I * i1IIi * oO0o
if 74 - 74: OoooooooOO
if 33 - 33: o0oOOo0O0Ooo - II111iiii
if 95 - 95: OoooooooOO
if 23 - 23: II111iiii + I11i / O0 . I11i . I1Ii111 + iIii1I11I1II1
if 2 - 2: i1IIi . O0 / o0oOOo0O0Ooo . II111iiii / OoO0O00 % i1IIi
if 12 - 12: o0oOOo0O0Ooo
if 58 - 58: iIii1I11I1II1 * Ii1I . ooOoO0o . Oo0Ooo * Ii1I
if 63 - 63: OoOoOO00 . I11i * o0oOOo0O0Ooo - I11i % I11i
if 62 - 62: I11i - ooOoO0o / ooOoO0o
if 95 - 95: OoOoOO00 - i1IIi / I1Ii111 . ooOoO0o % OOooOOo - i1IIi
if 12 - 12: iII111i
if 96 - 96: O0
if 89 - 89: I1ii11iIi11i - Oo0Ooo
if 26 - 26: ooOoO0o % ooOoO0o / II111iiii / iII111i
if 2 - 2: i1IIi / i11iIiiIii + I1IiiI
if 95 - 95: I1ii11iIi11i / IiII % iIii1I11I1II1 + O0
if 6 - 6: IiII
if 73 - 73: o0oOOo0O0Ooo % o0oOOo0O0Ooo . OOooOOo * I1ii11iIi11i - Ii1I
if 97 - 97: IiII
if 15 - 15: O0 - I1IiiI / i1IIi . I1Ii111
if 64 - 64: ooOoO0o / i1IIi
if 100 - 100: II111iiii
if 16 - 16: Ii1I
if 96 - 96: o0oOOo0O0Ooo / I1Ii111 % Ii1I - ooOoO0o
if 35 - 35: OOooOOo
if 90 - 90: i11iIiiIii
if 47 - 47: OoO0O00 . i11iIiiIii
if 9 - 9: OoOoOO00 - I11i . OoooooooOO % ooOoO0o
class lisp_map_request ( object ) :
def __init__ ( self ) :
self . auth_bit = False
self . map_data_present = False
self . rloc_probe = False
self . smr_bit = False
self . pitr_bit = False
self . smr_invoked_bit = False
self . mobile_node = False
self . xtr_id_present = False
self . local_xtr = False
self . dont_reply_bit = False
self . itr_rloc_count = 0
self . record_count = 0
self . nonce = 0
self . signature_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . target_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . target_group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . itr_rlocs = [ ]
self . keys = None
self . privkey_filename = None
self . map_request_signature = None
self . subscribe_bit = False
self . xtr_id = None
self . json_telemetry = None
if 13 - 13: OoO0O00 * iIii1I11I1II1 + II111iiii - Oo0Ooo - OoOoOO00
if 43 - 43: iII111i / I1Ii111 * I1IiiI % ooOoO0o % I1IiiI
def print_prefix ( self ) :
if ( self . target_group . is_null ( ) ) :
return ( green ( self . target_eid . print_prefix ( ) , False ) )
if 18 - 18: OoO0O00
return ( green ( self . target_eid . print_sg ( self . target_group ) , False ) )
if 99 - 99: iII111i / oO0o . i11iIiiIii / I11i + i1IIi - I11i
if 50 - 50: i1IIi
def print_map_request ( self ) :
Iiooo000o0OoOo = ""
if ( self . xtr_id != None and self . subscribe_bit ) :
Iiooo000o0OoOo = "subscribe, xtr-id: 0x{}, " . format ( lisp_hex_string ( self . xtr_id ) )
if 56 - 56: OoO0O00 + I1Ii111 / Ii1I
if 75 - 75: OoOoOO00
if 96 - 96: o0oOOo0O0Ooo * I11i * Oo0Ooo
IiiiI1 = ( "{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:" )
if 36 - 36: OoooooooOO + ooOoO0o . oO0o * ooOoO0o + IiII
lprint ( IiiiI1 . format ( bold ( "Map-Request" , False ) , "A" if self . auth_bit else "a" ,
# II111iiii * OoOoOO00 - iII111i
"D" if self . map_data_present else "d" ,
"R" if self . rloc_probe else "r" ,
"S" if self . smr_bit else "s" ,
"P" if self . pitr_bit else "p" ,
"I" if self . smr_invoked_bit else "i" ,
"M" if self . mobile_node else "m" ,
"X" if self . xtr_id_present else "x" ,
"L" if self . local_xtr else "l" ,
"D" if self . dont_reply_bit else "d" , self . itr_rloc_count ,
self . record_count , lisp_hex_string ( self . nonce ) ,
self . source_eid . afi , green ( self . source_eid . print_address ( ) , False ) ,
" (with sig)" if self . map_request_signature != None else "" ,
self . target_eid . afi , green ( self . print_prefix ( ) , False ) , Iiooo000o0OoOo ) )
if 67 - 67: Oo0Ooo - ooOoO0o . o0oOOo0O0Ooo . o0oOOo0O0Ooo
iI1iiiiiii = self . keys
for ii1oO0Oo in self . itr_rlocs :
if ( ii1oO0Oo . afi == LISP_AFI_LCAF and self . json_telemetry != None ) :
continue
if 12 - 12: Ii1I
iIIIi1Iii1 = red ( ii1oO0Oo . print_address_no_iid ( ) , False )
lprint ( " itr-rloc: afi {} {}{}" . format ( ii1oO0Oo . afi , iIIIi1Iii1 ,
"" if ( iI1iiiiiii == None ) else ", " + iI1iiiiiii [ 1 ] . print_keys ( ) ) )
iI1iiiiiii = None
if 77 - 77: I11i
if ( self . json_telemetry != None ) :
lprint ( " itr-rloc: afi {} telemetry: {}" . format ( LISP_AFI_LCAF ,
self . json_telemetry ) )
if 50 - 50: o0oOOo0O0Ooo - OoOoOO00
if 1 - 1: i1IIi / Ii1I % IiII - I11i % o0oOOo0O0Ooo
if 28 - 28: ooOoO0o - IiII + iII111i . ooOoO0o % OoooooooOO
def sign_map_request ( self , privkey ) :
IIi1i = self . signature_eid . print_address ( )
oo0Oo0 = self . source_eid . print_address ( )
i1I1ii1iI1 = self . target_eid . print_address ( )
OoI1Ii = lisp_hex_string ( self . nonce ) + oo0Oo0 + i1I1ii1iI1
self . map_request_signature = privkey . sign ( OoI1Ii . encode ( ) )
IIIII1iII1 = binascii . b2a_base64 ( self . map_request_signature )
IIIII1iII1 = { "source-eid" : oo0Oo0 , "signature-eid" : IIi1i ,
"signature" : IIIII1iII1 . decode ( ) }
return ( json . dumps ( IIIII1iII1 ) )
if 77 - 77: oO0o % O0 % O0 - iII111i - iII111i - I1IiiI
if 37 - 37: iIii1I11I1II1
def verify_map_request_sig ( self , pubkey ) :
iI1i = green ( self . signature_eid . print_address ( ) , False )
if ( pubkey == None ) :
lprint ( "Public-key not found for signature-EID {}" . format ( iI1i ) )
return ( False )
if 93 - 93: iII111i % i11iIiiIii - OoOoOO00 . Ii1I
if 72 - 72: iIii1I11I1II1 * OOooOOo . iIii1I11I1II1
oo0Oo0 = self . source_eid . print_address ( )
i1I1ii1iI1 = self . target_eid . print_address ( )
OoI1Ii = lisp_hex_string ( self . nonce ) + oo0Oo0 + i1I1ii1iI1
pubkey = binascii . a2b_base64 ( pubkey )
if 62 - 62: IiII . IiII % ooOoO0o - OoOoOO00 / OoooooooOO . I1IiiI
i11i1I1 = True
try :
Ooo00o000o = ecdsa . VerifyingKey . from_pem ( pubkey )
except :
lprint ( "Invalid public-key in mapping system for sig-eid {}" . format ( self . signature_eid . print_address_no_iid ( ) ) )
if 72 - 72: IiII + i11iIiiIii - OOooOOo
i11i1I1 = False
if 67 - 67: iIii1I11I1II1 % IiII
if 97 - 97: iII111i
if ( i11i1I1 ) :
try :
OoI1Ii = OoI1Ii . encode ( )
i11i1I1 = Ooo00o000o . verify ( self . map_request_signature , OoI1Ii )
except :
i11i1I1 = False
if 40 - 40: ooOoO0o
if 61 - 61: iII111i - OOooOOo / iII111i . Oo0Ooo % OoO0O00
if 70 - 70: I1Ii111 * Oo0Ooo
Oo0 = bold ( "passed" if i11i1I1 else "failed" , False )
lprint ( "Signature verification {} for EID {}" . format ( Oo0 , iI1i ) )
return ( i11i1I1 )
if 84 - 84: Oo0Ooo % I1Ii111 . Oo0Ooo / ooOoO0o * Ii1I - IiII
if 16 - 16: OOooOOo % IiII - II111iiii - o0oOOo0O0Ooo * i11iIiiIii / I1Ii111
def encode_json ( self , json_string ) :
IIiiIIi1II11 = LISP_LCAF_JSON_TYPE
O0oooOoOO0O = socket . htons ( LISP_AFI_LCAF )
ii111iIii1 = socket . htons ( len ( json_string ) + 4 )
oo0O0OO = socket . htons ( len ( json_string ) )
Oo00oo = struct . pack ( "HBBBBHH" , O0oooOoOO0O , 0 , 0 , IIiiIIi1II11 , 0 , ii111iIii1 ,
oo0O0OO )
Oo00oo += json_string . encode ( )
Oo00oo += struct . pack ( "H" , 0 )
return ( Oo00oo )
if 96 - 96: I1IiiI / I11i
if 92 - 92: o0oOOo0O0Ooo
 def encode ( self , probe_dest , probe_port ) :
  # Build the on-the-wire Map-Request: first 32-bit word (type, flags,
  # ITR-RLOC count, record count), 64-bit nonce, source-EID (or a JSON
  # signature LCAF), the ITR-RLOC list, then the target EID record.
  # Returns the packed bytes, or None when a configured private-key
  # file cannot be parsed.
  Iii1 = ( LISP_MAP_REQUEST << 28 ) | self . record_count
  if 8 - 8: iII111i + I1ii11iIi11i . Ii1I
  # When RLOC-probing with telemetry configured, reserve one extra
  # ITR-RLOC slot to carry the telemetry JSON (appended further below).
  ii1I11 = lisp_telemetry_configured ( ) if ( self . rloc_probe ) else None
  if ( ii1I11 != None ) : self . itr_rloc_count += 1
  Iii1 = Iii1 | ( self . itr_rloc_count << 8 )
  if 47 - 47: OoooooooOO + Ii1I
  # Fold each header flag into the first long-word.
  if ( self . auth_bit ) : Iii1 |= 0x08000000
  if ( self . map_data_present ) : Iii1 |= 0x04000000
  if ( self . rloc_probe ) : Iii1 |= 0x02000000
  if ( self . smr_bit ) : Iii1 |= 0x01000000
  if ( self . pitr_bit ) : Iii1 |= 0x00800000
  if ( self . smr_invoked_bit ) : Iii1 |= 0x00400000
  if ( self . mobile_node ) : Iii1 |= 0x00200000
  if ( self . xtr_id_present ) : Iii1 |= 0x00100000
  if ( self . local_xtr ) : Iii1 |= 0x00004000
  if ( self . dont_reply_bit ) : Iii1 |= 0x00002000
  if 44 - 44: Ii1I * OoOoOO00 + Oo0Ooo . i11iIiiIii + i1IIi
  Oo00oo = struct . pack ( "I" , socket . htonl ( Iii1 ) )
  Oo00oo += struct . pack ( "Q" , self . nonce )
  if 83 - 83: iII111i + OoOoOO00 % ooOoO0o
  if 76 - 76: i1IIi % I1IiiI + i1IIi
  if 2 - 2: iII111i + iII111i
  if 51 - 51: OoooooooOO + i11iIiiIii
  if 57 - 57: Oo0Ooo % o0oOOo0O0Ooo
  if 99 - 99: o0oOOo0O0Ooo / i11iIiiIii / II111iiii + OOooOOo . i1IIi + OoOoOO00
  # If a private-key file is configured, sign the request and send the
  # signature JSON in place of the source-EID; a pre-computed signature
  # is re-encoded the same way.
  II11 = False
  iIiOOO0oo0OO0o0 = self . privkey_filename
  if ( iIiOOO0oo0OO0o0 != None and os . path . exists ( iIiOOO0oo0OO0o0 ) ) :
   OOoO0 = open ( iIiOOO0oo0OO0o0 , "r" ) ; Ooo00o000o = OOoO0 . read ( ) ; OOoO0 . close ( )
   try :
    Ooo00o000o = ecdsa . SigningKey . from_pem ( Ooo00o000o )
   except :
    return ( None )
   if 78 - 78: I11i - I1IiiI * IiII
   iio0O0OOo = self . sign_map_request ( Ooo00o000o )
   II11 = True
  elif ( self . map_request_signature != None ) :
   IIIII1iII1 = binascii . b2a_base64 ( self . map_request_signature )
   iio0O0OOo = { "source-eid" : self . source_eid . print_address ( ) ,
   "signature-eid" : self . signature_eid . print_address ( ) ,
   "signature" : IIIII1iII1 }
   iio0O0OOo = json . dumps ( iio0O0OOo )
   II11 = True
  if 44 - 44: I11i
  if ( II11 ) :
   Oo00oo += self . encode_json ( iio0O0OOo )
  else :
   # No signature: encode the source-EID itself, as an LCAF when it
   # carries a non-zero instance-id.
   if ( self . source_eid . instance_id != 0 ) :
    Oo00oo += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
    Oo00oo += self . source_eid . lcaf_encode_iid ( )
   else :
    Oo00oo += struct . pack ( "H" , socket . htons ( self . source_eid . afi ) )
    Oo00oo += self . source_eid . pack_address ( )
  if 3 - 3: iIii1I11I1II1 - i1IIi / iII111i + i1IIi + O0
  if 18 - 18: iIii1I11I1II1 . iII111i % OOooOOo % oO0o + iIii1I11I1II1 * OoooooooOO
  if 78 - 78: IiII
  if 38 - 38: OoO0O00 * I1ii11iIi11i
  if 4 - 4: OoO0O00 . I1ii11iIi11i
  if 21 - 21: i11iIiiIii / OoO0O00 / I1ii11iIi11i * O0 - II111iiii * OOooOOo
  if 27 - 27: o0oOOo0O0Ooo . OoOoOO00 * Ii1I * iII111i * O0
  # For RLOC-probes, reuse any stored encap crypto-keys for this
  # "addr:port" so the first ITR-RLOC can be sent as a security LCAF.
  if ( probe_dest ) :
   if ( probe_port == 0 ) : probe_port = LISP_DATA_PORT
   O0O0 = probe_dest . print_address_no_iid ( ) + ":" + str ( probe_port )
   if 93 - 93: IiII % I1Ii111 % II111iiii
   if ( O0O0 in lisp_crypto_keys_by_rloc_encap ) :
    self . keys = lisp_crypto_keys_by_rloc_encap [ O0O0 ]
  if 20 - 20: OoooooooOO * I1Ii111
  if 38 - 38: iII111i . OoooooooOO
  if 28 - 28: I1Ii111 * i1IIi . I1ii11iIi11i
  if 75 - 75: O0 / oO0o * ooOoO0o - OOooOOo / i1IIi
  if 61 - 61: I11i
  if 100 - 100: O0 - iIii1I11I1II1 * Oo0Ooo
  if 35 - 35: ooOoO0o
  # Encode each ITR-RLOC; when data-plane security is on, the first one
  # is a key-exchange LCAF keyed by this request's nonce.
  for ii1oO0Oo in self . itr_rlocs :
   if ( lisp_data_plane_security and self . itr_rlocs . index ( ii1oO0Oo ) == 0 ) :
    if ( self . keys == None or self . keys [ 1 ] == None ) :
     iI1iiiiiii = lisp_keys ( 1 )
     self . keys = [ None , iI1iiiiiii , None , None ]
    if 57 - 57: OoO0O00 . Oo0Ooo + I1IiiI
    iI1iiiiiii = self . keys [ 1 ]
    iI1iiiiiii . add_key_by_nonce ( self . nonce )
    Oo00oo += iI1iiiiiii . encode_lcaf ( ii1oO0Oo )
   else :
    Oo00oo += struct . pack ( "H" , socket . htons ( ii1oO0Oo . afi ) )
    Oo00oo += ii1oO0Oo . pack_address ( )
  if 18 - 18: I1IiiI - I1ii11iIi11i * I11i / i11iIiiIii - o0oOOo0O0Ooo % o0oOOo0O0Ooo
  if 31 - 31: I11i
  if 100 - 100: i11iIiiIii * i11iIiiIii . iIii1I11I1II1 % iII111i * I1ii11iIi11i
  if 17 - 17: Ii1I * IiII * i11iIiiIii / I1ii11iIi11i / i11iIiiIii
  if 23 - 23: OoooooooOO + i11iIiiIii / Oo0Ooo / iII111i . iII111i * I1IiiI
  if 98 - 98: IiII
  # Append the telemetry JSON (with the ITR timestamp filled in) as the
  # extra ITR-RLOC slot reserved above.
  if ( ii1I11 != None ) :
   Oo0OO0000oooo = str ( time . time ( ) )
   ii1I11 = lisp_encode_telemetry ( ii1I11 , io = Oo0OO0000oooo )
   self . json_telemetry = ii1I11
   Oo00oo += self . encode_json ( ii1I11 )
  if 23 - 23: I11i / i1IIi * OoO0O00
  if 51 - 51: OOooOOo - OoooooooOO / OoooooooOO % OoooooooOO
  # Mask-length is only meaningful for binary (packable) EIDs.
  oOo = 0 if self . target_eid . is_binary ( ) == False else self . target_eid . mask_len
  if 54 - 54: o0oOOo0O0Ooo % iIii1I11I1II1 - iII111i
  if 79 - 79: IiII . Ii1I . Oo0Ooo % oO0o * oO0o
  # Subscribe requests set the high bit of the reserved byte and carry
  # an xtr-id trailer; allocate a random 128-bit xtr-id on first use.
  oOoO = 0
  if ( self . subscribe_bit ) :
   oOoO = 0x80
   self . xtr_id_present = True
   if ( self . xtr_id == None ) :
    self . xtr_id = random . randint ( 0 , ( 2 ** 128 ) - 1 )
  if 31 - 31: I1Ii111 . I1ii11iIi11i + IiII
  if 65 - 65: I1IiiI * O0 * Oo0Ooo . O0
  if 23 - 23: OoO0O00 / IiII * II111iiii
  II111I11iI = "BB"
  Oo00oo += struct . pack ( II111I11iI , oOoO , oOo )
  if 32 - 32: I1Ii111 - iIii1I11I1II1 / I11i * OoO0O00 * OoO0O00
  # Target EID: (S,G) LCAF for multicast, instance-id/geo LCAF when
  # needed, otherwise the plain AFI-encoded address.
  if ( self . target_group . is_null ( ) == False ) :
   Oo00oo += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
   Oo00oo += self . target_eid . lcaf_encode_sg ( self . target_group )
  elif ( self . target_eid . instance_id != 0 or
   self . target_eid . is_geo_prefix ( ) ) :
   Oo00oo += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
   Oo00oo += self . target_eid . lcaf_encode_iid ( )
  else :
   Oo00oo += struct . pack ( "H" , socket . htons ( self . target_eid . afi ) )
   Oo00oo += self . target_eid . pack_address ( )
  if 77 - 77: I1ii11iIi11i
  if 16 - 16: II111iiii - II111iiii * I11i / OOooOOo . IiII
  if 36 - 36: I11i / iIii1I11I1II1
  if 59 - 59: i1IIi
  if 85 - 85: I1Ii111 + iIii1I11I1II1 + ooOoO0o + Oo0Ooo
  if ( self . subscribe_bit ) : Oo00oo = self . encode_xtr_id ( Oo00oo )
  return ( Oo00oo )
if 75 - 75: O0 . I11i - Ii1I / I1Ii111 / I1ii11iIi11i % I11i
if 97 - 97: OoOoOO00 - OoO0O00
 def lcaf_decode_json ( self , packet ) :
  # Parse a JSON-type LCAF.  Returns the remaining packet bytes on
  # success, the ORIGINAL packet unconsumed when this is not a JSON
  # LCAF (or the JSON carries no usable signature fields), or None on a
  # malformed packet.  Side effects: stores telemetry JSON, or the
  # source-EID, signature-EID and signature parsed from the JSON.
  II111I11iI = "BBBBHH"
  oO000 = struct . calcsize ( II111I11iI )
  if ( len ( packet ) < oO000 ) : return ( None )
  if 64 - 64: i1IIi / OoooooooOO / I1ii11iIi11i - Oo0Ooo + oO0o
  iI1i1II11I , OoO0o0oOOoOoo , IIiiIIi1II11 , I1iIiiiI1II1 , ii111iIii1 , oo0O0OO = struct . unpack ( II111I11iI , packet [ : oO000 ] )
  if 94 - 94: ooOoO0o % I1ii11iIi11i + OoooooooOO
  if 77 - 77: O0 - Ii1I * II111iiii / I1ii11iIi11i / Ii1I - oO0o
  # Not a JSON-type LCAF: hand the packet back unconsumed.
  if ( IIiiIIi1II11 != LISP_LCAF_JSON_TYPE ) : return ( packet )
  if 66 - 66: OoO0O00 % Oo0Ooo . II111iiii
  if 84 - 84: ooOoO0o * OoooooooOO + O0
  if 84 - 84: i1IIi . I11i . i1IIi . Oo0Ooo
  if 21 - 21: II111iiii . O0 + Oo0Ooo - i11iIiiIii
  # Length sanity: LCAF length must equal the JSON length plus 4.
  ii111iIii1 = socket . ntohs ( ii111iIii1 )
  oo0O0OO = socket . ntohs ( oo0O0OO )
  packet = packet [ oO000 : : ]
  if ( len ( packet ) < ii111iIii1 ) : return ( None )
  if ( ii111iIii1 != oo0O0OO + 4 ) : return ( None )
  if 5 - 5: iIii1I11I1II1 * i11iIiiIii + OoO0O00 + I11i * O0 % ooOoO0o
  if 88 - 88: o0oOOo0O0Ooo / i11iIiiIii * I1ii11iIi11i
  if 23 - 23: O0 / iII111i
  if 66 - 66: i1IIi % OoooooooOO * i11iIiiIii + oO0o * O0 / OoO0O00
  # Slice out the JSON payload.
  iio0O0OOo = packet [ 0 : oo0O0OO ]
  packet = packet [ oo0O0OO : : ]
  if 14 - 14: I1IiiI . IiII
  if 29 - 29: OoooooooOO / IiII + OoOoOO00 - I1Ii111 + IiII . i1IIi
  if 26 - 26: i11iIiiIii - II111iiii
  if 43 - 43: I1IiiI
  # Telemetry JSON is stored verbatim, not parsed as a signature.
  if ( lisp_is_json_telemetry ( iio0O0OOo ) != None ) :
   self . json_telemetry = iio0O0OOo
  if 35 - 35: ooOoO0o + OoOoOO00 * OoooooooOO - II111iiii
  if 19 - 19: i1IIi / Ii1I / OoOoOO00 . I1IiiI / Ii1I % o0oOOo0O0Ooo
  if 39 - 39: ooOoO0o - OoooooooOO
  if 88 - 88: i1IIi + iIii1I11I1II1 * i11iIiiIii - OoooooooOO % o0oOOo0O0Ooo
  if 74 - 74: ooOoO0o - i11iIiiIii
  # The trailing half-word after the JSON must be a zero AFI.
  II111I11iI = "H"
  oO000 = struct . calcsize ( II111I11iI )
  Oooo000 = struct . unpack ( II111I11iI , packet [ : oO000 ] ) [ 0 ]
  packet = packet [ oO000 : : ]
  if ( Oooo000 != 0 ) : return ( packet )
  if 34 - 34: IiII + I1Ii111 + Oo0Ooo / II111iiii
  if ( self . json_telemetry != None ) : return ( packet )
  if 33 - 33: Ii1I . i1IIi - II111iiii - OoO0O00
  if 31 - 31: I11i - OoOoOO00 / o0oOOo0O0Ooo * OoOoOO00 / Oo0Ooo + o0oOOo0O0Ooo
  if 46 - 46: IiII * OoO0O00 / OOooOOo + Oo0Ooo
  if 24 - 24: ooOoO0o % OOooOOo . O0 * Oo0Ooo
  try :
   iio0O0OOo = json . loads ( iio0O0OOo )
  except :
   return ( None )
  if 52 - 52: O0 . I1Ii111 + iII111i / i11iIiiIii
  if 52 - 52: oO0o % Oo0Ooo * II111iiii
  if 24 - 24: i11iIiiIii * i1IIi * i1IIi
  if 27 - 27: i1IIi - oO0o + OOooOOo
  if 3 - 3: IiII % I1Ii111 . OoooooooOO
  # Infer source-EID AFI from its textual form: dotted-quad IPv4 or
  # 8-group IPv6 only.
  if ( "source-eid" not in iio0O0OOo ) : return ( packet )
  i1I1I1IIIi11 = iio0O0OOo [ "source-eid" ]
  Oooo000 = LISP_AFI_IPV4 if i1I1I1IIIi11 . count ( "." ) == 3 else LISP_AFI_IPV6 if i1I1I1IIIi11 . count ( ":" ) == 7 else None
  if 81 - 81: OoOoOO00
  if ( Oooo000 == None ) :
   lprint ( "Bad JSON 'source-eid' value: {}" . format ( i1I1I1IIIi11 ) )
   return ( None )
  if 21 - 21: iII111i / OOooOOo % IiII
  if 51 - 51: I11i + ooOoO0o / I1IiiI
  self . source_eid . afi = Oooo000
  self . source_eid . store_address ( i1I1I1IIIi11 )
  if 3 - 3: iIii1I11I1II1 / OOooOOo % oO0o . Ii1I - Ii1I
  # Signature-EIDs must be IPv6-formatted (crypto-hash EIDs).
  if ( "signature-eid" not in iio0O0OOo ) : return ( packet )
  i1I1I1IIIi11 = iio0O0OOo [ "signature-eid" ]
  if ( i1I1I1IIIi11 . count ( ":" ) != 7 ) :
   lprint ( "Bad JSON 'signature-eid' value: {}" . format ( i1I1I1IIIi11 ) )
   return ( None )
  if 55 - 55: i11iIiiIii % OoooooooOO + O0
  if 7 - 7: ooOoO0o - i11iIiiIii * iII111i / Ii1I - o0oOOo0O0Ooo
  self . signature_eid . afi = LISP_AFI_IPV6
  self . signature_eid . store_address ( i1I1I1IIIi11 )
  if 62 - 62: o0oOOo0O0Ooo - iIii1I11I1II1 . I11i . Ii1I * Ii1I
  if ( "signature" not in iio0O0OOo ) : return ( packet )
  IIIII1iII1 = binascii . a2b_base64 ( iio0O0OOo [ "signature" ] )
  self . map_request_signature = IIIII1iII1
  return ( packet )
if 24 - 24: I11i
if 93 - 93: I1IiiI % OoO0O00 / i11iIiiIii / I11i
 def decode ( self , packet , source , port ) :
  # Parse a received Map-Request: header flags and nonce, optional
  # xtr-id trailer, source-EID (plain, IID-LCAF, or signature JSON),
  # the ITR-RLOC list (with optional security-key LCAFs that drive
  # decap rekeying), and finally the target EID record.  Returns the
  # unparsed remainder of packet, or None on any malformed field.
  II111I11iI = "I"
  oO000 = struct . calcsize ( II111I11iI )
  if ( len ( packet ) < oO000 ) : return ( None )
  if 60 - 60: ooOoO0o - Ii1I . I1IiiI * oO0o * i11iIiiIii
  Iii1 = struct . unpack ( II111I11iI , packet [ : oO000 ] )
  Iii1 = Iii1 [ 0 ]
  packet = packet [ oO000 : : ]
  if 29 - 29: OoO0O00 - Oo0Ooo . oO0o / OoO0O00 % i11iIiiIii
  II111I11iI = "Q"
  oO000 = struct . calcsize ( II111I11iI )
  if ( len ( packet ) < oO000 ) : return ( None )
  if 26 - 26: ooOoO0o . I1Ii111 / II111iiii % Ii1I
  oOooo0oOOOO = struct . unpack ( II111I11iI , packet [ : oO000 ] )
  packet = packet [ oO000 : : ]
  if 82 - 82: OOooOOo % O0 % iIii1I11I1II1 % IiII + i11iIiiIii
  # Split the first long-word into the individual header flags.
  Iii1 = socket . ntohl ( Iii1 )
  self . auth_bit = True if ( Iii1 & 0x08000000 ) else False
  self . map_data_present = True if ( Iii1 & 0x04000000 ) else False
  self . rloc_probe = True if ( Iii1 & 0x02000000 ) else False
  self . smr_bit = True if ( Iii1 & 0x01000000 ) else False
  self . pitr_bit = True if ( Iii1 & 0x00800000 ) else False
  self . smr_invoked_bit = True if ( Iii1 & 0x00400000 ) else False
  self . mobile_node = True if ( Iii1 & 0x00200000 ) else False
  self . xtr_id_present = True if ( Iii1 & 0x00100000 ) else False
  self . local_xtr = True if ( Iii1 & 0x00004000 ) else False
  self . dont_reply_bit = True if ( Iii1 & 0x00002000 ) else False
  self . itr_rloc_count = ( ( Iii1 >> 8 ) & 0x1f )
  self . record_count = Iii1 & 0xff
  self . nonce = oOooo0oOOOO [ 0 ]
  if 64 - 64: i1IIi / IiII . IiII - I1Ii111 % OOooOOo . II111iiii
  if 78 - 78: I1Ii111 - O0 - I1Ii111 . iIii1I11I1II1 % I1ii11iIi11i . OoooooooOO
  if 64 - 64: IiII
  if 21 - 21: o0oOOo0O0Ooo - ooOoO0o * OoooooooOO . OoooooooOO
  # The xtr-id rides in the last 16 bytes of the packet.
  if ( self . xtr_id_present ) :
   if ( self . decode_xtr_id ( packet ) == False ) : return ( None )
  if 17 - 17: OOooOOo - iII111i % I1IiiI * OOooOOo * iIii1I11I1II1 . o0oOOo0O0Ooo
  if 58 - 58: oO0o - II111iiii + O0
  oO000 = struct . calcsize ( "H" )
  if ( len ( packet ) < oO000 ) : return ( None )
  if 54 - 54: iIii1I11I1II1 - IiII - IiII
  Oooo000 = struct . unpack ( "H" , packet [ : oO000 ] )
  self . source_eid . afi = socket . ntohs ( Oooo000 [ 0 ] )
  packet = packet [ oO000 : : ]
  if 18 - 18: i11iIiiIii + iIii1I11I1II1 . i11iIiiIii
  # An LCAF source-EID is either an instance-id LCAF or a signature
  # JSON LCAF; try IID first, then fall back to JSON.
  if ( self . source_eid . afi == LISP_AFI_LCAF ) :
   o00O00oo0 = packet
   packet = self . source_eid . lcaf_decode_iid ( packet )
   if ( packet == None ) :
    packet = self . lcaf_decode_json ( o00O00oo0 )
    if ( packet == None ) : return ( None )
   if 39 - 39: I1IiiI - iII111i - i11iIiiIii + OoooooooOO
  elif ( self . source_eid . afi != LISP_AFI_NONE ) :
   packet = self . source_eid . unpack_address ( packet )
   if ( packet == None ) : return ( None )
  if 74 - 74: OOooOOo - II111iiii
  self . source_eid . mask_len = self . source_eid . host_mask_len ( )
  if 66 - 66: i11iIiiIii + I1Ii111 . ooOoO0o
  # LISP_NO_CRYPTO in the environment disables all key processing.
  i1II = ( os . getenv ( "LISP_NO_CRYPTO" ) != None )
  self . itr_rlocs = [ ]
  OO0o = self . itr_rloc_count + 1
  if 92 - 92: iIii1I11I1II1 + Ii1I
  while ( OO0o != 0 ) :
   oO000 = struct . calcsize ( "H" )
   if ( len ( packet ) < oO000 ) : return ( None )
   if 69 - 69: Oo0Ooo
   Oooo000 = socket . ntohs ( struct . unpack ( "H" , packet [ : oO000 ] ) [ 0 ] )
   ii1oO0Oo = lisp_address ( LISP_AFI_NONE , "" , 32 , 0 )
   ii1oO0Oo . afi = Oooo000
   if 70 - 70: O0 - OoO0O00 - Oo0Ooo
   if 95 - 95: IiII * II111iiii % o0oOOo0O0Ooo * Oo0Ooo . I11i
   if 46 - 46: II111iiii - OoO0O00 % ooOoO0o
   if 97 - 97: OoO0O00 . OoOoOO00
   if 78 - 78: I1ii11iIi11i + I1ii11iIi11i . OoOoOO00 - IiII * iIii1I11I1II1 * O0
   # An LCAF slot may be telemetry JSON; if lcaf_decode_json returned
   # its input unconsumed it was not JSON, so restore the full packet
   # for the security-LCAF path below.
   if ( ii1oO0Oo . afi == LISP_AFI_LCAF ) :
    i1iiI11i1 = packet
    IiIIiI = packet [ oO000 : : ]
    packet = self . lcaf_decode_json ( IiIIiI )
    if ( packet == None ) : return ( None )
    if ( packet == IiIIiI ) : packet = i1iiI11i1
   if 8 - 8: I1ii11iIi11i * IiII / Oo0Ooo
   if 99 - 99: OOooOOo * I1Ii111 . ooOoO0o - i1IIi - I11i % IiII
   if 40 - 40: OoOoOO00 % I1Ii111 / I1IiiI + i1IIi
   if 53 - 53: I1Ii111
   if 81 - 81: O0 % o0oOOo0O0Ooo / Ii1I / ooOoO0o . i11iIiiIii + IiII
   if 29 - 29: ooOoO0o
   # Plain (non-LCAF) ITR-RLOC: the sender offered no keys, so any
   # stored decap state for this RLOC is invalidated.
   if ( ii1oO0Oo . afi != LISP_AFI_LCAF ) :
    if ( len ( packet ) < ii1oO0Oo . addr_length ( ) ) : return ( None )
    packet = ii1oO0Oo . unpack_address ( packet [ oO000 : : ] )
    if ( packet == None ) : return ( None )
    if 70 - 70: oO0o . O0 % I11i % IiII - I11i * I1ii11iIi11i
    if ( i1II ) :
     self . itr_rlocs . append ( ii1oO0Oo )
     OO0o -= 1
     continue
    if 22 - 22: i1IIi
    if 82 - 82: oO0o . iIii1I11I1II1 - I1ii11iIi11i
    O0O0 = lisp_build_crypto_decap_lookup_key ( ii1oO0Oo , port )
    if 55 - 55: Oo0Ooo % Ii1I . iIii1I11I1II1 * I1Ii111
    if 33 - 33: O0 - I1IiiI / I1ii11iIi11i / OoO0O00 + iII111i - oO0o
    if 27 - 27: I1Ii111 + ooOoO0o - I1Ii111 % i11iIiiIii * Oo0Ooo * o0oOOo0O0Ooo
    if 88 - 88: OOooOOo
    if 25 - 25: OoO0O00 + o0oOOo0O0Ooo . ooOoO0o - Ii1I . oO0o * Ii1I
    # Behind a NAT, a private RLOC is replaced by the packet source.
    if ( lisp_nat_traversal and ii1oO0Oo . is_private_address ( ) and source ) : ii1oO0Oo = source
    if 85 - 85: i1IIi
    oooiiIiIIIi1 = lisp_crypto_keys_by_rloc_decap
    if ( O0O0 in oooiiIiIIIi1 ) : oooiiIiIIIi1 . pop ( O0O0 )
    if 35 - 35: I1ii11iIi11i . OOooOOo
    if 97 - 97: I1IiiI
    if 63 - 63: O0 - OoOoOO00 / i11iIiiIii / OoooooooOO / ooOoO0o / II111iiii
    if 45 - 45: II111iiii . OoO0O00 + OoO0O00 * iIii1I11I1II1
    if 23 - 23: IiII * OoOoOO00 % Ii1I / Ii1I - ooOoO0o - OOooOOo
    if 86 - 86: OOooOOo . OoooooooOO * I1IiiI - Oo0Ooo / i11iIiiIii * iII111i
    # Tell the data-plane to drop the now-stale decap key.
    lisp_write_ipc_decap_key ( O0O0 , None )
    if 56 - 56: I1IiiI . I11i % iII111i
   elif ( self . json_telemetry == None ) :
    if 33 - 33: I11i / OOooOOo - OOooOOo / i11iIiiIii * OoOoOO00 + O0
    if 2 - 2: i11iIiiIii % I1IiiI
    if 90 - 90: II111iiii
    if 2 - 2: Ii1I - OoooooooOO - i11iIiiIii % Oo0Ooo / Ii1I
    # Security-key LCAF: first decode with a generic key record to
    # learn the cipher-suite, then re-decode with matching key flags.
    i1iiI11i1 = packet
    OO = lisp_keys ( 1 )
    packet = OO . decode_lcaf ( i1iiI11i1 , 0 )
    if 92 - 92: I1Ii111 + OOooOOo - OoO0O00 . o0oOOo0O0Ooo
    if ( packet == None ) : return ( None )
    if 16 - 16: I1IiiI - ooOoO0o
    if 39 - 39: i1IIi % i1IIi / iIii1I11I1II1 % OoooooooOO . ooOoO0o
    if 30 - 30: o0oOOo0O0Ooo - Ii1I . i11iIiiIii + oO0o % ooOoO0o + I1ii11iIi11i
    if 5 - 5: OOooOOo . iII111i . oO0o % IiII * O0
    i1I1IiiIIIiiI = [ LISP_CS_25519_CBC , LISP_CS_25519_GCM ,
    LISP_CS_25519_CHACHA ]
    if ( OO . cipher_suite in i1I1IiiIIIiiI ) :
     if ( OO . cipher_suite == LISP_CS_25519_CBC or
     OO . cipher_suite == LISP_CS_25519_GCM ) :
      Ooo00o000o = lisp_keys ( 1 , do_poly = False , do_chacha = False )
     if 20 - 20: Oo0Ooo . I1IiiI . I1IiiI / OoooooooOO . OoooooooOO + iIii1I11I1II1
     if ( OO . cipher_suite == LISP_CS_25519_CHACHA ) :
      Ooo00o000o = lisp_keys ( 1 , do_poly = True , do_chacha = True )
     if 60 - 60: OoOoOO00 / ooOoO0o % iIii1I11I1II1
    else :
     Ooo00o000o = lisp_keys ( 1 , do_poly = False , do_curve = False ,
     do_chacha = False )
    if 32 - 32: i11iIiiIii + II111iiii + II111iiii % I11i
    packet = Ooo00o000o . decode_lcaf ( i1iiI11i1 , 0 )
    if ( packet == None ) : return ( None )
    if 96 - 96: o0oOOo0O0Ooo
    # The RLOC address follows the security LCAF.
    if ( len ( packet ) < oO000 ) : return ( None )
    Oooo000 = struct . unpack ( "H" , packet [ : oO000 ] ) [ 0 ]
    ii1oO0Oo . afi = socket . ntohs ( Oooo000 )
    if ( len ( packet ) < ii1oO0Oo . addr_length ( ) ) : return ( None )
    if 90 - 90: IiII * Ii1I . I11i / I1ii11iIi11i % I11i
    packet = ii1oO0Oo . unpack_address ( packet [ oO000 : : ] )
    if ( packet == None ) : return ( None )
    if 58 - 58: iII111i % iIii1I11I1II1 * OoO0O00
    if ( i1II ) :
     self . itr_rlocs . append ( ii1oO0Oo )
     OO0o -= 1
     continue
    if 25 - 25: I1Ii111 - ooOoO0o + Oo0Ooo . I1IiiI % iIii1I11I1II1
    if 49 - 49: i1IIi + OoO0O00 + iII111i / Oo0Ooo
    O0O0 = lisp_build_crypto_decap_lookup_key ( ii1oO0Oo , port )
    if 5 - 5: i11iIiiIii + I11i . IiII
    IiIi1 = None
    if ( lisp_nat_traversal and ii1oO0Oo . is_private_address ( ) and source ) : ii1oO0Oo = source
    if 59 - 59: O0 * oO0o % iIii1I11I1II1 . oO0o
    if 34 - 34: OoOoOO00 % OoOoOO00 + i1IIi - oO0o . OoooooooOO
    # Look up any decap keys already stored for this RLOC.
    if ( O0O0 in lisp_crypto_keys_by_rloc_decap ) :
     iI1iiiiiii = lisp_crypto_keys_by_rloc_decap [ O0O0 ]
     IiIi1 = iI1iiiiiii [ 1 ] if iI1iiiiiii and iI1iiiiiii [ 1 ] else None
    if 40 - 40: I11i
    if 44 - 44: ooOoO0o
    # Keep stored keys when the sender's key material is unchanged;
    # otherwise adopt the remote rekey, carrying over our keypair.
    Iii11Ii = True
    if ( IiIi1 ) :
     if ( IiIi1 . compare_keys ( Ooo00o000o ) ) :
      self . keys = [ None , IiIi1 , None , None ]
      lprint ( "Maintain stored decap-keys for RLOC {}" . format ( red ( O0O0 , False ) ) )
     else :
      Iii11Ii = False
      oo000O = bold ( "Remote decap-rekeying" , False )
      lprint ( "{} for RLOC {}" . format ( oo000O , red ( O0O0 ,
      False ) ) )
      Ooo00o000o . copy_keypair ( IiIi1 )
      Ooo00o000o . uptime = IiIi1 . uptime
      IiIi1 = None
    if 41 - 41: OOooOOo - OoOoOO00 . I1IiiI + i11iIiiIii + OoO0O00 * iII111i
    if 85 - 85: OoO0O00 + II111iiii
    if 87 - 87: OoO0O00
    # New or rekeyed state: compute the shared secret unless this
    # process is neither an ETR nor an RTR (then just ignore keys).
    if ( IiIi1 == None ) :
     self . keys = [ None , Ooo00o000o , None , None ]
     if ( lisp_i_am_etr == False and lisp_i_am_rtr == False ) :
      Ooo00o000o . local_public_key = None
      lprint ( "{} for {}" . format ( bold ( "Ignoring decap-keys" ,
      False ) , red ( O0O0 , False ) ) )
     elif ( Ooo00o000o . remote_public_key != None ) :
      if ( Iii11Ii ) :
       lprint ( "{} for RLOC {}" . format ( bold ( "New decap-keying" , False ) ,
       # OoooooooOO . O0 % i11iIiiIii - OoooooooOO + OoO0O00 . OOooOOo
       red ( O0O0 , False ) ) )
      if 64 - 64: OoOoOO00
      Ooo00o000o . compute_shared_key ( "decap" )
      Ooo00o000o . add_key_by_rloc ( O0O0 , False )
    if 10 - 10: OoO0O00 + iIii1I11I1II1 . II111iiii
   if 8 - 8: OoO0O00 / II111iiii
   if 71 - 71: Oo0Ooo % iII111i . ooOoO0o % O0 + iIii1I11I1II1 % I1Ii111
   if 8 - 8: I1ii11iIi11i - ooOoO0o + iII111i * OoO0O00
   self . itr_rlocs . append ( ii1oO0Oo )
   OO0o -= 1
  if 22 - 22: I1ii11iIi11i * OoooooooOO
  if 33 - 33: OOooOOo / o0oOOo0O0Ooo + OOooOOo . i11iIiiIii
  # Target EID record: reserved/subscribe byte, mask-length, AFI.
  oO000 = struct . calcsize ( "BBH" )
  if ( len ( packet ) < oO000 ) : return ( None )
  if 19 - 19: OoOoOO00 % OoOoOO00
  oOoO , oOo , Oooo000 = struct . unpack ( "BBH" , packet [ : oO000 ] )
  self . subscribe_bit = ( oOoO & 0x80 )
  self . target_eid . afi = socket . ntohs ( Oooo000 )
  packet = packet [ oO000 : : ]
  if 74 - 74: i11iIiiIii / I1ii11iIi11i - oO0o . OoO0O00
  self . target_eid . mask_len = oOo
  if ( self . target_eid . afi == LISP_AFI_LCAF ) :
   packet , i1II1iI1ii1 = self . target_eid . lcaf_decode_eid ( packet )
   if ( packet == None ) : return ( None )
   if ( i1II1iI1ii1 ) : self . target_group = i1II1iI1ii1
  else :
   packet = self . target_eid . unpack_address ( packet )
   if ( packet == None ) : return ( None )
   packet = packet [ oO000 : : ]
  if 93 - 93: iII111i % I1Ii111
  return ( packet )
if 90 - 90: I1ii11iIi11i - OoooooooOO / OoOoOO00
if 24 - 24: I11i . OOooOOo * i1IIi . I1ii11iIi11i / ooOoO0o / O0
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . target_eid , self . target_group ) )
if 62 - 62: o0oOOo0O0Ooo % II111iiii
if 22 - 22: oO0o - o0oOOo0O0Ooo
def encode_xtr_id ( self , packet ) :
II1i1 = self . xtr_id >> 64
ooO0OoOO0 = self . xtr_id & 0xffffffffffffffff
II1i1 = byte_swap_64 ( II1i1 )
ooO0OoOO0 = byte_swap_64 ( ooO0OoOO0 )
packet += struct . pack ( "QQ" , II1i1 , ooO0OoOO0 )
return ( packet )
if 89 - 89: OOooOOo
if 34 - 34: iII111i . OOooOOo
def decode_xtr_id ( self , packet ) :
oO000 = struct . calcsize ( "QQ" )
if ( len ( packet ) < oO000 ) : return ( None )
packet = packet [ len ( packet ) - oO000 : : ]
II1i1 , ooO0OoOO0 = struct . unpack ( "QQ" , packet [ : oO000 ] )
II1i1 = byte_swap_64 ( II1i1 )
ooO0OoOO0 = byte_swap_64 ( ooO0OoOO0 )
self . xtr_id = ( II1i1 << 64 ) | ooO0OoOO0
return ( True )
if 13 - 13: OoO0O00 * OOooOOo + oO0o
if 21 - 21: i11iIiiIii . Ii1I % i1IIi * Ii1I . oO0o + Ii1I
if 92 - 92: i1IIi + OoO0O00 * I11i
if 70 - 70: Oo0Ooo
if 93 - 93: iII111i . I1ii11iIi11i . Oo0Ooo . oO0o . OoooooooOO
if 51 - 51: O0 - iII111i
if 65 - 65: O0 / II111iiii * IiII % Ii1I + o0oOOo0O0Ooo
if 43 - 43: I1Ii111 + OoO0O00 * OoooooooOO
if 85 - 85: iII111i + OOooOOo
if 36 - 36: OoO0O00 % II111iiii * O0 + II111iiii - oO0o - i1IIi
if 53 - 53: Ii1I - OOooOOo
if 75 - 75: iII111i % O0 - I11i - I1ii11iIi11i + I1IiiI - I1IiiI
if 87 - 87: i1IIi % Ii1I % i1IIi + iIii1I11I1II1
if 23 - 23: iIii1I11I1II1 * I11i . I1Ii111 - o0oOOo0O0Ooo
if 66 - 66: I1IiiI * I1Ii111 / i11iIiiIii / OOooOOo
if 19 - 19: ooOoO0o % iIii1I11I1II1 * OoooooooOO
if 60 - 60: I1Ii111 * iII111i / OoooooooOO * Oo0Ooo
if 47 - 47: iII111i + o0oOOo0O0Ooo % iIii1I11I1II1 * OoOoOO00
if 65 - 65: OOooOOo . II111iiii * i11iIiiIii + OOooOOo
if 99 - 99: I1ii11iIi11i % Oo0Ooo
if 31 - 31: o0oOOo0O0Ooo - II111iiii * OOooOOo . OOooOOo - oO0o
if 57 - 57: OOooOOo / i11iIiiIii / I1Ii111 - Oo0Ooo . iIii1I11I1II1
if 84 - 84: IiII
if 42 - 42: O0 . I1Ii111 / I11i
if 69 - 69: OoOoOO00 / I1Ii111 * I1IiiI
if 76 - 76: O0 + II111iiii * OoO0O00
if 1 - 1: o0oOOo0O0Ooo
if 34 - 34: o0oOOo0O0Ooo + OOooOOo . OoO0O00 + I1IiiI + OoooooooOO
if 90 - 90: Ii1I / OoOoOO00 - iIii1I11I1II1 / i1IIi * I1Ii111 - ooOoO0o
if 2 - 2: iII111i * I11i * ooOoO0o + i11iIiiIii + oO0o
if 81 - 81: o0oOOo0O0Ooo * OoO0O00
if 18 - 18: i11iIiiIii / o0oOOo0O0Ooo - oO0o . I11i * i1IIi
class lisp_map_reply ( object ) :
    """
    A LISP Map-Reply message header: probe/echo-nonce/security flags,
    hop-count, record count, and the 64-bit nonce echoed from the
    Map-Request.  self.keys picks up crypto keys negotiated by nonce.
    """
    def __init__(self):
        self.rloc_probe = False
        self.echo_nonce_capable = False
        self.security = False
        self.record_count = 0
        self.hop_count = 0
        self.nonce = 0
        self.keys = None

    def print_map_reply(self):
        # Upper-case flag letters mean the flag is set.
        line = ("{} -> flags: {}{}{}, hop-count: {}, record-count: {}, "
            "nonce: 0x{}")
        lprint(line.format(bold("Map-Reply", False),
            "R" if self.rloc_probe else "r",
            "E" if self.echo_nonce_capable else "e",
            "S" if self.security else "s", self.hop_count,
            self.record_count, lisp_hex_string(self.nonce)))

    def encode(self):
        # First long-word: type, flag bits, hop-count, record count —
        # then the 8-byte nonce, all in network byte order.
        first_long = (LISP_MAP_REPLY << 28) | self.record_count
        first_long |= self.hop_count << 8
        if (self.rloc_probe): first_long |= 0x08000000
        if (self.echo_nonce_capable): first_long |= 0x04000000
        if (self.security): first_long |= 0x02000000

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        return (packet)

    def decode(self, packet):
        # Parse the first long-word; bail out on a truncated packet.
        header_size = struct.calcsize("I")
        if (len(packet) < header_size): return (None)
        first_long = struct.unpack("I", packet[:header_size])[0]
        packet = packet[header_size::]

        # Parse the 64-bit nonce.
        nonce_size = struct.calcsize("Q")
        if (len(packet) < nonce_size): return (None)
        nonce = struct.unpack("Q", packet[:nonce_size])[0]
        packet = packet[nonce_size::]

        first_long = socket.ntohl(first_long)
        self.rloc_probe = True if (first_long & 0x08000000) else False
        self.echo_nonce_capable = True if (first_long & 0x04000000) else False
        self.security = True if (first_long & 0x02000000) else False
        self.hop_count = (first_long >> 8) & 0xff
        self.record_count = first_long & 0xff
        self.nonce = nonce

        # If we started a key exchange keyed by this nonce, adopt the
        # negotiated keys and retire the nonce entry.
        if (self.nonce in lisp_crypto_keys_by_nonce):
            self.keys = lisp_crypto_keys_by_nonce[self.nonce]
            self.keys[1].delete_key_by_nonce(self.nonce)
        return (packet)
if 39 - 39: I1IiiI + I1ii11iIi11i - i11iIiiIii
if 43 - 43: iIii1I11I1II1
if 73 - 73: OoOoOO00 + o0oOOo0O0Ooo
if 58 - 58: i1IIi * I1ii11iIi11i % iII111i . OoO0O00 % IiII % I11i
if 63 - 63: I1ii11iIi11i % ooOoO0o % I1ii11iIi11i
if 71 - 71: Ii1I
if 43 - 43: o0oOOo0O0Ooo / ooOoO0o
if 88 - 88: i11iIiiIii - i1IIi + Oo0Ooo - O0
if 50 - 50: I1ii11iIi11i
if 37 - 37: oO0o % iII111i / II111iiii / OoO0O00 - IiII - ooOoO0o
if 69 - 69: I1ii11iIi11i . OoooooooOO % I1Ii111
if 79 - 79: I1IiiI - IiII . OoooooooOO - I1ii11iIi11i
if 79 - 79: OOooOOo + o0oOOo0O0Ooo % iII111i . oO0o
if 49 - 49: Ii1I + i11iIiiIii * OoOoOO00 . OoOoOO00 . I1ii11iIi11i . Oo0Ooo
if 61 - 61: I11i / OOooOOo
if 85 - 85: OoOoOO00 - I11i . OoOoOO00 . OoOoOO00
if 62 - 62: IiII % OoooooooOO * OoO0O00 + OoO0O00 % Ii1I % iII111i
if 66 - 66: I1IiiI . OOooOOo - OoO0O00 % Oo0Ooo * o0oOOo0O0Ooo - oO0o
if 68 - 68: I11i - i11iIiiIii / o0oOOo0O0Ooo + ooOoO0o / I1IiiI
if 31 - 31: I1Ii111 . OoooooooOO . i1IIi
if 65 - 65: OoO0O00 . ooOoO0o
if 12 - 12: I1Ii111 + O0 - oO0o . IiII
if 46 - 46: IiII . ooOoO0o / iII111i
if 63 - 63: II111iiii - I1ii11iIi11i * II111iiii
if 92 - 92: OoO0O00 % ooOoO0o * O0 % iIii1I11I1II1 / i1IIi / OoOoOO00
if 67 - 67: I1Ii111 + I11i + I1Ii111 . OOooOOo % o0oOOo0O0Ooo / ooOoO0o
if 78 - 78: I1ii11iIi11i . O0
if 56 - 56: oO0o - i1IIi * O0 / I11i * I1IiiI . I11i
if 54 - 54: i11iIiiIii % i1IIi + Oo0Ooo / OoOoOO00
if 26 - 26: I11i . I1ii11iIi11i
if 55 - 55: OoOoOO00 * I1Ii111 % OoO0O00 - OoO0O00
if 34 - 34: O0 * OoO0O00 - oO0o - IiII * Ii1I . II111iiii
class lisp_eid_record ( object ) :
def __init__ ( self ) :
self . record_ttl = 0
self . rloc_count = 0
self . action = 0
self . authoritative = False
self . ddt_incomplete = False
self . signature_count = 0
self . map_version = 0
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . record_ttl = 0
if 28 - 28: O0 % iII111i - i1IIi
if 49 - 49: ooOoO0o . I11i - iIii1I11I1II1
def print_prefix ( self ) :
if ( self . group . is_null ( ) ) :
return ( green ( self . eid . print_prefix ( ) , False ) )
if 41 - 41: ooOoO0o * i11iIiiIii % ooOoO0o . oO0o
return ( green ( self . eid . print_sg ( self . group ) , False ) )
if 97 - 97: oO0o - iII111i + IiII . OoOoOO00 + iIii1I11I1II1
if 75 - 75: ooOoO0o + ooOoO0o . I1Ii111 % iII111i / iIii1I11I1II1 * iII111i
def print_ttl ( self ) :
IiIi1iIIiII1i = self . record_ttl
if ( self . record_ttl & 0x80000000 ) :
IiIi1iIIiII1i = str ( self . record_ttl & 0x7fffffff ) + " secs"
elif ( ( IiIi1iIIiII1i % 60 ) == 0 ) :
IiIi1iIIiII1i = str ( old_div ( IiIi1iIIiII1i , 60 ) ) + " hours"
else :
IiIi1iIIiII1i = str ( IiIi1iIIiII1i ) + " mins"
if 87 - 87: oO0o / OoO0O00 / i11iIiiIii / OoooooooOO
return ( IiIi1iIIiII1i )
if 25 - 25: I1IiiI . Oo0Ooo + iIii1I11I1II1 * iII111i % Oo0Ooo . OoOoOO00
if 13 - 13: Ii1I - Oo0Ooo
def store_ttl ( self ) :
IiIi1iIIiII1i = self . record_ttl * 60
if ( self . record_ttl & 0x80000000 ) : IiIi1iIIiII1i = self . record_ttl & 0x7fffffff
return ( IiIi1iIIiII1i )
if 91 - 91: I1IiiI - OoooooooOO - OoooooooOO
if 69 - 69: iII111i * i11iIiiIii / i1IIi
 def print_record ( self , indent , ddt ) :
  # Log a one-line summary of this EID-record.  indent prefixes the
  # line; ddt selects the Map-Referral action-string table instead of
  # the Map-Reply one.
  Oo00Oo0o000 = ""
  oOo0ooOo = ""
  iI1i1i = bold ( "invalid-action" , False )
  if ( ddt ) :
   if ( self . action < len ( lisp_map_referral_action_string ) ) :
    iI1i1i = lisp_map_referral_action_string [ self . action ]
    iI1i1i = bold ( iI1i1i , False )
   # DDT records may also show ddt-incomplete and a signature count.
   Oo00Oo0o000 = ( ", " + bold ( "ddt-incomplete" , False ) ) if self . ddt_incomplete else ""
   if 83 - 83: O0
   oOo0ooOo = ( ", sig-count: " + str ( self . signature_count ) ) if ( self . signature_count != 0 ) else ""
   if 27 - 27: o0oOOo0O0Ooo + I1IiiI - IiII . i11iIiiIii . I1IiiI
   if 25 - 25: O0 + OOooOOo / iII111i
  else :
   if ( self . action < len ( lisp_map_reply_action_string ) ) :
    iI1i1i = lisp_map_reply_action_string [ self . action ]
    # Only highlight non-default actions.
    if ( self . action != LISP_NO_ACTION ) :
     iI1i1i = bold ( iI1i1i , False )
  if 51 - 51: I11i
  if 54 - 54: i1IIi . O0 . i1IIi . OoO0O00 + I1Ii111 - i11iIiiIii
  if 80 - 80: OoOoOO00
  if 5 - 5: I1IiiI - I1IiiI / O0 + OOooOOo - i11iIiiIii
  # Negative AFIs are internal markers; display them as LCAF.
  Oooo000 = LISP_AFI_LCAF if ( self . eid . afi < 0 ) else self . eid . afi
  IiiiI1 = ( "{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
  "{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}" )
  if 87 - 87: i1IIi - O0 % OoooooooOO * i11iIiiIii % i11iIiiIii
  lprint ( IiiiI1 . format ( indent , self . print_ttl ( ) , self . rloc_count ,
  iI1i1i , "auth" if ( self . authoritative is True ) else "non-auth" ,
  Oo00Oo0o000 , oOo0ooOo , self . map_version , Oooo000 ,
  green ( self . print_prefix ( ) , False ) ) )
if 19 - 19: ooOoO0o
if 44 - 44: I1Ii111 - i11iIiiIii * I1IiiI
 def encode ( self ) :
  # Pack this EID-record: the fixed header (ttl, rloc-count, mask-len,
  # action/auth/ddt flags, sig-count/map-version, afi) followed by the
  # EID in the appropriate encoding.  Returns the packed bytes.
  oo0oOooo0O = self . action << 13
  if ( self . authoritative ) : oo0oOooo0O |= 0x1000
  if ( self . ddt_incomplete ) : oo0oOooo0O |= 0x800
  if 2 - 2: IiII + I11i / iIii1I11I1II1 . i11iIiiIii . i1IIi * ooOoO0o
  if 14 - 14: Oo0Ooo . O0 - oO0o - i11iIiiIii
  if 8 - 8: I1IiiI / iIii1I11I1II1 / OoooooooOO / Oo0Ooo / ooOoO0o
  if 80 - 80: I11i
  # Choose the record AFI: LCAF when an instance-id, a negative
  # (internal) AFI, or an (S,G) group is present.
  Oooo000 = self . eid . afi if ( self . eid . instance_id == 0 ) else LISP_AFI_LCAF
  if ( Oooo000 < 0 ) : Oooo000 = LISP_AFI_LCAF
  IiiiIi = ( self . group . is_null ( ) == False )
  if ( IiiiIi ) : Oooo000 = LISP_AFI_LCAF
  if 81 - 81: i11iIiiIii + o0oOOo0O0Ooo / II111iiii + I11i
  OOO0O0 = ( self . signature_count << 12 ) | self . map_version
  # Mask-length is only meaningful for binary (packable) EIDs.
  oOo = 0 if self . eid . is_binary ( ) == False else self . eid . mask_len
  if 25 - 25: oO0o - OoOoOO00 / OoO0O00 / Ii1I
  Oo00oo = struct . pack ( "IBBHHH" , socket . htonl ( self . record_ttl ) ,
  self . rloc_count , oOo , socket . htons ( oo0oOooo0O ) ,
  socket . htons ( OOO0O0 ) , socket . htons ( Oooo000 ) )
  if 34 - 34: ooOoO0o + Oo0Ooo
  if 34 - 34: Ii1I / OoooooooOO + IiII % oO0o - I1IiiI + II111iiii
  if 79 - 79: iII111i / OoooooooOO % IiII
  if 78 - 78: Oo0Ooo * OOooOOo % I1ii11iIi11i + OOooOOo % Ii1I + IiII
  # (S,G) records encode EID and group together in one LCAF.
  if ( IiiiIi ) :
   Oo00oo += self . eid . lcaf_encode_sg ( self . group )
   return ( Oo00oo )
  if 58 - 58: OoooooooOO % I1Ii111 / Oo0Ooo % OoooooooOO * OoOoOO00 . OoooooooOO
  if 46 - 46: ooOoO0o * o0oOOo0O0Ooo % II111iiii / I1Ii111
  if 29 - 29: OoO0O00 - i11iIiiIii % Oo0Ooo % o0oOOo0O0Ooo
  if 30 - 30: oO0o - Ii1I % Ii1I
  if 8 - 8: IiII
  # Geo-coord EIDs with no instance-id: strip the 2-byte AFI just
  # packed and append the geo encoding in its place.
  if ( self . eid . afi == LISP_AFI_GEO_COORD and self . eid . instance_id == 0 ) :
   Oo00oo = Oo00oo [ 0 : - 2 ]
   Oo00oo += self . eid . address . encode_geo ( )
   return ( Oo00oo )
  if 68 - 68: IiII . OoooooooOO - i11iIiiIii + i11iIiiIii
  if 81 - 81: OoOoOO00 + iII111i . i11iIiiIii
  if 10 - 10: OoOoOO00 + I11i - iIii1I11I1II1 - I11i
  if 58 - 58: ooOoO0o
  if 98 - 98: Ii1I / OoO0O00 % OoooooooOO
  # Instance-id LCAF encoding.
  if ( Oooo000 == LISP_AFI_LCAF ) :
   Oo00oo += self . eid . lcaf_encode_iid ( )
   return ( Oo00oo )
  if 65 - 65: ooOoO0o % Oo0Ooo - I1IiiI % I1Ii111 + iIii1I11I1II1 / iIii1I11I1II1
  if 94 - 94: IiII - Oo0Ooo . o0oOOo0O0Ooo - ooOoO0o - oO0o . I11i
  if 39 - 39: oO0o + OoOoOO00
  if 68 - 68: i1IIi * oO0o / i11iIiiIii
  if 96 - 96: I1IiiI
  # Plain AFI-encoded EID address.
  Oo00oo += self . eid . pack_address ( )
  return ( Oo00oo )
if 78 - 78: OoO0O00
if 72 - 72: I1ii11iIi11i / O0 % II111iiii / II111iiii
def decode(self, packet):
    """Parse one EID-record header from the front of *packet*.

    Fills in TTL, RLOC count, action/flag bits, map-version,
    signature count and the EID (and group, for (S,G) LCAF entries).
    Returns the remaining packet bytes, or None if *packet* is too
    short to hold the fixed-size header.
    """
    header_format = "IBBHHH"
    header_size = struct.calcsize(header_format)
    if (len(packet) < header_size): return (None)

    fields = struct.unpack(header_format, packet[:header_size])
    self.record_ttl, self.rloc_count, self.eid.mask_len = fields[0:3]
    action_bits, self.map_version, self.eid.afi = fields[3:6]

    # All multi-byte fields arrive in network byte order.
    self.record_ttl = socket.ntohl(self.record_ttl)
    action_bits = socket.ntohs(action_bits)
    self.action = (action_bits >> 13) & 0x7
    self.authoritative = bool((action_bits >> 12) & 1)
    self.ddt_incomplete = bool((action_bits >> 11) & 1)

    # High nibble of the map-version word is the signature count.
    self.map_version = socket.ntohs(self.map_version)
    self.signature_count = self.map_version >> 12
    self.map_version = self.map_version & 0xfff

    self.eid.afi = socket.ntohs(self.eid.afi)
    self.eid.instance_id = 0
    packet = packet[header_size::]

    # An LCAF-encoded EID may carry an (S,G) pair; the group, when
    # present, inherits the EID's instance-id.
    if (self.eid.afi == LISP_AFI_LCAF):
        packet, group = self.eid.lcaf_decode_eid(packet)
        if (group): self.group = group
        self.group.instance_id = self.eid.instance_id
        return (packet)

    packet = self.eid.unpack_address(packet)
    return (packet)
def print_eid_tuple(self):
    """Return this record's (EID, group) pair formatted for display."""
    return (lisp_print_eid_tuple(self.eid, self.group))
# IP protocol number carried in the inner header of an Encapsulated
# Control Message (ECM) — always UDP — and the default inner-header TTL.
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm(object):
    """LISP Encapsulated Control Message (ECM).

    Wraps a control-plane payload in an ECM header followed by an inner
    IPv4 or IPv6 header and an inner UDP header. Handles both building
    (encode) and parsing (decode) of that 3-header prefix.
    """

    def __init__(self, sport):
        # ECM flag bits.
        self.security = False
        self.ddt = False
        self.to_etr = False
        self.to_ms = False
        # Inner IP header fields.
        self.length = 0
        self.ttl = LISP_DEFAULT_ECM_TTL
        self.protocol = LISP_UDP_PROTOCOL
        self.ip_checksum = 0
        self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        # Inner UDP header fields; destination is the LISP control port.
        self.udp_sport = sport
        self.udp_dport = LISP_CTRL_PORT
        self.udp_checksum = 0
        self.udp_length = 0
        self.afi = LISP_AFI_NONE

    def print_ecm(self):
        """Log a one-line human-readable summary of this ECM."""
        line = ("{} -> flags: {}{}{}{}, " +
            "inner IP: {} -> {}, inner UDP: {} -> {}")
        lprint(line.format(bold("ECM", False),
            "S" if self.security else "s",
            "D" if self.ddt else "d",
            "E" if self.to_etr else "e",
            "M" if self.to_ms else "m",
            green(self.source.print_address(), False),
            green(self.dest.print_address(), False),
            self.udp_sport, self.udp_dport))

    def encode(self, packet, inner_source, inner_dest):
        """Build the ECM + inner IP + inner UDP prefix for *packet*.

        *packet* is the control message being encapsulated; the inner
        addresses decide whether an IPv4 or IPv6 header is emitted.
        Returns the encoded prefix (the caller appends the payload).
        """
        self.udp_length = len(packet) + 8
        self.source = inner_source
        self.dest = inner_dest
        if (inner_dest.is_ipv4()):
            self.afi = LISP_AFI_IPV4
            # IPv4 total-length includes the 20-byte IP header.
            self.length = self.udp_length + 20
        if (inner_dest.is_ipv6()):
            self.afi = LISP_AFI_IPV6
            # IPv6 payload-length excludes the IP header.
            self.length = self.udp_length

        # ECM type in the top nibble, flag bits below it.
        first_long = (LISP_ECM << 28)
        if (self.security): first_long |= 0x08000000
        if (self.ddt): first_long |= 0x04000000
        if (self.to_etr): first_long |= 0x02000000
        if (self.to_ms): first_long |= 0x01000000

        ecm = struct.pack("I", socket.htonl(first_long))

        inner_ip = ""
        if (self.afi == LISP_AFI_IPV4):
            inner_ip = struct.pack("BBHHHBBH", 0x45, 0,
                socket.htons(self.length), 0, 0, self.ttl,
                self.protocol, socket.htons(self.ip_checksum))
            inner_ip += self.source.pack_address()
            inner_ip += self.dest.pack_address()
            inner_ip = lisp_ip_checksum(inner_ip)
        if (self.afi == LISP_AFI_IPV6):
            inner_ip = struct.pack("BBHHBB", 0x60, 0, 0,
                socket.htons(self.length), self.protocol, self.ttl)
            inner_ip += self.source.pack_address()
            inner_ip += self.dest.pack_address()

        sport = socket.htons(self.udp_sport)
        dport = socket.htons(self.udp_dport)
        udp_len = socket.htons(self.udp_length)
        udp_sum = socket.htons(self.udp_checksum)
        inner_udp = struct.pack("HHHH", sport, dport, udp_len, udp_sum)
        return (ecm + inner_ip + inner_udp)

    def decode(self, packet):
        """Parse ECM + inner IP + inner UDP headers off *packet*.

        Returns the remaining bytes (the encapsulated control message)
        or None if any header is truncated.
        """
        header_format = "I"
        header_size = struct.calcsize(header_format)
        if (len(packet) < header_size): return (None)

        first_long = struct.unpack(header_format, packet[:header_size])
        first_long = socket.ntohl(first_long[0])
        self.security = bool(first_long & 0x08000000)
        self.ddt = bool(first_long & 0x04000000)
        self.to_etr = bool(first_long & 0x02000000)
        self.to_ms = bool(first_long & 0x01000000)
        packet = packet[header_size::]

        # The IP version nibble of the inner header selects the parser.
        if (len(packet) < 1): return (None)
        ip_version = struct.unpack("B", packet[0:1])[0]
        ip_version = ip_version >> 4

        if (ip_version == 4):
            header_size = struct.calcsize("HHIBBH")
            if (len(packet) < header_size): return (None)

            dummy, total_len, dummy, ttl, proto, checksum = \
                struct.unpack("HHIBBH", packet[:header_size])
            self.length = socket.ntohs(total_len)
            self.ttl = ttl
            self.protocol = proto
            self.ip_checksum = socket.ntohs(checksum)
            self.source.afi = self.dest.afi = LISP_AFI_IPV4

            # Zero out the IPv4 header-checksum field in place so the
            # header can later be re-checksummed consistently.
            zero_sum = struct.pack("H", 0)
            sum_offset = struct.calcsize("HHIBB")
            sum_size = struct.calcsize("H")
            packet = packet[:sum_offset] + zero_sum + \
                packet[sum_offset + sum_size:]

            packet = packet[header_size::]
            packet = self.source.unpack_address(packet)
            if (packet == None): return (None)
            packet = self.dest.unpack_address(packet)
            if (packet == None): return (None)

        if (ip_version == 6):
            header_size = struct.calcsize("IHBB")
            if (len(packet) < header_size): return (None)

            dummy, payload_len, proto, ttl = \
                struct.unpack("IHBB", packet[:header_size])
            self.length = socket.ntohs(payload_len)
            self.protocol = proto
            self.ttl = ttl
            self.source.afi = self.dest.afi = LISP_AFI_IPV6

            packet = packet[header_size::]
            packet = self.source.unpack_address(packet)
            if (packet == None): return (None)
            packet = self.dest.unpack_address(packet)
            if (packet == None): return (None)

        self.source.mask_len = self.source.host_mask_len()
        self.dest.mask_len = self.dest.host_mask_len()

        header_size = struct.calcsize("HHHH")
        if (len(packet) < header_size): return (None)

        sport, dport, udp_len, udp_sum = struct.unpack("HHHH",
            packet[:header_size])
        self.udp_sport = socket.ntohs(sport)
        self.udp_dport = socket.ntohs(dport)
        self.udp_length = socket.ntohs(udp_len)
        self.udp_checksum = socket.ntohs(udp_sum)
        packet = packet[header_size::]
        return (packet)
# (Machine-generated no-op obfuscation filler removed — these lines were
# all of the form "if N - N: <expr>" and never executed anything.)
class lisp_rloc_record ( object ) :
def __init__(self):
    """Initialize an empty RLOC-record with default (zero) metrics."""
    # Unicast and multicast priority/weight metrics.
    self.priority = 0
    self.weight = 0
    self.mpriority = 0
    self.mweight = 0
    # L/P/R flag bits (local, probe, reachable).
    self.local_bit = False
    self.probe_bit = False
    self.reach_bit = False
    # The RLOC address itself plus optional LCAF-carried attributes.
    self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
    self.geo = None
    self.elp = None
    self.rle = None
    self.json = None
    self.rloc_name = None
    self.keys = None
def print_rloc_name(self, cour=False):
    """Return "rloc-name: <name>" for display, or "" when unnamed.

    When *cour* is True the name is wrapped in courier-font markup.
    """
    if (self.rloc_name == None): return ("")
    name = self.rloc_name
    if (cour): name = lisp_print_cour(name)
    return ('rloc-name: {}'.format(blue(name, cour)))
def print_record(self, indent):
    """Log a one-line summary of this RLOC-record, prefixed by *indent*.

    Each optional attribute (rloc-name, geo, elp, rle, json, keys)
    contributes a ", <attr>: ..." clause only when present.
    """
    name_str = self.print_rloc_name()
    if (name_str != ""): name_str = ", " + name_str

    geo_str = ""
    if (self.geo):
        label = ""
        if (self.geo.geo_name): label = "'{}' ".format(self.geo.geo_name)
        geo_str = ", geo: {}{}".format(label, self.geo.print_geo())

    elp_str = ""
    if (self.elp):
        label = ""
        if (self.elp.elp_name): label = "'{}' ".format(self.elp.elp_name)
        elp_str = ", elp: {}{}".format(label, self.elp.print_elp(True))

    rle_str = ""
    if (self.rle):
        label = ""
        if (self.rle.rle_name): label = "'{}' ".format(self.rle.rle_name)
        rle_str = ", rle: {}{}".format(label, self.rle.print_rle(False,
            True))

    json_str = ""
    if (self.json):
        label = ""
        if (self.json.json_name):
            label = "'{}' ".format(self.json.json_name)
        # NOTE: label is computed but, as in the original, not included
        # in the json clause.
        json_str = ", json: {}".format(self.json.print_json(False))

    keys_str = ""
    if (self.rloc.is_null() == False and self.keys and self.keys[1]):
        keys_str = ", " + self.keys[1].print_keys()

    line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
        + "{}{}{}{}{}{}{}")
    lprint(line.format(indent, self.print_flags(), self.priority,
        self.weight, self.mpriority, self.mweight, self.rloc.afi,
        red(self.rloc.print_address_no_iid(), False), name_str, geo_str,
        elp_str, rle_str, json_str, keys_str))
def print_flags ( self ) :
return ( "{}{}{}" . format ( "L" if self . local_bit else "l" , "P" if self . probe_bit else "p" , "R" if self . reach_bit else "r" ) )
if 10 - 10: I1IiiI - II111iiii / IiII * II111iiii
if 67 - 67: II111iiii . Ii1I % oO0o . Oo0Ooo + IiII
if 10 - 10: OOooOOo - OoO0O00 * oO0o / iIii1I11I1II1 - OoOoOO00
def store_rloc_entry(self, rloc_entry):
    """Copy a configured RLOC entry into this wire-format RLOC-record.

    Prefers the NAT-translated address when one is set. For geo/elp/
    rle/json, an object on the entry wins; otherwise the entry's name
    is looked up in the corresponding module-level configuration table
    (a miss leaves the attribute untouched).
    """
    if (rloc_entry.translated_rloc.is_null()):
        address = rloc_entry.rloc
    else:
        address = rloc_entry.translated_rloc
    self.rloc.copy_address(address)

    if (rloc_entry.rloc_name):
        self.rloc_name = rloc_entry.rloc_name

    if (rloc_entry.geo):
        self.geo = rloc_entry.geo
    else:
        name = rloc_entry.geo_name
        if (name and name in lisp_geo_list):
            self.geo = lisp_geo_list[name]

    if (rloc_entry.elp):
        self.elp = rloc_entry.elp
    else:
        name = rloc_entry.elp_name
        if (name and name in lisp_elp_list):
            self.elp = lisp_elp_list[name]

    if (rloc_entry.rle):
        self.rle = rloc_entry.rle
    else:
        name = rloc_entry.rle_name
        if (name and name in lisp_rle_list):
            self.rle = lisp_rle_list[name]

    if (rloc_entry.json):
        self.json = rloc_entry.json
    else:
        name = rloc_entry.json_name
        if (name and name in lisp_json_list):
            self.json = lisp_json_list[name]

    self.priority = rloc_entry.priority
    self.weight = rloc_entry.weight
    self.mpriority = rloc_entry.mpriority
    self.mweight = rloc_entry.mweight
def encode_json(self, lisp_json):
    """Encode *lisp_json* as a JSON-type LCAF and return the bytes.

    For telemetry JSON the RLOC address is appended after the JSON
    string; otherwise a zero AFI terminates the LCAF.
    """
    json_string = lisp_json.json_string

    # Key-id/encrypted bits go in the LCAF flags byte.
    flag_byte = 0
    if (lisp_json.json_encrypted):
        flag_byte = (lisp_json.json_key_id << 5) | 0x02

    lcaf_type = LISP_LCAF_JSON_TYPE
    lcaf_afi = socket.htons(LISP_AFI_LCAF)
    # Trailing AFI field (2 bytes) plus optional address.
    addr_len = self.rloc.addr_length() + 2

    lcaf_len = socket.htons(len(json_string) + addr_len)
    json_len = socket.htons(len(json_string))
    packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type,
        flag_byte, lcaf_len, json_len)
    packet += json_string.encode()

    if (lisp_is_json_telemetry(json_string)):
        packet += struct.pack("H", socket.htons(self.rloc.afi))
        packet += self.rloc.pack_address()
    else:
        packet += struct.pack("H", 0)
    return (packet)
def encode_lcaf(self):
    """Encode this RLOC plus its attributes as an AFI-List LCAF.

    Emits the RLOC address first, then (in order) any rloc-name, geo,
    elp, rle, security-key and json sub-encodings, all wrapped in one
    AFI-List LCAF whose length covers everything after its own header.
    """
    lcaf_afi = socket.htons(LISP_AFI_LCAF)

    geo_bytes = b""
    if (self.geo):
        geo_bytes = self.geo.encode_geo()

    # Explicit Locator Path: one (flags, afi, address) triple per hop.
    elp_bytes = b""
    if (self.elp):
        hops = b""
        for hop in self.elp.elp_nodes:
            hop_afi = socket.htons(hop.address.afi)
            hop_flags = 0
            if (hop.eid): hop_flags |= 0x4
            if (hop.probe): hop_flags |= 0x2
            if (hop.strict): hop_flags |= 0x1
            hop_flags = socket.htons(hop_flags)
            hops += struct.pack("HH", hop_flags, hop_afi)
            hops += hop.address.pack_address()

        elp_len = socket.htons(len(hops))
        elp_bytes = struct.pack("HBBBBH", lcaf_afi, 0, 0,
            LISP_LCAF_ELP_TYPE, 0, elp_len)
        elp_bytes += hops

    # Replication List Entry: level + address (+ optional name) per node.
    rle_bytes = b""
    if (self.rle):
        entries = b""
        for entry in self.rle.rle_nodes:
            entry_afi = socket.htons(entry.address.afi)
            entries += struct.pack("HBBH", 0, 0, entry.level, entry_afi)
            entries += entry.address.pack_address()
            if (entry.rloc_name):
                entries += struct.pack("H", socket.htons(LISP_AFI_NAME))
                entries += (entry.rloc_name + "\0").encode()

        rle_len = socket.htons(len(entries))
        rle_bytes = struct.pack("HBBBBH", lcaf_afi, 0, 0,
            LISP_LCAF_RLE_TYPE, 0, rle_len)
        rle_bytes += entries

    json_bytes = b""
    if (self.json):
        json_bytes = self.encode_json(self.json)

    key_bytes = b""
    if (self.rloc.is_null() == False and self.keys and self.keys[1]):
        key_bytes = self.keys[1].encode_lcaf(self.rloc)

    name_bytes = b""
    if (self.rloc_name):
        name_bytes += struct.pack("H", socket.htons(LISP_AFI_NAME))
        name_bytes += (self.rloc_name + "\0").encode()

    # LCAF length: everything after the AFI-List header, including the
    # 2-byte AFI of the RLOC address itself.
    lcaf_len = len(geo_bytes) + len(elp_bytes) + len(rle_bytes) + \
        len(key_bytes) + 2 + len(json_bytes) + \
        self.rloc.addr_length() + len(name_bytes)

    lcaf_len = socket.htons(lcaf_len)
    header = struct.pack("HBBBBHH", lcaf_afi, 0, 0,
        LISP_LCAF_AFI_LIST_TYPE, 0, lcaf_len, socket.htons(self.rloc.afi))
    header += self.rloc.pack_address()
    return (header + name_bytes + geo_bytes + elp_bytes + rle_bytes +
        key_bytes + json_bytes)
def encode(self):
    """Return this RLOC-record in wire format.

    Plain addresses are packed directly; any attached attribute
    (geo/elp/rle/keys/name/json) forces the LCAF encoding, replacing
    the trailing AFI field of the fixed header.
    """
    flag_bits = 0
    if (self.local_bit): flag_bits |= 0x0004
    if (self.probe_bit): flag_bits |= 0x0002
    if (self.reach_bit): flag_bits |= 0x0001

    packet = struct.pack("BBBBHH", self.priority, self.weight,
        self.mpriority, self.mweight, socket.htons(flag_bits),
        socket.htons(self.rloc.afi))

    need_lcaf = (self.geo or self.elp or self.rle or self.keys or
        self.rloc_name or self.json)
    if (need_lcaf):
        try:
            # Strip the plain AFI field; the LCAF supplies its own.
            packet = packet[0:-2] + self.encode_lcaf()
        except:
            lprint("Could not encode LCAF for RLOC-record")
    else:
        packet += self.rloc.pack_address()
    return (packet)
 def decode_lcaf ( self , packet , nonce , ms_json_encrypt ) :
  #
  # Parse one LCAF-encoded locator (RFC 8060 style) out of 'packet' and
  # store the result on this RLOC-record (self.rloc, self.geo,
  # self.json, self.elp, self.rle, or self.keys, depending on the LCAF
  # type byte).  Returns the unconsumed remainder of 'packet', or None
  # on underrun or malformed data.  'nonce' is unused here directly but
  # threaded through recursive calls; 'ms_json_encrypt' is handed to
  # lisp_json() for encrypted JSON payloads.
  #
  II111I11iI = "HBBBBH"
  oO000 = struct . calcsize ( II111I11iI )
  if ( len ( packet ) < oO000 ) : return ( None )
  if 11 - 11: oO0o + O0 % Ii1I . I11i * o0oOOo0O0Ooo
  Oooo000 , iI1i1II11I , OoO0o0oOOoOoo , IIiiIIi1II11 , I1iIiiiI1II1 , ii111iIii1 = struct . unpack ( II111I11iI , packet [ : oO000 ] )
  if 14 - 14: I11i . iIii1I11I1II1 + I1Ii111 % OoooooooOO
  if 9 - 9: oO0o + Ii1I / I1ii11iIi11i * iIii1I11I1II1 + o0oOOo0O0Ooo
  # ii111iIii1 is the LCAF payload length; it must fit in the packet.
  ii111iIii1 = socket . ntohs ( ii111iIii1 )
  packet = packet [ oO000 : : ]
  if ( ii111iIii1 > len ( packet ) ) : return ( None )
  if 64 - 64: I11i % i11iIiiIii % I1ii11iIi11i
  if 14 - 14: I1Ii111 - OoOoOO00 - I1ii11iIi11i % I11i + OoooooooOO
  if 4 - 4: I1Ii111 - I1IiiI / iIii1I11I1II1 + I1ii11iIi11i % iIii1I11I1II1 * I1IiiI
  if 30 - 30: i11iIiiIii % OOooOOo
  # AFI-list LCAF: walk each embedded element, keeping an optional
  # distinguished-name as the RLOC-name and an address as the RLOC.
  if ( IIiiIIi1II11 == LISP_LCAF_AFI_LIST_TYPE ) :
   while ( ii111iIii1 > 0 ) :
    II111I11iI = "H"
    oO000 = struct . calcsize ( II111I11iI )
    if ( ii111iIii1 < oO000 ) : return ( None )
    if 52 - 52: I11i - oO0o . i11iIiiIii - II111iiii + Ii1I . iII111i
    iiIi1111iiI1 = len ( packet )
    Oooo000 = struct . unpack ( II111I11iI , packet [ : oO000 ] ) [ 0 ]
    Oooo000 = socket . ntohs ( Oooo000 )
    if 27 - 27: I1IiiI + OoOoOO00 + iII111i
    if ( Oooo000 == LISP_AFI_LCAF ) :
     # Nested LCAF inside the list; recurse.
     packet = self . decode_lcaf ( packet , nonce , ms_json_encrypt )
     if ( packet == None ) : return ( None )
    else :
     packet = packet [ oO000 : : ]
     self . rloc_name = None
     if ( Oooo000 == LISP_AFI_NAME ) :
      packet , OO000o = lisp_decode_dist_name ( packet )
      self . rloc_name = OO000o
     else :
      self . rloc . afi = Oooo000
      packet = self . rloc . unpack_address ( packet )
      if ( packet == None ) : return ( None )
      self . rloc . mask_len = self . rloc . host_mask_len ( )
      if 70 - 70: I11i + IiII . ooOoO0o - I1ii11iIi11i
    if 34 - 34: i1IIi % Oo0Ooo . oO0o
    if 36 - 36: I1ii11iIi11i / I1Ii111 - IiII + OOooOOo + I1Ii111
    # Charge whatever the element consumed against the LCAF length.
    ii111iIii1 -= iiIi1111iiI1 - len ( packet )
   if 62 - 62: Oo0Ooo . OoO0O00 * I1Ii111 . i11iIiiIii * O0
   if 10 - 10: Oo0Ooo / OoOoOO00 * OOooOOo - IiII + Ii1I
  elif ( IIiiIIi1II11 == LISP_LCAF_GEO_COORD_TYPE ) :
   if 62 - 62: I1IiiI . Ii1I
   if 74 - 74: Ii1I - I11i % ooOoO0o - I1IiiI - Ii1I - II111iiii
   if 81 - 81: i1IIi * I1ii11iIi11i + IiII - OoO0O00 * i1IIi
   if 6 - 6: iIii1I11I1II1 % OoOoOO00 % II111iiii % o0oOOo0O0Ooo
   # Geo-coordinates LCAF: parse into a lisp_geo and attach it.
   O00o0o0O = lisp_geo ( "" )
   packet = O00o0o0O . decode_geo ( packet , ii111iIii1 , I1iIiiiI1II1 )
   if ( packet == None ) : return ( None )
   self . geo = O00o0o0O
   if 67 - 67: IiII - I1Ii111 . I1Ii111 % Ii1I
  elif ( IIiiIIi1II11 == LISP_LCAF_JSON_TYPE ) :
   # JSON LCAF: flag bit 0x02 of the flags byte marks encrypted JSON.
   iiII = I1iIiiiI1II1 & 0x02
   if 35 - 35: I1Ii111 / I1Ii111 + o0oOOo0O0Ooo - oO0o
   if 40 - 40: OoOoOO00 - II111iiii
   if 29 - 29: I1IiiI - O0
   if 36 - 36: I1IiiI * I1IiiI
   II111I11iI = "H"
   oO000 = struct . calcsize ( II111I11iI )
   if ( ii111iIii1 < oO000 ) : return ( None )
   if 79 - 79: I1Ii111 - I11i
   oo0O0OO = struct . unpack ( II111I11iI , packet [ : oO000 ] ) [ 0 ]
   oo0O0OO = socket . ntohs ( oo0O0OO )
   if ( ii111iIii1 < oO000 + oo0O0OO ) : return ( None )
   if 49 - 49: II111iiii + O0 * ooOoO0o - Oo0Ooo
   packet = packet [ oO000 : : ]
   self . json = lisp_json ( "" , packet [ 0 : oo0O0OO ] , iiII ,
    ms_json_encrypt )
   packet = packet [ oo0O0OO : : ]
   if 89 - 89: I1IiiI + I11i . oO0o . II111iiii + oO0o / Oo0Ooo
   if 32 - 32: OoO0O00 % oO0o * I1ii11iIi11i + I11i / I1Ii111
   if 5 - 5: o0oOOo0O0Ooo + iII111i / OoooooooOO + Ii1I . OoOoOO00 / oO0o
   if 18 - 18: II111iiii . o0oOOo0O0Ooo
   # An RLOC address may follow the JSON blob (telemetry records only).
   Oooo000 = socket . ntohs ( struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ] )
   packet = packet [ 2 : : ]
   if 75 - 75: OoooooooOO - Oo0Ooo
   if ( Oooo000 != 0 and lisp_is_json_telemetry ( self . json . json_string ) ) :
    self . rloc . afi = Oooo000
    packet = self . rloc . unpack_address ( packet )
   if 56 - 56: II111iiii - i11iIiiIii - oO0o . o0oOOo0O0Ooo
   if 4 - 4: i1IIi
  elif ( IIiiIIi1II11 == LISP_LCAF_ELP_TYPE ) :
   if 91 - 91: IiII . OoO0O00 * Ii1I / o0oOOo0O0Ooo
   if 41 - 41: I1IiiI . OoO0O00 / i1IIi . Oo0Ooo . oO0o
   if 44 - 44: iII111i * I11i + i11iIiiIii + i1IIi / IiII * II111iiii
   if 58 - 58: OOooOOo
   # Explicit-locator-path LCAF: build an ordered list of ELP nodes,
   # each carrying eid/probe/strict bits and an address.
   OOO00O = lisp_elp ( None )
   OOO00O . elp_nodes = [ ]
   while ( ii111iIii1 > 0 ) :
    OoO0o0oOOoOoo , Oooo000 = struct . unpack ( "HH" , packet [ : 4 ] )
    if 5 - 5: I1Ii111 * I11i * oO0o * I1ii11iIi11i - OOooOOo * OoOoOO00
    Oooo000 = socket . ntohs ( Oooo000 )
    if ( Oooo000 == LISP_AFI_LCAF ) : return ( None )
    if 88 - 88: OoooooooOO . II111iiii / Oo0Ooo * OoOoOO00
    i11I1iI1I = lisp_elp_node ( )
    OOO00O . elp_nodes . append ( i11I1iI1I )
    if 52 - 52: OoO0O00 + oO0o
    OoO0o0oOOoOoo = socket . ntohs ( OoO0o0oOOoOoo )
    i11I1iI1I . eid = ( OoO0o0oOOoOoo & 0x4 )
    i11I1iI1I . probe = ( OoO0o0oOOoOoo & 0x2 )
    i11I1iI1I . strict = ( OoO0o0oOOoOoo & 0x1 )
    i11I1iI1I . address . afi = Oooo000
    i11I1iI1I . address . mask_len = i11I1iI1I . address . host_mask_len ( )
    packet = i11I1iI1I . address . unpack_address ( packet [ 4 : : ] )
    ii111iIii1 -= i11I1iI1I . address . addr_length ( ) + 4
   if 84 - 84: O0 % I1ii11iIi11i % iIii1I11I1II1 - OoOoOO00 - Oo0Ooo
   OOO00O . select_elp_node ( )
   self . elp = OOO00O
   if 7 - 7: II111iiii % oO0o % i1IIi . iIii1I11I1II1
  elif ( IIiiIIi1II11 == LISP_LCAF_RLE_TYPE ) :
   if 92 - 92: Ii1I / o0oOOo0O0Ooo % OOooOOo - OoOoOO00
   if 44 - 44: I1IiiI + OoOoOO00 * Oo0Ooo
   if 31 - 31: I11i - I1IiiI - OoO0O00 * OoOoOO00
   if 50 - 50: I1ii11iIi11i + I11i * iII111i
   # Replication-list-entry LCAF: one node per (level, address) pair,
   # each optionally followed by an AFI=name RLOC-name.
   ooo0o0O = lisp_rle ( None )
   ooo0o0O . rle_nodes = [ ]
   while ( ii111iIii1 > 0 ) :
    iIiiiI1 , II11iiiII1Ii , O00OoO0 , Oooo000 = struct . unpack ( "HBBH" , packet [ : 6 ] )
    if 66 - 66: OoooooooOO + OoOoOO00 * OoO0O00 - I1IiiI . oO0o
    Oooo000 = socket . ntohs ( Oooo000 )
    if ( Oooo000 == LISP_AFI_LCAF ) : return ( None )
    if 74 - 74: o0oOOo0O0Ooo . Oo0Ooo * i1IIi
    iI11i1ii11i11 = lisp_rle_node ( )
    ooo0o0O . rle_nodes . append ( iI11i1ii11i11 )
    if 67 - 67: IiII
    iI11i1ii11i11 . level = O00OoO0
    iI11i1ii11i11 . address . afi = Oooo000
    iI11i1ii11i11 . address . mask_len = iI11i1ii11i11 . address . host_mask_len ( )
    packet = iI11i1ii11i11 . address . unpack_address ( packet [ 6 : : ] )
    if 54 - 54: i11iIiiIii + I11i % iII111i % I1ii11iIi11i + Oo0Ooo % o0oOOo0O0Ooo
    ii111iIii1 -= iI11i1ii11i11 . address . addr_length ( ) + 6
    if ( ii111iIii1 >= 2 ) :
     Oooo000 = struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ]
     if ( socket . ntohs ( Oooo000 ) == LISP_AFI_NAME ) :
      packet = packet [ 2 : : ]
      packet , iI11i1ii11i11 . rloc_name = lisp_decode_dist_name ( packet )
      if 66 - 66: IiII . I1Ii111 - oO0o
      if ( packet == None ) : return ( None )
      ii111iIii1 -= len ( iI11i1ii11i11 . rloc_name ) + 1 + 2
     if 12 - 12: i1IIi / I11i
    if 79 - 79: I1IiiI + II111iiii + ooOoO0o % OoO0O00
   if 72 - 72: OOooOOo * OoOoOO00
   self . rle = ooo0o0O
   self . rle . build_forwarding_list ( )
   if 81 - 81: II111iiii / I11i - ooOoO0o - i1IIi - I1Ii111
  elif ( IIiiIIi1II11 == LISP_LCAF_SECURITY_TYPE ) :
   if 38 - 38: OoOoOO00 . iII111i / O0 . OOooOOo + OOooOOo
   if 4 - 4: I11i
   if 95 - 95: II111iiii % o0oOOo0O0Ooo . I11i
   if 18 - 18: O0 / OoooooooOO * Oo0Ooo % iII111i
   if 24 - 24: I1ii11iIi11i % OOooOOo + OoooooooOO + OoO0O00
   # Security LCAF: first decode with a probe lisp_keys to learn the
   # cipher suite, then re-decode with a keys object of the right kind.
   i1iiI11i1 = packet
   OO = lisp_keys ( 1 )
   packet = OO . decode_lcaf ( i1iiI11i1 , ii111iIii1 )
   if ( packet == None ) : return ( None )
   if 100 - 100: Oo0Ooo % OoO0O00 - OoOoOO00
   if 46 - 46: o0oOOo0O0Ooo
   if 28 - 28: i1IIi
   if 81 - 81: oO0o % OoooooooOO . I1Ii111 - OoOoOO00 / I1IiiI
   i1I1IiiIIIiiI = [ LISP_CS_25519_CBC , LISP_CS_25519_CHACHA ]
   if ( OO . cipher_suite in i1I1IiiIIIiiI ) :
    if ( OO . cipher_suite == LISP_CS_25519_CBC ) :
     Ooo00o000o = lisp_keys ( 1 , do_poly = False , do_chacha = False )
    if 62 - 62: I1Ii111 * I11i / I11i
    if ( OO . cipher_suite == LISP_CS_25519_CHACHA ) :
     Ooo00o000o = lisp_keys ( 1 , do_poly = True , do_chacha = True )
    if 42 - 42: ooOoO0o * ooOoO0o / Ii1I / OOooOOo * OOooOOo
   else :
    Ooo00o000o = lisp_keys ( 1 , do_poly = False , do_chacha = False )
   if 92 - 92: Oo0Ooo / iII111i - OoooooooOO - o0oOOo0O0Ooo % ooOoO0o
   packet = Ooo00o000o . decode_lcaf ( i1iiI11i1 , ii111iIii1 )
   if ( packet == None ) : return ( None )
   if 35 - 35: i1IIi % iII111i % I11i * iIii1I11I1II1 % Ii1I - Oo0Ooo
   # The RLOC address follows the key material.
   if ( len ( packet ) < 2 ) : return ( None )
   Oooo000 = struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ]
   self . rloc . afi = socket . ntohs ( Oooo000 )
   if ( len ( packet ) < self . rloc . addr_length ( ) ) : return ( None )
   packet = self . rloc . unpack_address ( packet [ 2 : : ] )
   if ( packet == None ) : return ( None )
   self . rloc . mask_len = self . rloc . host_mask_len ( )
   if 94 - 94: iII111i
   if 68 - 68: OoooooooOO % OOooOOo / OoooooooOO / I1Ii111 + Ii1I - o0oOOo0O0Ooo
   if 81 - 81: I1IiiI
   if 62 - 62: Ii1I * OoOoOO00
   if 27 - 27: Oo0Ooo + Oo0Ooo / II111iiii % I1Ii111
   if 11 - 11: Ii1I
   # An unspecified RLOC means keys-only; nothing further to merge.
   if ( self . rloc . is_null ( ) ) : return ( packet )
   if 54 - 54: I1IiiI * I1Ii111 / ooOoO0o / iIii1I11I1II1 % iII111i / oO0o
   I1Iii1i = self . rloc_name
   if ( I1Iii1i ) : I1Iii1i = blue ( self . rloc_name , False )
   if 50 - 50: Oo0Ooo
   if 14 - 14: O0
   if 67 - 67: II111iiii / O0
   if 10 - 10: i1IIi / Oo0Ooo
   if 20 - 20: Oo0Ooo * I1Ii111 / I1ii11iIi11i . ooOoO0o
   if 67 - 67: o0oOOo0O0Ooo . Oo0Ooo % I11i
   # Merge freshly decoded keys with any stored encap-keys: with no
   # stored keys, adopt the new ones (or drop them when no remote
   # public key was supplied); with stored keys, keep / rekey / unkey
   # depending on what changed on the remote side.
   IiIi1 = self . keys [ 1 ] if self . keys else None
   if ( IiIi1 == None ) :
    if ( Ooo00o000o . remote_public_key == None ) :
     ii1111Iii11i = bold ( "No remote encap-public-key supplied" , False )
     lprint ( " {} for {}" . format ( ii1111Iii11i , I1Iii1i ) )
     Ooo00o000o = None
    else :
     ii1111Iii11i = bold ( "New encap-keying with new state" , False )
     lprint ( " {} for {}" . format ( ii1111Iii11i , I1Iii1i ) )
     Ooo00o000o . compute_shared_key ( "encap" )
    if 38 - 38: OOooOOo - OoO0O00 . ooOoO0o
   if 50 - 50: o0oOOo0O0Ooo
   if 85 - 85: II111iiii . iII111i - i1IIi
   if 23 - 23: iII111i . Ii1I - OoO0O00 / I1ii11iIi11i / O0
   if 4 - 4: i1IIi % Oo0Ooo % Ii1I * ooOoO0o - I11i
   if 76 - 76: iIii1I11I1II1 / ooOoO0o % I1ii11iIi11i % OOooOOo
   if 13 - 13: IiII
   if 56 - 56: Oo0Ooo
   if 55 - 55: i11iIiiIii + iIii1I11I1II1 / i1IIi / I1ii11iIi11i
   if 64 - 64: IiII . OoO0O00 * i11iIiiIii
   if ( IiIi1 ) :
    if ( Ooo00o000o . remote_public_key == None ) :
     Ooo00o000o = None
     oo000O = bold ( "Remote encap-unkeying occurred" , False )
     lprint ( " {} for {}" . format ( oo000O , I1Iii1i ) )
    elif ( IiIi1 . compare_keys ( Ooo00o000o ) ) :
     Ooo00o000o = IiIi1
     lprint ( " Maintain stored encap-keys for {}" . format ( I1Iii1i ) )
     if 18 - 18: Ii1I % o0oOOo0O0Ooo - Oo0Ooo
    else :
     if ( IiIi1 . remote_public_key == None ) :
      ii1111Iii11i = "New encap-keying for existing state"
     else :
      ii1111Iii11i = "Remote encap-rekeying"
      if 28 - 28: IiII
     lprint ( " {} for {}" . format ( bold ( ii1111Iii11i , False ) ,
      I1Iii1i ) )
     IiIi1 . remote_public_key = Ooo00o000o . remote_public_key
     IiIi1 . compute_shared_key ( "encap" )
     Ooo00o000o = IiIi1
    if 93 - 93: Oo0Ooo % i1IIi
   if 51 - 51: oO0o % O0
   self . keys = [ None , Ooo00o000o , None , None ]
   if 41 - 41: I1IiiI * I1IiiI . I1Ii111
  else :
   # Unknown LCAF type: skip over its payload entirely.
   if 38 - 38: I1IiiI % i11iIiiIii
   if 17 - 17: i11iIiiIii
   if 81 - 81: I1Ii111
   if 25 - 25: I1IiiI
   packet = packet [ ii111iIii1 : : ]
  if 52 - 52: I1ii11iIi11i % i1IIi . IiII % OoOoOO00
  return ( packet )
if 50 - 50: OOooOOo * I1IiiI / o0oOOo0O0Ooo
if 91 - 91: iIii1I11I1II1 / OOooOOo * O0 . o0oOOo0O0Ooo + oO0o / I1ii11iIi11i
def decode ( self , packet , nonce , ms_json_encrypt = False ) :
II111I11iI = "BBBBHH"
oO000 = struct . calcsize ( II111I11iI )
if ( len ( packet ) < oO000 ) : return ( None )
if 33 - 33: II111iiii + Ii1I
self . priority , self . weight , self . mpriority , self . mweight , OoO0o0oOOoOoo , Oooo000 = struct . unpack ( II111I11iI , packet [ : oO000 ] )
if 46 - 46: IiII + O0 + i1IIi + ooOoO0o / iII111i
if 94 - 94: oO0o + iII111i * OoOoOO00 - i1IIi / OoooooooOO
OoO0o0oOOoOoo = socket . ntohs ( OoO0o0oOOoOoo )
Oooo000 = socket . ntohs ( Oooo000 )
self . local_bit = True if ( OoO0o0oOOoOoo & 0x0004 ) else False
self . probe_bit = True if ( OoO0o0oOOoOoo & 0x0002 ) else False
self . reach_bit = True if ( OoO0o0oOOoOoo & 0x0001 ) else False
if 59 - 59: I11i % Ii1I / OoOoOO00
if ( Oooo000 == LISP_AFI_LCAF ) :
packet = packet [ oO000 - 2 : : ]
packet = self . decode_lcaf ( packet , nonce , ms_json_encrypt )
else :
self . rloc . afi = Oooo000
packet = packet [ oO000 : : ]
packet = self . rloc . unpack_address ( packet )
if 99 - 99: Ii1I + II111iiii / i11iIiiIii - IiII / iII111i + iII111i
self . rloc . mask_len = self . rloc . host_mask_len ( )
return ( packet )
if 55 - 55: IiII + OoooooooOO * I1ii11iIi11i . IiII * I1ii11iIi11i + IiII
if 81 - 81: iIii1I11I1II1 . ooOoO0o + OoOoOO00
def end_of_rlocs ( self , packet , rloc_count ) :
for iIi1iIIIiIiI in range ( rloc_count ) :
packet = self . decode ( packet , None , False )
if ( packet == None ) : return ( None )
if 31 - 31: I11i / OoOoOO00 + o0oOOo0O0Ooo
return ( packet )
if 80 - 80: Oo0Ooo
if 58 - 58: I1Ii111 + OOooOOo
if 76 - 76: II111iiii - o0oOOo0O0Ooo % OoO0O00 + iII111i
if 38 - 38: I1Ii111 - I11i * i1IIi + iIii1I11I1II1
if 41 - 41: Ii1I . OoO0O00 + I1ii11iIi11i + OoOoOO00
if 76 - 76: iII111i - iIii1I11I1II1
if 23 - 23: I11i / OoO0O00 % OOooOOo
if 9 - 9: ooOoO0o % I1ii11iIi11i . OoooooooOO + OoO0O00 % OOooOOo * OoooooooOO
if 21 - 21: Ii1I % O0
if 15 - 15: II111iiii * Ii1I + IiII % iII111i
if 96 - 96: II111iiii * I1Ii111 / Oo0Ooo
if 35 - 35: I1IiiI
if 54 - 54: I1ii11iIi11i % o0oOOo0O0Ooo . i1IIi
if 72 - 72: Ii1I
if 87 - 87: iII111i - I1IiiI
if 54 - 54: iIii1I11I1II1 + oO0o * o0oOOo0O0Ooo % OoooooooOO . Oo0Ooo
if 32 - 32: iII111i
if 33 - 33: ooOoO0o + Oo0Ooo * OoOoOO00 % ooOoO0o * oO0o - OoO0O00
if 40 - 40: I11i . OoooooooOO * O0 / I1Ii111 + O0
if 97 - 97: ooOoO0o - ooOoO0o * OOooOOo % OoOoOO00 - OoOoOO00 - I1Ii111
if 52 - 52: O0 % iII111i
if 81 - 81: OoooooooOO % OoOoOO00 % Oo0Ooo - I1IiiI
if 43 - 43: o0oOOo0O0Ooo % o0oOOo0O0Ooo
if 48 - 48: O0
if 5 - 5: OOooOOo / i11iIiiIii . I11i % OOooOOo
if 1 - 1: II111iiii + O0 * OoOoOO00 / IiII . O0
if 87 - 87: IiII + I1IiiI
if 74 - 74: OoO0O00 + OoO0O00 % iII111i / I11i / O0
if 54 - 54: o0oOOo0O0Ooo / OoooooooOO * ooOoO0o . OoOoOO00 - I1Ii111
if 69 - 69: oO0o - OoO0O00
class lisp_map_referral ( object ) :
    """A LISP Map-Referral control message header: message type,
    record count, and 64-bit nonce."""

    def __init__ ( self ) :
        # Number of EID-records the message carries.
        self.record_count = 0
        # Nonce echoed from the soliciting request.
        self.nonce = 0

    def print_map_referral ( self ) :
        """Log a one-line summary of this message."""
        title = bold("Map-Referral", False)
        lprint("{} -> record-count: {}, nonce: 0x{}".format(title,
            self.record_count, lisp_hex_string(self.nonce)))

    def encode ( self ) :
        """Return the packed 4-byte type/count word plus 8-byte nonce."""
        first_word = (LISP_MAP_REFERRAL << 28) | self.record_count
        return (struct.pack("I", socket.htonl(first_word)) +
            struct.pack("Q", self.nonce))

    def decode ( self , packet ) :
        """Parse the header from 'packet'; return the remainder or None
        if the packet is too short."""
        word_size = struct.calcsize("I")
        if (len(packet) < word_size): return (None)

        first_word = socket.ntohl(struct.unpack("I",
            packet[:word_size])[0])
        self.record_count = first_word & 0xff
        packet = packet[word_size::]

        nonce_size = struct.calcsize("Q")
        if (len(packet) < nonce_size): return (None)

        self.nonce = struct.unpack("Q", packet[:nonce_size])[0]
        return (packet[nonce_size::])
if 52 - 52: OoooooooOO % ooOoO0o - I1Ii111 * I11i
if 24 - 24: Ii1I + IiII + OoooooooOO / oO0o / I1IiiI + IiII
if 52 - 52: ooOoO0o
if 38 - 38: OoO0O00 + I1IiiI % IiII
if 87 - 87: oO0o * Ii1I - I1Ii111 / oO0o
if 65 - 65: OoOoOO00
if 87 - 87: I11i - i11iIiiIii - OOooOOo . OoOoOO00 + IiII . OoO0O00
if 70 - 70: iIii1I11I1II1 % OoooooooOO / OoO0O00 . O0 - I11i % II111iiii
class lisp_ddt_entry ( object ) :
    """One entry in the DDT cache: an (EID, group) key plus the set of
    delegation nodes learned for that prefix."""

    def __init__ ( self ) :
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.delegation_set = []
        self.source_cache = None
        self.map_referrals_sent = 0

    def is_auth_prefix ( self ) :
        # Authoritative prefixes carry no delegations and are not (*,G).
        return (len(self.delegation_set) == 0 and
            not self.is_star_g())

    def is_ms_peer_entry ( self ) :
        if (self.delegation_set == []): return (False)
        return (self.delegation_set[0].is_ms_peer())

    def print_referral_type ( self ) :
        # All nodes in a delegation set share a type; sample the first.
        if (self.delegation_set == []): return ("unknown")
        return (self.delegation_set[0].print_node_type())

    def print_eid_tuple ( self ) :
        return (lisp_print_eid_tuple(self.eid, self.group))

    def add_cache ( self ) :
        """Insert this entry into the global DDT cache.  Unicast keys
        go in directly; multicast entries hang off a per-group parent
        entry's source cache."""
        if (self.group.is_null()):
            lisp_ddt_cache.add_cache(self.eid, self)
            return

        parent = lisp_ddt_cache.lookup_cache(self.group, True)
        if (parent == None):
            parent = lisp_ddt_entry()
            parent.eid.copy_address(self.group)
            parent.group.copy_address(self.group)
            lisp_ddt_cache.add_cache(self.group, parent)

        if (self.eid.is_null()): self.eid.make_default_route(parent.group)
        parent.add_source_entry(self)

    def add_source_entry ( self , source_ddt ) :
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ddt.eid, source_ddt)

    def lookup_source_cache ( self , source , exact ) :
        if (self.source_cache == None): return (None)
        return (self.source_cache.lookup_cache(source, exact))

    def is_star_g ( self ) :
        # (*,G): group present and the EID exactly matches the group.
        if (self.group.is_null()): return (False)
        return (self.eid.is_exact_match(self.group))
if 81 - 81: OoOoOO00
if 52 - 52: iII111i * IiII % I1IiiI * I11i
if 73 - 73: I1Ii111 * ooOoO0o
class lisp_ddt_node ( object ) :
    """A delegation node for a DDT entry: either a plain DDT child, a
    map-server child, or a map-server peer."""

    def __init__ ( self ) :
        self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.map_server_peer = False
        self.map_server_child = False
        self.priority = 0
        self.weight = 0

    def print_node_type ( self ) :
        """Return a printable node-type name."""
        if (self.is_ddt_child()): return ("ddt-child")
        if (self.is_ms_child()): return ("map-server-child")
        if (self.is_ms_peer()): return ("map-server-peer")

    def is_ddt_child ( self ) :
        # A node is a plain DDT child unless flagged as MS child/peer.
        return (not self.map_server_child and not self.map_server_peer)

    def is_ms_child ( self ) :
        return (self.map_server_child)

    def is_ms_peer ( self ) :
        return (self.map_server_peer)
if 71 - 71: iII111i . OoO0O00 + ooOoO0o - OOooOOo - Oo0Ooo
if 100 - 100: OoooooooOO - o0oOOo0O0Ooo + I1Ii111 . OoooooooOO % i11iIiiIii
if 64 - 64: I1Ii111 % OoooooooOO / i1IIi / OoO0O00
if 2 - 2: I11i % o0oOOo0O0Ooo . OoO0O00 . OoO0O00
if 89 - 89: ooOoO0o - oO0o + II111iiii + OoO0O00 - IiII
if 27 - 27: I1Ii111 - o0oOOo0O0Ooo + OoO0O00
if 38 - 38: OoOoOO00 + OoO0O00 . i11iIiiIii + Ii1I % i1IIi % I1IiiI
class lisp_ddt_map_request ( object ) :
    """State for a Map-Request held while a DDT referral walk is in
    progress, including its retransmission timer and queue entry."""

    def __init__ ( self , lisp_sockets , packet , eid , group , nonce ) :
        self.uptime = lisp_get_timestamp()
        self.lisp_sockets = lisp_sockets
        self.packet = packet
        self.eid = eid
        self.group = group
        self.nonce = nonce
        # Filled in by callers as the walk proceeds.
        self.mr_source = None
        self.sport = 0
        self.itr = None
        self.retry_count = 0
        self.send_count = 0
        self.retransmit_timer = None
        self.last_request_sent_to = None
        self.from_pitr = False
        self.tried_root = False
        self.last_cached_prefix = [None, None]

    def print_ddt_map_request ( self ) :
        """Log a one-line summary of this queued request."""
        itr_kind = "P" if self.from_pitr else ""
        lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format(
            itr_kind, red(self.itr.print_address(), False),
            green(self.eid.print_address(), False), self.nonce))

    def queue_map_request ( self ) :
        """Start the retransmit timer and queue this request by nonce."""
        self.retransmit_timer = threading.Timer(
            LISP_DDT_MAP_REQUEST_INTERVAL,
            lisp_retransmit_ddt_map_request, [self])
        self.retransmit_timer.start()
        lisp_ddt_map_requestQ[str(self.nonce)] = self

    def dequeue_map_request ( self ) :
        """Cancel the retransmit timer and drop the queue entry."""
        self.retransmit_timer.cancel()
        if (self.nonce in lisp_ddt_map_requestQ):
            lisp_ddt_map_requestQ.pop(str(self.nonce))

    def print_eid_tuple ( self ) :
        return (lisp_print_eid_tuple(self.eid, self.group))
if 64 - 64: OoooooooOO
if 25 - 25: IiII
if 29 - 29: OoOoOO00 % ooOoO0o * OoooooooOO
if 8 - 8: i11iIiiIii - I1Ii111 / IiII
if 17 - 17: i11iIiiIii * OoO0O00 . o0oOOo0O0Ooo . OoooooooOO . OoOoOO00 - I1ii11iIi11i
if 78 - 78: I1ii11iIi11i - OoooooooOO + O0
if 15 - 15: I1ii11iIi11i / IiII % I1IiiI
if 16 - 16: Ii1I
if 26 - 26: o0oOOo0O0Ooo / I11i + OoOoOO00 / OoOoOO00
if 31 - 31: I1Ii111
if 84 - 84: i11iIiiIii * OOooOOo . iII111i - Ii1I * i1IIi - I1ii11iIi11i
if 1 - 1: II111iiii
if 94 - 94: I1ii11iIi11i * iII111i % iII111i % I11i - iII111i
if 38 - 38: IiII - OoO0O00 % Ii1I - II111iiii
if 97 - 97: O0 . Ii1I
if 52 - 52: IiII
if 86 - 86: I1Ii111 / O0 + OoooooooOO % oO0o
if 45 - 45: I1IiiI . Oo0Ooo . I11i . Ii1I
if 81 - 81: II111iiii + OoOoOO00 % i11iIiiIii / iII111i . I1Ii111 + II111iiii
if 48 - 48: I1IiiI . I1ii11iIi11i * OoOoOO00 % i1IIi / I1Ii111 * II111iiii
# Map-Referral action codes.  The negative values are internal-only
# states; the non-negative ones index lisp_map_referral_action_string.
LISP_DDT_ACTION_SITE_NOT_FOUND = - 2
LISP_DDT_ACTION_NULL = - 1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
if 62 - 62: o0oOOo0O0Ooo * I1Ii111 . iIii1I11I1II1 / i1IIi
# Printable names indexed by the non-negative action codes above.
lisp_map_referral_action_string = [
 "node-referral" , "ms-referral" , "ms-ack" , "ms-not-registered" ,
 "delegation-hole" , "not-authoritative" ]
if 75 - 75: OoooooooOO / ooOoO0o - iII111i . OoooooooOO . OoOoOO00 % i1IIi
if 7 - 7: OoOoOO00 . i1IIi * i11iIiiIii % i11iIiiIii
if 54 - 54: OoO0O00 / I1IiiI . Oo0Ooo
if 39 - 39: OoO0O00 . ooOoO0o
if 41 - 41: Oo0Ooo * I1ii11iIi11i - II111iiii - II111iiii
if 7 - 7: oO0o
if 41 - 41: ooOoO0o
if 93 - 93: Ii1I + I1Ii111 + Ii1I
if 23 - 23: I1IiiI - i1IIi / ooOoO0o
if 4 - 4: IiII . I1ii11iIi11i + iII111i % ooOoO0o
if 28 - 28: I1Ii111
if 27 - 27: iII111i * I1IiiI
if 60 - 60: i1IIi / I1IiiI - I1ii11iIi11i
if 41 - 41: I1Ii111 + ooOoO0o / OOooOOo + I11i % Oo0Ooo
if 91 - 91: I1IiiI % I1ii11iIi11i % oO0o / i1IIi * iIii1I11I1II1 + I11i
if 48 - 48: ooOoO0o / I1ii11iIi11i / OoO0O00 / II111iiii * OoOoOO00
if 73 - 73: I11i / I1IiiI - IiII - i1IIi * IiII - OOooOOo
if 39 - 39: I11i . ooOoO0o * II111iiii
if 21 - 21: Ii1I
if 92 - 92: OoO0O00 * I1ii11iIi11i + iIii1I11I1II1
if 88 - 88: iIii1I11I1II1 + iIii1I11I1II1 * i11iIiiIii . I1ii11iIi11i % oO0o
if 94 - 94: I1IiiI / I1ii11iIi11i / OOooOOo
if 45 - 45: II111iiii
if 98 - 98: i11iIiiIii + I1ii11iIi11i * OOooOOo / OoOoOO00
if 84 - 84: o0oOOo0O0Ooo
if 40 - 40: OoooooooOO - oO0o / O0 * I1Ii111 . O0 + i11iIiiIii
if 9 - 9: OOooOOo % O0 % O0 / I1ii11iIi11i . II111iiii / II111iiii
if 78 - 78: iIii1I11I1II1 - i1IIi . I11i . o0oOOo0O0Ooo
if 66 - 66: OOooOOo * Oo0Ooo
if 58 - 58: OOooOOo
if 96 - 96: IiII % OoooooooOO + O0 * II111iiii / OOooOOo . I1Ii111
if 47 - 47: OoO0O00 - Oo0Ooo * OoO0O00 / oO0o
if 13 - 13: ooOoO0o
if 55 - 55: i1IIi . I11i . II111iiii + O0 + ooOoO0o - i1IIi
if 3 - 3: iIii1I11I1II1 / oO0o
if 61 - 61: I1Ii111 / O0 - iII111i
if 44 - 44: i1IIi
if 23 - 23: I1ii11iIi11i . OoooooooOO / Ii1I + o0oOOo0O0Ooo
if 89 - 89: OoOoOO00 + Oo0Ooo . OoOoOO00 - II111iiii
if 85 - 85: OoooooooOO * OoooooooOO / Ii1I - II111iiii
if 69 - 69: iII111i * I11i
if 43 - 43: o0oOOo0O0Ooo - IiII * Ii1I . i11iIiiIii / II111iiii
if 61 - 61: OoOoOO00 / I1IiiI . I1ii11iIi11i % OOooOOo
if 70 - 70: OOooOOo * OoOoOO00 / oO0o + Oo0Ooo / O0
if 16 - 16: Oo0Ooo / OoooooooOO / IiII + Oo0Ooo * i11iIiiIii
if 15 - 15: o0oOOo0O0Ooo / i11iIiiIii
if 63 - 63: I1ii11iIi11i - Ii1I + I11i
if 98 - 98: iII111i / IiII * I1IiiI / oO0o - iIii1I11I1II1
if 72 - 72: O0 . OOooOOo
if 99 - 99: i1IIi + iIii1I11I1II1 - ooOoO0o + OoO0O00 + Oo0Ooo . I1ii11iIi11i
if 74 - 74: i1IIi
if 80 - 80: ooOoO0o + I1Ii111 . I1ii11iIi11i % OoooooooOO
if 26 - 26: OoOoOO00 . iII111i * iIii1I11I1II1 / IiII
if 69 - 69: OoooooooOO / I11i + Ii1I * II111iiii
if 35 - 35: i11iIiiIii + oO0o
if 85 - 85: OoOoOO00 . O0 % OoooooooOO % oO0o
if 43 - 43: I1IiiI - I11i . I1IiiI / i11iIiiIii % IiII * i11iIiiIii
if 12 - 12: II111iiii - iIii1I11I1II1
if 43 - 43: i11iIiiIii % OoO0O00
class lisp_info ( object ) :
 def __init__ ( self ) :
  # False: this is an Info-Request; True: an Info-Reply.
  self . info_reply = False
  # 64-bit nonce echoed between request and reply.
  self . nonce = 0
  # Locator addresses carried by the message; start as empty
  # AFI_NONE addresses and are filled in by encode()/decode() users.
  self . private_etr_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
  self . global_etr_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
  self . global_ms_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
  # UDP ports carried in an Info-Reply's NAT-traversal LCAF.
  self . ms_port = 0
  self . etr_port = 0
  # RTR addresses listed in an Info-Reply.
  self . rtr_list = [ ]
  # Sent in an Info-Request; None suppresses the hostname field.
  self . hostname = lisp_hostname
if 100 - 100: i1IIi
if 4 - 4: i11iIiiIii - OOooOOo * IiII % OoooooooOO - OoOoOO00
 def print_info ( self ) :
  #
  # Log a one-line summary of this Info-Request/Info-Reply via lprint().
  #
  if ( self . info_reply ) :
   O0o0oO00oO0OO = "Info-Reply"
   # Replies summarize the NAT-discovered ports, RLOCs and RTR list.
   I1Ii1i111I = ( ", ms-port: {}, etr-port: {}, global-rloc: {}, " + "ms-rloc: {}, private-rloc: {}, RTR-list: " ) . format ( self . ms_port , self . etr_port ,
   red ( self . global_etr_rloc . print_address_no_iid ( ) , False ) ,
   red ( self . global_ms_rloc . print_address_no_iid ( ) , False ) ,
   red ( self . private_etr_rloc . print_address_no_iid ( ) , False ) )
   if ( len ( self . rtr_list ) == 0 ) : I1Ii1i111I += "empty, "
   for i11iiI in self . rtr_list :
    I1Ii1i111I += red ( i11iiI . print_address_no_iid ( ) , False ) + ", "
   if 32 - 32: oO0o
   # Trim the trailing ", " left by the loop (or by "empty, ").
   I1Ii1i111I = I1Ii1i111I [ 0 : - 2 ]
  else :
   O0o0oO00oO0OO = "Info-Request"
   OO00o00O00o0O = "<none>" if self . hostname == None else self . hostname
   I1Ii1i111I = ", hostname: {}" . format ( blue ( OO00o00O00o0O , False ) )
  if 5 - 5: ooOoO0o - oO0o - I1Ii111 / I11i
  lprint ( "{} -> nonce: 0x{}{}" . format ( bold ( O0o0oO00oO0OO , False ) ,
  lisp_hex_string ( self . nonce ) , I1Ii1i111I ) )
if 96 - 96: ooOoO0o / I1IiiI / OoooooooOO * Ii1I + I1Ii111 . Ii1I
if 82 - 82: iII111i % OoOoOO00
def encode ( self ) :
Iii1 = ( LISP_NAT_INFO << 28 )
if ( self . info_reply ) : Iii1 |= ( 1 << 27 )
if 71 - 71: i11iIiiIii / OoO0O00 . i11iIiiIii - i1IIi
if 26 - 26: o0oOOo0O0Ooo % i11iIiiIii % OoOoOO00 % OoO0O00 * iII111i % I1IiiI
if 91 - 91: i1IIi * ooOoO0o
if 33 - 33: I11i / OoooooooOO - Ii1I / OoO0O00 - OoO0O00
if 60 - 60: OOooOOo . ooOoO0o % i1IIi % Ii1I % ooOoO0o + OoO0O00
if 26 - 26: O0 % o0oOOo0O0Ooo + iII111i * I1ii11iIi11i * I1Ii111
if 4 - 4: OOooOOo * OoooooooOO * i1IIi % I1ii11iIi11i % Oo0Ooo
Oo00oo = struct . pack ( "I" , socket . htonl ( Iii1 ) )
Oo00oo += struct . pack ( "Q" , self . nonce )
Oo00oo += struct . pack ( "III" , 0 , 0 , 0 )
if 1 - 1: OoO0O00 / iIii1I11I1II1 % I1ii11iIi11i - o0oOOo0O0Ooo
if 62 - 62: I1Ii111 % II111iiii
if 91 - 91: I11i % Ii1I - IiII + iIii1I11I1II1 * iIii1I11I1II1
if 91 - 91: i11iIiiIii + Ii1I
if ( self . info_reply == False ) :
if ( self . hostname == None ) :
Oo00oo += struct . pack ( "H" , 0 )
else :
Oo00oo += struct . pack ( "H" , socket . htons ( LISP_AFI_NAME ) )
Oo00oo += ( self . hostname + "\0" ) . encode ( )
if 85 - 85: I11i % IiII
return ( Oo00oo )
if 68 - 68: Oo0Ooo . I1Ii111 - o0oOOo0O0Ooo * iIii1I11I1II1 - II111iiii % i1IIi
if 58 - 58: I11i / i11iIiiIii * i11iIiiIii
if 24 - 24: ooOoO0o - I1Ii111 * II111iiii - II111iiii
if 47 - 47: IiII - iIii1I11I1II1 / OoOoOO00 * iII111i - iIii1I11I1II1 % oO0o
if 93 - 93: Ii1I / iII111i
Oooo000 = socket . htons ( LISP_AFI_LCAF )
IIiiIIi1II11 = LISP_LCAF_NAT_TYPE
ii111iIii1 = socket . htons ( 16 )
o00oO0 = socket . htons ( self . ms_port )
II1Ii = socket . htons ( self . etr_port )
Oo00oo += struct . pack ( "HHBBHHHH" , Oooo000 , 0 , IIiiIIi1II11 , 0 , ii111iIii1 ,
o00oO0 , II1Ii , socket . htons ( self . global_etr_rloc . afi ) )
Oo00oo += self . global_etr_rloc . pack_address ( )
Oo00oo += struct . pack ( "HH" , 0 , socket . htons ( self . private_etr_rloc . afi ) )
Oo00oo += self . private_etr_rloc . pack_address ( )
if ( len ( self . rtr_list ) == 0 ) : Oo00oo += struct . pack ( "H" , 0 )
if 82 - 82: OoO0O00 - I1IiiI - i1IIi - I1IiiI % OOooOOo
if 80 - 80: OoOoOO00
if 31 - 31: OOooOOo * ooOoO0o + ooOoO0o / O0 - OOooOOo
if 47 - 47: I1Ii111 . OoooooooOO - oO0o - o0oOOo0O0Ooo . I1ii11iIi11i / iIii1I11I1II1
for i11iiI in self . rtr_list :
Oo00oo += struct . pack ( "H" , socket . htons ( i11iiI . afi ) )
Oo00oo += i11iiI . pack_address ( )
if 20 - 20: i11iIiiIii / OoO0O00 * I1IiiI - I1IiiI * Ii1I
return ( Oo00oo )
if 73 - 73: ooOoO0o % I1Ii111
if 69 - 69: OoOoOO00 / OOooOOo / I1IiiI
def decode ( self , packet ) :
    """Decode a LISP Info-Request/Info-Reply (NAT-traversal) message.

    Parses, in order: the 32-bit type/flags word, the 64-bit nonce, a
    key-id/auth-length pair (auth data must be absent), a TTL word plus
    reserved bytes and a record count (must be 0), then either an
    optional hostname (Info-Request) or the NAT LCAF carrying the
    MS/ETR ports, global/private ETR RLOCs, global MS RLOC and the RTR
    address list (Info-Reply).  Field meanings are inferred from the
    attributes they are stored into and from the encode side of this
    class — NOTE(review): confirm against the NAT-traversal draft.

    Returns the original, unconsumed packet buffer on success, or None
    when the message is truncated or malformed.
    """
    # Keep a reference to the whole buffer; it is returned on success.
    i1iiI11i1 = packet
    # 32-bit first word, network byte order: type and flag bits.
    II111I11iI = "I"
    oO000 = struct . calcsize ( II111I11iI )
    if ( len ( packet ) < oO000 ) : return ( None )
    if 12 - 12: I1ii11iIi11i . iIii1I11I1II1 . II111iiii . OoOoOO00
    Iii1 = struct . unpack ( II111I11iI , packet [ : oO000 ] )
    Iii1 = Iii1 [ 0 ]
    packet = packet [ oO000 : : ]
    if 30 - 30: i11iIiiIii / Oo0Ooo / OOooOOo + i11iIiiIii * ooOoO0o
    # 64-bit nonce follows the first word.
    II111I11iI = "Q"
    oO000 = struct . calcsize ( II111I11iI )
    if ( len ( packet ) < oO000 ) : return ( None )
    if 4 - 4: O0 + I1IiiI + I1Ii111
    oOooo0oOOOO = struct . unpack ( II111I11iI , packet [ : oO000 ] )
    if 80 - 80: Ii1I % OoooooooOO . i1IIi - OOooOOo
    # Bit 0x08000000 of the host-order first word distinguishes an
    # Info-Reply from an Info-Request (see encode()).
    Iii1 = socket . ntohl ( Iii1 )
    self . nonce = oOooo0oOOOO [ 0 ]
    self . info_reply = Iii1 & 0x08000000
    self . hostname = None
    packet = packet [ oO000 : : ]
    if 10 - 10: I11i + iII111i % OoO0O00 / OoO0O00
    if 91 - 91: ooOoO0o . oO0o
    if 66 - 66: II111iiii + OOooOOo + i11iIiiIii / II111iiii
    if 37 - 37: I1IiiI + OoO0O00 . OoO0O00 % OoOoOO00 + o0oOOo0O0Ooo
    if 81 - 81: i1IIi % iIii1I11I1II1
    # Key-id and authentication-data length; only messages with no
    # authentication data (length 0) are accepted here.
    II111I11iI = "HH"
    oO000 = struct . calcsize ( II111I11iI )
    if ( len ( packet ) < oO000 ) : return ( None )
    if 41 - 41: oO0o - iII111i / o0oOOo0O0Ooo . iII111i % Oo0Ooo + OOooOOo
    if 82 - 82: ooOoO0o
    if 89 - 89: OOooOOo / I1ii11iIi11i . I1IiiI + i11iIiiIii
    if 11 - 11: oO0o . i11iIiiIii * ooOoO0o % OoooooooOO % O0
    if 59 - 59: i11iIiiIii / OoO0O00
    i11iII1 , o0o0OO0OO = struct . unpack ( II111I11iI , packet [ : oO000 ] )
    if ( o0o0OO0OO != 0 ) : return ( None )
    if 48 - 48: iIii1I11I1II1
    packet = packet [ oO000 : : ]
    # 32-bit TTL, two reserved bytes, and the record count, which must
    # be zero for Info messages.
    II111I11iI = "IBBH"
    oO000 = struct . calcsize ( II111I11iI )
    if ( len ( packet ) < oO000 ) : return ( None )
    if 19 - 19: oO0o
    IiIi1iIIiII1i , oo00O0OO0oo0O , OOO00o00Oo0 , i1i = struct . unpack ( II111I11iI ,
        packet [ : oO000 ] )
    if 6 - 6: o0oOOo0O0Ooo * OoO0O00 - OoOoOO00 / O0
    if ( i1i != 0 ) : return ( None )
    packet = packet [ oO000 : : ]
    if 29 - 29: o0oOOo0O0Ooo + Ii1I * I1Ii111 * O0
    if 20 - 20: OOooOOo
    if 84 - 84: O0 . OoO0O00 * O0 - OoO0O00 / OoO0O00
    if 51 - 51: II111iiii % OoO0O00
    # Info-Request: may carry the requester's hostname as an
    # AFI=LISP_AFI_NAME distinguished-name; nothing else follows.
    if ( self . info_reply == False ) :
        II111I11iI = "H"
        oO000 = struct . calcsize ( II111I11iI )
        if ( len ( packet ) >= oO000 ) :
            Oooo000 = struct . unpack ( II111I11iI , packet [ : oO000 ] ) [ 0 ]
            if ( socket . ntohs ( Oooo000 ) == LISP_AFI_NAME ) :
                packet = packet [ oO000 : : ]
                packet , self . hostname = lisp_decode_dist_name ( packet )
        if 85 - 85: i11iIiiIii % iII111i + II111iiii
        if 16 - 16: ooOoO0o * OoOoOO00 / OoOoOO00 + II111iiii
        return ( i1iiI11i1 )
    if 50 - 50: OoO0O00 / OOooOOo % I1IiiI / Ii1I + OoO0O00 . iIii1I11I1II1
    if 62 - 62: I1Ii111 + OoooooooOO - Ii1I - iIii1I11I1II1
    if 80 - 80: OoO0O00
    if 72 - 72: II111iiii % i11iIiiIii + OoOoOO00 / I1Ii111 - i11iIiiIii
    if 39 - 39: i11iIiiIii - OOooOOo / OoO0O00 * OoOoOO00 / IiII
    # Info-Reply: fixed part of the NAT LCAF.  The AFI must be
    # LISP_AFI_LCAF; then come the MS port and the translated ETR port.
    II111I11iI = "HHBBHHH"
    oO000 = struct . calcsize ( II111I11iI )
    if ( len ( packet ) < oO000 ) : return ( None )
    if 84 - 84: I1ii11iIi11i . iIii1I11I1II1 / Ii1I / II111iiii
    Oooo000 , iIiiiI1 , IIiiIIi1II11 , oo00O0OO0oo0O , ii111iIii1 , o00oO0 , II1Ii = struct . unpack ( II111I11iI , packet [ : oO000 ] )
    if 56 - 56: OOooOOo * iII111i / Ii1I
    if 9 - 9: I1ii11iIi11i * i11iIiiIii / I1Ii111 + iIii1I11I1II1
    if ( socket . ntohs ( Oooo000 ) != LISP_AFI_LCAF ) : return ( None )
    if 1 - 1: OoO0O00 % iIii1I11I1II1 * OoOoOO00 / oO0o
    self . ms_port = socket . ntohs ( o00oO0 )
    self . etr_port = socket . ntohs ( II1Ii )
    packet = packet [ oO000 : : ]
    if 73 - 73: iII111i
    if 6 - 6: o0oOOo0O0Ooo + Oo0Ooo
    if 45 - 45: oO0o % O0 / O0
    if 98 - 98: I1Ii111
    # Global (post-NAT) ETR RLOC: 16-bit AFI followed by the address;
    # AFI 0 means the address is absent.
    II111I11iI = "H"
    oO000 = struct . calcsize ( II111I11iI )
    if ( len ( packet ) < oO000 ) : return ( None )
    if 58 - 58: OOooOOo
    if 6 - 6: I1ii11iIi11i
    if 37 - 37: i11iIiiIii . II111iiii + OOooOOo + i1IIi * OOooOOo
    if 18 - 18: ooOoO0o
    Oooo000 = struct . unpack ( II111I11iI , packet [ : oO000 ] ) [ 0 ]
    packet = packet [ oO000 : : ]
    if ( Oooo000 != 0 ) :
        self . global_etr_rloc . afi = socket . ntohs ( Oooo000 )
        packet = self . global_etr_rloc . unpack_address ( packet )
        if ( packet == None ) : return ( None )
        self . global_etr_rloc . mask_len = self . global_etr_rloc . host_mask_len ( )
    if 18 - 18: I1Ii111 + OoOoOO00 % OOooOOo - IiII - i1IIi + I1ii11iIi11i
    if 33 - 33: I11i * Ii1I / Oo0Ooo + oO0o % OOooOOo % OoooooooOO
    if 29 - 29: Ii1I . II111iiii / I1Ii111
    if 79 - 79: IiII . OoOoOO00 / oO0o % OoO0O00 / Ii1I + I11i
    if 78 - 78: o0oOOo0O0Ooo + I1Ii111 % i11iIiiIii % I1IiiI - Ii1I
    if 81 - 81: i11iIiiIii - II111iiii + I11i
    # From here on the fields are optional: a truncated packet is
    # accepted and whatever parsed successfully is kept.
    # Global MS RLOC (optional).
    if ( len ( packet ) < oO000 ) : return ( i1iiI11i1 )
    if 52 - 52: II111iiii
    Oooo000 = struct . unpack ( II111I11iI , packet [ : oO000 ] ) [ 0 ]
    packet = packet [ oO000 : : ]
    if ( Oooo000 != 0 ) :
        self . global_ms_rloc . afi = socket . ntohs ( Oooo000 )
        packet = self . global_ms_rloc . unpack_address ( packet )
        if ( packet == None ) : return ( i1iiI11i1 )
        self . global_ms_rloc . mask_len = self . global_ms_rloc . host_mask_len ( )
    if 62 - 62: iII111i / OoO0O00 + i11iIiiIii / Oo0Ooo
    if 26 - 26: I1ii11iIi11i - OoO0O00
    if 19 - 19: iIii1I11I1II1 / I1ii11iIi11i + O0
    if 12 - 12: I11i . OOooOOo + o0oOOo0O0Ooo . OoO0O00 + o0oOOo0O0Ooo
    if 56 - 56: i1IIi / i1IIi . OoO0O00 % i1IIi - OoOoOO00 % OOooOOo
    # Private (pre-NAT) ETR RLOC (optional).
    if ( len ( packet ) < oO000 ) : return ( i1iiI11i1 )
    if 66 - 66: i11iIiiIii * IiII % IiII . I1IiiI / ooOoO0o
    Oooo000 = struct . unpack ( II111I11iI , packet [ : oO000 ] ) [ 0 ]
    packet = packet [ oO000 : : ]
    if ( Oooo000 != 0 ) :
        self . private_etr_rloc . afi = socket . ntohs ( Oooo000 )
        packet = self . private_etr_rloc . unpack_address ( packet )
        if ( packet == None ) : return ( i1iiI11i1 )
        self . private_etr_rloc . mask_len = self . private_etr_rloc . host_mask_len ( )
    if 50 - 50: IiII . iII111i / o0oOOo0O0Ooo % OoOoOO00 * IiII % I11i
    if 15 - 15: Ii1I
    if 29 - 29: I11i / I1IiiI / OoooooooOO . OoOoOO00 / I11i . I1Ii111
    if 69 - 69: O0 * OoOoOO00 + o0oOOo0O0Ooo + I1IiiI % iII111i . OoooooooOO
    if 45 - 45: I1Ii111 + oO0o - o0oOOo0O0Ooo - OoOoOO00 + I1IiiI / II111iiii
    if 46 - 46: II111iiii . iIii1I11I1II1
    # Trailing (AFI, address) pairs form the RTR list; AFI 0 entries
    # are placeholders and are skipped.
    while ( len ( packet ) >= oO000 ) :
        Oooo000 = struct . unpack ( II111I11iI , packet [ : oO000 ] ) [ 0 ]
        packet = packet [ oO000 : : ]
        if ( Oooo000 == 0 ) : continue
        i11iiI = lisp_address ( socket . ntohs ( Oooo000 ) , "" , 0 , 0 )
        packet = i11iiI . unpack_address ( packet )
        if ( packet == None ) : return ( i1iiI11i1 )
        i11iiI . mask_len = i11iiI . host_mask_len ( )
        self . rtr_list . append ( i11iiI )
    if 62 - 62: I1ii11iIi11i % i1IIi % I1Ii111 * ooOoO0o % OOooOOo + I1IiiI
    return ( i1iiI11i1 )
if 100 - 100: II111iiii - o0oOOo0O0Ooo * OoooooooOO . ooOoO0o / II111iiii / oO0o
if 43 - 43: iIii1I11I1II1 + ooOoO0o * iII111i + iIii1I11I1II1 . I1Ii111
if 87 - 87: I1Ii111
class lisp_nat_info ( object ) :
    """One NAT binding learned from an Info-Request: the translated
    global address/port of an xTR plus its hostname.  NOTE(review):
    semantics inferred from attribute names and the timed_out() check —
    confirm against the callers."""

    def __init__ ( self , addr_str , hostname , port ) :
        # Translated address (string form), xTR hostname, and the
        # translated UDP port; stamp the creation time for aging.
        self . address = addr_str
        self . hostname = hostname
        self . port = port
        self . uptime = lisp_get_timestamp ( )

    def timed_out ( self ) :
        """Return True when this entry has not been refreshed for two
        Info-Request intervals."""
        idle_time = time . time ( ) - self . uptime
        return ( idle_time >= ( LISP_INFO_INTERVAL * 2 ) )
if 72 - 72: OoO0O00 . II111iiii - IiII + IiII + iIii1I11I1II1 % oO0o
if 21 - 21: iII111i + OoOoOO00 - i11iIiiIii % O0 + OOooOOo
if 30 - 30: o0oOOo0O0Ooo - Oo0Ooo + iII111i / O0
class lisp_info_source ( object ) :
    """The source of a received Info-Request: its IPv4 address, source
    port, hostname and the nonce it used.  Instances are cached in two
    module-level dictionaries so Info-Replies and NAT state can be
    matched back to the sender."""

    def __init__ ( self , hostname , addr_str , port ) :
        self . address = lisp_address ( LISP_AFI_IPV4 , addr_str , 32 , 0 )
        self . port = port
        self . uptime = lisp_get_timestamp ( )
        self . nonce = None
        self . hostname = hostname
        self . no_timeout = False

    def cache_address_for_info_source ( self ) :
        """Index this source by address+hostname in
        lisp_info_sources_by_address."""
        cache_key = self . address . print_address_no_iid ( ) + self . hostname
        lisp_info_sources_by_address [ cache_key ] = self

    def cache_nonce_for_info_source ( self , nonce ) :
        """Remember the request nonce and index this source by it in
        lisp_info_sources_by_nonce."""
        self . nonce = nonce
        lisp_info_sources_by_nonce [ nonce ] = self
if 8 - 8: iII111i % o0oOOo0O0Ooo
if 87 - 87: Ii1I % I11i / I1Ii111
if 21 - 21: OoO0O00 + Ii1I / I1Ii111
if 75 - 75: I1Ii111 . Ii1I % iIii1I11I1II1 / OoOoOO00
if 38 - 38: i1IIi
if 1 - 1: I1ii11iIi11i + OoO0O00 % I11i . OOooOOo + i1IIi / oO0o
if 35 - 35: ooOoO0o % OoOoOO00 % OoO0O00 + OOooOOo / IiII * OoOoOO00
if 65 - 65: I1IiiI . Oo0Ooo + i1IIi - Ii1I * i1IIi
if 64 - 64: I1IiiI / OoO0O00 * I1IiiI * II111iiii . Ii1I
if 98 - 98: I1Ii111 + o0oOOo0O0Ooo
if 73 - 73: I1ii11iIi11i / I1Ii111 + i11iIiiIii + OoO0O00 . ooOoO0o
def lisp_concat_auth_data ( alg_id , auth1 , auth2 , auth3 , auth4 ) :
    """Concatenate the unpacked authentication words into one zero-padded
    hex string.

    On little-endian (x86) hosts the 64-bit words are byte-swapped first
    (SHA-1-96 carries a 32-bit third word, hence ntohl for it).  SHA-1-96
    uses auth1..auth3 (16+16+8 hex digits); SHA-256-128 uses auth1..auth4
    (4 x 16 hex digits).

    Returns the concatenated hex string, or "" for an unrecognized
    alg_id.  (Bug fix: the original left the result variable unbound in
    that case and raised NameError.)
    """
    if ( lisp_is_x86 ( ) ) :
        if ( auth1 != "" ) : auth1 = byte_swap_64 ( auth1 )
        if ( auth2 != "" ) : auth2 = byte_swap_64 ( auth2 )
        if ( auth3 != "" ) :
            # SHA-1-96 packs only 32 bits into the third word.
            if ( alg_id == LISP_SHA_1_96_ALG_ID ) : auth3 = socket . ntohl ( auth3 )
            else : auth3 = byte_swap_64 ( auth3 )
        if ( auth4 != "" ) : auth4 = byte_swap_64 ( auth4 )

    auth_data = ""
    if ( alg_id == LISP_SHA_1_96_ALG_ID ) :
        auth1 = lisp_hex_string ( auth1 ) . zfill ( 16 )
        auth2 = lisp_hex_string ( auth2 ) . zfill ( 16 )
        auth3 = lisp_hex_string ( auth3 ) . zfill ( 8 )
        auth_data = auth1 + auth2 + auth3
    if ( alg_id == LISP_SHA_256_128_ALG_ID ) :
        auth1 = lisp_hex_string ( auth1 ) . zfill ( 16 )
        auth2 = lisp_hex_string ( auth2 ) . zfill ( 16 )
        auth3 = lisp_hex_string ( auth3 ) . zfill ( 16 )
        auth4 = lisp_hex_string ( auth4 ) . zfill ( 16 )
        auth_data = auth1 + auth2 + auth3 + auth4
    return ( auth_data )
if 33 - 33: iIii1I11I1II1
if 72 - 72: I1ii11iIi11i * i11iIiiIii
if 12 - 12: O0 - iIii1I11I1II1 % Oo0Ooo / O0 - IiII
if 55 - 55: OOooOOo . Oo0Ooo * OoOoOO00 / OoooooooOO * i11iIiiIii + oO0o
if 45 - 45: Ii1I
if 8 - 8: oO0o + OOooOOo
if 37 - 37: IiII - OoOoOO00 + oO0o - Oo0Ooo + IiII
if 33 - 33: Oo0Ooo % oO0o - I1IiiI + Oo0Ooo
if 90 - 90: I1ii11iIi11i * I1Ii111 - iIii1I11I1II1 % IiII * I1Ii111 . I1Ii111
if 90 - 90: o0oOOo0O0Ooo - O0 % O0 - oO0o . OoooooooOO
def lisp_open_listen_socket ( local_addr , port ) :
    """Open and bind a datagram receive socket.

    When *port* is a numeric string, open a UDP socket bound to
    (local_addr, port) — IPv4 if local_addr contains ".", IPv6 if it
    contains ":".  Otherwise *port* is a filesystem path and a named
    AF_UNIX datagram socket is created there (removing any stale one).

    Returns the bound socket, or None when no socket could be opened
    (IPv6 on Raspbian, or an address that is neither IPv4 nor IPv6 —
    the original code raised NameError in that last case).
    """
    if ( port . isdigit ( ) ) :
        listen_socket = None
        if ( local_addr . find ( "." ) != - 1 ) :
            listen_socket = socket . socket ( socket . AF_INET , socket . SOCK_DGRAM )
        if ( local_addr . find ( ":" ) != - 1 ) :
            if ( lisp_is_raspbian ( ) ) : return ( None )
            listen_socket = socket . socket ( socket . AF_INET6 , socket . SOCK_DGRAM )
        # Bug fix: avoid NameError when the address matched neither.
        if ( listen_socket == None ) : return ( None )
        listen_socket . bind ( ( local_addr , int ( port ) ) )
    else :
        socket_name = port
        if ( os . path . exists ( socket_name ) ) :
            # Was os.system("rm " + name): no shell, no injection risk.
            os . remove ( socket_name )
            time . sleep ( 1 )
        listen_socket = socket . socket ( socket . AF_UNIX , socket . SOCK_DGRAM )
        listen_socket . bind ( socket_name )
    return ( listen_socket )
if 18 - 18: I1Ii111 % iII111i + OoOoOO00 . I1ii11iIi11i / I11i
if 29 - 29: II111iiii - I1Ii111 . OoooooooOO / i11iIiiIii / I1ii11iIi11i
if 60 - 60: i1IIi % ooOoO0o / II111iiii * Oo0Ooo - i1IIi . Ii1I
if 63 - 63: OoO0O00 * OoooooooOO + iII111i / iIii1I11I1II1 . i11iIiiIii
if 17 - 17: OOooOOo
if 21 - 21: i1IIi
if 10 - 10: i11iIiiIii / ooOoO0o - o0oOOo0O0Ooo . o0oOOo0O0Ooo
def lisp_open_send_socket ( internal_name , afi ) :
    """Open a datagram send socket.

    internal_name == "": open an unbound UDP socket for the given
    address family (LISP_AFI_IPV4 or LISP_AFI_IPV6; IPv6 is unsupported
    on Raspbian and returns None).  Otherwise internal_name is a
    filesystem path: create a named AF_UNIX datagram socket bound there,
    removing any stale file first.

    Returns the socket, or None when it could not be opened (also for
    an unknown afi — the original code raised NameError in that case).
    """
    if ( internal_name == "" ) :
        send_socket = None
        if ( afi == LISP_AFI_IPV4 ) :
            send_socket = socket . socket ( socket . AF_INET , socket . SOCK_DGRAM )
        if ( afi == LISP_AFI_IPV6 ) :
            if ( lisp_is_raspbian ( ) ) : return ( None )
            send_socket = socket . socket ( socket . AF_INET6 , socket . SOCK_DGRAM )
    else :
        # Was os.system("rm " + name): no shell, no injection risk.
        if ( os . path . exists ( internal_name ) ) : os . remove ( internal_name )
        send_socket = socket . socket ( socket . AF_UNIX , socket . SOCK_DGRAM )
        send_socket . bind ( internal_name )
    return ( send_socket )
if 2 - 2: I1Ii111 % iIii1I11I1II1 . Ii1I - II111iiii
if 33 - 33: I11i . i11iIiiIii % i1IIi * II111iiii * i11iIiiIii + OoOoOO00
if 26 - 26: I1IiiI % OoOoOO00 % I11i + Oo0Ooo
if 86 - 86: iII111i / i1IIi % Oo0Ooo
if 84 - 84: o0oOOo0O0Ooo * OOooOOo . I11i * Ii1I
if 32 - 32: ooOoO0o % ooOoO0o * I1ii11iIi11i % Ii1I + Oo0Ooo . OoOoOO00
if 2 - 2: I1Ii111 / ooOoO0o * oO0o + IiII
def lisp_close_socket ( sock , internal_name ) :
    """Close *sock*; when it is a named AF_UNIX internal socket, also
    remove its filesystem entry so a later bind() can succeed."""
    sock . close ( )
    # Was os.system("rm " + name): no shell spawn, no injection risk.
    if ( os . path . exists ( internal_name ) ) : os . remove ( internal_name )
    return
if 14 - 14: OoOoOO00 / iIii1I11I1II1 . o0oOOo0O0Ooo % i11iIiiIii . OoOoOO00
if 92 - 92: OoO0O00 . i1IIi
if 22 - 22: Ii1I . I1IiiI
if 54 - 54: OOooOOo / I1ii11iIi11i % oO0o
if 66 - 66: I11i + iII111i
if 50 - 50: IiII
if 33 - 33: OOooOOo % I1IiiI - I1IiiI / IiII
if 22 - 22: ooOoO0o * ooOoO0o % o0oOOo0O0Ooo * Ii1I . OoO0O00
def lisp_is_running ( node ) :
    """Return True when the named LISP component is running, detected by
    the existence of its AF_UNIX IPC socket path *node*."""
    # Idiom fix: os.path.exists() already returns a bool; the original
    # wrapped it in a redundant "True if ... else False".
    return ( os . path . exists ( node ) )
if 55 - 55: OoOoOO00 - I1ii11iIi11i + iIii1I11I1II1 - i11iIiiIii / i1IIi / II111iiii
if 37 - 37: Ii1I + o0oOOo0O0Ooo
if 74 - 74: Oo0Ooo / O0 + i1IIi . I1IiiI + OoO0O00 / Oo0Ooo
if 13 - 13: o0oOOo0O0Ooo / Ii1I . II111iiii
if 8 - 8: I11i - I11i % IiII
if 8 - 8: I1IiiI . IiII * O0 * o0oOOo0O0Ooo
if 17 - 17: I1IiiI . oO0o + Oo0Ooo + I11i / o0oOOo0O0Ooo
if 25 - 25: iII111i / iII111i % OoOoOO00 / ooOoO0o
if 81 - 81: OOooOOo * oO0o
if 32 - 32: Oo0Ooo * OoO0O00 + ooOoO0o . O0 * oO0o * iIii1I11I1II1
if 50 - 50: i1IIi
def lisp_packet_ipc ( packet , source , sport ) :
    """Build a "packet" IPC message: the header
    'packet@<len>@<source>@<sport>@' followed by the raw packet bytes."""
    header = "packet@{}@{}@{}@" . format ( len ( packet ) , source , sport )
    return ( header . encode ( ) + packet )
if 53 - 53: II111iiii + O0 . ooOoO0o * IiII + i1IIi
if 80 - 80: Ii1I + O0
if 59 - 59: i11iIiiIii - OoooooooOO % I11i . OoO0O00 - Oo0Ooo * o0oOOo0O0Ooo
if 7 - 7: II111iiii % Ii1I * i11iIiiIii
if 28 - 28: II111iiii / ooOoO0o * i11iIiiIii % OOooOOo
if 18 - 18: I11i - IiII - iIii1I11I1II1
if 82 - 82: II111iiii + OoO0O00 % iIii1I11I1II1 / O0
if 75 - 75: OOooOOo * OoO0O00 + OoooooooOO + i11iIiiIii . OoO0O00
if 94 - 94: I11i * ooOoO0o . I1IiiI / Ii1I - I1IiiI % OoooooooOO
if 32 - 32: OoO0O00
def lisp_control_packet_ipc ( packet , source , dest , dport ) :
    """Build a "control-packet" IPC message:
    'control-packet@<dest>@<dport>@' + raw packet bytes.  *source* is
    unused but kept so all the IPC builders share a uniform signature."""
    header = "control-packet@{}@{}@" . format ( dest , dport )
    return ( header . encode ( ) + packet )
if 22 - 22: II111iiii . I11i
if 61 - 61: OOooOOo % O0 . I1ii11iIi11i . iIii1I11I1II1 * I11i
if 29 - 29: ooOoO0o + i1IIi % IiII * Ii1I
if 94 - 94: OOooOOo / IiII
if 18 - 18: IiII - I11i / Ii1I % IiII * i1IIi
if 22 - 22: OoOoOO00 - Oo0Ooo
if 41 - 41: iIii1I11I1II1 * I1Ii111 / OoO0O00
if 33 - 33: I11i + O0
if 9 - 9: I11i . iII111i * ooOoO0o * ooOoO0o
def lisp_data_packet_ipc ( packet , source ) :
    """Build a "data-packet" IPC message:
    'data-packet@<len>@<source>@@' + raw packet bytes (empty port
    field)."""
    header = "data-packet@{}@{}@@" . format ( len ( packet ) , source )
    return ( header . encode ( ) + packet )
if 68 - 68: O0 - i11iIiiIii % iIii1I11I1II1 % ooOoO0o
if 12 - 12: II111iiii + I11i
if 9 - 9: I1ii11iIi11i
if 51 - 51: I1ii11iIi11i
if 37 - 37: I1IiiI % I1Ii111
if 22 - 22: o0oOOo0O0Ooo % OOooOOo - I11i + ooOoO0o / OOooOOo
if 98 - 98: I11i * O0 + IiII - oO0o
if 35 - 35: OoooooooOO * Ii1I
if 73 - 73: ooOoO0o . OoO0O00 % I1ii11iIi11i - oO0o
if 67 - 67: o0oOOo0O0Ooo . I11i + i1IIi
if 100 - 100: Oo0Ooo - I1IiiI . OOooOOo % iIii1I11I1II1 . I11i
def lisp_command_ipc ( ipc , source ) :
    """Build a "command" IPC message from the command string *ipc*:
    'command@<len>@<source>@@<ipc>', encoded to bytes."""
    message = "command@{}@{}@@{}" . format ( len ( ipc ) , source , ipc )
    return ( message . encode ( ) )
if 83 - 83: OoOoOO00 * iII111i
if 75 - 75: i11iIiiIii . o0oOOo0O0Ooo / oO0o . OoO0O00 % Ii1I % Ii1I
if 94 - 94: iII111i . Ii1I
if 71 - 71: o0oOOo0O0Ooo * II111iiii / OOooOOo . OoO0O00
if 73 - 73: I1Ii111 * OoO0O00 / OoOoOO00 . II111iiii
if 87 - 87: OoO0O00 + Oo0Ooo + O0 % OoooooooOO - iIii1I11I1II1
if 100 - 100: Oo0Ooo + IiII
if 81 - 81: iIii1I11I1II1 + iIii1I11I1II1
if 19 - 19: ooOoO0o + i1IIi / Oo0Ooo * II111iiii * I1Ii111 / ooOoO0o
if 23 - 23: I1Ii111
if 76 - 76: Ii1I + Ii1I / i1IIi % o0oOOo0O0Ooo . iIii1I11I1II1 . OoOoOO00
def lisp_api_ipc ( source , data ) :
    """Build an "api" IPC message from the payload string *data*:
    'api@<len>@<source>@@<data>', encoded to bytes."""
    message = "api@{}@{}@@{}" . format ( len ( data ) , source , data )
    return ( message . encode ( ) )
if 75 - 75: I11i . Ii1I / I1ii11iIi11i
if 99 - 99: Ii1I
if 85 - 85: I1Ii111 + I1Ii111 + OoOoOO00 / ooOoO0o / o0oOOo0O0Ooo . Oo0Ooo
if 41 - 41: i1IIi % Ii1I . i1IIi * OoooooooOO % Ii1I
if 21 - 21: iII111i
if 72 - 72: I11i % o0oOOo0O0Ooo . iIii1I11I1II1 - I1Ii111 / i11iIiiIii
if 75 - 75: OoooooooOO
if 24 - 24: oO0o % iII111i - II111iiii / Ii1I + O0
if 37 - 37: I1Ii111 - i1IIi / iIii1I11I1II1
if 53 - 53: Ii1I - iIii1I11I1II1 % I1ii11iIi11i * i11iIiiIii + ooOoO0o
if 63 - 63: Oo0Ooo * I1IiiI
if 84 - 84: Oo0Ooo
def lisp_ipc ( packet , send_socket , node ) :
    """Send an IPC message to another lispers.net process over an
    AF_UNIX datagram socket, segmenting and retrying as needed.

    *packet* is an already-formatted IPC message (see the lisp_*_ipc
    builders); *node* is the destination socket path.  "control-packet"
    messages go out in 9000-byte segments, everything else in 1500-byte
    segments.  On a socket error the current segment is retried with
    exponential backoff starting at 1 ms; after 12 consecutive failures
    the destination is considered down and the rest is dropped.
    """
    if 67 - 67: oO0o / II111iiii . I11i / oO0o
    if 46 - 46: oO0o * Oo0Ooo - I11i / iIii1I11I1II1
    if 100 - 100: i11iIiiIii % oO0o
    if 62 - 62: OOooOOo * i1IIi - OOooOOo / i11iIiiIii
    # Don't bother when the receiver's socket path doesn't exist.
    if ( lisp_is_running ( node ) == False ) :
        lprint ( "Suppress sending IPC to {}" . format ( node ) )
        return
    if 17 - 17: I1ii11iIi11i + ooOoO0o % Ii1I % OOooOOo
    if 73 - 73: i11iIiiIii
    # Segment size: larger for "control-packet" IPC messages.
    III1II1II1 = 1500 if ( packet . find ( b"control-packet" ) == - 1 ) else 9000
    if 83 - 83: i1IIi - Oo0Ooo - IiII - i11iIiiIii
    oo00 = 0
    i1 = len ( packet )
    oOoii1iI1iII1i = 0
    I1III = .001
    while ( i1 > 0 ) :
        iIiii1Ii1i1i1I = min ( i1 , III1II1II1 )
        Ii1iIii1II1 = packet [ oo00 : iIiii1Ii1i1i1I + oo00 ]
        if 59 - 59: i11iIiiIii
        try :
            if ( type ( Ii1iIii1II1 ) == str ) : Ii1iIii1II1 = Ii1iIii1II1 . encode ( )
            send_socket . sendto ( Ii1iIii1II1 , node )
            lprint ( "Send IPC {}-out-of-{} byte to {} succeeded" . format ( len ( Ii1iIii1II1 ) , len ( packet ) , node ) )
            if 55 - 55: I11i % i1IIi % IiII
            # Successful send: reset the retry counter and backoff.
            oOoii1iI1iII1i = 0
            I1III = .001
            if 16 - 16: OoO0O00 * Ii1I
        except socket . error as oO0ooOOO :
            if ( oOoii1iI1iII1i == 12 ) :
                lprint ( "Giving up on {}, consider it down" . format ( node ) )
                break
            if 89 - 89: OoOoOO00 / Oo0Ooo + O0 * ooOoO0o
            if 80 - 80: i11iIiiIii - O0 / I1Ii111 + OOooOOo % Oo0Ooo
            lprint ( "Send IPC {}-out-of-{} byte to {} failed: {}" . format ( len ( Ii1iIii1II1 ) , len ( packet ) , node , oO0ooOOO ) )
            if 95 - 95: II111iiii
            if 76 - 76: OoO0O00 % iII111i * OoOoOO00 / ooOoO0o / i1IIi
            # Exponential backoff, then retry the same segment.
            oOoii1iI1iII1i += 1
            time . sleep ( I1III )
            if 45 - 45: Ii1I . I11i * I1Ii111 . i11iIiiIii
            lprint ( "Retrying after {} ms ..." . format ( I1III * 1000 ) )
            I1III *= 2
            continue
        if 34 - 34: O0 * o0oOOo0O0Ooo / IiII
        if 75 - 75: I1Ii111 - i1IIi - OoO0O00
        # Advance to the next segment only after a successful send.
        oo00 += iIiii1Ii1i1i1I
        i1 -= iIiii1Ii1i1i1I
    if 25 - 25: iII111i . o0oOOo0O0Ooo
    return
if 62 - 62: I11i + i1IIi . I1ii11iIi11i - I1ii11iIi11i
if 68 - 68: ooOoO0o % OoooooooOO
if 94 - 94: Oo0Ooo * o0oOOo0O0Ooo
if 60 - 60: iII111i . OOooOOo
if 39 - 39: O0 - i11iIiiIii - I1IiiI / Oo0Ooo - i11iIiiIii
if 30 - 30: OoO0O00 / OoOoOO00 + I1ii11iIi11i % IiII - OoO0O00
if 19 - 19: I1IiiI
if 99 - 99: OOooOOo - OOooOOo
def lisp_format_packet ( packet ) :
    """Hexlify *packet* into space-separated groups of 8 hex digits for
    logging.

    Note: the historical loop bound (2x the hex length, shrinking by 4
    per group) overshoots, so short packets get extra trailing spaces;
    that output is preserved exactly for log compatibility.
    """
    hex_bytes = binascii . hexlify ( packet )
    groups = [ ]
    pos , bound = 0 , len ( hex_bytes ) * 2
    while ( pos < bound ) :
        groups . append ( hex_bytes [ pos : pos + 8 ] )
        pos += 8
        bound -= 4
    if ( not groups ) : return ( "" )
    return ( ( b" " . join ( groups ) + b" " ) . decode ( ) )
if 83 - 83: o0oOOo0O0Ooo
if 23 - 23: o0oOOo0O0Ooo . I11i
if 67 - 67: iII111i
if 52 - 52: IiII . OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / IiII . OoooooooOO . Oo0Ooo / ooOoO0o + O0
if 38 - 38: I11i
if 66 - 66: II111iiii
def lisp_send ( lisp_sockets , dest , port , packet ) :
    """Transmit a LISP control packet to (*dest*, *port*).

    Picks the IPv4 socket (lisp_sockets[0]) or IPv6 socket
    (lisp_sockets[1]) based on the destination address family.  An
    IPv4-mapped IPv6 destination ("::ffff:a.b.c.d") is sent on the IPv4
    socket when running as an RTR, or — when no IPv6 socket exists —
    stripped down to its embedded IPv4 address.  Send errors are logged,
    not raised.
    """
    sock = lisp_sockets [ 0 ] if dest . is_ipv4 ( ) else lisp_sockets [ 1 ]

    addr_str = dest . print_address_no_iid ( )
    if ( addr_str . find ( "::ffff:" ) != - 1 and addr_str . count ( "." ) == 3 ) :
        if ( lisp_i_am_rtr ) : sock = lisp_sockets [ 0 ]
        if ( sock == None ) :
            # No IPv6 socket: fall back to IPv4 with the embedded address.
            sock = lisp_sockets [ 0 ]
            addr_str = addr_str . split ( "::ffff:" ) [ - 1 ]

    lprint ( "{} {} bytes {} {}, packet: {}" . format ( bold ( "Send" , False ) ,
        len ( packet ) , bold ( "to " + addr_str , False ) , port ,
        lisp_format_packet ( packet ) ) )

    try :
        sock . sendto ( packet , ( addr_str , port ) )
    except socket . error as sock_error :
        lprint ( "socket.sendto() failed: {}" . format ( sock_error ) )
    return
if 86 - 86: ooOoO0o . OoO0O00
if 47 - 47: IiII % I1IiiI
if 91 - 91: Ii1I
if 69 - 69: iII111i
if 96 - 96: Ii1I
if 39 - 39: OoO0O00 - I1IiiI % II111iiii - IiII * I1ii11iIi11i
if 64 - 64: OOooOOo + Oo0Ooo . OoOoOO00 . OOooOOo + i11iIiiIii
if 7 - 7: ooOoO0o * I11i / iIii1I11I1II1
def lisp_receive_segments ( lisp_socket , packet , source , total_length ) :
    """Reassemble a segmented IPC message.

    *packet* holds the bytes received so far and *total_length* the size
    announced in the IPC header; keep reading from *lisp_socket* until
    the message is complete.  Returns [True, packet] on success,
    [False, None] on a socket error, and [False, segment] when a new
    "packet@" header shows up mid-stream (the old message is discarded
    and the new segment handed back to the caller).
    """
    if 15 - 15: OoooooooOO / iII111i
    if 40 - 40: o0oOOo0O0Ooo
    if 75 - 75: oO0o - OoOoOO00 * ooOoO0o . O0
    if 78 - 78: Oo0Ooo
    if 74 - 74: O0 / I11i
    # Nothing more to read when the first segment was the whole message.
    iIiii1Ii1i1i1I = total_length - len ( packet )
    if ( iIiii1Ii1i1i1I == 0 ) : return ( [ True , packet ] )
    if 52 - 52: I1IiiI + oO0o * II111iiii
    lprint ( "Received {}-out-of-{} byte segment from {}" . format ( len ( packet ) ,
        total_length , source ) )
    if 15 - 15: I11i
    if 72 - 72: O0
    if 15 - 15: II111iiii / I11i % II111iiii % Ii1I % i11iIiiIii / I1Ii111
    if 93 - 93: OOooOOo / OoooooooOO % iII111i
    if 47 - 47: o0oOOo0O0Ooo - I1IiiI % O0 % I1Ii111 . O0 . OoOoOO00
    # Read segments until the announced byte count has arrived.
    i1 = iIiii1Ii1i1i1I
    while ( i1 > 0 ) :
        try : Ii1iIii1II1 = lisp_socket . recvfrom ( 9000 )
        except : return ( [ False , None ] )
        if 95 - 95: o0oOOo0O0Ooo * OOooOOo - iII111i * OoooooooOO - ooOoO0o / I1IiiI
        Ii1iIii1II1 = Ii1iIii1II1 [ 0 ]
        if 47 - 47: OoO0O00 % I1IiiI / OoOoOO00 - I1Ii111 / I1IiiI
        if 13 - 13: o0oOOo0O0Ooo % ooOoO0o
        if 15 - 15: iII111i * I1IiiI . iIii1I11I1II1 % I1IiiI / O0
        if 47 - 47: OoooooooOO - i11iIiiIii . I1IiiI / i1IIi
        if 74 - 74: OoooooooOO * ooOoO0o
        # A fresh "packet@..." header in mid-reassembly means the sender
        # started a new message; abandon the current one.
        iIiiII = Ii1iIii1II1 . decode ( )
        if ( iIiiII . find ( "packet@" ) == 0 ) :
            iIiiII = iIiiII . split ( "@" )
            lprint ( "Received new message ({}-out-of-{}) while receiving " + "fragments, old message discarded" , len ( Ii1iIii1II1 ) ,
                # o0oOOo0O0Ooo % O0
                iIiiII [ 1 ] if len ( iIiiII ) > 2 else "?" )
            return ( [ False , Ii1iIii1II1 ] )
        if 67 - 67: OoOoOO00
        if 21 - 21: I11i % Oo0Ooo + Oo0Ooo / iIii1I11I1II1 % iIii1I11I1II1
        i1 -= len ( Ii1iIii1II1 )
        packet += Ii1iIii1II1
        if 66 - 66: iII111i
        lprint ( "Received {}-out-of-{} byte segment from {}" . format ( len ( Ii1iIii1II1 ) , total_length , source ) )
    if 72 - 72: ooOoO0o / oO0o / iII111i . I1Ii111 . I1ii11iIi11i + IiII
    if 39 - 39: I1IiiI % I1Ii111
    return ( [ True , packet ] )
if 22 - 22: OoOoOO00 - OOooOOo % i1IIi + i1IIi
if 28 - 28: oO0o + OoOoOO00 * Ii1I . I11i
if 80 - 80: I1ii11iIi11i / OoOoOO00
if 74 - 74: I1ii11iIi11i + O0 + o0oOOo0O0Ooo - iII111i
if 48 - 48: ooOoO0o * iIii1I11I1II1 % Oo0Ooo
if 60 - 60: OoOoOO00 / i1IIi * iIii1I11I1II1
if 91 - 91: I1Ii111 . OoooooooOO / IiII / I1IiiI
if 56 - 56: II111iiii + iIii1I11I1II1 / I1Ii111 / I1Ii111 % Oo0Ooo / OoOoOO00
if 46 - 46: i11iIiiIii + OoO0O00 . ooOoO0o + OoO0O00 % i11iIiiIii
def lisp_bit_stuff ( payload ) :
    """Rejoin IPC payload pieces that were split on the 0x40 ('@')
    delimiter byte, putting the delimiter back between segments."""
    lprint ( "Bit-stuffing, found {} segments" . format ( len ( payload ) ) )
    # Equivalent to appending b"\x40" after every segment and stripping
    # the final one.
    return ( b"\x40" . join ( payload ) )
if 97 - 97: OoooooooOO % IiII * iIii1I11I1II1
if 97 - 97: iIii1I11I1II1 - I1Ii111 - o0oOOo0O0Ooo * o0oOOo0O0Ooo * OoOoOO00
if 80 - 80: II111iiii . I1ii11iIi11i % i11iIiiIii / Ii1I / oO0o
if 100 - 100: Ii1I . OoO0O00 * ooOoO0o
if 4 - 4: i1IIi + OoooooooOO
if 26 - 26: I1IiiI / II111iiii % I1ii11iIi11i * o0oOOo0O0Ooo . IiII / OoO0O00
if 10 - 10: i11iIiiIii / i1IIi + O0 - i11iIiiIii % I11i - i1IIi
if 38 - 38: O0 - I1IiiI + Oo0Ooo + ooOoO0o
if 56 - 56: I1Ii111 + oO0o / Ii1I + I1Ii111
if 21 - 21: OOooOOo / OoOoOO00 + OoOoOO00 + OoOoOO00 - i1IIi + Ii1I
if 43 - 43: O0 % II111iiii
if 60 - 60: iII111i / ooOoO0o - Ii1I - OoooooooOO
if 79 - 79: oO0o / iII111i . iIii1I11I1II1 * i11iIiiIii * i1IIi . iIii1I11I1II1
if 31 - 31: OoooooooOO / ooOoO0o / OoooooooOO + ooOoO0o . O0 - IiII
if 53 - 53: Oo0Ooo % iII111i % iII111i
if 71 - 71: iII111i
if 99 - 99: O0 - OoOoOO00 * I1Ii111 - Oo0Ooo
if 62 - 62: i1IIi + ooOoO0o + Oo0Ooo - i11iIiiIii
if 19 - 19: I1IiiI / OOooOOo
if 6 - 6: I1ii11iIi11i + IiII * oO0o * OoOoOO00
def lisp_receive ( lisp_socket , internal ) :
    """Block until one complete message arrives on *lisp_socket*.

    internal == False: the socket is a network socket; return
    ["packet", source-address, source-port, payload].

    internal == True: the socket is an AF_UNIX IPC socket carrying
    "<opcode>@<length>@<source>@<port>@<payload>" messages.  Segmented
    messages are reassembled via lisp_receive_segments(), and payloads
    that themselves contained "@" bytes are rejoined with
    lisp_bit_stuff().  Returns [opcode, source, port, payload], or
    ["", "", "", ""] when the socket fails.
    """
    while ( True ) :
        if 67 - 67: I1Ii111 + OoooooooOO + OoOoOO00 % iIii1I11I1II1 . I1IiiI
        if 68 - 68: ooOoO0o
        if 68 - 68: I11i % IiII
        if 1 - 1: I1IiiI + OOooOOo - OOooOOo * O0 + o0oOOo0O0Ooo * OOooOOo
        try : I1I11I11 = lisp_socket . recvfrom ( 9000 )
        except : return ( [ "" , "" , "" , "" ] )
        if 92 - 92: II111iiii + OoooooooOO + OoOoOO00 / OOooOOo * Ii1I * Oo0Ooo
        if 40 - 40: I1IiiI / I11i + II111iiii + II111iiii - O0 + Oo0Ooo
        if 63 - 63: OoO0O00 / I1IiiI / oO0o . Ii1I / i1IIi
        if 50 - 50: I11i . I11i % I1IiiI - i1IIi
        if 63 - 63: OoO0O00 . iII111i
        if 28 - 28: ooOoO0o . Oo0Ooo - OoooooooOO - I1Ii111 - OoooooooOO - oO0o
        # Network socket case: hand the datagram straight back.
        if ( internal == False ) :
            Oo00oo = I1I11I11 [ 0 ]
            I1 = lisp_convert_6to4 ( I1I11I11 [ 1 ] [ 0 ] )
            I1I = I1I11I11 [ 1 ] [ 1 ]
            if 25 - 25: I11i / I1Ii111 . i11iIiiIii % i1IIi
            # Data-plane packets are logged truncated, and only when
            # data-plane logging is enabled; everything else in full.
            if ( I1I == LISP_DATA_PORT ) :
                Iii1i111ii1i = lisp_data_plane_logging
                iIIiiIiI = lisp_format_packet ( Oo00oo [ 0 : 60 ] ) + " ..."
            else :
                Iii1i111ii1i = True
                iIIiiIiI = lisp_format_packet ( Oo00oo )
            if 70 - 70: Oo0Ooo * i11iIiiIii + IiII / OoOoOO00 . I1ii11iIi11i % OoOoOO00
            if 12 - 12: I11i % II111iiii % O0 % O0
            if ( Iii1i111ii1i ) :
                lprint ( "{} {} bytes {} {}, packet: {}" . format ( bold ( "Receive" ,
                    False ) , len ( Oo00oo ) , bold ( "from " + I1 , False ) , I1I ,
                    iIIiiIiI ) )
            if 18 - 18: iII111i . IiII . I1IiiI
            return ( [ "packet" , I1 , I1I , Oo00oo ] )
        if 40 - 40: IiII / oO0o + OoooooooOO / iII111i / II111iiii + i1IIi
        if 33 - 33: I11i + I1ii11iIi11i + i11iIiiIii * I1IiiI % oO0o % OoooooooOO
        if 4 - 4: OoO0O00 . I1IiiI - O0 % iII111i . OOooOOo
        if 69 - 69: OoooooooOO
        if 19 - 19: O0 + iIii1I11I1II1 / OoOoOO00 / oO0o + II111iiii - OOooOOo
        if 70 - 70: i1IIi * o0oOOo0O0Ooo + I1Ii111 . ooOoO0o - O0 + i11iIiiIii
        # Internal IPC case: parse "opcode@length@source@port@payload".
        oooOooOoO = False
        i11 = I1I11I11 [ 0 ]
        if ( type ( i11 ) == str ) : i11 = i11 . encode ( )
        oOooo0Oo0o = False
        if 29 - 29: IiII * OoOoOO00 - oO0o - IiII / I1ii11iIi11i
        while ( oooOooOoO == False ) :
            i11 = i11 . split ( b"@" )
            if 82 - 82: Oo0Ooo - ooOoO0o
            # Need at least opcode, length, source and port fields.
            if ( len ( i11 ) < 4 ) :
                lprint ( "Possible fragment (length {}), from old message, " + "discarding" , len ( i11 [ 0 ] ) )
                if 25 - 25: I11i + oO0o / I1Ii111 % IiII * OOooOOo - I1Ii111
                oOooo0Oo0o = True
                break
            if 100 - 100: ooOoO0o . i11iIiiIii * Oo0Ooo - i11iIiiIii
            if 72 - 72: oO0o + I11i . OoooooooOO
            OOOOOo00oo0O = i11 [ 0 ] . decode ( )
            try :
                iIIi = int ( i11 [ 1 ] )
            except :
                I11iii1IIi1i = bold ( "Internal packet reassembly error" , False )
                lprint ( "{}: {}" . format ( I11iii1IIi1i , I1I11I11 ) )
                oOooo0Oo0o = True
                break
            if 3 - 3: O0
            I1 = i11 [ 2 ] . decode ( )
            I1I = i11 [ 3 ] . decode ( )
            if 95 - 95: i11iIiiIii
            if 100 - 100: iIii1I11I1II1 * I1IiiI * Ii1I * i1IIi . I1Ii111 * I1IiiI
            if 54 - 54: o0oOOo0O0Ooo / iII111i + IiII - o0oOOo0O0Ooo - I11i
            if 28 - 28: I1IiiI - iIii1I11I1II1 - o0oOOo0O0Ooo * IiII + OoooooooOO
            if 52 - 52: I1Ii111
            if 86 - 86: O0 * IiII + OoOoOO00 + OoO0O00
            if 53 - 53: I1IiiI % i11iIiiIii + o0oOOo0O0Ooo . I1ii11iIi11i
            if 73 - 73: iII111i - o0oOOo0O0Ooo / OOooOOo + iII111i + o0oOOo0O0Ooo % II111iiii
            # More than 5 fields means the payload itself contained "@"
            # bytes; rejoin them.
            if ( len ( i11 ) > 5 ) :
                Oo00oo = lisp_bit_stuff ( i11 [ 4 : : ] )
            else :
                Oo00oo = i11 [ 4 ]
            if 74 - 74: I11i * iIii1I11I1II1 - OoO0O00 / i1IIi / OoO0O00 / IiII
            if 60 - 60: oO0o % I1Ii111 % Oo0Ooo
            if 34 - 34: o0oOOo0O0Ooo * OOooOOo % Ii1I + I1IiiI
            if 77 - 77: OoOoOO00 + IiII + Oo0Ooo
            if 88 - 88: i1IIi
            if 45 - 45: iII111i % I1ii11iIi11i / i11iIiiIii - II111iiii . Oo0Ooo / ooOoO0o
            # Pull in the rest of a segmented message, if any.
            oooOooOoO , Oo00oo = lisp_receive_segments ( lisp_socket , Oo00oo ,
                I1 , iIIi )
            if ( Oo00oo == None ) : return ( [ "" , "" , "" , "" ] )
            if 55 - 55: OoO0O00 % IiII
            if 93 - 93: OoO0O00 . I1ii11iIi11i / OOooOOo % OoooooooOO + i1IIi + I1Ii111
            if 94 - 94: II111iiii + i11iIiiIii % Ii1I / ooOoO0o * OoOoOO00
            if 68 - 68: O0 / Oo0Ooo / iIii1I11I1II1
            if 63 - 63: I1Ii111 + iII111i
            # Reassembly was interrupted by a new message: parse it.
            if ( oooOooOoO == False ) :
                i11 = Oo00oo
                continue
            if 6 - 6: I1ii11iIi11i + Ii1I
            if 36 - 36: iII111i + iII111i * OoO0O00 * I1ii11iIi11i
            if ( I1I == "" ) : I1I = "no-port"
            # For log readability, show only the command name of a
            # "command" IPC (strip the "{...}" argument payload).
            if ( OOOOOo00oo0O == "command" and lisp_i_am_core == False ) :
                OOOooo0OooOoO = Oo00oo . find ( b" {" )
                o0O00O = Oo00oo if OOOooo0OooOoO == - 1 else Oo00oo [ : OOOooo0OooOoO ]
                o0O00O = ": '" + o0O00O . decode ( ) + "'"
            else :
                o0O00O = ""
            if 51 - 51: Oo0Ooo % ooOoO0o . O0 % o0oOOo0O0Ooo
            if 76 - 76: i1IIi
            lprint ( "{} {} bytes {} {}, {}{}" . format ( bold ( "Receive" , False ) ,
                len ( Oo00oo ) , bold ( "from " + I1 , False ) , I1I , OOOOOo00oo0O ,
                o0O00O if ( OOOOOo00oo0O in [ "command" , "api" ] ) else ": ... " if ( OOOOOo00oo0O == "data-packet" ) else ": " + lisp_format_packet ( Oo00oo ) ) )
        if 48 - 48: OoOoOO00 / OoooooooOO / oO0o
        if 58 - 58: I1ii11iIi11i / Ii1I * ooOoO0o - IiII
        if 67 - 67: ooOoO0o - ooOoO0o * o0oOOo0O0Ooo
        if 65 - 65: O0
        if 37 - 37: I1ii11iIi11i - Oo0Ooo . i11iIiiIii / i11iIiiIii + oO0o
        # Malformed message: go back to waiting for the next datagram.
        if ( oOooo0Oo0o ) : continue
        return ( [ OOOOOo00oo0O , I1 , I1I , Oo00oo ] )
if 19 - 19: i1IIi / i1IIi - OoooooooOO - OOooOOo . i1IIi
if 57 - 57: OOooOOo / I1ii11iIi11i * oO0o
if 53 - 53: o0oOOo0O0Ooo * Ii1I
if 42 - 42: I11i + iII111i / iIii1I11I1II1
if 1 - 1: O0 - II111iiii
if 75 - 75: II111iiii / OoO0O00 % II111iiii
if 3 - 3: Ii1I - Ii1I % I1ii11iIi11i
if 44 - 44: OOooOOo - o0oOOo0O0Ooo
def lisp_parse_packet ( lisp_sockets , packet , source , udp_sport , ttl = - 1 ) :
    """Dispatch one received LISP control message by its header type.

    Decodes the control header and hands *packet* to the handler for
    its type (Map-Request/-Reply/-Register/-Notify/-Notify-Ack/
    -Referral, Info-Request/-Reply, ECM).  Returns the third value from
    lisp_process_info_reply() when an Info-Reply was processed, else
    False.
    """
    info_reply_flag = False
    receive_time = time . time ( )

    header = lisp_control_header ( )
    if ( header . decode ( packet ) == None ) :
        lprint ( "Could not decode control header" )
        return ( info_reply_flag )

    # Keep the caller-supplied string; when it is not an internal
    # "lisp-*" process name, convert it to a lisp_address.
    source_name = source
    if ( source . find ( "lisp" ) == - 1 ) :
        address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        address . string_to_afi ( source )
        address . store_address ( source )
        source = address

    packet_type = header . type
    if ( packet_type == LISP_MAP_REQUEST ) :
        lisp_process_map_request ( lisp_sockets , packet , None , 0 , source ,
            udp_sport , False , ttl , receive_time )
    elif ( packet_type == LISP_MAP_REPLY ) :
        lisp_process_map_reply ( lisp_sockets , packet , source , ttl ,
            receive_time )
    elif ( packet_type == LISP_MAP_REGISTER ) :
        lisp_process_map_register ( lisp_sockets , packet , source , udp_sport )
    elif ( packet_type == LISP_MAP_NOTIFY ) :
        # Multicast Map-Notify goes to the ETR/RTR path, unicast
        # Map-Notify to the ITR path.
        if ( source_name == "lisp-etr" or lisp_is_running ( "lisp-rtr" ) ) :
            lisp_process_multicast_map_notify ( packet , source )
        elif ( lisp_is_running ( "lisp-itr" ) ) :
            lisp_process_unicast_map_notify ( lisp_sockets , packet , source )
    elif ( packet_type == LISP_MAP_NOTIFY_ACK ) :
        lisp_process_map_notify_ack ( packet , source )
    elif ( packet_type == LISP_MAP_REFERRAL ) :
        lisp_process_map_referral ( lisp_sockets , packet , source )
    elif ( packet_type == LISP_NAT_INFO and header . is_info_reply ( ) ) :
        info_reply_flag = lisp_process_info_reply ( source , packet , True ) [ 2 ]
    elif ( packet_type == LISP_NAT_INFO and header . is_info_reply ( ) == False ) :
        info_source = source . print_address_no_iid ( )
        lisp_process_info_request ( lisp_sockets , packet , info_source ,
            udp_sport , None )
    elif ( packet_type == LISP_ECM ) :
        lisp_process_ecm ( lisp_sockets , packet , source , udp_sport )
    else :
        lprint ( "Invalid LISP control packet type {}" . format ( packet_type ) )

    return ( info_reply_flag )
if 32 - 32: OoOoOO00 - I11i * i11iIiiIii . I1ii11iIi11i . IiII . iIii1I11I1II1
if 41 - 41: iII111i / OoOoOO00 / OoO0O00 / ooOoO0o
if 16 - 16: iIii1I11I1II1 . II111iiii
if 80 - 80: Oo0Ooo + IiII
if 18 - 18: OoO0O00 . Oo0Ooo
if 52 - 52: OoOoOO00 . iIii1I11I1II1 / OoOoOO00
if 14 - 14: i1IIi
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
    ttl, timestamp):
    """
    Dispatch a received RLOC-probe Map-Request to the proper handler.

    An ETR and an RTR each answer the probe with an RLOC-probe Map-Reply;
    a node that is neither logs and ignores the request.
    """
    probe = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(probe))
        lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
            ttl, timestamp)
        return

    if (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(probe))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
            ttl, timestamp)
        return

    lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(probe))
    return
if 53 - 53: II111iiii
if 40 - 40: Ii1I % oO0o
if 69 - 69: iIii1I11I1II1 - O0 . I1Ii111 % I1IiiI / o0oOOo0O0Ooo
if 78 - 78: oO0o
if 20 - 20: i1IIi + i1IIi * i1IIi
def lisp_process_smr(map_request):
    """Log receipt of an SMR-based Map-Request; no further action taken."""
    lprint("Received SMR-based Map-Request")
    return
if 32 - 32: I1IiiI + IiII + iII111i . iIii1I11I1II1 * Ii1I
if 27 - 27: oO0o + Ii1I . i11iIiiIii
if 97 - 97: iII111i . I1IiiI
if 71 - 71: OOooOOo - IiII % oO0o * I1ii11iIi11i
if 48 - 48: o0oOOo0O0Ooo * iIii1I11I1II1 + Oo0Ooo
def lisp_process_smr_invoked_request(map_request):
    """Log receipt of an SMR-invoked Map-Request; no further action taken."""
    lprint("Received SMR-invoked Map-Request")
    return
if 45 - 45: oO0o
if 50 - 50: Ii1I * Ii1I / O0 . Oo0Ooo + iII111i
if 9 - 9: OoooooooOO % O0 % I1ii11iIi11i
if 100 - 100: i11iIiiIii - iII111i - I11i
if 5 - 5: oO0o % IiII * iII111i
if 98 - 98: iII111i / OOooOOo + IiII
if 100 - 100: II111iiii . i11iIiiIii / oO0o - OOooOOo + OoOoOO00 % I1ii11iIi11i
def lisp_build_map_reply ( eid , group , rloc_set , nonce , action , ttl , map_request ,
 keys , enc , auth , mr_ttl = - 1 ) :
 """
 Build an encoded Map-Reply and return it as a packet byte string.

 Layout: Map-Reply header, one EID-record for (eid, group), one
 RLOC-record per entry of 'rloc_set', plus an optional trailing
 telemetry RLOC-record when the Map-Request carried json_telemetry.
 'nonce' is echoed from the request; 'action'/'ttl'/'auth' populate the
 EID-record; 'keys' are stored into each local RLOC-record; 'enc' is the
 echo-nonce-capable bit; 'mr_ttl' is the hop-count copied from the
 Map-Request, -1 when unknown.
 """
 if 82 - 82: ooOoO0o % OOooOOo % Ii1I
 # Probe flag and telemetry payload come from the triggering Map-Request,
 # when there is one.
 oO00oO0o = map_request . rloc_probe if ( map_request != None ) else False
 OoOO = map_request . json_telemetry if ( map_request != None ) else None
 if 5 - 5: OOooOOo . OOooOOo
 if 53 - 53: OOooOOo * OoOoOO00 % iII111i
 # Map-Reply header; hop_count echoes the request TTL when supplied.
 O0OOoOOO00ooOoo = lisp_map_reply ( )
 O0OOoOOO00ooOoo . rloc_probe = oO00oO0o
 O0OOoOOO00ooOoo . echo_nonce_capable = enc
 O0OOoOOO00ooOoo . hop_count = 0 if ( mr_ttl == - 1 ) else mr_ttl
 O0OOoOOO00ooOoo . record_count = 1
 O0OOoOOO00ooOoo . nonce = nonce
 Oo00oo = O0OOoOOO00ooOoo . encode ( )
 O0OOoOOO00ooOoo . print_map_reply ( )
 if 60 - 60: OOooOOo % iII111i * iIii1I11I1II1
 # EID-record; account for one extra RLOC-record when a telemetry record
 # will be appended below.
 I1Ii111I111I = lisp_eid_record ( )
 I1Ii111I111I . rloc_count = len ( rloc_set )
 if ( OoOO != None ) : I1Ii111I111I . rloc_count += 1
 I1Ii111I111I . authoritative = auth
 I1Ii111I111I . record_ttl = ttl
 I1Ii111I111I . action = action
 I1Ii111I111I . eid = eid
 I1Ii111I111I . group = group
 if 36 - 36: Oo0Ooo
 Oo00oo += I1Ii111I111I . encode ( )
 I1Ii111I111I . print_record ( " " , False )
 if 43 - 43: I11i * OoOoOO00 + iIii1I11I1II1 + iIii1I11I1II1 . I11i
 # Addresses owned by this system -- used to decide the local-bit on each
 # RLOC-record below.
 iIII1111IiIII = lisp_get_all_addresses ( ) + lisp_get_all_translated_rlocs ( )
 if 60 - 60: iIii1I11I1II1 / O0 . OOooOOo / OoO0O00 * I1ii11iIi11i
 IiIo0oo0O = None
 for ii11Ii in rloc_set :
  iii111i = ii11Ii . rloc . is_multicast_address ( )
  oO0OoOOO = lisp_rloc_record ( )
  # The probe-bit goes on local records, except that a pending telemetry
  # record carries it instead (multicast RLOCs keep it regardless).
  iiIII11I = oO00oO0o and ( iii111i or OoOO == None )
  O0O0 = ii11Ii . rloc . print_address_no_iid ( )
  if ( O0O0 in iIII1111IiIII or iii111i ) :
   oO0OoOOO . local_bit = True
   oO0OoOOO . probe_bit = iiIII11I
   oO0OoOOO . keys = keys
   # Priority 254 flags an RTR RLOC when this system runs as an RTR.
   if ( ii11Ii . priority == 254 and lisp_i_am_rtr ) :
    oO0OoOOO . rloc_name = "RTR"
   if 28 - 28: Oo0Ooo * oO0o % ooOoO0o / OoOoOO00 % OoOoOO00
   # Remember the first local RLOC's address (NAT-translated when one
   # exists); reused for the telemetry record appended below.
   if ( IiIo0oo0O == None ) :
    if ( ii11Ii . translated_rloc . is_null ( ) ) :
     IiIo0oo0O = ii11Ii . rloc
    else :
     IiIo0oo0O = ii11Ii . translated_rloc
    if 85 - 85: I1IiiI
   if 8 - 8: I1IiiI
  if 31 - 31: o0oOOo0O0Ooo + OOooOOo
  oO0OoOOO . store_rloc_entry ( ii11Ii )
  oO0OoOOO . reach_bit = True
  oO0OoOOO . print_record ( " " )
  Oo00oo += oO0OoOOO . encode ( )
 if 7 - 7: IiII + iIii1I11I1II1
 if 97 - 97: oO0o
 if 52 - 52: I1ii11iIi11i / OoOoOO00 * OoO0O00 + II111iiii * OoooooooOO
 if 11 - 11: Ii1I * iII111i * I1IiiI - Oo0Ooo
 if 76 - 76: oO0o * II111iiii
 # Append the telemetry RLOC-record: a JSON record stamped with the
 # ETR-out time, marked local/probe/reachable.
 if ( OoOO != None ) :
  oO0OoOOO = lisp_rloc_record ( )
  if ( IiIo0oo0O ) : oO0OoOOO . rloc . copy_address ( IiIo0oo0O )
  oO0OoOOO . local_bit = True
  oO0OoOOO . probe_bit = True
  oO0OoOOO . reach_bit = True
  if ( lisp_i_am_rtr ) :
   oO0OoOOO . priority = 254
   oO0OoOOO . rloc_name = "RTR"
  if 81 - 81: I11i
  i1III1i = lisp_encode_telemetry ( OoOO , eo = str ( time . time ( ) ) )
  oO0OoOOO . json = lisp_json ( "telemetry" , i1III1i )
  oO0OoOOO . print_record ( " " )
  Oo00oo += oO0OoOOO . encode ( )
 if 93 - 93: IiII
 return ( Oo00oo )
if 80 - 80: oO0o * I1Ii111 - i1IIi - OoooooooOO
if 85 - 85: OoO0O00 / i1IIi * o0oOOo0O0Ooo / oO0o
if 11 - 11: IiII + II111iiii
if 37 - 37: O0
if 98 - 98: IiII * OoooooooOO . iII111i
if 34 - 34: OoooooooOO + I1Ii111
if 97 - 97: II111iiii + I11i + OOooOOo / i11iIiiIii - iII111i
def lisp_build_map_referral ( eid , group , ddt_entry , action , ttl , nonce ) :
 """
 Build an encoded Map-Referral and return it as a packet byte string.

 When 'ddt_entry' is None the EID-record names the requested (eid, group)
 with zero RLOC-records; otherwise it names the delegation's prefix and
 carries one RLOC-record per delegation-set entry. 'action' may be
 LISP_DDT_ACTION_NULL, in which case it is derived from the first
 delegation entry's child type.
 """
 iiI11111i = lisp_map_referral ( )
 iiI11111i . record_count = 1
 iiI11111i . nonce = nonce
 Oo00oo = iiI11111i . encode ( )
 iiI11111i . print_map_referral ( )
 if 69 - 69: O0 . I1Ii111 % ooOoO0o - I1ii11iIi11i . Ii1I
 I1Ii111I111I = lisp_eid_record ( )
 if 11 - 11: II111iiii . Ii1I
 iiIIIIiIIIi1 = 0
 if ( ddt_entry == None ) :
  I1Ii111I111I . eid = eid
  I1Ii111I111I . group = group
 else :
  iiIIIIiIIIi1 = len ( ddt_entry . delegation_set )
  I1Ii111I111I . eid = ddt_entry . eid
  I1Ii111I111I . group = ddt_entry . group
  ddt_entry . map_referrals_sent += 1
 if 91 - 91: OOooOOo / OoO0O00
 I1Ii111I111I . rloc_count = iiIIIIiIIIi1
 I1Ii111I111I . authoritative = True
 if 36 - 36: I1IiiI . iII111i * I1Ii111 . IiII % I1ii11iIi11i
 if 44 - 44: I11i % I1ii11iIi11i - OoooooooOO % iII111i
 if 60 - 60: IiII % oO0o
 if 11 - 11: I1Ii111 - II111iiii
 if 12 - 12: i11iIiiIii
 # Derive a NULL action from the first delegation entry's child type:
 # DDT children yield NODE_REFERRAL, map-server children MS_REFERRAL.
 Oo00Oo0o000 = False
 if ( action == LISP_DDT_ACTION_NULL ) :
  if ( iiIIIIiIIIi1 == 0 ) :
   action = LISP_DDT_ACTION_NODE_REFERRAL
  else :
   Ii1iII = ddt_entry . delegation_set [ 0 ]
   if ( Ii1iII . is_ddt_child ( ) ) :
    action = LISP_DDT_ACTION_NODE_REFERRAL
   if 9 - 9: OOooOOo * I1ii11iIi11i + iIii1I11I1II1 / OoO0O00 * OoooooooOO
   if ( Ii1iII . is_ms_child ( ) ) :
    action = LISP_DDT_ACTION_MS_REFERRAL
   if 91 - 91: i11iIiiIii % IiII + oO0o . I1IiiI - I1IiiI
  if 62 - 62: Oo0Ooo * II111iiii + o0oOOo0O0Ooo . OoOoOO00
 if 94 - 94: Oo0Ooo / I1IiiI * iIii1I11I1II1 - OoO0O00
 if 96 - 96: ooOoO0o - OoooooooOO * iIii1I11I1II1 . IiII - O0
 if 7 - 7: iIii1I11I1II1 . OoO0O00
 if 88 - 88: i1IIi * II111iiii / i11iIiiIii % IiII . IiII
 if 93 - 93: OoOoOO00 * i1IIi . Ii1I
 # Set the ddt-incomplete bit when not authoritative, or when acting as a
 # map-server for a delegation registered to a non-peer.
 # NOTE(review): Ii1iII is only bound in the action-derivation branch
 # above; a caller passing MS_REFERRAL/MS_ACK directly with an empty or
 # absent delegation_set would hit an unbound name here -- confirm
 # callers guarantee a delegation entry exists in that case.
 if ( action == LISP_DDT_ACTION_NOT_AUTH ) : Oo00Oo0o000 = True
 if ( action in ( LISP_DDT_ACTION_MS_REFERRAL , LISP_DDT_ACTION_MS_ACK ) ) :
  Oo00Oo0o000 = ( lisp_i_am_ms and Ii1iII . is_ms_peer ( ) == False )
 if 2 - 2: i1IIi
 if 84 - 84: i1IIi / Ii1I + OoOoOO00 % Ii1I . oO0o
 I1Ii111I111I . action = action
 I1Ii111I111I . ddt_incomplete = Oo00Oo0o000
 I1Ii111I111I . record_ttl = ttl
 if 74 - 74: OOooOOo - o0oOOo0O0Ooo - I1Ii111 - OoO0O00
 Oo00oo += I1Ii111I111I . encode ( )
 I1Ii111I111I . print_record ( " " , True )
 if 40 - 40: o0oOOo0O0Ooo . IiII * OoOoOO00
 # No delegations to describe -- the EID-record alone is the referral.
 if ( iiIIIIiIIIi1 == 0 ) : return ( Oo00oo )
 if 14 - 14: OOooOOo
 # One RLOC-record per delegation entry; multicast priority 255 means
 # "do not use for multicast".
 for Ii1iII in ddt_entry . delegation_set :
  oO0OoOOO = lisp_rloc_record ( )
  oO0OoOOO . rloc = Ii1iII . delegate_address
  oO0OoOOO . priority = Ii1iII . priority
  oO0OoOOO . weight = Ii1iII . weight
  oO0OoOOO . mpriority = 255
  oO0OoOOO . mweight = 0
  oO0OoOOO . reach_bit = True
  Oo00oo += oO0OoOOO . encode ( )
  oO0OoOOO . print_record ( " " )
 if 18 - 18: i11iIiiIii % iII111i
 return ( Oo00oo )
if 70 - 70: O0 + iII111i % I11i % I1Ii111 + OoOoOO00 / ooOoO0o
if 35 - 35: IiII + OoO0O00
if 82 - 82: i1IIi - ooOoO0o / I11i + I11i % I1IiiI - OoooooooOO
if 56 - 56: I1ii11iIi11i
if 80 - 80: Oo0Ooo / OOooOOo / iII111i . o0oOOo0O0Ooo
if 43 - 43: IiII
if 74 - 74: OoooooooOO
def lisp_etr_process_map_request ( lisp_sockets , map_request , source , sport ,
 ttl , etr_in_ts ) :
 """
 ETR side: answer a Map-Request (typically an RLOC-probe) for one of our
 own database-mapping EIDs with a Map-Reply.

 'source' is the outer source of the request, 'sport' its UDP source
 port, 'ttl' the hop-count to echo, and 'etr_in_ts' the ETR-in timestamp
 folded into any telemetry the request carries. Returns None; sends the
 reply (or an encapsulated RLOC-probe reply) as a side effect.
 """
 if 88 - 88: Ii1I * o0oOOo0O0Ooo / oO0o
 # Look up the requested EID (or (S,G) pair) in our database mappings.
 if ( map_request . target_group . is_null ( ) ) :
  oOooII111iIiI1 = lisp_db_for_lookups . lookup_cache ( map_request . target_eid , False )
 else :
  oOooII111iIiI1 = lisp_db_for_lookups . lookup_cache ( map_request . target_group , False )
  if ( oOooII111iIiI1 ) : oOooII111iIiI1 = oOooII111iIiI1 . lookup_source_cache ( map_request . target_eid , False )
 if 91 - 91: oO0o - Oo0Ooo % OoOoOO00 % o0oOOo0O0Ooo
 i1iiii = map_request . print_prefix ( )
 if 71 - 71: i1IIi % iII111i * I1Ii111
 if ( oOooII111iIiI1 == None ) :
  lprint ( "Database-mapping entry not found for requested EID {}" . format ( green ( i1iiii , False ) ) )
  if 36 - 36: I1ii11iIi11i % II111iiii % I1Ii111 / I1ii11iIi11i
  return
 if 34 - 34: OoooooooOO * i11iIiiIii
 if 33 - 33: II111iiii
 oo00oOOO00 = oOooII111iIiI1 . print_eid_tuple ( )
 if 81 - 81: Oo0Ooo + I1Ii111 - I1IiiI
 lprint ( "Found database-mapping EID-prefix {} for requested EID {}" . format ( green ( oo00oOOO00 , False ) , green ( i1iiii , False ) ) )
 if 4 - 4: i1IIi
 if 89 - 89: II111iiii . I11i + Ii1I * ooOoO0o + I11i . IiII
 if 83 - 83: o0oOOo0O0Ooo - iIii1I11I1II1
 if 9 - 9: Ii1I
 # Reply to the first ITR-RLOC; with NAT traversal, a private ITR-RLOC
 # is unreachable, so reply to the request's outer source instead.
 if 53 - 53: Ii1I % IiII + I11i % IiII
 iii11I1I1ii = map_request . itr_rlocs [ 0 ]
 if ( iii11I1I1ii . is_private_address ( ) and lisp_nat_traversal ) :
  iii11I1I1ii = source
 if 85 - 85: iIii1I11I1II1 * i11iIiiIii
 if 54 - 54: O0 * Ii1I + Ii1I
 oOooo0oOOOO = map_request . nonce
 oo0OOo0o000 = lisp_nonce_echoing
 iI1iiiiiii = map_request . keys
 if 92 - 92: o0oOOo0O0Ooo
 if 31 - 31: O0 . o0oOOo0O0Ooo . O0 * OoOoOO00 - OoO0O00
 if 80 - 80: II111iiii % oO0o
 if 48 - 48: OOooOOo . II111iiii * OOooOOo - I11i / iIii1I11I1II1 / i11iIiiIii
 if 37 - 37: II111iiii % O0 + iIii1I11I1II1 - I1IiiI . I11i + I1ii11iIi11i
 # Stamp the ETR-in time into any telemetry carried by the request so the
 # reply's telemetry record reflects our processing window.
 I11ii1I11ii = map_request . json_telemetry
 if ( I11ii1I11ii != None ) :
  map_request . json_telemetry = lisp_encode_telemetry ( I11ii1I11ii , ei = etr_in_ts )
 if 38 - 38: iII111i % Ii1I - I1ii11iIi11i * I1Ii111 % iII111i
 if 50 - 50: Oo0Ooo + o0oOOo0O0Ooo . OoOoOO00
 oOooII111iIiI1 . map_replies_sent += 1
 if 8 - 8: O0 - i1IIi * oO0o + II111iiii . OoOoOO00
 Oo00oo = lisp_build_map_reply ( oOooII111iIiI1 . eid , oOooII111iIiI1 . group , oOooII111iIiI1 . rloc_set , oOooo0oOOOO ,
 LISP_NO_ACTION , 1440 , map_request , iI1iiiiiii , oo0OOo0o000 , True , ttl )
 if 4 - 4: I1IiiI - OoO0O00 % o0oOOo0O0Ooo
 if 83 - 83: iII111i % iIii1I11I1II1 / OOooOOo - OoOoOO00
 if 98 - 98: I11i % oO0o . I1IiiI % OoOoOO00
 if 32 - 32: I1ii11iIi11i / Ii1I
 if 54 - 54: I11i - i11iIiiIii
 if 91 - 91: Ii1I - OoO0O00 - I1IiiI % OoO0O00 . o0oOOo0O0Ooo
 if 85 - 85: ooOoO0o . ooOoO0o % Oo0Ooo . OOooOOo + OOooOOo / I1IiiI
 if 69 - 69: i1IIi + II111iiii / Ii1I
 if 4 - 4: I11i * OoOoOO00 % o0oOOo0O0Ooo % ooOoO0o - I1ii11iIi11i
 if 88 - 88: iIii1I11I1II1 * iIii1I11I1II1 * I11i * OoOoOO00
 if 14 - 14: i11iIiiIii * I1IiiI % O0 % iIii1I11I1II1
 if 18 - 18: Oo0Ooo % OOooOOo + IiII
 if 28 - 28: OOooOOo . OoO0O00 / o0oOOo0O0Ooo + II111iiii / iIii1I11I1II1 * II111iiii
 if 83 - 83: II111iiii . OoOoOO00 - i11iIiiIii . OoOoOO00 . i1IIi % OoooooooOO
 if 47 - 47: II111iiii
 if 30 - 30: i1IIi . Oo0Ooo / o0oOOo0O0Ooo + IiII * OOooOOo
 # RLOC-probe replies to an RTR (or to a zero source port, i.e. through a
 # NAT) must travel data-encapsulated so they traverse the NAT path; all
 # four sockets being present indicates that capability.
 if ( map_request . rloc_probe and len ( lisp_sockets ) == 4 ) :
  iiOO00 = ( iii11I1I1ii . is_private_address ( ) == False )
  i11iiI = iii11I1I1ii . print_address_no_iid ( )
  if ( iiOO00 and i11iiI in lisp_rtr_list or sport == 0 ) :
   lisp_encapsulate_rloc_probe ( lisp_sockets , iii11I1I1ii , None , Oo00oo )
   return
  if 26 - 26: Ii1I % O0 - i1IIi % iII111i * OoO0O00
 if 60 - 60: I1ii11iIi11i * iII111i / OoOoOO00 . o0oOOo0O0Ooo / iIii1I11I1II1
 if 94 - 94: OoO0O00 . ooOoO0o
 if 25 - 25: I1Ii111 % OOooOOo
 if 82 - 82: Ii1I
 if 17 - 17: iII111i . i1IIi . i1IIi
 lisp_send_map_reply ( lisp_sockets , Oo00oo , iii11I1I1ii , sport )
 return
if 76 - 76: OoooooooOO % IiII
if 81 - 81: iII111i . OOooOOo * i1IIi
if 14 - 14: oO0o
if 16 - 16: iII111i
if 26 - 26: iII111i . oO0o * i11iIiiIii . iIii1I11I1II1
if 74 - 74: Ii1I / iIii1I11I1II1 + OOooOOo . II111iiii
if 65 - 65: OOooOOo * I11i * Oo0Ooo
def lisp_rtr_process_map_request ( lisp_sockets , map_request , source , sport ,
 ttl , etr_in_ts ) :
 """
 RTR side: answer an RLOC-probe Map-Request with a Map-Reply whose
 RLOC-set is this RTR's own addresses (priority 254 marks them as RTR
 RLOCs). 'etr_in_ts' is folded into any telemetry the request carries.
 Returns None; sends the Map-Reply as a side effect.
 """
 if 21 - 21: Ii1I . iIii1I11I1II1
 if 84 - 84: OOooOOo
 # Reply to the first ITR-RLOC; a private one is unreachable from here,
 # so fall back to the request's outer source.
 if 67 - 67: I1IiiI % OoO0O00 % o0oOOo0O0Ooo % IiII
 if 33 - 33: ooOoO0o % I1IiiI
 iii11I1I1ii = map_request . itr_rlocs [ 0 ]
 if ( iii11I1I1ii . is_private_address ( ) ) : iii11I1I1ii = source
 oOooo0oOOOO = map_request . nonce
 if 98 - 98: oO0o . o0oOOo0O0Ooo + II111iiii
 i1I1I1IIIi11 = map_request . target_eid
 o0o0Oo0o0oOo = map_request . target_group
 if 62 - 62: ooOoO0o - OoooooooOO / I1ii11iIi11i / iII111i - o0oOOo0O0Ooo
 # Build an RLOC-set from our own IPv4/IPv6 RLOCs (lisp_myrlocs), each
 # at priority 254 to identify this system as an RTR.
 OO0oOO0OoO = [ ]
 for o000O000Oo in [ lisp_myrlocs [ 0 ] , lisp_myrlocs [ 1 ] ] :
  if ( o000O000Oo == None ) : continue
  I1Ii1i111I = lisp_rloc ( )
  I1Ii1i111I . rloc . copy_address ( o000O000Oo )
  I1Ii1i111I . priority = 254
  OO0oOO0OoO . append ( I1Ii1i111I )
 if 71 - 71: II111iiii + I1ii11iIi11i * II111iiii
 if 59 - 59: OoO0O00
 oo0OOo0o000 = lisp_nonce_echoing
 iI1iiiiiii = map_request . keys
 if 81 - 81: i11iIiiIii
 if 57 - 57: Oo0Ooo * iIii1I11I1II1 - OoOoOO00 % iII111i % I1ii11iIi11i + Ii1I
 if 82 - 82: IiII * Oo0Ooo - iIii1I11I1II1 - i11iIiiIii
 if 85 - 85: OoooooooOO
 # Stamp the ETR-in time into any telemetry carried by the request.
 if 37 - 37: OoooooooOO + O0 + I1ii11iIi11i + IiII * iII111i
 I11ii1I11ii = map_request . json_telemetry
 if ( I11ii1I11ii != None ) :
  map_request . json_telemetry = lisp_encode_telemetry ( I11ii1I11ii , ei = etr_in_ts )
 if 15 - 15: i11iIiiIii / Oo0Ooo - OOooOOo . IiII
 if 11 - 11: OOooOOo / i1IIi % Oo0Ooo
 Oo00oo = lisp_build_map_reply ( i1I1I1IIIi11 , o0o0Oo0o0oOo , OO0oOO0OoO , oOooo0oOOOO , LISP_NO_ACTION ,
 1440 , map_request , iI1iiiiiii , oo0OOo0o000 , True , ttl )
 lisp_send_map_reply ( lisp_sockets , Oo00oo , iii11I1I1ii , sport )
 return
if 65 - 65: OOooOOo % I1ii11iIi11i
if 25 - 25: o0oOOo0O0Ooo - I1Ii111 * I1ii11iIi11i + OoooooooOO
if 93 - 93: OoOoOO00 % I1ii11iIi11i * I11i
if 34 - 34: I11i - oO0o + I11i * OoooooooOO * I11i
if 73 - 73: OOooOOo * iII111i * OoO0O00
if 11 - 11: I1Ii111 * II111iiii
if 3 - 3: Oo0Ooo * OOooOOo
if 13 - 13: I1Ii111 + i11iIiiIii / OOooOOo
if 98 - 98: I1IiiI * Oo0Ooo
if 9 - 9: O0 / i11iIiiIii . iIii1I11I1II1 . IiII
def lisp_get_private_rloc_set(target_site_eid, seid, group):
    """
    Decide whether the requesting site may see the target site's private
    RLOCs instead of its full registered RLOC-set.

    Private RLOCs are returned in two cases: both sites sit behind the
    same NAT (no site-id configured but identical public registered
    address), or both sites registered with the same non-zero site-id.
    Otherwise the target's registered RLOC-set is returned unchanged.
    """
    registered_set = target_site_eid.registered_rlocs

    source_site = lisp_site_eid_lookup(seid, group, False)
    if (source_site == None): return(registered_set)

    #
    # Collect deep copies of the target site's private RLOCs and remember
    # its first public, non-RTR RLOC.
    #
    target_public = None
    private_set = []
    for rloc_entry in registered_set:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()):
            private_set.append(copy.deepcopy(rloc_entry))
            continue
        target_public = rloc_entry
        break

    if (target_public == None): return(registered_set)
    target_public = target_public.rloc.print_address_no_iid()

    #
    # Find the requesting site's first public, non-RTR RLOC.
    #
    source_public = None
    for rloc_entry in source_site.registered_rlocs:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()): continue
        source_public = rloc_entry
        break

    if (source_public == None): return(registered_set)
    source_public = source_public.rloc.print_address_no_iid()

    #
    # No site-id configured: same public address on both sites means both
    # are behind the same NAT, so private RLOCs are directly reachable.
    #
    site_id = target_site_eid.site_id
    if (site_id == 0):
        if (source_public == target_public):
            lprint("Return private RLOCs for sites behind {}".format(
                target_public))
            return(private_set)
        return(registered_set)

    #
    # Matching non-zero site-ids also exchange private RLOCs.
    #
    if (site_id == source_site.site_id):
        lprint("Return private RLOCs for sites in site-id {}".format(site_id))
        return(private_set)
    return(registered_set)
if 100 - 100: i1IIi . Ii1I . o0oOOo0O0Ooo + Ii1I - i1IIi . I11i
if 19 - 19: i11iIiiIii + I11i - IiII . iII111i * i1IIi
if 66 - 66: ooOoO0o
if 4 - 4: iII111i / iII111i * OOooOOo + o0oOOo0O0Ooo . I1Ii111 + II111iiii
if 90 - 90: IiII * iII111i % OoOoOO00 . i11iIiiIii
if 5 - 5: O0 * i1IIi / IiII
if 4 - 4: II111iiii
if 60 - 60: ooOoO0o - II111iiii * OoO0O00 + oO0o - iII111i
if 39 - 39: OoO0O00 % I1Ii111 * I11i * Ii1I
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
    """
    Return the subset of a registered RLOC-set the Map-Request source
    should see.

    When no RTR RLOCs (priority 254) are registered the set is returned
    unchanged. When the source is itself one of the registered RTRs it
    receives the site's own (non-RTR) RLOCs. Any other requester receives
    the registered private and IPv6 RLOCs (possible direct paths)
    followed by the RTR RLOCs.
    """
    rtr_list = []
    rloc_set = []

    #
    # Determine whether RTRs are registered at all, and whether the
    # Map-Request source is one of them.
    #
    source_is_rtr = False
    rtr_found = False
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.priority != 254): continue
        rtr_found |= True
        if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
        source_is_rtr = True
        break

    if (rtr_found == False): return(registered_rloc_set)

    #
    # When this process is itself behind a NAT, private registered
    # addresses are unreachable and must be filtered out below.
    #
    behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Partition into RTR RLOCs and site RLOCs, honoring priority 255
    # ("do not use") for the unicast or multicast case respectively.
    #
    for rloc_entry in registered_rloc_set:
        if (behind_nat and rloc_entry.rloc.is_private_address()): continue
        if (multicast == False and rloc_entry.priority == 255): continue
        if (multicast and rloc_entry.mpriority == 255): continue
        if (rloc_entry.priority == 254):
            rtr_list.append(rloc_entry)
        else:
            rloc_set.append(rloc_entry)

    #
    # An RTR asked: hand it the site's own RLOCs.
    #
    if (source_is_rtr): return(rloc_set)

    #
    # Anyone else: private and IPv6 RLOCs first (candidate direct paths),
    # then the RTR RLOCs.
    #
    rloc_set = []
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.rloc.is_ipv6()): rloc_set.append(rloc_entry)
        if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)

    rloc_set += rtr_list
    return(rloc_set)
if 96 - 96: II111iiii * OoO0O00 + I1ii11iIi11i + OoOoOO00 / II111iiii . iII111i
if 64 - 64: iII111i % Oo0Ooo
if 79 - 79: IiII + iII111i / II111iiii . i1IIi + iIii1I11I1II1
if 32 - 32: Ii1I * iII111i
if 52 - 52: I11i
if 100 - 100: Oo0Ooo % Oo0Ooo % I1ii11iIi11i
if 33 - 33: I1Ii111 . I1Ii111 * i1IIi
if 22 - 22: I1ii11iIi11i . II111iiii + iIii1I11I1II1 / OoooooooOO . ooOoO0o
if 13 - 13: II111iiii
if 36 - 36: iII111i - oO0o / Oo0Ooo / O0 . OoO0O00 . i1IIi
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
    """
    Create a pubsub subscription for 'reply_eid' from the given ITR and
    register it in the pubsub cache. Returns the new lisp_pubsub entry.
    """
    subscription = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
    subscription.add(reply_eid)
    return(subscription)
if 16 - 16: O0 + OOooOOo * I1ii11iIi11i * IiII
if 56 - 56: iII111i
if 68 - 68: OoooooooOO % o0oOOo0O0Ooo . i1IIi - II111iiii * OoOoOO00
if 46 - 46: ooOoO0o . I1IiiI - ooOoO0o + Oo0Ooo
if 31 - 31: OOooOOo + ooOoO0o . i1IIi - OoO0O00
if 16 - 16: I11i + I1IiiI - Ii1I / I1ii11iIi11i + Ii1I
if 38 - 38: i1IIi * iIii1I11I1II1 * iII111i + OoOoOO00
if 64 - 64: OoO0O00 % o0oOOo0O0Ooo
if 72 - 72: O0 + OoOoOO00 % OOooOOo / oO0o / IiII
if 98 - 98: Oo0Ooo . II111iiii * I11i
if 39 - 39: IiII * o0oOOo0O0Ooo + Ii1I - I11i
if 70 - 70: oO0o * ooOoO0o / ooOoO0o - Ii1I * Ii1I % OOooOOo
if 91 - 91: OoO0O00 - OoO0O00 % O0
if 67 - 67: ooOoO0o * i1IIi
if 66 - 66: o0oOOo0O0Ooo - I1ii11iIi11i . OoOoOO00 / iII111i - Ii1I - i1IIi
def lisp_convert_reply_to_notify(packet):
    """
    Rewrite an encoded Map-Reply into a Map-Notify, preserving the
    record-count and nonce. The Map-Reply's 12-byte header is replaced by
    a Map-Notify first-word plus the nonce, and a zeroed word is inserted
    where the Map-Notify carries key-id/alg-id/auth-length. Returns the
    rewritten packet bytes.
    """

    #
    # Extract the record-count (low byte of the first word, which arrives
    # in network byte order) and the 8-byte nonce.
    #
    first_word = struct.unpack("I", packet[0:4])[0]
    record_count = socket.ntohl(first_word) & 0xff
    nonce = packet[4:12]
    packet = packet[12::]

    #
    # Build the Map-Notify first word and the zeroed authentication word.
    #
    header = (LISP_MAP_NOTIFY << 28) | record_count
    header = struct.pack("I", socket.htonl(header))
    auth_word = struct.pack("I", 0)

    #
    # New header + original nonce + auth word + original EID-records.
    #
    packet = header + nonce + auth_word + packet
    return(packet)
if 17 - 17: OoOoOO00 . I1IiiI
if 30 - 30: i1IIi * OoOoOO00 * I11i . O0
if 45 - 45: iII111i
if 99 - 99: o0oOOo0O0Ooo % ooOoO0o % i11iIiiIii
if 32 - 32: IiII - Ii1I
if 44 - 44: OoooooooOO . oO0o
if 30 - 30: I1Ii111 % IiII / II111iiii
if 68 - 68: oO0o / O0 / OOooOOo
def lisp_notify_subscribers ( lisp_sockets , eid_record , rloc_records ,
 registered_eid , site ) :
 """
 Send a Map-Notify to every cached subscriber whose subscribed prefix is
 covered by the just-registered EID. 'eid_record' is the registration's
 decoded EID-record (copied per subscriber), 'rloc_records' its encoded
 RLOC-records, and 'site' the registering site. Returns None.
 """
 if 3 - 3: o0oOOo0O0Ooo / o0oOOo0O0Ooo
 for iIiIiII1I11II in lisp_pubsub_cache :
  for Iiooo0O0o0o in list ( lisp_pubsub_cache [ iIiIiII1I11II ] . values ( ) ) :
   oO0ooOOO = Iiooo0O0o0o . eid_prefix
   if ( oO0ooOOO . is_more_specific ( registered_eid ) == False ) : continue
   if 66 - 66: o0oOOo0O0Ooo + I11i / OoOoOO00 . OoooooooOO . oO0o
   ii1oO0Oo = Iiooo0O0o0o . itr
   I1I = Iiooo0O0o0o . port
   iIIIi1Iii1 = red ( ii1oO0Oo . print_address_no_iid ( ) , False )
   oOOo00oOoo = bold ( "subscriber" , False )
   Iiooo000o0OoOo = "0x" + lisp_hex_string ( Iiooo0O0o0o . xtr_id )
   oOooo0oOOOO = "0x" + lisp_hex_string ( Iiooo0O0o0o . nonce )
   if 83 - 83: OoooooooOO + Oo0Ooo
   lprint ( " Notify {} {}:{} xtr-id {} for {}, nonce {}" . format ( oOOo00oOoo , iIIIi1Iii1 , I1I , Iiooo000o0OoOo , green ( iIiIiII1I11II , False ) , oOooo0oOOOO ) )
   if 4 - 4: Oo0Ooo - i11iIiiIii / O0 / I11i + ooOoO0o / iII111i
   if 72 - 72: II111iiii % iII111i + OoO0O00
   if 44 - 44: OoooooooOO + OoooooooOO - Ii1I * iII111i
   if 45 - 45: oO0o . O0 - ooOoO0o / o0oOOo0O0Ooo
   if 58 - 58: Ii1I . iII111i * OoO0O00 + OoO0O00 % I1Ii111 + I1ii11iIi11i
   if 34 - 34: i11iIiiIii + OoOoOO00
   # Send the subscriber its own copy of the EID-record, rewritten to
   # name the subscribed prefix, followed by the registered RLOC-set.
   Ooo00O0 = copy . deepcopy ( eid_record )
   Ooo00O0 . eid . copy_address ( oO0ooOOO )
   Ooo00O0 = Ooo00O0 . encode ( ) + rloc_records
   lisp_build_map_notify ( lisp_sockets , Ooo00O0 , [ iIiIiII1I11II ] , 1 , ii1oO0Oo ,
 I1I , Iiooo0O0o0o . nonce , 0 , 0 , 0 , site , False )
   if 13 - 13: i1IIi
   Iiooo0O0o0o . map_notify_count += 1
  if 1 - 1: i1IIi + IiII + OOooOOo + OoooooooOO / iIii1I11I1II1
 if 62 - 62: OOooOOo - o0oOOo0O0Ooo - I1ii11iIi11i
 return
if 79 - 79: OoooooooOO - OoooooooOO + oO0o
if 95 - 95: I11i % IiII
if 63 - 63: I1Ii111
if 4 - 4: o0oOOo0O0Ooo / OoooooooOO - o0oOOo0O0Ooo - II111iiii % II111iiii
if 14 - 14: i1IIi - Oo0Ooo % OOooOOo
if 14 - 14: I11i . OoO0O00
if 46 - 46: ooOoO0o
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
    ttl, xtr_id):
    """
    Handle a Map-Request with the subscribe-bit set: record the
    subscription and ack the ITR by converting the already-built
    Map-Reply 'packet' into a Map-Notify and sending it back.
    """
    subscription = lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce,
        ttl, xtr_id)

    eid_str = green(reply_eid.print_prefix(), False)
    itr_str = red(itr_rloc.print_address_no_iid(), False)
    notify_str = bold("Map-Notify", False)
    xtr_id = "0x" + lisp_hex_string(xtr_id)
    lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(
        notify_str, eid_str, itr_str, xtr_id))

    #
    # The subscription ack is the Map-Reply re-typed as a Map-Notify.
    #
    packet = lisp_convert_reply_to_notify(packet)
    lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
    subscription.map_notify_count += 1
    return
if 65 - 65: Oo0Ooo / OoooooooOO
if 60 - 60: II111iiii + I1IiiI % oO0o - o0oOOo0O0Ooo
if 50 - 50: iIii1I11I1II1 - i11iIiiIii / iII111i + ooOoO0o / OOooOOo
if 80 - 80: IiII / OoooooooOO
if 69 - 69: OoOoOO00 + IiII
if 18 - 18: O0 / I11i
if 10 - 10: I1Ii111 * i1IIi
if 48 - 48: Oo0Ooo % i1IIi / iII111i . O0
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
    mr_sport, ecm_source):
    """Map-Server processing of a received Map-Request.

    Looks the requested EID up in the site cache and either (1) proxy-replies
    on behalf of the registered site, (2) forwards the Map-Request inside an
    ECM to one of the site's registered ETRs, or (3) sends a negative
    Map-Reply.  Returns [eid, group, ddt-action] so the caller can build a
    Map-Referral when this Map-Server is also part of a DDT hierarchy.
    """

    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    itr_rloc = map_request.itr_rlocs[0]
    xtr_id = map_request.xtr_id
    nonce = map_request.nonce
    action = LISP_NO_ACTION
    pubsub = map_request.subscribe_bit

    #
    # If the target EID embeds a crypto-hash, the Map-Request must carry a
    # signature; verify it against the looked-up public key.
    #
    sig_good = True
    is_crypto_eid = (lisp_get_eid_hash(eid) != None)
    if (is_crypto_eid):
        sig = map_request.map_request_signature
        if (sig == None):
            sig_good = False
            lprint(("EID-crypto-hash signature verification {}, " + \
                "no signature found").format(bold("failed", False)))
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".format(sig_eid.print_address(), hash_eid.print_address()))
            #endif

            status = bold("passed", False) if sig_good else bold("failed",
                False)
            lprint("EID-crypto-hash signature verification {}".format(status))
        #endif
    #endif

    #
    # Do not create subscription state for requests that failed validation.
    #
    if (pubsub and sig_good == False):
        pubsub = False
        lprint("Suppress creating pubsub state due to signature failure")
    #endif

    #
    # Where trimmed RLOC-set decisions are addressed: the ITR-RLOC when its
    # address family matches the ECM source, otherwise the ECM source.
    #
    reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source

    site_eid = lisp_site_eid_lookup(eid, group, False)

    if (site_eid == None or site_eid.is_star_g()):
        not_found = bold("Site not found", False)
        lprint("{} for requested EID {}".format(not_found,
            green(eid_str, False)))

        #
        # No site configured for this EID: negative Map-Reply, 15-minute TTL.
        #
        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
            mr_sport, 15, xtr_id, pubsub)
        return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
    #endif

    prefix_str = site_eid.print_eid_tuple()
    site_name = site_eid.site.site_name

    #
    # A site may insist on signed Map-Requests even for non-crypto EIDs.
    #
    if (is_crypto_eid == False and site_eid.require_signature):
        sig = map_request.map_request_signature
        sig_eid = map_request.signature_eid
        if (sig == None or sig_eid.is_null()):
            lprint("Signature required for site {}".format(site_name))
            sig_good = False
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".format(sig_eid.print_address(), hash_eid.print_address()))
            #endif

            status = bold("passed", False) if sig_good else bold("failed",
                False)
            lprint("Required signature verification {}".format(status))
        #endif
    #endif

    #
    # Site found but not currently registered: negative Map-Reply with a
    # short TTL (unless the site forces one).
    #
    if (sig_good and site_eid.registered == False):
        lprint("Site '{}' with EID-prefix {} is not registered for EID {}".format(site_name, green(prefix_str, False), green(eid_str, False)))

        #
        # When more-specifics are not accepted, answer with the configured
        # prefix rather than the requested EID.
        #
        if (site_eid.accept_more_specifics == False):
            eid = site_eid.eid
            group = site_eid.group
        #endif

        ttl = 1
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000
        #endif

        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
            mr_sport, ttl, xtr_id, pubsub)
        return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
    #endif

    #
    # Decide whether this Map-Server proxy-replies or forwards to an ETR.
    #
    nat_forced = False
    reply_reason = ""
    proxy_reply = False
    if (site_eid.force_nat_proxy_reply):
        reply_reason = ", nat-forced"
        nat_forced = True
        proxy_reply = True
    elif (site_eid.force_proxy_reply):
        reply_reason = ", forced"
        proxy_reply = True
    elif (site_eid.proxy_reply_requested):
        reply_reason = ", requested"
        proxy_reply = True
    elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
        reply_reason = ", drop-to-pitr"
        action = LISP_DROP_ACTION
    elif (site_eid.proxy_reply_action != ""):
        action = site_eid.proxy_reply_action
        reply_reason = ", forced, action {}".format(action)
        action = LISP_DROP_ACTION if (action == "drop") else \
            LISP_NATIVE_FORWARD_ACTION
    #endif

    #
    # Apply any configured policy when proxy-replying; a configured policy
    # that does not match implies a drop.
    #
    policy_drop = False
    matched_policy = None
    if (proxy_reply and site_eid.policy in lisp_policies):
        policy = lisp_policies[site_eid.policy]
        if (policy.match_policy_map_request(map_request, mr_source)):
            matched_policy = policy
        #endif

        if (matched_policy):
            match_str = bold("matched", False)
            lprint("Map-Request {} policy '{}', set-action '{}'".format(
                match_str, policy.policy_name, policy.set_action))
        else:
            match_str = bold("no match", False)
            lprint("Map-Request {} for policy '{}', implied drop".format(
                match_str, policy.policy_name))
            policy_drop = True
        #endif
    #endif

    if (reply_reason != ""):
        lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}".format(green(eid_str, False), site_name, green(prefix_str, False),
            reply_reason))

        rloc_set = site_eid.registered_rlocs
        ttl = 1440
        if (nat_forced):
            if (site_eid.site_id != 0):
                source_eid = map_request.source_eid
                rloc_set = lisp_get_private_rloc_set(site_eid, source_eid,
                    group)
            #endif
            if (rloc_set == site_eid.registered_rlocs):
                multicast = (site_eid.group.is_null() == False)
                trimmed_set = lisp_get_partial_rloc_set(rloc_set, reply_dest,
                    multicast)
                if (trimmed_set != rloc_set):
                    ttl = 15
                    rloc_set = trimmed_set
                #endif
            #endif
        #endif

        #
        # Site-configured TTL override.
        #
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000
        #endif

        #
        # A matched policy can override the TTL and RLOC-set, or force drop.
        #
        if (matched_policy):
            if (matched_policy.set_record_ttl):
                ttl = matched_policy.set_record_ttl
                lprint("Policy set-record-ttl to {}".format(ttl))
            #endif
            if (matched_policy.set_action == "drop"):
                lprint("Policy set-action drop, send negative Map-Reply")
                action = LISP_POLICY_DENIED_ACTION
                rloc_set = []
            else:
                policy_rloc = matched_policy.set_policy_map_reply()
                if (policy_rloc): rloc_set = [policy_rloc]
            #endif
        #endif

        if (policy_drop):
            lprint("Implied drop action, send negative Map-Reply")
            action = LISP_POLICY_DENIED_ACTION
            rloc_set = []
        #endif

        echo_nonce = site_eid.echo_nonce_capable

        #
        # A failed signature turns the reply into an auth-failure for the
        # requested EID rather than the registered prefix.
        #
        if (sig_good):
            reply_eid = site_eid.eid
            reply_group = site_eid.group
        else:
            reply_eid = eid
            reply_group = group
            action = LISP_AUTH_FAILURE_ACTION
            rloc_set = []
        #endif

        #
        # Subscribers get state for the requested EID, not the site prefix.
        #
        if (pubsub):
            reply_eid = eid
            reply_group = group
        #endif

        packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
            nonce, action, ttl, map_request, None, echo_nonce, False)

        if (pubsub):
            lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
                mr_sport, nonce, ttl, xtr_id)
        else:
            lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
        #endif

        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
    #endif

    #
    # Not proxy-replying: hash the Map-Request to one of the registered
    # ETRs and forward it inside an ECM.
    #
    rloc_count = len(site_eid.registered_rlocs)
    if (rloc_count == 0):
        lprint(("Requested EID {} found site '{}' with EID-prefix {} with " + \
            "no registered RLOCs").format(green(eid_str, False), site_name,
            green(prefix_str, False)))
        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
    #endif

    hash_source = map_request.target_eid if \
        map_request.source_eid.is_null() else map_request.source_eid

    etr_index = map_request.target_eid.hash_address(hash_source)
    etr_index %= rloc_count
    etr = site_eid.registered_rlocs[etr_index]

    if (etr.rloc.is_null()):
        lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
            "EID-prefix {}, no RLOC address").format(green(eid_str, False),
            site_name, green(prefix_str, False)))
    else:
        lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
            "EID-prefix {}").format(green(eid_str, False),
            red(etr.rloc.print_address(), False), site_name,
            green(prefix_str, False)))

        #
        # Forward inside an ECM addressed to the chosen ETR.
        #
        lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
            map_request.target_eid, etr.rloc, to_etr=True)
    #endif

    return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
    """DDT-node processing of a Map-Request: answer with a Map-Referral.

    When this node is a Map-Server, the referral action comes from the site
    registration state; otherwise it comes from the DDT delegation cache,
    computing a least-specific "hole" prefix when no delegation exists under
    an authoritative prefix.
    """

    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    nonce = map_request.nonce
    action = LISP_DDT_ACTION_NULL

    #
    # Map-Server path: consult the registration state of the site.
    #
    ddt_entry = None
    if (lisp_i_am_ms):
        site_eid = lisp_site_eid_lookup(eid, group, False)
        if (site_eid == None): return

        if (site_eid.registered):
            action = LISP_DDT_ACTION_MS_ACK
            ttl = 1440
        else:
            eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
            action = LISP_DDT_ACTION_MS_NOT_REG
            ttl = 1
        #endif
    else:
        #
        # Pure DDT-node path: consult the delegation cache.
        #
        ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
        if (ddt_entry == None):
            action = LISP_DDT_ACTION_NOT_AUTH
            ttl = 0
            lprint("DDT delegation entry not found for EID {}".format(
                green(eid_str, False)))
        elif (ddt_entry.is_auth_prefix()):
            #
            # Authoritative but no delegation: report a delegation hole
            # covering the least-specific uncovered prefix.
            #
            action = LISP_DDT_ACTION_DELEGATION_HOLE
            ttl = 15
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint(("DDT delegation entry not found but auth-prefix {} " + \
                "found for EID {}").format(auth_prefix_str,
                green(eid_str, False)))

            if (group.is_null()):
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    lisp_ddt_cache)
            else:
                group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
                    lisp_ddt_cache)
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    ddt_entry.source_cache)
            #endif
            ddt_entry = None
        else:
            delegation_str = ddt_entry.print_eid_tuple()
            lprint("DDT delegation entry {} found for EID {}".format(
                delegation_str, green(eid_str, False)))
            ttl = 1440
        #endif
    #endif

    #
    # Build and return the Map-Referral.  Nonzero nonces are answered on the
    # LISP control port, except when the nonce's upper half carries the
    # magic value 0xdfdf0e1d (presumably a diagnostic-client marker --
    # producer not visible here).
    #
    packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
    nonce = map_request.nonce >> 32
    if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
    """Widen neg_prefix.mask_len so it distinguishes eid from entry_prefix.

    XORs the two addresses and finds the most-significant differing bit;
    that bit position is the minimum mask length needed.  neg_prefix is
    updated in place, keeping the largest mask length seen so far.
    """
    xor_bits = eid.hash_address(entry_prefix)
    bit_count = eid.addr_length() * 8

    #
    # Scan from the most-significant bit; the index of the first set bit in
    # the XOR is the required mask length (0 if the top bit differs).
    #
    msb_index = 0
    for msb_index in range(bit_count):
        if (xor_bits & (1 << (bit_count - msb_index - 1))): break
    #endfor

    if (msb_index > neg_prefix.mask_len): neg_prefix.mask_len = msb_index
    return
def lisp_neg_prefix_walk(entry, parms):
    """Cache-walk callback that accumulates a negative-prefix mask length.

    parms is (eid, auth_prefix, neg_prefix).  When auth_prefix is None the
    entry is relevant only if it shares the EID's instance-id and AFI;
    otherwise it is relevant only if it is more specific than auth_prefix.
    Relevant entries widen neg_prefix via lisp_find_negative_mask_len().
    Always returns [True, parms] so the walk continues.
    """
    eid, auth_prefix, neg_prefix = parms

    if (auth_prefix == None):
        relevant = (entry.eid.instance_id == eid.instance_id and
            entry.eid.afi == eid.afi)
    else:
        relevant = entry.eid.is_more_specific(auth_prefix)
    #endif

    if (relevant): lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
    return([True, parms])
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
    """Compute the least-specific prefix covering 'eid' that avoids every
    entry in 'cache' under ddt_entry's prefix.

    Returns 'eid' unchanged for non-binary address families (no bitwise
    prefix arithmetic possible); otherwise returns a new lisp_address with
    the computed mask applied.
    """

    #
    # Negative prefixes only make sense for binary address families.
    #
    if (eid.is_binary() == False): return(eid)

    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0

    auth_prefix_str = ddt_entry.print_eid_tuple()
    auth_prefix = ddt_entry.eid

    #
    # Widen neg_prefix.mask_len across every cache entry more specific than
    # the auth-prefix.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    #
    # Zero out host bits beyond the computed mask length.
    #
    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
        "using auth-prefix {} is {}").format(green(eid.print_address(), False),
        auth_prefix_str, neg_prefix.print_prefix()))
    return(neg_prefix)
def lisp_ms_compute_neg_prefix(eid, group):
    """Map-Server computation of negative EID/group prefixes.

    Returns [neg_prefix, gneg_prefix, action].  The action is
    DELEGATION_HOLE when the EID falls under an authoritative DDT prefix we
    own, otherwise NOT_AUTH (with host-length masks when no DDT entry
    covers the lookup key at all).
    """
    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0
    gneg_prefix = lisp_address(group.afi, "", 0, 0)
    gneg_prefix.copy_address(group)
    gneg_prefix.mask_len = 0
    auth_prefix = None

    if (group.is_null()):
        #
        # Unicast lookup: the DDT cache is keyed by the EID itself.
        #
        ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            gneg_prefix.mask_len = gneg_prefix.host_mask_len()
            return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
        #endif
        cache = lisp_sites_by_eid
        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
    else:
        #
        # Multicast lookup: key on the group, then narrow the source EID
        # through the entry's source cache.
        #
        ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            gneg_prefix.mask_len = gneg_prefix.host_mask_len()
            return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
        #endif
        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group

        group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache(
            lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))

        gneg_prefix.mask_address(gneg_prefix.mask_len)

        lprint(("Least specific prefix computed from site-cache for " + \
            "group EID {} using auth-prefix {} is {}").format(
            group.print_address(), auth_prefix.print_prefix() if \
            (auth_prefix != None) else "'not found'",
            gneg_prefix.print_prefix()))

        cache = ddt_entry.source_cache
    #endif

    #
    # Under an authoritative prefix the answer is a delegation hole;
    # otherwise we are simply not authoritative.
    #
    action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
        LISP_DDT_ACTION_NOT_AUTH

    #
    # Widen the EID negative prefix across the chosen cache.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from site-cache for EID {} " + \
        "using auth-prefix {} is {}").format(green(eid.print_address(), False),
        auth_prefix.print_prefix() if (auth_prefix != None) else \
        "'not found'", neg_prefix.print_prefix()))

    return([neg_prefix, gneg_prefix, action])
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
    action, eid_prefix, group_prefix):
    """Build and send a Map-Referral for 'map_request' with the given action.

    The referral's EID-record carries eid_prefix/group_prefix, a TTL chosen
    from the action, and one RLOC-record per delegate in the matching DDT
    cache entry (if any).
    """
    eid = map_request.target_eid
    group = map_request.target_group
    nonce = map_request.nonce

    # Defensive default; the action/TTL table below re-assigns it too.
    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440

    #
    # Map-Referral header: one record, echoing the requester's nonce.
    #
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    incomplete = False

    #
    # A site-not-found becomes a computed negative prefix; then map each
    # action to its record TTL (minutes, except 0 = do not cache).
    #
    if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
        eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
            group)
        ttl = 15
    #endif
    if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
    if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
    if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0

    is_ms_peer = False
    rloc_count = 0
    ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
    if (ddt_entry != None):
        rloc_count = len(ddt_entry.delegation_set)
        is_ms_peer = ddt_entry.is_ms_peer_entry()
        ddt_entry.map_referrals_sent += 1
    #endif

    #
    # The incomplete bit is set when we are not authoritative, or when an
    # MS referral/ack does not come from an ms-peer entry.
    #
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (is_ms_peer == False)
    #endif

    #
    # EID-record for the referral.
    #
    eid_record = lisp_eid_record()
    eid_record.rloc_count = rloc_count
    eid_record.authoritative = True
    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.eid = eid_prefix
    eid_record.group = group_prefix
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record("  ", True)

    #
    # One RLOC-record per delegate child.
    #
    if (rloc_count != 0):
        for delegate in ddt_entry.delegation_set:
            rloc_record = lisp_rloc_record()
            rloc_record.rloc = delegate.delegate_address
            rloc_record.priority = delegate.priority
            rloc_record.weight = delegate.weight
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.reach_bit = True
            packet += rloc_record.encode()
            rloc_record.print_record("    ")
        #endfor
    #endif

    #
    # Nonzero nonces are answered on the LISP control port.
    #
    if (map_request.nonce != 0): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return
def lisp_send_negative_map_reply ( sockets , eid , group , nonce , dest , port , ttl ,
    xtr_id , pubsub ) :
    """
    Build and send a negative Map-Reply (one with an empty RLOC-set) for
    the given EID/group prefix back to the requesting ITR at dest:port.

    When pubsub is set, the reply is handed to lisp_process_pubsub() so a
    subscription can be recorded; otherwise it is sent directly.
    """
    lprint ( "Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}" . format (
        lisp_print_eid_tuple ( eid , group ) , lisp_hex_string ( nonce ) ,
        red ( dest . print_address ( ) , False ) ) )

    #
    # Unicast prefixes get native-forward action, multicast gets drop.
    #
    if ( group . is_null ( ) ) :
        action = LISP_NATIVE_FORWARD_ACTION
    else :
        action = LISP_DROP_ACTION
    #endif

    #
    # EIDs that carry a hash (per lisp_get_eid_hash) are answered with
    # send-map-request action instead.
    #
    if ( lisp_get_eid_hash ( eid ) != None ) :
        action = LISP_SEND_MAP_REQUEST_ACTION
    #endif

    reply_packet = lisp_build_map_reply ( eid , group , [ ] , nonce , action , ttl ,
        None , None , False , False )

    #
    # Honor a pubsub request when asked to, otherwise reply directly.
    #
    if ( pubsub ) :
        lisp_process_pubsub ( sockets , reply_packet , eid , dest , port , nonce ,
            ttl , xtr_id )
    else :
        lisp_send_map_reply ( sockets , reply_packet , dest , port )
    #endif
    return
if 61 - 61: OoO0O00 . I1IiiI
if 89 - 89: IiII
if 73 - 73: II111iiii + ooOoO0o % OOooOOo . oO0o / oO0o * i1IIi
if 19 - 19: I1Ii111 + I11i
if 21 - 21: OoOoOO00
if 2 - 2: i1IIi . OOooOOo
if 23 - 23: Ii1I - OOooOOo
def lisp_retransmit_ddt_map_request ( mr ) :
    """
    Timer callback: retransmit a queued DDT Map-Request.

    Bumps the no-response counter on the referral-node we last asked,
    dequeues the request once the retry limit is hit, and otherwise
    resends it and re-arms the retransmit timer.
    """
    source_str = mr . mr_source . print_address ( )
    eid_str = mr . print_eid_tuple ( )
    nonce = mr . nonce

    #
    # Charge a missed response to the referral-node we last queried, if
    # it is still present in the referral cache.
    #
    if ( mr . last_request_sent_to ) :
        node_addr = mr . last_request_sent_to . print_address ( )
        ref = lisp_referral_cache_lookup ( mr . last_cached_prefix [ 0 ] ,
            mr . last_cached_prefix [ 1 ] , True )
        if ( ref and node_addr in ref . referral_set ) :
            ref . referral_set [ node_addr ] . no_responses += 1
        #endif
    #endif

    #
    # Give up after the configured number of retries.
    #
    if ( mr . retry_count == LISP_MAX_MAP_NOTIFY_RETRIES ) :
        lprint ( "DDT Map-Request retry limit reached for EID {}, nonce 0x{}" . format ( green ( eid_str , False ) , lisp_hex_string ( nonce ) ) )
        mr . dequeue_map_request ( )
        return
    #endif

    mr . retry_count += 1

    s = green ( source_str , False )
    e = green ( eid_str , False )
    lprint ( "Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}" . format ( bold ( "Map-Request" , False ) , "P" if mr . from_pitr else "" ,
        red ( mr . itr . print_address ( ) , False ) , s , e ,
        lisp_hex_string ( nonce ) ) )

    lisp_send_ddt_map_request ( mr , False )

    #
    # Re-arm the timer for the next retransmission attempt.
    #
    mr . retransmit_timer = threading . Timer ( LISP_DDT_MAP_REQUEST_INTERVAL ,
        lisp_retransmit_ddt_map_request , [ mr ] )
    mr . retransmit_timer . start ( )
    return
if 3 - 3: IiII
if 2 - 2: I1IiiI % Ii1I % Oo0Ooo / ooOoO0o % Oo0Ooo + OoOoOO00
if 44 - 44: i1IIi / OoooooooOO * OoooooooOO
if 93 - 93: OoOoOO00 % Oo0Ooo . OoO0O00 / OoooooooOO
if 59 - 59: OoO0O00 + O0 + i11iIiiIii / OoOoOO00 + iIii1I11I1II1 / OoOoOO00
if 69 - 69: OoOoOO00 * Ii1I % ooOoO0o . OoOoOO00 / oO0o * I1Ii111
if 93 - 93: OoO0O00 % IiII % ooOoO0o . I1IiiI
if 96 - 96: II111iiii
def lisp_get_referral_node ( referral , source_eid , dest_eid ) :
    """
    Select one referral-node from referral.referral_set.

    Only nodes whose updown flag is True are considered; among those the
    lowest priority value wins, and ties are broken deterministically by
    hashing (dest_eid, source_eid) over the tied candidates. Returns None
    when no node is usable.
    """
    candidates = [ ]
    for node in list ( referral . referral_set . values ( ) ) :
        if ( node . updown == False ) : continue
        if ( not candidates or node . priority == candidates [ 0 ] . priority ) :
            candidates . append ( node )
        elif ( node . priority < candidates [ 0 ] . priority ) :
            # Found a strictly better priority, restart the candidate list.
            candidates = [ node ]
        #endif
    #endfor

    count = len ( candidates )
    if ( count == 0 ) : return ( None )

    # Hash source/dest EIDs so the same flow always picks the same node.
    index = dest_eid . hash_address ( source_eid ) % count
    return ( candidates [ index ] )
if 63 - 63: II111iiii . Oo0Ooo % iIii1I11I1II1
if 85 - 85: I1IiiI + i1IIi % I1Ii111
if 76 - 76: i11iIiiIii % i11iIiiIii
if 33 - 33: OOooOOo . ooOoO0o / iIii1I11I1II1 * OOooOOo / oO0o
if 75 - 75: Ii1I - OoOoOO00 . OOooOOo - o0oOOo0O0Ooo - I1ii11iIi11i
if 69 - 69: O0 % I1ii11iIi11i
if 77 - 77: iIii1I11I1II1 . OOooOOo
def lisp_send_ddt_map_request ( mr , send_to_root ) :
    """
    Forward a queued Map-Request 'mr' to the next DDT referral-node.

    When send_to_root is True the referral cache is consulted with a
    zero-length (root) prefix and mr.tried_root is set. Gives up after 8
    sends; answers the ITR with a negative Map-Reply when no referral
    cache entry or no reachable referral-node exists.
    """
    sockets = mr . lisp_sockets
    nonce = mr . nonce
    itr = mr . itr
    mr_source = mr . mr_source
    eid_str = mr . print_eid_tuple ( )

    #
    # Stop resending after 8 attempts for this queue entry.
    #
    if ( mr . send_count == 8 ) :
        lprint ( "Giving up on map-request-queue entry {}, nonce 0x{}" . format ( green ( eid_str , False ) , lisp_hex_string ( nonce ) ) )
        mr . dequeue_map_request ( )
        return
    #endif

    #
    # Either look up from the root of the tree or from the request's own
    # EID/group prefix.
    #
    if ( send_to_root ) :
        lookup_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        lookup_group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        mr . tried_root = True
        lprint ( "Jumping up to root for EID {}" . format ( green ( eid_str , False ) ) )
    else :
        lookup_eid = mr . eid
        lookup_group = mr . group
    #endif

    ref = lisp_referral_cache_lookup ( lookup_eid , lookup_group , False )
    if ( ref == None ) :
        lprint ( "No referral cache entry found" )
        lisp_send_negative_map_reply ( sockets , lookup_eid , lookup_group ,
            nonce , itr , mr . sport , 15 , None , False )
        return
    #endif

    ref_str = ref . print_eid_tuple ( )
    lprint ( "Found referral cache entry {}, referral-type: {}" . format ( ref_str ,
        ref . print_referral_type ( ) ) )

    node = lisp_get_referral_node ( ref , mr_source , mr . eid )
    if ( node == None ) :
        lprint ( "No reachable referral-nodes found" )
        mr . dequeue_map_request ( )
        lisp_send_negative_map_reply ( sockets , ref . eid ,
            ref . group , nonce , itr , mr . sport , 1 , None , False )
        return
    #endif

    lprint ( "Send DDT Map-Request to {} {} for EID {}, nonce 0x{}" . format ( node . referral_address . print_address ( ) ,
        ref . print_referral_type ( ) , green ( eid_str , False ) ,
        lisp_hex_string ( nonce ) ) )

    #
    # Encapsulate toward the referral-node; flag whether the target is a
    # map-server so the ECM is marked accordingly.
    #
    going_to_ms = ( ref . referral_type == LISP_DDT_ACTION_MS_REFERRAL or
        ref . referral_type == LISP_DDT_ACTION_MS_ACK )
    lisp_send_ecm ( sockets , mr . packet , mr_source , mr . sport , mr . eid ,
        node . referral_address , to_ms = going_to_ms , ddt = True )

    #
    # Record where/when we sent so retransmits can charge non-responses.
    #
    mr . last_request_sent_to = node . referral_address
    mr . last_sent = lisp_get_timestamp ( )
    mr . send_count += 1
    node . map_requests_sent += 1
    return
if 99 - 99: i1IIi + OoOoOO00 - iII111i % II111iiii
if 6 - 6: ooOoO0o - I1Ii111 . OoOoOO00
if 64 - 64: iII111i + I1ii11iIi11i
if 88 - 88: I1Ii111 / i11iIiiIii - O0 . II111iiii / II111iiii * II111iiii
if 56 - 56: Oo0Ooo / I1IiiI % I1Ii111 % I1ii11iIi11i * I1IiiI - IiII
if 39 - 39: oO0o + iII111i . I1Ii111 * i11iIiiIii % o0oOOo0O0Ooo + OOooOOo
if 61 - 61: ooOoO0o / I1Ii111 / I1ii11iIi11i - Ii1I % o0oOOo0O0Ooo * iII111i
if 94 - 94: I1IiiI / I11i
def lisp_mr_process_map_request ( lisp_sockets , packet , map_request , ecm_source ,
    sport , mr_source ) :
    """
    Map-Resolver handling of a received Map-Request: queue it as a DDT
    map-request entry and start the DDT lookup process.
    """
    target_eid = map_request . target_eid
    target_group = map_request . target_group
    eid_str = map_request . print_eid_tuple ( )
    source_str = mr_source . print_address ( )
    nonce = map_request . nonce

    lprint ( "Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}" . format ( "P" if map_request . pitr_bit else "" ,
        red ( ecm_source . print_address ( ) , False ) , green ( source_str , False ) ,
        green ( eid_str , False ) , lisp_hex_string ( nonce ) ) )

    #
    # Build the queue entry, remember requester details for the reply
    # path, then kick off the first DDT Map-Request.
    #
    entry = lisp_ddt_map_request ( lisp_sockets , packet , target_eid ,
        target_group , nonce )
    entry . packet = packet
    entry . itr = ecm_source
    entry . mr_source = mr_source
    entry . sport = sport
    entry . from_pitr = map_request . pitr_bit
    entry . queue_map_request ( )

    lisp_send_ddt_map_request ( entry , False )
    return
if 3 - 3: O0 * O0 + II111iiii + OoOoOO00 * I11i % Oo0Ooo
if 19 - 19: oO0o % IiII % OoooooooOO % I1ii11iIi11i / OoO0O00
if 6 - 6: O0 * I1Ii111 - II111iiii
if 60 - 60: oO0o % oO0o
if 76 - 76: I1Ii111 / o0oOOo0O0Ooo
if 19 - 19: O0 . i1IIi % iIii1I11I1II1 + OOooOOo * OoOoOO00 / I11i
if 82 - 82: I1ii11iIi11i
def lisp_process_map_request ( lisp_sockets , packet , ecm_source , ecm_port ,
    mr_source , mr_port , ddt_request , ttl , timestamp ) :
    """
    Decode a received Map-Request and dispatch it to every role this
    process is running (ETR, Map-Server, Map-Resolver, DDT-node).

    RLOC-probe requests are fully handled by the probe path and return
    early; SMR and SMR-invoked bits trigger their handlers before the
    role dispatch.
    """
    saved_packet = packet
    map_request = lisp_map_request ( )
    packet = map_request . decode ( packet , mr_source , mr_port )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Request packet" )
        return
    #endif

    map_request . print_map_request ( )

    #
    # RLOC-probes take a dedicated path and nothing else runs.
    #
    if ( map_request . rloc_probe ) :
        lisp_process_rloc_probe_request ( lisp_sockets , map_request , mr_source ,
            mr_port , ttl , timestamp )
        return
    #endif

    #
    # Solicit-Map-Request handling.
    #
    if ( map_request . smr_bit ) :
        lisp_process_smr ( map_request )
    #endif
    if ( map_request . smr_invoked_bit ) :
        lisp_process_smr_invoked_request ( map_request )
    #endif

    #
    # ETR role: answer for our own registered EIDs.
    #
    if ( lisp_i_am_etr ) :
        lisp_etr_process_map_request ( lisp_sockets , map_request , mr_source ,
            mr_port , ttl , timestamp )
    #endif

    #
    # Map-Server role: process against the site registry (on the saved,
    # undecoded packet) and possibly answer a DDT request with a
    # Map-Referral. The MS path terminates processing.
    #
    if ( lisp_i_am_ms ) :
        packet = saved_packet
        eid , group , site = lisp_ms_process_map_request ( lisp_sockets ,
            saved_packet , map_request , mr_source , mr_port , ecm_source )
        if ( ddt_request ) :
            lisp_ms_send_map_referral ( lisp_sockets , map_request , ecm_source ,
                ecm_port , site , eid , group )
        #endif
        return
    #endif

    #
    # Map-Resolver role (only for non-DDT requests).
    #
    if ( lisp_i_am_mr and not ddt_request ) :
        lisp_mr_process_map_request ( lisp_sockets , saved_packet , map_request ,
            ecm_source , mr_port , mr_source )
    #endif

    #
    # DDT-node role, or an explicit DDT request.
    #
    if ( lisp_i_am_ddt or ddt_request ) :
        packet = saved_packet
        lisp_ddt_process_map_request ( lisp_sockets , map_request , ecm_source ,
            ecm_port )
    #endif
    return
if 8 - 8: iIii1I11I1II1 + I1ii11iIi11i . i1IIi % OoOoOO00 % OoooooooOO * Oo0Ooo
if 53 - 53: oO0o
if 23 - 23: I1ii11iIi11i . I1Ii111 + OOooOOo
if 4 - 4: I1IiiI
if 31 - 31: ooOoO0o * i1IIi . O0
if 5 - 5: OOooOOo . I1ii11iIi11i + ooOoO0o . ooOoO0o + iII111i
if 100 - 100: I1Ii111
if 71 - 71: ooOoO0o * i1IIi / OoOoOO00 * i11iIiiIii - iII111i
def lisp_store_mr_stats ( source , nonce ) :
    """
    Account a negative Map-Reply against the map-resolver it came from.

    Updates reply counters and, when the reply's nonce matches the one we
    last sent, folds the round-trip time into the resolver's total_rtt.
    """
    resolver = lisp_get_map_resolver ( source , None )
    if ( resolver == None ) : return

    #
    # Count the negative reply and timestamp it.
    #
    resolver . neg_map_replies_received += 1
    resolver . last_reply = lisp_get_timestamp ( )

    # Restart the RTT accumulation window every 100 replies.
    if ( ( resolver . neg_map_replies_received % 100 ) == 0 ) :
        resolver . total_rtt = 0
    #endif

    #
    # Matching nonce means this reply answers our outstanding request,
    # so we can measure its round-trip time.
    #
    if ( resolver . last_nonce == nonce ) :
        resolver . total_rtt += ( time . time ( ) - resolver . last_used )
        resolver . last_nonce = 0
    #endif

    # Periodically clear a stale outstanding nonce.
    if ( ( resolver . neg_map_replies_received % 10 ) == 0 ) :
        resolver . last_nonce = 0
    #endif
    return
if 50 - 50: I1Ii111 / OoooooooOO . o0oOOo0O0Ooo + I1IiiI * i11iIiiIii
if 28 - 28: I1Ii111 * II111iiii
if 14 - 14: iIii1I11I1II1 / Ii1I + o0oOOo0O0Ooo . iII111i % iII111i . i1IIi
if 67 - 67: IiII * II111iiii + ooOoO0o - i11iIiiIii
if 15 - 15: I11i
if 67 - 67: iIii1I11I1II1
if 91 - 91: ooOoO0o
def lisp_process_map_reply ( lisp_sockets , packet , source , ttl , itr_in_ts ) :
    """
    Parse a received Map-Reply and install or refresh map-cache entries.

    For each EID-record: decode its RLOC-records into an RLOC-set,
    optionally narrow the set for NAT-traversal, prune no-address (JSON)
    RLOCs, then add/replace the entry in the global lisp_map_cache.
    Brand-new entries trigger immediate RLOC-probes; rekeyed RLOCs are
    pushed to the data-plane IPC socket when one is configured.
    """
    global lisp_map_cache

    map_reply = lisp_map_reply ( )
    packet = map_reply . decode ( packet )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Reply packet" )
        return
    #endif
    map_reply . print_map_reply ( )

    #
    # Remember the last RLOC whose keys recently changed so the
    # data-plane can be informed after the cache is updated.
    #
    rekeyed_rloc = None

    for _ in range ( map_reply . record_count ) :
        eid_record = lisp_eid_record ( )
        packet = eid_record . decode ( packet )
        if ( packet == None ) :
            lprint ( "Could not decode EID-record in Map-Reply packet" )
            return
        #endif
        eid_record . print_record ( " " , False )

        #
        # A record with no RLOCs is a negative reply; account it against
        # the sending map-resolver.
        #
        if ( eid_record . rloc_count == 0 ) :
            lisp_store_mr_stats ( source , map_reply . nonce )
        #endif

        is_multicast = ( eid_record . group . is_null ( ) == False )

        #
        # In decent-push mode, skip drop-action multicast records whose
        # source EID is local.
        #
        if ( lisp_decent_push_configured ) :
            action = eid_record . action
            if ( is_multicast and action == LISP_DROP_ACTION ) :
                if ( eid_record . eid . is_local ( ) ) : continue
            #endif
        #endif

        if ( is_multicast == False and eid_record . eid . is_null ( ) ) : continue

        #
        # Look up any existing map-cache entry; (S,G) entries need a
        # second, source-specific lookup.
        #
        if ( is_multicast ) :
            mc = lisp_map_cache . lookup_cache ( eid_record . group , True )
            if ( mc ) :
                mc = mc . lookup_source_cache ( eid_record . eid , False )
            #endif
        else :
            mc = lisp_map_cache . lookup_cache ( eid_record . eid , True )
        #endif
        brand_new_entry = ( mc == None )

        #
        # Never overwrite gleaned entries, and skip EIDs configured to
        # be gleaned.
        #
        if ( mc == None ) :
            gleaned , _ , _ = lisp_allow_gleaning ( eid_record . eid ,
                eid_record . group , None )
            if ( gleaned ) : continue
        else :
            if ( mc . gleaned ) : continue
        #endif

        rloc_set = [ ]
        mrloc = None
        first_rloc_name = None
        for _ in range ( eid_record . rloc_count ) :
            rloc_record = lisp_rloc_record ( )
            rloc_record . keys = map_reply . keys
            packet = rloc_record . decode ( packet , map_reply . nonce )
            if ( packet == None ) :
                lprint ( "Could not decode RLOC-record in Map-Reply packet" )
                return
            #endif
            rloc_record . print_record ( " " )

            #
            # Reuse the cached lisp_rloc object when this RLOC is
            # already known for the entry.
            #
            cached_rloc = None
            if ( mc ) : cached_rloc = mc . get_rloc ( rloc_record . rloc )
            rloc = cached_rloc if cached_rloc else lisp_rloc ( )

            port = rloc . store_rloc_from_record ( rloc_record ,
                map_reply . nonce , source )
            rloc . echo_nonce_capable = map_reply . echo_nonce_capable

            if ( rloc . echo_nonce_capable ) :
                addr_str = rloc . rloc . print_address_no_iid ( )
                if ( lisp_get_echo_nonce ( None , addr_str ) == None ) :
                    lisp_echo_nonce ( addr_str )
                #endif
            #endif

            #
            # Fold the ITR input timestamp into telemetry JSON RLOCs.
            #
            if ( rloc . json ) :
                if ( lisp_is_json_telemetry ( rloc . json . json_string ) ) :
                    telemetry = rloc . json . json_string
                    telemetry = lisp_encode_telemetry ( telemetry , ii = itr_in_ts )
                    rloc . json . json_string = telemetry
                #endif
            #endif

            # Remember the first RLOC-name seen in this RLOC-set.
            if ( first_rloc_name == None ) :
                first_rloc_name = rloc . rloc_name
            #endif

            #
            # Probe replies are processed per matching address-family;
            # track a multicast RLOC for subsequent probe processing.
            #
            if ( map_reply . rloc_probe and rloc_record . probe_bit ) :
                if ( rloc . rloc . afi == source . afi ) :
                    lisp_process_rloc_probe_reply ( rloc , source , port ,
                        map_reply , ttl , mrloc , first_rloc_name )
                #endif
                if ( rloc . rloc . is_multicast_address ( ) ) : mrloc = rloc
            #endif

            rloc_set . append ( rloc )

            if ( lisp_data_plane_security and rloc . rloc_recent_rekey ( ) ) :
                rekeyed_rloc = rloc
            #endif
        #endfor

        #
        # NAT-traversal narrowing (never on probe replies): keep private
        # addresses (forced to priority 1, unreachable state) and the
        # RLOCs appropriate for our role (254 = RTR-only RLOCs).
        #
        if ( map_reply . rloc_probe == False and lisp_nat_traversal ) :
            kept_rlocs = [ ]
            kept_addrs = [ ]
            for rloc in rloc_set :
                if ( rloc . rloc . is_private_address ( ) ) :
                    rloc . priority = 1
                    rloc . state = LISP_RLOC_UNREACH_STATE
                    kept_rlocs . append ( rloc )
                    kept_addrs . append ( rloc . rloc . print_address_no_iid ( ) )
                    continue
                #endif
                if ( rloc . priority == 254 and lisp_i_am_rtr == False ) :
                    kept_rlocs . append ( rloc )
                    kept_addrs . append ( rloc . rloc . print_address_no_iid ( ) )
                #endif
                if ( rloc . priority != 254 and lisp_i_am_rtr ) :
                    kept_rlocs . append ( rloc )
                    kept_addrs . append ( rloc . rloc . print_address_no_iid ( ) )
                #endif
            #endfor
            if ( kept_addrs != [ ] ) :
                rloc_set = kept_rlocs
                lprint ( "NAT-traversal optimized RLOC-set: {}" . format ( kept_addrs ) )
            #endif
        #endif

        #
        # Prune JSON-only (no-address) RLOC-records before caching.
        #
        addressed_rlocs = [ ]
        for rloc in rloc_set :
            if ( rloc . json != None ) : continue
            addressed_rlocs . append ( rloc )
        #endfor
        if ( addressed_rlocs != [ ] ) :
            pruned = len ( rloc_set ) - len ( addressed_rlocs )
            lprint ( "Pruning {} no-address RLOC-records for map-cache" . format ( pruned ) )
            rloc_set = addressed_rlocs
        #endif

        #
        # A probe reply never replaces an existing entry's RLOC-set.
        #
        if ( map_reply . rloc_probe and mc != None ) : rloc_set = mc . rloc_set

        #
        # Detect an RLOC-set change so stale probe-list state is purged.
        #
        rloc_set_changed = brand_new_entry
        if ( mc and rloc_set != mc . rloc_set ) :
            mc . delete_rlocs_from_rloc_probe_list ( )
            rloc_set_changed = True
        #endif

        previous_uptime = mc . uptime if ( mc ) else None
        if ( mc == None ) :
            mc = lisp_mapping ( eid_record . eid , eid_record . group , rloc_set )
            mc . mapping_source = source
        #endif

        #
        # RTRs use a fixed TTL for multicast entries.
        #
        if ( lisp_i_am_rtr and eid_record . group . is_null ( ) == False ) :
            mc . map_cache_ttl = LISP_MCAST_TTL
        else :
            mc . map_cache_ttl = eid_record . store_ttl ( )
        #endif
        mc . action = eid_record . action
        mc . add_cache ( rloc_set_changed )

        verb = "Add"
        if ( previous_uptime ) :
            # Keep the original uptime when replacing an existing entry.
            mc . uptime = previous_uptime
            mc . refresh_time = lisp_get_timestamp ( )
            verb = "Replace"
        #endif

        lprint ( "{} {} map-cache with {} RLOCs" . format ( verb ,
            green ( mc . print_eid_tuple ( ) , False ) , len ( rloc_set ) ) )

        #
        # Tell the external data-plane about rekeyed RLOCs.
        #
        if ( lisp_ipc_dp_socket and rekeyed_rloc != None ) :
            lisp_write_ipc_keys ( rekeyed_rloc )
        #endif

        #
        # A brand-new entry triggers an immediate RLOC-probe per RLOC.
        #
        if ( brand_new_entry ) :
            probe_str = bold ( "RLOC-probe" , False )
            for rloc in mc . best_rloc_set :
                addr_str = red ( rloc . rloc . print_address_no_iid ( ) , False )
                lprint ( "Trigger {} to {}" . format ( probe_str , addr_str ) )
                lisp_send_map_request ( lisp_sockets , 0 , mc . eid , mc . group , rloc )
            #endfor
        #endif
    #endfor
    return
if 10 - 10: iIii1I11I1II1 . I1IiiI - II111iiii + O0
if 97 - 97: oO0o . Oo0Ooo % ooOoO0o + I1Ii111 . i11iIiiIii + Ii1I
if 61 - 61: IiII + iII111i
if 15 - 15: II111iiii / iIii1I11I1II1 / I1ii11iIi11i % OoOoOO00 % OoO0O00 - I1Ii111
if 17 - 17: OoooooooOO
if 23 - 23: OoO0O00
if 26 - 26: I11i % IiII . OoooooooOO % i11iIiiIii * IiII
if 55 - 55: I11i / I11i - IiII - I11i
def lisp_compute_auth ( packet , map_register , password ) :
    """
    Compute and embed authentication data into a Map-Register packet.

    No-ops (returns the packet unchanged) when the register's alg_id is
    LISP_NONE_ALG_ID. Otherwise the auth field is zeroed, the HMAC is
    computed over the packet with the shared password, stored on the
    map_register, and encoded back into the packet.
    """
    if ( map_register . alg_id == LISP_NONE_ALG_ID ) : return ( packet )

    # Hash is computed over the packet with a zeroed auth field.
    packet = map_register . zero_auth ( packet )
    digest = lisp_hash_me ( packet , map_register . alg_id , password , False )

    map_register . auth_data = digest
    packet = map_register . encode_auth ( packet )
    return ( packet )
if 38 - 38: OOooOOo - OoOoOO00 / OoO0O00 / o0oOOo0O0Ooo - i11iIiiIii
if 4 - 4: I1IiiI * o0oOOo0O0Ooo - I11i - OoooooooOO . OoooooooOO
if 79 - 79: oO0o - iII111i
if 34 - 34: OoooooooOO + Ii1I - iII111i + OoooooooOO / I1IiiI
if 39 - 39: o0oOOo0O0Ooo . i1IIi * OoO0O00 / II111iiii / I1ii11iIi11i * OOooOOo
if 39 - 39: O0 . OOooOOo
if 95 - 95: I11i
def lisp_hash_me ( packet , alg_id , password , do_hex ) :
    """
    Compute an HMAC over 'packet' keyed with 'password'.

    alg_id selects the hash: LISP_SHA_1_96_ALG_ID -> SHA-1,
    LISP_SHA_256_128_ALG_ID -> SHA-256. Returns True (no hashing) for
    LISP_NONE_ALG_ID. When do_hex is True the hex string digest is
    returned, otherwise the raw byte digest.

    Fix: an unrecognized alg_id used to crash with UnboundLocalError
    because no hash function was ever assigned; now it fails closed by
    returning None (which can never compare equal to a real digest).
    """
    if ( alg_id == LISP_NONE_ALG_ID ) : return ( True )

    if ( alg_id == LISP_SHA_1_96_ALG_ID ) :
        hash_function = hashlib . sha1
    elif ( alg_id == LISP_SHA_256_128_ALG_ID ) :
        hash_function = hashlib . sha256
    else :
        # Unknown algorithm ID - fail closed instead of raising
        # UnboundLocalError as the previous code did.
        return ( None )
    #endif

    if ( do_hex ) :
        digest = hmac . new ( password . encode ( ) , packet , hash_function ) . hexdigest ( )
    else :
        digest = hmac . new ( password . encode ( ) , packet , hash_function ) . digest ( )
    #endif
    return ( digest )
if 1 - 1: I1ii11iIi11i
if 18 - 18: i11iIiiIii % OoO0O00 % OOooOOo . OOooOOo * Ii1I / II111iiii
if 81 - 81: iII111i % IiII / I11i
if 50 - 50: IiII + i1IIi % I1Ii111
if 72 - 72: I1Ii111
if 6 - 6: II111iiii - i1IIi
if 78 - 78: OoOoOO00 - Oo0Ooo * II111iiii % iIii1I11I1II1 . i11iIiiIii % iII111i
if 85 - 85: I1ii11iIi11i + OOooOOo % i1IIi
def lisp_verify_auth ( packet , alg_id , auth_data , password ) :
    """
    Verify the authentication data carried in a packet.

    Recomputes the hex HMAC over 'packet' with the shared password and
    compares it to 'auth_data'. Always True for LISP_NONE_ALG_ID; logs a
    diagnostic when the values do not match.
    """
    if ( alg_id == LISP_NONE_ALG_ID ) : return ( True )

    computed = lisp_hash_me ( packet , alg_id , password , True )
    matched = ( computed == auth_data )

    if ( matched == False ) :
        lprint ( "Hashed value: {} does not match packet value: {}" . format ( computed , auth_data ) )
    #endif
    return ( matched )
if 4 - 4: OOooOOo + I1ii11iIi11i - iII111i + OOooOOo / IiII
if 23 - 23: iIii1I11I1II1 + OoooooooOO + ooOoO0o . iII111i . Oo0Ooo - iIii1I11I1II1
if 25 - 25: O0 + I1IiiI % OOooOOo / Oo0Ooo . IiII / I1Ii111
if 84 - 84: ooOoO0o . O0 + I1IiiI * OoO0O00 - I1IiiI
if 24 - 24: Ii1I
if 23 - 23: Oo0Ooo * i1IIi / I1IiiI . I11i - I1ii11iIi11i . iIii1I11I1II1
if 15 - 15: O0 + o0oOOo0O0Ooo / oO0o
def lisp_retransmit_map_notify(map_notify):
    """
    Timer-driven retransmission of an unacknowledged Map-Notify.

    When the retry budget is exhausted the entry is removed from the
    global retransmit queue and abandoned; otherwise the stored packet
    is resent to the ETR and the retransmit timer is re-armed for
    another LISP_MAP_NOTIFY_INTERVAL seconds.
    """
    etr_address = map_notify.etr
    etr_port = map_notify.etr_port

    # Give up after the configured number of retries and dequeue.
    if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}".format(map_notify.nonce_key, red(etr_address.print_address(), False)))

        queue_key = map_notify.nonce_key
        if (queue_key in lisp_map_notify_queue):
            map_notify.retransmit_timer.cancel()
            lprint("Dequeue Map-Notify from retransmit queue, key is: {}".format(queue_key))
            try:
                lisp_map_notify_queue.pop(queue_key)
            except:
                lprint("Key not found in Map-Notify queue")
        return

    sockets = map_notify.lisp_sockets
    map_notify.retry_count += 1

    lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format(
        bold("Map-Notify", False), map_notify.nonce_key,
        red(etr_address.print_address(), False), map_notify.retry_count))

    lisp_send_map_notify(sockets, map_notify.packet, etr_address, etr_port)
    if (map_notify.site): map_notify.site.map_notifies_sent += 1

    # Arm the next retransmission attempt.
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
if 7 - 7: O0 % ooOoO0o / oO0o
if 36 - 36: I1ii11iIi11i - oO0o + iII111i / I11i
if 62 - 62: I1Ii111 . ooOoO0o % I1ii11iIi11i . ooOoO0o - iIii1I11I1II1 + iII111i
if 79 - 79: II111iiii / I1Ii111 + II111iiii + Oo0Ooo - IiII / I1ii11iIi11i
if 93 - 93: OOooOOo
if 65 - 65: i1IIi * ooOoO0o * OoooooooOO - i11iIiiIii + IiII - o0oOOo0O0Ooo
if 12 - 12: I1IiiI
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
    eid_record):
    """
    Send a Map-Notify carrying the merged RLOC-set of a multi-homed
    registration to every registered RLOC of 'parent'.

    'eid_record' is re-encoded with the full merged RLOC count, one
    RLOC-record per registered RLOC is appended, and then a separately
    authenticated Map-Notify is unicast to each RLOC and queued for
    retransmission until acknowledged.

    NOTE: the "if N - N:" lines are dead filler from the obfuscator and
    never execute.
    """
    if 34 - 34: o0oOOo0O0Ooo / I1IiiI * i11iIiiIii + I1Ii111 / IiII
    if 55 - 55: iIii1I11I1II1 % iIii1I11I1II1 % iII111i
    if 80 - 80: OoooooooOO % iII111i * IiII % IiII
    if 34 - 34: OoO0O00
    # Re-encode the EID-record with the merged RLOC count.
    eid_record.rloc_count = len(parent.registered_rlocs)
    iiiIIi1III = eid_record.encode()
    eid_record.print_record("Merged Map-Notify ", False)
    if 15 - 15: IiII . i11iIiiIii . OoOoOO00 . OoOoOO00 . oO0o
    if 98 - 98: I1IiiI * I1Ii111
    if 28 - 28: ooOoO0o - I1IiiI . Ii1I - I1ii11iIi11i + iIii1I11I1II1
    if 64 - 64: OoOoOO00 + I1ii11iIi11i - OoooooooOO + I11i + i1IIi
    # Append one RLOC-record for each RLOC in the merged set.
    for o0Oo0Oo0oO000 in parent.registered_rlocs:
        oO0OoOOO = lisp_rloc_record()
        oO0OoOOO.store_rloc_entry(o0Oo0Oo0oO000)
        oO0OoOOO.local_bit = True
        oO0OoOOO.probe_bit = False
        oO0OoOOO.reach_bit = True
        iiiIIi1III += oO0OoOOO.encode()
        oO0OoOOO.print_record(" ")
        del (oO0OoOOO)
    if 93 - 93: I1IiiI % i11iIiiIii
    if 45 - 45: OoooooooOO * o0oOOo0O0Ooo - OOooOOo + O0
    if 64 - 64: iII111i * I1ii11iIi11i - OoOoOO00
    if 1 - 1: i1IIi / OoO0O00 % i1IIi % i11iIiiIii / i1IIi
    if 8 - 8: O0 / OOooOOo + iII111i % iIii1I11I1II1 % iIii1I11I1II1 . ooOoO0o
    # Unicast a separately authenticated Map-Notify to each RLOC, using
    # the key-id/alg-id negotiated by the triggering Map-Register.
    for o0Oo0Oo0oO000 in parent.registered_rlocs:
        I1i1iiIi = o0Oo0Oo0oO000.rloc
        IIiI = lisp_map_notify(lisp_sockets)
        IIiI.record_count = 1
        i11iII1 = map_register.key_id
        IIiI.key_id = i11iII1
        IIiI.alg_id = map_register.alg_id
        IIiI.auth_len = map_register.auth_len
        IIiI.nonce = map_register.nonce
        IIiI.nonce_key = lisp_hex_string(IIiI.nonce)
        IIiI.etr.copy_address(I1i1iiIi)
        IIiI.etr_port = map_register.sport
        IIiI.site = parent.site
        Oo00oo = IIiI.encode(iiiIIi1III, parent.site.auth_key[i11iII1])
        IIiI.print_notify()
        if 89 - 89: I1IiiI % ooOoO0o / Ii1I * I1IiiI + I1Ii111
        if 99 - 99: O0
        if 40 - 40: OoO0O00 - oO0o / o0oOOo0O0Ooo . oO0o
        if 89 - 89: i11iIiiIii - II111iiii
        # Replace any Map-Notify already queued under this nonce/ETR key
        # (cancel its pending retransmit timer first).
        Ooo00o000o = IIiI.nonce_key
        if (Ooo00o000o in lisp_map_notify_queue):
            o0O0oOo000 = lisp_map_notify_queue[Ooo00o000o]
            o0O0oOo000.retransmit_timer.cancel()
            del (o0O0oOo000)
        if 51 - 51: OOooOOo % I1Ii111 / OoooooooOO % o0oOOo0O0Ooo
        lisp_map_notify_queue[Ooo00o000o] = IIiI
        if 18 - 18: i1IIi
        if 70 - 70: I1Ii111
        if 55 - 55: iII111i % O0
        if 57 - 57: OoooooooOO - Ii1I * i1IIi - i11iIiiIii % I1IiiI
        lprint("Send merged Map-Notify to ETR {}".format(red(I1i1iiIi.print_address(), False)))
        if 65 - 65: IiII
        lisp_send(lisp_sockets, I1i1iiIi, LISP_CTRL_PORT, Oo00oo)
        if 53 - 53: iIii1I11I1II1 / II111iiii . I1ii11iIi11i + OoooooooOO % OOooOOo
        parent.site.map_notifies_sent += 1
        if 41 - 41: i1IIi / oO0o % OoooooooOO * OOooOOo + I1ii11iIi11i
        if 56 - 56: OOooOOo * OOooOOo / o0oOOo0O0Ooo
        if 4 - 4: OoOoOO00 / OoO0O00
        if 66 - 66: I1Ii111 / OoOoOO00
        # Retransmit until the ETR acknowledges this Map-Notify.
        IIiI.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
            lisp_retransmit_map_notify, [IIiI])
        IIiI.retransmit_timer.start()
    if 53 - 53: OoOoOO00 . i11iIiiIii - OoooooooOO
    return
if 92 - 92: O0 - i11iIiiIii + OoO0O00 - OoooooooOO - o0oOOo0O0Ooo
if 25 - 25: oO0o / oO0o / Ii1I / O0
if 56 - 56: ooOoO0o
if 19 - 19: O0 * I1IiiI + I1ii11iIi11i
if 25 - 25: I11i - ooOoO0o / OoO0O00 / iII111i - OoO0O00
if 86 - 86: OoO0O00
if 89 - 89: OoooooooOO % iII111i * I1ii11iIi11i + I1ii11iIi11i . Oo0Ooo
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
    source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
    """
    Build, authenticate, queue and send a Map-Notify.

    eid_records      -- pre-encoded EID-record payload to carry
    eid_list         -- EID-tuples covered (used to de-dup the queue)
    record_count     -- number of EID-records in 'eid_records'
    source, port     -- address and UDP port of the xTR to notify
    nonce            -- nonce echoed from the triggering Map-Register
    key_id, alg_id, auth_len -- authentication parameters for encoding
    site             -- site instance supplying auth_key and counters
    map_register_ack -- True when this Map-Notify merely acks a
                        Map-Register; such notifies are fire-and-forget
                        and are not queued for retransmission

    Fix: removed the no-op self-assignment 'key_id = key_id' that was in
    the original body.
    """
    Ooo00o000o = lisp_hex_string(nonce) + source.print_address()

    # Drop queued notifies that cover the same EIDs, then bail out if a
    # Map-Notify with this exact nonce/source is still awaiting its ack.
    lisp_remove_eid_from_map_notify_queue(eid_list)
    if (Ooo00o000o in lisp_map_notify_queue):
        IIiI = lisp_map_notify_queue[Ooo00o000o]
        I111 = red(source.print_address_no_iid(), False)
        lprint("Map-Notify with nonce 0x{} pending for xTR {}".format(
            lisp_hex_string(IIiI.nonce), I111))
        return

    IIiI = lisp_map_notify(lisp_sockets)
    IIiI.record_count = record_count
    IIiI.key_id = key_id
    IIiI.alg_id = alg_id
    IIiI.auth_len = auth_len
    IIiI.nonce = nonce
    IIiI.nonce_key = lisp_hex_string(nonce)
    IIiI.etr.copy_address(source)
    IIiI.etr_port = port
    IIiI.site = site
    IIiI.eid_list = eid_list

    # Only RLOC-set-change notifies are retransmitted, so only those go
    # on the retransmit queue.
    if (map_register_ack == False):
        Ooo00o000o = IIiI.nonce_key
        lisp_map_notify_queue[Ooo00o000o] = IIiI

    if (map_register_ack):
        lprint("Send Map-Notify to ack Map-Register")
    else:
        lprint("Send Map-Notify for RLOC-set change")

    # Authenticate with the site's key for 'key_id' and encode.
    Oo00oo = IIiI.encode(eid_records, site.auth_key[key_id])
    IIiI.print_notify()

    # For RLOC-set changes, log the (first) EID-record being announced.
    if (map_register_ack == False):
        I1Ii111I111I = lisp_eid_record()
        I1Ii111I111I.decode(eid_records)
        I1Ii111I111I.print_record(" ", False)

    lisp_send_map_notify(lisp_sockets, Oo00oo, IIiI.etr, port)
    site.map_notifies_sent += 1

    if (map_register_ack): return

    # Retransmit until the xTR acknowledges.
    IIiI.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [IIiI])
    IIiI.retransmit_timer.start()
    return
if 95 - 95: ooOoO0o * O0 + OOooOOo
if 11 - 11: i1IIi / OoOoOO00 + OoOoOO00 + I1ii11iIi11i + OOooOOo
if 21 - 21: ooOoO0o
if 28 - 28: OoOoOO00 + OoOoOO00 - OoOoOO00 / ooOoO0o
if 81 - 81: oO0o
if 34 - 34: o0oOOo0O0Ooo * OOooOOo - i1IIi * o0oOOo0O0Ooo * Oo0Ooo
if 59 - 59: iIii1I11I1II1 / Oo0Ooo % II111iiii
if 55 - 55: ooOoO0o - IiII + o0oOOo0O0Ooo
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
    """
    Turn 'map_notify' into a Map-Notify-Ack, authenticate it with the
    map-server's shared password, and send it back to map-server 'ms'.
    """
    map_notify.map_notify_ack = True

    # Re-encode with the ack bit set and the map-server's auth key.
    packet = map_notify.encode(eid_records, ms.password)
    map_notify.print_notify()

    # Ship it to the map-server over the LISP control port.
    dest = ms.map_server
    lprint("Send Map-Notify-Ack to {}".format(
        red(dest.print_address(), False)))
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return
if 48 - 48: iII111i % O0 % Ii1I * OoO0O00 . OoO0O00
if 74 - 74: OoO0O00 * i1IIi + I1ii11iIi11i / o0oOOo0O0Ooo / i1IIi
if 94 - 94: Ii1I
if 13 - 13: OoO0O00 - II111iiii . iII111i + OoOoOO00 / i11iIiiIii
if 32 - 32: ooOoO0o / II111iiii / I1ii11iIi11i
if 34 - 34: iIii1I11I1II1
if 47 - 47: OOooOOo * iII111i
if 71 - 71: IiII - OoooooooOO * i11iIiiIii . OoooooooOO % i1IIi . Oo0Ooo
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
    """
    Send an unsolicited Map-Notify for a multicast (S,G) site-EID to a
    single xTR so it learns the current RLOC-set/replication list.

    When the registered RLOC-set contains RTRs, only the RTR entries are
    advertised -- unless the target 'xtr' is itself one of those RTRs,
    in which case only the non-RTR entries are advertised.  The notify
    is unauthenticated, queued, and retransmitted until acknowledged.

    NOTE: the "if N - N:" lines are dead filler from the obfuscator.
    """
    if 3 - 3: OoO0O00 + i11iIiiIii + oO0o * IiII
    IIiI = lisp_map_notify(lisp_sockets)
    IIiI.record_count = 1
    IIiI.nonce = lisp_get_control_nonce()
    IIiI.nonce_key = lisp_hex_string(IIiI.nonce)
    IIiI.etr.copy_address(xtr)
    IIiI.etr_port = LISP_CTRL_PORT
    IIiI.eid_list = eid_list
    Ooo00o000o = IIiI.nonce_key
    if 19 - 19: iII111i / II111iiii . I1Ii111 * I1IiiI - OOooOOo
    if 70 - 70: OoO0O00
    if 42 - 42: OoooooooOO - I1Ii111 + I1ii11iIi11i * iII111i * iII111i / OoO0O00
    if 85 - 85: O0 . II111iiii
    if 80 - 80: O0 * I11i * I1Ii111
    if 89 - 89: Ii1I * OoO0O00 . i1IIi . O0 - IiII - OoOoOO00
    # De-dup: drop queued notifies covering the same EIDs, and bail out
    # if one with this nonce is already awaiting acknowledgement.
    lisp_remove_eid_from_map_notify_queue(IIiI.eid_list)
    if (Ooo00o000o in lisp_map_notify_queue):
        IIiI = lisp_map_notify_queue[Ooo00o000o]
        lprint("Map-Notify with nonce 0x{} pending for ITR {}".format(IIiI.nonce, red(xtr.print_address_no_iid(), False)))
        if 25 - 25: iII111i + i1IIi
        return
    if 64 - 64: IiII % I11i / iIii1I11I1II1
    if 66 - 66: Ii1I
    if 55 - 55: OOooOOo + I1IiiI + IiII . Ii1I * oO0o
    if 71 - 71: IiII - iII111i % I1IiiI * iII111i
    if 27 - 27: ooOoO0o - OoO0O00
    lisp_map_notify_queue[Ooo00o000o] = IIiI
    if 83 - 83: iII111i * OoOoOO00 - O0 * Ii1I
    if 79 - 79: I11i / iII111i % Ii1I / OoOoOO00 % O0 / IiII
    if 32 - 32: IiII * II111iiii . Ii1I
    if 68 - 68: I11i / O0
    # Decide whether to advertise RTR entries or non-RTR entries.
    IIiI1IIiI = site_eid.rtrs_in_rloc_set()
    if (IIiI1IIiI):
        if (site_eid.is_rtr_in_rloc_set(xtr)): IIiI1IIiI = False
    if 33 - 33: I1IiiI / I1IiiI / I1ii11iIi11i * IiII / Ii1I
    if 55 - 55: i11iIiiIii / OoooooooOO - Ii1I * Oo0Ooo . I1Ii111
    if 96 - 96: IiII / OoooooooOO + i11iIiiIii . Ii1I
    if 64 - 64: OoooooooOO / IiII - IiII . Ii1I % Oo0Ooo
    if 35 - 35: iII111i * I1IiiI * Oo0Ooo + I1Ii111 + i1IIi - ooOoO0o
    # Build the (S,G) EID-record; count only the RLOCs that pass the
    # RTR/non-RTR filter (XOR selects exactly one of the two classes).
    I1Ii111I111I = lisp_eid_record()
    I1Ii111I111I.record_ttl = 1440
    I1Ii111I111I.eid.copy_address(site_eid.eid)
    I1Ii111I111I.group.copy_address(site_eid.group)
    I1Ii111I111I.rloc_count = 0
    for ii11Ii in site_eid.registered_rlocs:
        if (IIiI1IIiI ^ ii11Ii.is_rtr()): continue
        I1Ii111I111I.rloc_count += 1
    if 23 - 23: II111iiii - O0
    Oo00oo = I1Ii111I111I.encode()
    if 58 - 58: o0oOOo0O0Ooo * OoO0O00 + OoO0O00
    if 93 - 93: IiII - I1ii11iIi11i % I11i + i1IIi % OoO0O00
    if 20 - 20: oO0o . Oo0Ooo + IiII - II111iiii % Ii1I
    if 64 - 64: Ii1I % OoO0O00 + OOooOOo % OoOoOO00 + IiII
    IIiI.print_notify()
    I1Ii111I111I.print_record(" ", False)
    if 92 - 92: iII111i * Oo0Ooo - OoOoOO00
    if 33 - 33: i11iIiiIii - OoOoOO00 . OOooOOo * II111iiii . Ii1I
    if 59 - 59: OoOoOO00
    if 29 - 29: iII111i - II111iiii * OoooooooOO * OoooooooOO
    # Append the RLOC-records that passed the same filter.
    for ii11Ii in site_eid.registered_rlocs:
        if (IIiI1IIiI ^ ii11Ii.is_rtr()): continue
        oO0OoOOO = lisp_rloc_record()
        oO0OoOOO.store_rloc_entry(ii11Ii)
        oO0OoOOO.local_bit = True
        oO0OoOOO.probe_bit = False
        oO0OoOOO.reach_bit = True
        Oo00oo += oO0OoOOO.encode()
        oO0OoOOO.print_record(" ")
    if 15 - 15: IiII / OOooOOo / iIii1I11I1II1 / OoOoOO00
    if 91 - 91: i11iIiiIii % O0 . Oo0Ooo / I1Ii111
    if 62 - 62: Oo0Ooo . II111iiii % OoO0O00 . Ii1I * OOooOOo + II111iiii
    if 7 - 7: OOooOOo
    if 22 - 22: Oo0Ooo + ooOoO0o
    # Encode the Map-Notify with no authentication (empty password).
    Oo00oo = IIiI.encode(Oo00oo, "")
    if (Oo00oo == None): return
    if 71 - 71: OOooOOo . Ii1I * i11iIiiIii . I11i
    if 9 - 9: O0 / I1ii11iIi11i . iII111i . O0 + IiII % I11i
    if 27 - 27: i11iIiiIii - I1ii11iIi11i / O0 - i1IIi + I1IiiI * iII111i
    if 26 - 26: Oo0Ooo . Ii1I
    lisp_send_map_notify(lisp_sockets, Oo00oo, xtr, LISP_CTRL_PORT)
    if 7 - 7: OoOoOO00 - o0oOOo0O0Ooo + oO0o
    if 8 - 8: iIii1I11I1II1
    if 6 - 6: oO0o
    if 51 - 51: I1Ii111 - o0oOOo0O0Ooo
    # Retransmit until the xTR acknowledges this Map-Notify.
    IIiI.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [IIiI])
    IIiI.retransmit_timer.start()
    return
if 5 - 5: O0
if 7 - 7: OoOoOO00 + OoO0O00 * I1IiiI
if 63 - 63: I1ii11iIi11i + iII111i * i1IIi
if 63 - 63: I1ii11iIi11i / II111iiii % oO0o + ooOoO0o . Ii1I % I11i
if 59 - 59: I1Ii111 % o0oOOo0O0Ooo - I1IiiI * i1IIi
if 5 - 5: I1IiiI
if 22 - 22: II111iiii / iII111i
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
    """
    For each (S,G) entry in 'rle_list', determine which ITRs/RTRs need
    to learn the updated replication list and send each one a multicast
    Map-Notify via lisp_send_multicast_map_notify().

    rle_list -- list of [source-EID, group] pairs to process.

    NOTE: the "if N - N:" lines are dead filler from the obfuscator.
    """
    Iii1IIIi1I11 = lisp_address(LISP_AFI_NONE, "", 0, 0)
    if 9 - 9: I1IiiI - IiII . iIii1I11I1II1
    for IiiiIi in rle_list:
        o0Oo = lisp_site_eid_lookup(IiiiIi[0], IiiiIi[1], True)
        if (o0Oo == None): continue
        if 81 - 81: i1IIi + i1IIi
        if 3 - 3: I1Ii111 . I1ii11iIi11i * iII111i * i11iIiiIii * IiII
        if 52 - 52: iIii1I11I1II1 % o0oOOo0O0Ooo % I1IiiI
        if 71 - 71: I1IiiI + iII111i
        if 47 - 47: iIii1I11I1II1 . OoO0O00 . iIii1I11I1II1
        if 57 - 57: IiII * ooOoO0o * ooOoO0o * iIii1I11I1II1 * I1Ii111 + OoOoOO00
        if 83 - 83: OoOoOO00 . Oo0Ooo . OoO0O00
        # Merged registrations can leave the merged RLOC list empty; in
        # that case collect the RTRs from the individual registrations,
        # de-duplicated by RLOC address.
        o0oOo00OOoO0O = o0Oo.registered_rlocs
        if (len(o0oOo00OOoO0O) == 0):
            iII111 = {}
            for II1i11 in list(o0Oo.individual_registrations.values()):
                for ii11Ii in II1i11.registered_rlocs:
                    if (ii11Ii.is_rtr() == False): continue
                    iII111[ii11Ii.rloc.print_address()] = ii11Ii
                if 66 - 66: i11iIiiIii % I11i / Oo0Ooo * oO0o
            if 7 - 7: O0 - Ii1I - oO0o
            o0oOo00OOoO0O = list(iII111.values())
        if 95 - 95: i1IIi - OOooOOo / OoOoOO00 + I1ii11iIi11i + O0
        if 10 - 10: ooOoO0o - OOooOOo + i1IIi * Ii1I
        if 78 - 78: iIii1I11I1II1
        if 76 - 76: ooOoO0o - i11iIiiIii * I11i / I1IiiI - OOooOOo
        if 41 - 41: iII111i
        if 91 - 91: I1Ii111
        # Build the list of xTRs to notify.  For the (0.0.0.0/0, G)
        # entry, notify the existing RLE nodes; otherwise notify the
        # RTRs in the RLOC-set.
        oOoIiIi = []
        OO00oo0 = False
        if (o0Oo.eid.address == 0 and o0Oo.eid.mask_len == 0):
            Ii1IiiI = []
            iiI1Ii1II = []
            if (len(o0oOo00OOoO0O) != 0 and o0oOo00OOoO0O[0].rle != None):
                iiI1Ii1II = o0oOo00OOoO0O[0].rle.rle_nodes
            if 34 - 34: I11i / I1IiiI . II111iiii
            for iI11i1ii11i11 in iiI1Ii1II:
                oOoIiIi.append(iI11i1ii11i11.address)
                Ii1IiiI.append(iI11i1ii11i11.address.print_address_no_iid())
            if 79 - 79: I1Ii111 + IiII / OoooooooOO
            lprint("Notify existing RLE-nodes {}".format(Ii1IiiI))
        else:
            if 53 - 53: Ii1I
            if 85 - 85: OoO0O00 + II111iiii / OoO0O00 . II111iiii * OoOoOO00 * I1IiiI
            if 19 - 19: iII111i / Ii1I + iIii1I11I1II1 * O0 - Oo0Ooo
            if 47 - 47: iIii1I11I1II1 % I1ii11iIi11i
            if 33 - 33: oO0o . oO0o / IiII + II111iiii
            for ii11Ii in o0oOo00OOoO0O:
                if (ii11Ii.is_rtr()): oOoIiIi.append(ii11Ii.rloc)
            if 34 - 34: OoO0O00 . OoOoOO00 / i1IIi / OOooOOo
        if 12 - 12: o0oOOo0O0Ooo . Oo0Ooo / II111iiii
        if 18 - 18: I1Ii111 % II111iiii + Ii1I * Oo0Ooo - OoooooooOO . Oo0Ooo
        if 25 - 25: OoO0O00
        if 83 - 83: II111iiii . iIii1I11I1II1
        # No RTRs/RLE nodes found: fall back to the ITRs registered for
        # the (S, *) unicast source-EID.
        OO00oo0 = (len(oOoIiIi) != 0)
        if (OO00oo0 == False):
            I1i = lisp_site_eid_lookup(IiiiIi[0], Iii1IIIi1I11, False)
            if (I1i == None): continue
            if 77 - 77: O0 . OoOoOO00 % oO0o / OOooOOo
            for ii11Ii in I1i.registered_rlocs:
                if (ii11Ii.rloc.is_null()): continue
                oOoIiIi.append(ii11Ii.rloc)
            if 8 - 8: iII111i - i1IIi
            if 81 - 81: ooOoO0o / OOooOOo % OoOoOO00 . iIii1I11I1II1
        if 45 - 45: I1IiiI . ooOoO0o - OoooooooOO
        if 84 - 84: I1ii11iIi11i
        if 69 - 69: I1Ii111 + II111iiii
        if 92 - 92: OoooooooOO
        if (len(oOoIiIi) == 0):
            lprint("No ITRs or RTRs found for {}, Map-Notify suppressed".format(green(o0Oo.print_eid_tuple(), False)))
            if 80 - 80: I1ii11iIi11i % I1ii11iIi11i . OoO0O00 . oO0o % I1IiiI % I11i
            continue
        if 4 - 4: OoO0O00 / iII111i / I1ii11iIi11i - o0oOOo0O0Ooo * I1Ii111
        if 24 - 24: OoooooooOO / ooOoO0o + Oo0Ooo - OOooOOo - o0oOOo0O0Ooo . I1ii11iIi11i
        if 2 - 2: I1IiiI . o0oOOo0O0Ooo / Oo0Ooo - OoOoOO00 - OoooooooOO
        if 73 - 73: I1Ii111 . i11iIiiIii * ooOoO0o . IiII - I11i + I1Ii111
        if 21 - 21: I1Ii111 + iIii1I11I1II1 + I1IiiI / O0 * I1ii11iIi11i
        if 57 - 57: OOooOOo * I11i . oO0o
        # Send one Map-Notify per target; the brief sleep paces the
        # sends so we don't burst the control plane.
        for o0Oo0Oo0oO000 in oOoIiIi:
            lprint("Build Map-Notify to {}TR {} for {}".format("R" if OO00oo0 else "x", red(o0Oo0Oo0oO000.print_address_no_iid(), False),
                green(o0Oo.print_eid_tuple(), False)))
            if 60 - 60: OOooOOo * I1IiiI + i1IIi % I11i - I1ii11iIi11i + Ii1I
            OooOoo0oo = [o0Oo.print_eid_tuple()]
            lisp_send_multicast_map_notify(lisp_sockets, o0Oo, OooOoo0oo, o0Oo0Oo0oO000)
            time.sleep(.001)
        if 37 - 37: iII111i + o0oOOo0O0Ooo . OoO0O00 / i1IIi
    if 52 - 52: O0 * Oo0Ooo - I1ii11iIi11i * Oo0Ooo
    return
if 2 - 2: O0 - oO0o % ooOoO0o % IiII
if 86 - 86: OoO0O00 / oO0o - i11iIiiIii . Ii1I + OOooOOo - OOooOOo
if 28 - 28: O0 * ooOoO0o . OoOoOO00 * II111iiii . I1IiiI % I11i
if 28 - 28: I1ii11iIi11i * OoooooooOO
if 19 - 19: Oo0Ooo - iII111i % OoOoOO00 * i11iIiiIii / oO0o . i11iIiiIii
if 46 - 46: I1ii11iIi11i
if 50 - 50: OOooOOo * OoO0O00 * OOooOOo % I1IiiI - I1Ii111 * Ii1I
if 88 - 88: OOooOOo . iII111i / I11i
def lisp_find_sig_in_rloc_set(packet, rloc_count):
    """
    Scan 'rloc_count' RLOC-records in 'packet' and return the first one
    whose JSON payload carries a "signature" key, or None if none does.
    """
    for _ in range(rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        json_blob = rloc_record.json
        if (json_blob == None): continue

        # The JSON string comes off the wire; tolerate corruption.
        try:
            parsed = json.loads(json_blob.json_string)
        except:
            lprint("Found corrupted JSON signature")
            continue

        if ("signature" in parsed): return (rloc_record)
    return (None)
if 70 - 70: OoOoOO00 / OOooOOo - o0oOOo0O0Ooo
if 82 - 82: OOooOOo . i11iIiiIii . I1ii11iIi11i % OoOoOO00 * Ii1I / OoO0O00
if 56 - 56: o0oOOo0O0Ooo / I1IiiI + I11i + I1IiiI
if 34 - 34: Oo0Ooo / i11iIiiIii - ooOoO0o
if 77 - 77: OoOoOO00 * OoooooooOO
if 41 - 41: iIii1I11I1II1 - O0 . II111iiii + I1IiiI - II111iiii / oO0o
if 35 - 35: ooOoO0o - OoOoOO00 / iIii1I11I1II1 / OOooOOo
if 38 - 38: i1IIi % OoooooooOO
if 5 - 5: iIii1I11I1II1 + iIii1I11I1II1 . iIii1I11I1II1 + o0oOOo0O0Ooo
if 45 - 45: I1IiiI - OoooooooOO - I1Ii111 - i1IIi - OoooooooOO * O0
if 67 - 67: OoOoOO00 * o0oOOo0O0Ooo . IiII
if 72 - 72: OoOoOO00 % OoooooooOO * O0
if 27 - 27: I1ii11iIi11i . OoooooooOO / II111iiii . OOooOOo
if 58 - 58: oO0o / ooOoO0o
if 31 - 31: o0oOOo0O0Ooo % I11i - OoO0O00
if 40 - 40: o0oOOo0O0Ooo % OoOoOO00 + I11i / O0 - II111iiii
if 9 - 9: OoooooooOO - OOooOOo . I11i * oO0o
if 3 - 3: iIii1I11I1II1 - OoO0O00
if 38 - 38: O0 + ooOoO0o * I1Ii111 - oO0o * o0oOOo0O0Ooo
def lisp_get_eid_hash(eid):
    """
    Return the hash portion of a crypto-EID as a colon-separated hex
    string (without a trailing colon), or None when 'eid' is not more
    specific than any prefix configured in lisp_eid_hashes.
    """
    hash_bits = None
    for prefix in lisp_eid_hashes:
        # A prefix configured with instance-id -1 is a wildcard: borrow
        # the EID's instance-id for the match test, then restore it.
        saved_iid = prefix.instance_id
        if (saved_iid == -1): prefix.instance_id = eid.instance_id
        matched = eid.is_more_specific(prefix)
        prefix.instance_id = saved_iid
        if (matched):
            hash_bits = 128 - prefix.mask_len
            break

    if (hash_bits == None): return (None)

    # Emit the low-order 'hash_bits' of the address, 16 bits per
    # colon-separated quad, least-significant quad first in the loop
    # (prepended, so the string comes out most-significant first).
    address = eid.address
    hash_str = ""
    for _ in range(hash_bits // 16):
        quad = hex(address & 0xffff)[2::]
        hash_str = quad.zfill(4) + ":" + hash_str
        address >>= 16

    # Any leftover bits that do not fill a full 16-bit quad.
    if (hash_bits % 16 != 0):
        leftover = hex(address & 0xff)[2::]
        hash_str = leftover.zfill(2) + ":" + hash_str

    return (hash_str[0:-1])
if 100 - 100: Oo0Ooo . oO0o - iII111i + OoooooooOO
if 27 - 27: Oo0Ooo . I1Ii111 - i1IIi * I1IiiI
if 96 - 96: I1ii11iIi11i - Ii1I . I1ii11iIi11i
if 89 - 89: II111iiii % I1ii11iIi11i % IiII . I11i
if 49 - 49: iII111i % i11iIiiIii * I11i - oO0o . OOooOOo . i11iIiiIii
if 26 - 26: iIii1I11I1II1 + i11iIiiIii % iII111i + I1IiiI + oO0o - ooOoO0o
if 4 - 4: Oo0Ooo - IiII - I11i
if 72 - 72: OoooooooOO
if 19 - 19: Oo0Ooo . OOooOOo
if 58 - 58: IiII % iII111i + i1IIi % I1IiiI % OOooOOo . iII111i
if 85 - 85: i11iIiiIii . o0oOOo0O0Ooo * iII111i . I1ii11iIi11i / I1Ii111 % Ii1I
def lisp_lookup_public_key(eid):
    """
    Look up the public key registered for a crypto-EID.

    Returns a 3-element list [hash_eid, public_key, lookup_good]:
    hash_eid is the distinguished-name EID "hash-<hex>" used for the
    mapping-system lookup (None when 'eid' carries no hash), public_key
    is the registered key string or None, and lookup_good is True only
    when the site lookup itself succeeded.
    """
    iid = eid.instance_id

    # Convert the crypto-EID into its "hash-<hex-hash>" lookup name.
    eid_hash = lisp_get_eid_hash(eid)
    if (eid_hash == None): return ([None, None, False])

    eid_hash = "hash-" + eid_hash
    hash_eid = lisp_address(LISP_AFI_NAME, eid_hash, len(eid_hash), iid)
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)

    site_eid = lisp_site_eid_lookup(hash_eid, group, True)
    if (site_eid == None): return ([hash_eid, None, False])

    # Scan the registered RLOC-records for a JSON blob that carries a
    # "public-key" member.
    public_key = None
    for rloc_entry in site_eid.registered_rlocs:
        blob = rloc_entry.json
        if (blob == None): continue
        try:
            blob = json.loads(blob.json_string)
        except:
            lprint("Registered RLOC JSON format is invalid for {}".format(eid_hash))
            return ([hash_eid, None, False])

        if ("public-key" not in blob): continue
        public_key = blob["public-key"]
        break

    return ([hash_eid, public_key, True])
if 88 - 88: OoO0O00
if 82 - 82: OOooOOo / I11i / OoooooooOO % oO0o
if 27 - 27: oO0o + IiII
if 5 - 5: iIii1I11I1II1 + OoOoOO00 * I1Ii111 * i11iIiiIii
if 18 - 18: Oo0Ooo % OOooOOo % oO0o / I11i % O0
if 76 - 76: OoooooooOO % O0 / OoO0O00
if 41 - 41: i11iIiiIii - I1ii11iIi11i - II111iiii
if 5 - 5: OoOoOO00 + i1IIi
def lisp_verify_cga_sig(eid, rloc_record):
    """
    Verify the ECDSA signature carried in 'rloc_record's JSON blob for a
    crypto-EID.  The signed data is the printable signature-EID address;
    the verifying key is the public key registered under the EID's
    "hash-..." distinguished name.  Returns True on a good signature.

    NOTE: the "if N - N:" lines are dead filler from the obfuscator.
    """
    if 43 - 43: iII111i * I1IiiI
    if 20 - 20: I1IiiI . I11i * OoO0O00 . ooOoO0o . II111iiii
    if 6 - 6: Ii1I * OoOoOO00 % IiII + I11i
    if 20 - 20: oO0o
    if 34 - 34: i1IIi + oO0o * Oo0Ooo * I1Ii111 % OoooooooOO % ooOoO0o
    # Decide which EID the signature covers: the EID itself when it is
    # crypto-hashed, otherwise an explicit "signature-eid" in the JSON.
    IIIII1iII1 = json.loads(rloc_record.json.json_string)
    if 17 - 17: I1ii11iIi11i + o0oOOo0O0Ooo / OoO0O00 . Oo0Ooo - o0oOOo0O0Ooo / oO0o
    if (lisp_get_eid_hash(eid)):
        IIi1i = eid
    elif ("signature-eid" in IIIII1iII1):
        o0Oooooo00OoO = IIIII1iII1["signature-eid"]
        IIi1i = lisp_address(LISP_AFI_IPV6, o0Oooooo00OoO, 0, 0)
    else:
        lprint(" No signature-eid found in RLOC-record")
        return (False)
    if 65 - 65: I11i . I11i + i11iIiiIii - O0 / ooOoO0o . I11i
    if 44 - 44: I11i % I1Ii111 % i11iIiiIii / i1IIi - I11i
    if 42 - 42: II111iiii
    if 14 - 14: i11iIiiIii
    if 85 - 85: i11iIiiIii / Ii1I + Oo0Ooo / OoOoOO00 - I1IiiI
    # Fetch the public key registered for the hashed EID.
    iiii1I1I11, OOO0OoOooO0, i1IiiII = lisp_lookup_public_key(IIi1i)
    if (iiii1I1I11 == None):
        i1iiii = green(IIi1i.print_address(), False)
        lprint(" Could not parse hash in EID {}".format(i1iiii))
        return (False)
    if 8 - 8: iIii1I11I1II1 - I1Ii111 % OoOoOO00
    if 69 - 69: Ii1I
    ooo0o00o0Oooo = "found" if i1IiiII else bold("not found", False)
    i1iiii = green(iiii1I1I11.print_address(), False)
    lprint(" Lookup for crypto-hashed EID {} {}".format(i1iiii, ooo0o00o0Oooo))
    if (i1IiiII == False): return (False)
    if 86 - 86: II111iiii . OoOoOO00 % I1IiiI * OOooOOo . OoOoOO00 + O0
    if (OOO0OoOooO0 == None):
        lprint(" RLOC-record with public-key not found")
        return (False)
    if 15 - 15: i11iIiiIii / I1IiiI - iII111i
    if 75 - 75: o0oOOo0O0Ooo . I11i
    Ii1iii11II = OOO0OoOooO0[0:8] + "..." + OOO0OoOooO0[-8::]
    lprint(" RLOC-record with public-key '{}' found".format(Ii1iii11II))
    if 58 - 58: I1ii11iIi11i % Ii1I . Ii1I * I1ii11iIi11i . OOooOOo
    if 12 - 12: OoOoOO00 * ooOoO0o % OOooOOo - oO0o * OoO0O00 - IiII
    if 74 - 74: I1Ii111 - iII111i - II111iiii
    if 20 - 20: iIii1I11I1II1 % oO0o + o0oOOo0O0Ooo + oO0o % IiII
    if 84 - 84: IiII - O0 . I1ii11iIi11i % OOooOOo % iII111i + OoooooooOO
    # Base64-decode the signature from the JSON blob.
    oOOOOoo = IIIII1iII1["signature"]
    if 70 - 70: ooOoO0o
    try:
        IIIII1iII1 = binascii.a2b_base64(oOOOOoo)
    except:
        lprint(" Incorrect padding in signature string")
        return (False)
    if 51 - 51: O0 - IiII % Ii1I / OoOoOO00 * OoooooooOO
    if 57 - 57: Oo0Ooo % Oo0Ooo % O0 . I1Ii111 % I1ii11iIi11i
    # An ECDSA signature is r||s, so its byte length must be even.
    OO0O0O00Oo = len(IIIII1iII1)
    if (OO0O0O00Oo & 1):
        lprint(" Signature length is odd, length {}".format(OO0O0O00Oo))
        return (False)
    if 9 - 9: i11iIiiIii - i11iIiiIii / OOooOOo - ooOoO0o % OoOoOO00 + Ii1I
    if 3 - 3: iII111i / I1ii11iIi11i / I1IiiI - Oo0Ooo
    if 71 - 71: i11iIiiIii + Oo0Ooo % i11iIiiIii - i11iIiiIii
    if 84 - 84: oO0o
    if 55 - 55: oO0o
    # The signed data is the printable signature-EID address string.
    OoI1Ii = IIi1i.print_address()
    if 19 - 19: I11i
    if 77 - 77: ooOoO0o + OoO0O00 + Ii1I / I11i - Ii1I
    if 85 - 85: Oo0Ooo + Oo0Ooo
    if 70 - 70: I1ii11iIi11i % OoO0O00 * iIii1I11I1II1 . oO0o
    # Load the PEM-encoded public key and run the verification.
    OOO0OoOooO0 = binascii.a2b_base64(OOO0OoOooO0)
    try:
        Ooo00o000o = ecdsa.VerifyingKey.from_pem(OOO0OoOooO0)
    except:
        I1iIi = bold("Bad public-key", False)
        lprint(" {}, not in PEM format".format(I1iIi))
        return (False)
    if 2 - 2: I1Ii111
    if 45 - 45: OOooOOo * ooOoO0o
    if 77 - 77: i11iIiiIii / OOooOOo % i11iIiiIii
    if 19 - 19: OoooooooOO - I1IiiI * OoO0O00
    if 65 - 65: OoooooooOO . I11i / I1ii11iIi11i / i11iIiiIii
    if 20 - 20: OoOoOO00 / OoO0O00 - Oo0Ooo + ooOoO0o
    if 86 - 86: O0 / II111iiii / ooOoO0o % I1ii11iIi11i / iIii1I11I1II1
    if 1 - 1: O0
    if 55 - 55: i1IIi % IiII - i1IIi . IiII . o0oOOo0O0Ooo
    if 85 - 85: Ii1I . i11iIiiIii
    if 69 - 69: OoOoOO00
    # ecdsa raises on a bad signature; treat any failure as not-verified.
    try:
        i11i1I1 = Ooo00o000o.verify(IIIII1iII1, OoI1Ii.encode(), hashfunc=hashlib.sha256)
    except:
        lprint(" Signature library failed for signature data '{}'".format(OoI1Ii))
        if 49 - 49: Oo0Ooo % Oo0Ooo * OoOoOO00 - Oo0Ooo
        lprint(" Signature used '{}'".format(oOOOOoo))
        return (False)
    if 32 - 32: i1IIi . I11i - IiII % OoO0O00 % iIii1I11I1II1 - OoooooooOO
    return (i11i1I1)
if 47 - 47: OoO0O00 + II111iiii . IiII - I11i . iII111i . o0oOOo0O0Ooo
if 31 - 31: I1IiiI + O0 . I1IiiI - iII111i - I1Ii111
if 88 - 88: iII111i * OoO0O00 % OoooooooOO / oO0o
if 7 - 7: i1IIi
if 30 - 30: oO0o . i1IIi / I11i
if 23 - 23: i1IIi + oO0o % iII111i - OoO0O00 - i1IIi
if 74 - 74: Ii1I + I11i . OoooooooOO - I1ii11iIi11i
if 2 - 2: oO0o - o0oOOo0O0Ooo
if 80 - 80: i1IIi
if 40 - 40: O0 . ooOoO0o * iII111i . I11i + I1Ii111 % OoO0O00
def lisp_remove_eid_from_map_notify_queue(eid_list):
    """
    Dequeue every pending Map-Notify whose EID list intersects
    'eid_list', cancelling its retransmit timer.  Called before queuing
    a newer Map-Notify so a stale one is not retransmitted.
    """
    stale_keys = []
    for eid_tuple in eid_list:
        for queue_key in lisp_map_notify_queue:
            pending = lisp_map_notify_queue[queue_key]
            if (eid_tuple not in pending.eid_list): continue

            # Superseded entry: remember its key and stop its timer.
            stale_keys.append(queue_key)
            timer = pending.retransmit_timer
            if (timer): timer.cancel()

            lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".format(pending.nonce_key, green(eid_tuple, False)))

    # Pop outside the scan so the dict is not mutated while iterated.
    for queue_key in stale_keys: lisp_map_notify_queue.pop(queue_key)
    return
if 8 - 8: OOooOOo % OoOoOO00 % Oo0Ooo . II111iiii
if 92 - 92: OoOoOO00
if 26 - 26: Oo0Ooo
if 3 - 3: I11i . OoO0O00 . i1IIi - I1IiiI * oO0o
if 93 - 93: i1IIi + I1ii11iIi11i % Oo0Ooo + iIii1I11I1II1 / II111iiii
if 100 - 100: iIii1I11I1II1 / II111iiii / Ii1I * Ii1I - OoO0O00
if 36 - 36: ooOoO0o % i1IIi / OoOoOO00 % OoOoOO00 + Ii1I
if 35 - 35: Ii1I . ooOoO0o - ooOoO0o % OoO0O00 / oO0o
def lisp_decrypt_map_register(packet):
    """
    Decrypt an encrypted Map-Register.

    The first 4 bytes (type/flags word) are always cleartext; bit 13 of
    that word says whether the rest of the packet is ChaCha20-encrypted,
    and bits 14-16 carry the 3-bit key-id selecting one of the
    map-server's configured encryption keys.

    Returns the packet unchanged when it is not encrypted, the decrypted
    packet on success, or None when the key-id is not configured.

    NOTE: the "if N - N:" lines are dead filler from the obfuscator.
    """
    if 33 - 33: I1Ii111 / i11iIiiIii / I1ii11iIi11i
    if 44 - 44: OoOoOO00 * Oo0Ooo
    if 51 - 51: OOooOOo / IiII % I1Ii111 . OoOoOO00 % Ii1I
    if 88 - 88: OoO0O00
    if 28 - 28: I1Ii111 - iIii1I11I1II1
    ooo = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    oO0oOOoo0OO0 = (ooo >> 13) & 0x1        # encryption bit
    if (oO0oOOoo0OO0 == 0): return (packet)
    if 25 - 25: iII111i / iII111i
    Ii11iII11I1Ii = (ooo >> 14) & 0x7       # 3-bit key-id
    if 72 - 72: iII111i * OoooooooOO * OoooooooOO
    if 44 - 44: OoO0O00 . OoOoOO00 + I1Ii111
    if 9 - 9: IiII . I11i . I1Ii111 / i1IIi * OoOoOO00 - O0
    if 3 - 3: O0 / iIii1I11I1II1 % IiII + I11i
    # Look up the configured key; pad to ChaCha's 32-byte key length and
    # use a fixed 8-byte zero nonce.  An unknown key-id means we cannot
    # decrypt, so the packet is dropped (None).
    try:
        iI1II1I1i1 = lisp_ms_encryption_keys[Ii11iII11I1Ii]
        iI1II1I1i1 = iI1II1I1i1.zfill(32)
        ii = "0" * 8
    except:
        lprint("Cannot decrypt Map-Register with key-id {}".format(Ii11iII11I1Ii))
        return (None)
    if 51 - 51: ooOoO0o + oO0o
    if 13 - 13: IiII - OoO0O00 - ooOoO0o
    IiI11I111 = bold("Decrypt", False)
    lprint("{} Map-Register with key-id {}".format(IiI11I111, Ii11iII11I1Ii))
    if 46 - 46: oO0o + I1ii11iIi11i - OoOoOO00
    if 15 - 15: OoooooooOO + ooOoO0o * I1ii11iIi11i
    if 6 - 6: OoooooooOO % i1IIi % II111iiii + ooOoO0o / IiII + Ii1I
    if 97 - 97: ooOoO0o / I1Ii111 * I1ii11iIi11i
    # Decrypt everything after the cleartext first word (20 rounds).
    ooOooOooOOO = chacha.ChaCha(iI1II1I1i1, ii, 20).decrypt(packet[4::])
    return (packet[0:4] + ooOooOooOOO)
if 83 - 83: Ii1I + ooOoO0o
if 46 - 46: OoOoOO00
if 66 - 66: iII111i - O0 . I1Ii111 * i1IIi / OoO0O00 / II111iiii
if 35 - 35: ooOoO0o * OOooOOo / I11i % I11i / OoooooooOO . I1Ii111
if 70 - 70: I1ii11iIi11i % I1ii11iIi11i / oO0o
if 85 - 85: OoOoOO00 % I11i / Oo0Ooo + I11i - Oo0Ooo
if 20 - 20: IiII
def lisp_process_map_register ( lisp_sockets , packet , source , sport ) :
    """Map-Server processing of a received Map-Register message.

    Decrypts and decodes the message, locates (or dynamically creates)
    the site-EID entry for each EID-record, authenticates the register,
    optionally verifies an EID-crypto-hash signature, stores the new
    RLOC-set, and triggers Map-Notify messages where requested. The
    external interface (name, parameters, side effects on the site
    cache and lisp_registered_count) is unchanged.
    """
    global lisp_registered_count

    #
    # Decrypt first if the encrypt bit is set; bail on decryption failure.
    #
    packet = lisp_decrypt_map_register ( packet )
    if ( packet == None ) : return

    map_register = lisp_map_register ( )
    orig_packet , packet = map_register . decode ( packet )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Register packet" )
        return

    map_register . sport = sport
    map_register . print_map_register ( )

    #
    # Remember which SHA flavour the registerer used so Map-Notifies can
    # be authenticated the same way.
    #
    sha1_or_sha2 = True
    if ( map_register . auth_len == LISP_SHA1_160_AUTH_DATA_LEN ) :
        sha1_or_sha2 = True
    if ( map_register . alg_id == LISP_SHA_256_128_ALG_ID ) :
        sha1_or_sha2 = False

    # (eid, group) pairs that merged and need multicast Map-Notifies.
    multicast_eids = [ ]

    site = None
    saved_packet = packet          # start of EID-records, for final Map-Notify
    eid_list = [ ]                 # EID tuples registered in this message
    record_count = map_register . record_count

    for _ in range ( record_count ) :
        eid_record = lisp_eid_record ( )
        rloc_record = lisp_rloc_record ( )
        packet = eid_record . decode ( packet )
        if ( packet == None ) :
            lprint ( "Could not decode EID-record in Map-Register packet" )
            return
        eid_record . print_record ( "  " , False )

        #
        # Longest-match the registered prefix against configured sites.
        #
        site_eid = lisp_site_eid_lookup ( eid_record . eid , eid_record . group ,
            False )
        matched_eid_str = site_eid . print_eid_tuple ( ) if site_eid else None

        #
        # If the match does not accept more-specifics and is not an exact
        # prefix match, fall back to its accept-more-specifics parent.
        #
        if ( site_eid and site_eid . accept_more_specifics == False ) :
            if ( site_eid . eid_record_matches ( eid_record ) == False ) :
                parent = site_eid . parent_for_more_specifics
                if ( parent ) : site_eid = parent

        #
        # Registering a more-specific under an ams prefix creates a new
        # dynamic site-EID child that inherits from the parent.
        #
        ams = ( site_eid and site_eid . accept_more_specifics )
        if ( ams ) :
            ms_entry = lisp_site_eid ( site_eid . site )
            ms_entry . dynamic = True
            ms_entry . eid . copy_address ( eid_record . eid )
            ms_entry . group . copy_address ( eid_record . group )
            ms_entry . parent_for_more_specifics = site_eid
            ms_entry . add_cache ( )
            ms_entry . inherit_from_ams_parent ( )
            site_eid . more_specific_registrations . append ( ms_entry )
            site_eid = ms_entry
        else :
            site_eid = lisp_site_eid_lookup ( eid_record . eid ,
                eid_record . group , True )

        eid_str = eid_record . print_eid_tuple ( )

        if ( site_eid == None ) :
            not_found = bold ( "Site not found" , False )
            lprint ( "  {} for EID {}{}" . format ( not_found ,
                green ( eid_str , False ) , ", matched non-ams {}" . format (
                green ( matched_eid_str , False ) if matched_eid_str else "" ) ) )

            # Skip this record's RLOCs and try the next EID-record.
            packet = rloc_record . end_of_rlocs ( packet ,
                eid_record . rloc_count )
            if ( packet == None ) :
                lprint ( "    Could not decode RLOC-record in Map-Register packet" )
                return
            continue

        site = site_eid . site

        if ( ams ) :
            p_str = site_eid . parent_for_more_specifics . print_eid_tuple ( )
            lprint ( "  Found ams {} for site '{}' for registering prefix {}" . format ( green ( p_str , False ) , site . site_name , green ( eid_str , False ) ) )
        else :
            s_str = green ( site_eid . print_eid_tuple ( ) , False )
            lprint ( "  Found {} for site '{}' for registering prefix {}" . format ( s_str , site . site_name , green ( eid_str , False ) ) )

        #
        # Admin-shutdown sites reject all registrations.
        #
        if ( site . shutdown ) :
            lprint ( ( "  Rejecting registration for site '{}', configured in " +
                "admin-shutdown state" ) . format ( site . site_name ) )
            packet = rloc_record . end_of_rlocs ( packet ,
                eid_record . rloc_count )
            continue

        #
        # Authenticate with the site's password for the advertised key-id.
        #
        key_id = map_register . key_id
        if ( key_id in site . auth_key ) :
            password = site . auth_key [ key_id ]
        else :
            password = ""

        auth_good = lisp_verify_auth ( orig_packet , map_register . alg_id ,
            map_register . auth_data , password )
        dynamic_str = "dynamic " if site_eid . dynamic else ""

        passfail = bold ( "passed" if auth_good else "failed" , False )
        key_id = "key-id {}" . format ( key_id ) if key_id == map_register . key_id else "bad key-id {}" . format ( map_register . key_id )
        lprint ( "  Authentication {} for {}EID-prefix {}, {}" . format ( passfail , dynamic_str , green ( eid_str , False ) , key_id ) )

        #
        # Crypto-EIDs (and sites configured to require it) must carry a
        # verifiable signature in the RLOC-set.
        #
        sig_good = True
        crypto_eid = ( lisp_get_eid_hash ( eid_record . eid ) != None )
        if ( crypto_eid or site_eid . require_signature ) :
            required = "Required " if site_eid . require_signature else ""
            eid_str = green ( eid_str , False )
            sig = lisp_find_sig_in_rloc_set ( packet , eid_record . rloc_count )
            if ( sig == None ) :
                sig_good = False
                lprint ( ( "  {}EID-crypto-hash signature verification {} " + "for EID-prefix {}, no signature found" ) . format ( required ,
                    bold ( "failed" , False ) , eid_str ) )
            else :
                sig_good = lisp_verify_cga_sig ( eid_record . eid , sig )
                passfail = bold ( "passed" if sig_good else "failed" , False )
                lprint ( ( "  {}EID-crypto-hash signature verification {} " + "for EID-prefix {}" ) . format ( required , passfail , eid_str ) )

        #
        # Either check failed: skip this record's RLOC-set entirely.
        #
        if ( auth_good == False or sig_good == False ) :
            packet = rloc_record . end_of_rlocs ( packet ,
                eid_record . rloc_count )
            if ( packet == None ) :
                lprint ( "    Could not decode RLOC-record in Map-Register packet" )
                return
            continue

        #
        # Merge semantics: each xTR keeps an individual registration which
        # is merged into the parent entry; a site-id change resets state.
        #
        if ( map_register . merge_register_requested ) :
            parent = site_eid
            parent . inconsistent_registration = False

            if ( site_eid . group . is_null ( ) ) :
                if ( parent . site_id != map_register . site_id ) :
                    parent . site_id = map_register . site_id
                    parent . registered = False
                    parent . individual_registrations = { }
                    parent . registered_rlocs = [ ]
                    lisp_registered_count -= 1

            xtr_id = map_register . xtr_id
            if ( xtr_id in site_eid . individual_registrations ) :
                site_eid = site_eid . individual_registrations [ xtr_id ]
            else :
                site_eid = lisp_site_eid ( site )
                site_eid . eid . copy_address ( parent . eid )
                site_eid . group . copy_address ( parent . group )
                site_eid . encrypt_json = parent . encrypt_json
                parent . individual_registrations [ xtr_id ] = site_eid
        else :
            site_eid . inconsistent_registration = site_eid . merge_register_requested

        site_eid . map_registers_received += 1

        #
        # TTL 0 deregisters, but only from an RLOC already in the set.
        #
        not_in_set = ( site_eid . is_rloc_in_rloc_set ( source ) == False )
        if ( eid_record . record_ttl == 0 and not_in_set ) :
            lprint ( "  Ignore deregistration request from {}" . format ( red ( source . print_address_no_iid ( ) , False ) ) )
            continue

        #
        # Replace the stored RLOC-set, remembering the old one so changes
        # can be detected and the old RLOCs notified.
        #
        old_rloc_set = site_eid . registered_rlocs
        site_eid . registered_rlocs = [ ]

        rloc_records_start = packet
        for j in range ( eid_record . rloc_count ) :
            rloc_record = lisp_rloc_record ( )
            packet = rloc_record . decode ( packet , None ,
                site_eid . encrypt_json )
            if ( packet == None ) :
                lprint ( "    Could not decode RLOC-record in Map-Register packet" )
                return
            rloc_record . print_record ( "    " )

            #
            # Enforce the site's allowed-RLOC list when one is configured.
            #
            if ( len ( site . allowed_rlocs ) > 0 ) :
                addr_str = rloc_record . rloc . print_address ( )
                if ( addr_str not in site . allowed_rlocs ) :
                    lprint ( ( "    Reject registration, RLOC {} not " + "configured in allowed RLOC-set" ) . format ( red ( addr_str , False ) ) )
                    site_eid . registered = False
                    packet = rloc_record . end_of_rlocs ( packet ,
                        eid_record . rloc_count - j - 1 )
                    break

            rloc = lisp_rloc ( )
            rloc . store_rloc_from_record ( rloc_record , None , source )

            # Only the sending RLOC inherits the want-map-notify flag.
            if ( source . is_exact_match ( rloc . rloc ) ) :
                rloc . map_notify_requested = map_register . map_notify_requested

            site_eid . registered_rlocs . append ( rloc )

        rloc_change = ( site_eid . do_rloc_sets_match ( old_rloc_set ) == False )

        #
        # A refresh is not allowed to change the RLOC-set; restore it.
        #
        if ( map_register . map_register_refresh and rloc_change and
            site_eid . registered ) :
            lprint ( "    Reject registration, refreshes cannot change RLOC-set" )
            site_eid . registered_rlocs = old_rloc_set
            continue

        #
        # Bring the entry up to date with this register's parameters.
        #
        if ( site_eid . registered == False ) :
            site_eid . first_registered = lisp_get_timestamp ( )
            lisp_registered_count += 1

        site_eid . last_registered = lisp_get_timestamp ( )
        site_eid . registered = ( eid_record . record_ttl != 0 )
        site_eid . last_registerer = source

        site_eid . auth_sha1_or_sha2 = sha1_or_sha2
        site_eid . proxy_reply_requested = map_register . proxy_reply_requested
        site_eid . lisp_sec_present = map_register . lisp_sec_present
        site_eid . map_notify_requested = map_register . map_notify_requested
        site_eid . mobile_node_requested = map_register . mobile_node
        site_eid . merge_register_requested = map_register . merge_register_requested

        site_eid . use_register_ttl_requested = map_register . use_ttl_for_timeout
        if ( site_eid . use_register_ttl_requested ) :
            site_eid . register_ttl = eid_record . store_ttl ( )
        else :
            site_eid . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3

        site_eid . xtr_id_present = map_register . xtr_id_present
        if ( site_eid . xtr_id_present ) :
            site_eid . xtr_id = map_register . xtr_id
            site_eid . site_id = map_register . site_id

        #
        # Merge this xTR's registration into the parent entry.
        #
        if ( map_register . merge_register_requested ) :
            if ( parent . merge_in_site_eid ( site_eid ) ) :
                multicast_eids . append ( [ eid_record . eid ,
                    eid_record . group ] )
            if ( map_register . map_notify_requested ) :
                lisp_send_merged_map_notify ( lisp_sockets , parent ,
                    map_register , eid_record )

        if ( rloc_change == False ) : continue
        if ( len ( multicast_eids ) != 0 ) : continue

        eid_list . append ( site_eid . print_eid_tuple ( ) )

        #
        # RLOC-set changed: tell the old RLOCs and any pubsub subscribers.
        #
        eid_record_copy = copy . deepcopy ( eid_record )
        notify_packet = eid_record . encode ( )
        notify_packet += rloc_records_start
        eids = [ site_eid . print_eid_tuple ( ) ]
        lprint ( "  Changed RLOC-set, Map-Notifying old RLOC-set" )

        for rloc in old_rloc_set :
            if ( rloc . map_notify_requested == False ) : continue
            if ( rloc . rloc . is_exact_match ( source ) ) : continue
            lisp_build_map_notify ( lisp_sockets , notify_packet , eids , 1 ,
                rloc . rloc , LISP_CTRL_PORT , map_register . nonce ,
                map_register . key_id , map_register . alg_id ,
                map_register . auth_len , site , False )

        lisp_notify_subscribers ( lisp_sockets , eid_record_copy ,
            rloc_records_start , site_eid . eid , site )

    #
    # Queue multicast Map-Notifies for merged (S,G) registrations.
    #
    if ( len ( multicast_eids ) != 0 ) :
        lisp_queue_multicast_map_notify ( lisp_sockets , multicast_eids )

    # Merged registrations already sent their Map-Notifies above.
    if ( map_register . merge_register_requested ) : return

    #
    # Sender asked for an acknowledging Map-Notify.
    #
    if ( map_register . map_notify_requested and site != None ) :
        lisp_build_map_notify ( lisp_sockets , saved_packet , eid_list ,
            map_register . record_count , source , sport ,
            map_register . nonce , map_register . key_id ,
            map_register . alg_id , map_register . auth_len , site , True )
    return
if 24 - 24: o0oOOo0O0Ooo . I1Ii111 % O0
if 67 - 67: I1IiiI * Ii1I
if 64 - 64: OOooOOo
if 90 - 90: iII111i . OoOoOO00 + i1IIi % ooOoO0o * I11i + OoooooooOO
if 2 - 2: o0oOOo0O0Ooo . II111iiii
if 9 - 9: I1Ii111 - II111iiii + OoOoOO00 . OoO0O00
if 33 - 33: Oo0Ooo
if 12 - 12: i11iIiiIii . Oo0Ooo / OoOoOO00 + iII111i . Ii1I + ooOoO0o
def lisp_process_unicast_map_notify ( lisp_sockets , packet , source ) :
    """Process a unicast Map-Notify for a pubsub-subscribed EID.

    For each EID-record, update (or create, for a pubsub action entry)
    the map-cache entry with the advertised RLOC-set, then acknowledge
    the Map-Notify to the originating Map-Server. Entries with no
    subscribe-request state are ignored.

    Fixes relative to the previous version:
    * After eid_record.decode(), the failure check tested 'packet',
      which was validated before the loop and never reassigned inside
      it, so a truncated EID-record was silently ignored. The check now
      tests the decoded 'eid_records' pointer.
    * The map-cache lookup passed eid_record.eid for both arguments;
      it now passes (eid, group), consistent with
      lisp_process_multicast_map_notify().
    """
    map_notify = lisp_map_notify ( "" )
    packet = map_notify . decode ( packet )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Notify packet" )
        return

    map_notify . print_notify ( )
    if ( map_notify . record_count == 0 ) : return

    eid_records = map_notify . eid_records

    for _ in range ( map_notify . record_count ) :
        eid_record = lisp_eid_record ( )
        eid_records = eid_record . decode ( eid_records )

        # Bug fix: check the pointer decode() returned, not 'packet'.
        if ( eid_records == None ) : return

        eid_record . print_record ( "  " , False )
        eid_str = eid_record . print_eid_tuple ( )

        #
        # Bug fix: look up with (eid, group) — was (eid, eid).
        #
        mc = lisp_map_cache_lookup ( eid_record . eid , eid_record . group )
        if ( mc == None ) :
            e = green ( eid_str , False )
            lprint ( "Ignoring Map-Notify EID {}, no subscribe-request entry" . format ( e ) )
            continue

        #
        # Only entries we actually subscribed to are updated.
        #
        if ( mc . action != LISP_SEND_PUBSUB_ACTION ) :
            if ( mc . subscribed_eid == None ) :
                e = green ( eid_str , False )
                lprint ( "Ignoring Map-Notify for non-subscribed EID {}" . format ( e ) )
                continue

        #
        # A pubsub-action entry is replaced by a fresh mapping; otherwise
        # keep the old RLOC-set around to detect replacements below.
        #
        old_rlocs = [ ]
        if ( mc . action == LISP_SEND_PUBSUB_ACTION ) :
            mc = lisp_mapping ( eid_record . eid , eid_record . group , [ ] )
            mc . add_cache ( )
            sub_eid = copy . deepcopy ( eid_record . eid )
            sub_group = copy . deepcopy ( eid_record . group )
        else :
            sub_eid = mc . subscribed_eid
            sub_group = mc . subscribed_group
            old_rlocs = mc . rloc_set
            mc . delete_rlocs_from_rloc_probe_list ( )
            mc . rloc_set = [ ]

        # "lisp-itr" marks an internally forwarded notify with no source.
        mc . mapping_source = None if source == "lisp-itr" else source
        mc . map_cache_ttl = eid_record . store_ttl ( )
        mc . subscribed_eid = sub_eid
        mc . subscribed_group = sub_group

        #
        # Publisher withdrew all RLOCs: store an empty RLOC-set.
        #
        if ( len ( old_rlocs ) != 0 and eid_record . rloc_count == 0 ) :
            mc . build_best_rloc_set ( )
            lisp_write_ipc_map_cache ( True , mc )
            lprint ( "Update {} map-cache entry with no RLOC-set" . format ( green ( eid_str , False ) ) )
            continue

        #
        # Store advertised RLOCs, carrying over state for RLOCs that were
        # already in the old set.
        #
        new_count = replaced_count = 0
        for _ in range ( eid_record . rloc_count ) :
            rloc_record = lisp_rloc_record ( )
            eid_records = rloc_record . decode ( eid_records , None )
            rloc_record . print_record ( "    " )

            old_rloc = None
            for r in old_rlocs :
                if ( r . rloc . is_exact_match ( rloc_record . rloc ) ) :
                    old_rloc = r
                    break

            if ( old_rloc != None ) :
                rloc = copy . deepcopy ( old_rloc )
                replaced_count += 1
            else :
                rloc = lisp_rloc ( )
                new_count += 1

            rloc . store_rloc_from_record ( rloc_record , None ,
                mc . mapping_source )
            mc . rloc_set . append ( rloc )

        lprint ( "Update {} map-cache entry with {}/{} new/replaced RLOCs" . format ( green ( eid_str , False ) , new_count , replaced_count ) )

        #
        # Push the updated entry to the data-plane via IPC.
        #
        mc . build_best_rloc_set ( )
        lisp_write_ipc_map_cache ( True , mc )

    #
    # Acknowledge the Map-Notify to the Map-Server it came from.
    #
    ms = lisp_get_map_server ( source )
    if ( ms == None ) :
        lprint ( "Cannot find Map-Server for Map-Notify source address {}" . format ( source . print_address_no_iid ( ) ) )
        return

    lisp_send_map_notify_ack ( lisp_sockets , eid_records , map_notify , ms )
if 98 - 98: O0 - I1Ii111 % oO0o - iII111i + Ii1I * i1IIi
if 76 - 76: o0oOOo0O0Ooo
if 55 - 55: OOooOOo + I1ii11iIi11i * Oo0Ooo
if 11 - 11: i1IIi - OoooooooOO * OoOoOO00 / oO0o - OoooooooOO - I1IiiI
if 22 - 22: i11iIiiIii . Ii1I . Oo0Ooo * Oo0Ooo - iII111i / I1ii11iIi11i
if 49 - 49: iII111i + I11i . Oo0Ooo
if 23 - 23: I1IiiI . Ii1I + ooOoO0o . OoooooooOO
if 57 - 57: OOooOOo / OoOoOO00 / i11iIiiIii - I11i - I11i . Ii1I
if 53 - 53: ooOoO0o . iII111i + Ii1I * I1Ii111
if 49 - 49: II111iiii . I1ii11iIi11i * OoOoOO00 - OOooOOo
def lisp_process_multicast_map_notify ( packet , source ) :
    """Process a Map-Notify carrying multicast (S,G) RLE updates.

    For each EID-record, find (or glean-create) the (S,G) map-cache
    entry and replace its RLOC-set with the RLE-bearing RLOC from the
    notify. Gleaned entries are left alone. Records for unicast groups
    or RLOCs without an RLE are skipped.

    Fix relative to the previous version: after eid_record.decode(),
    the failure check tested 'packet', which was validated at the top
    of the function and never reassigned inside the loop, so it could
    never fire; the decoded 'eid_records' pointer is now checked so a
    truncated record stops processing.
    """
    map_notify = lisp_map_notify ( "" )
    packet = map_notify . decode ( packet )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Notify packet" )
        return

    map_notify . print_notify ( )
    if ( map_notify . record_count == 0 ) : return

    eid_records = map_notify . eid_records

    for _ in range ( map_notify . record_count ) :
        eid_record = lisp_eid_record ( )
        eid_records = eid_record . decode ( eid_records )

        # Bug fix: check the pointer decode() returned, not 'packet'.
        if ( eid_records == None ) : return

        eid_record . print_record ( "  " , False )

        #
        # Find the (S,G) entry; create it only if gleaning allows it.
        #
        mc = lisp_map_cache_lookup ( eid_record . eid , eid_record . group )
        if ( mc == None ) :
            allow , _ , _ = lisp_allow_gleaning ( eid_record . eid ,
                eid_record . group , None )
            if ( allow == False ) : continue

            mc = lisp_mapping ( eid_record . eid , eid_record . group , [ ] )
            mc . add_cache ( )

        #
        # Gleaned state is authoritative; don't let a notify clobber it.
        #
        if ( mc . gleaned ) :
            lprint ( "Ignore Map-Notify for gleaned {}" . format ( green ( mc . print_eid_tuple ( ) , False ) ) )
            continue

        # "lisp-etr" marks an internally forwarded notify with no source.
        mc . mapping_source = None if source == "lisp-etr" else source
        mc . map_cache_ttl = eid_record . store_ttl ( )

        #
        # Publisher withdrew all RLOCs: store an empty RLOC-set.
        #
        if ( len ( mc . rloc_set ) != 0 and eid_record . rloc_count == 0 ) :
            mc . rloc_set = [ ]
            mc . build_best_rloc_set ( )
            lisp_write_ipc_map_cache ( True , mc )
            lprint ( "Update {} map-cache entry with no RLOC-set" . format ( green ( mc . print_eid_tuple ( ) , False ) ) )
            continue

        # When the old set went through RTRs, only accept RTR RLOCs.
        rtrs_in_set = mc . rtrs_in_rloc_set ( )

        for _ in range ( eid_record . rloc_count ) :
            rloc_record = lisp_rloc_record ( )
            eid_records = rloc_record . decode ( eid_records , None )
            rloc_record . print_record ( "    " )
            if ( eid_record . group . is_null ( ) ) : continue
            if ( rloc_record . rle == None ) : continue

            #
            # Preserve packet counters from the entry being replaced.
            #
            stats = mc . rloc_set [ 0 ] . stats if len ( mc . rloc_set ) != 0 else None

            rloc = lisp_rloc ( )
            rloc . store_rloc_from_record ( rloc_record , None ,
                mc . mapping_source )
            if ( stats != None ) : rloc . stats = copy . deepcopy ( stats )

            if ( rtrs_in_set and rloc . is_rtr ( ) == False ) : continue

            mc . rloc_set = [ rloc ]
            mc . build_best_rloc_set ( )
            lisp_write_ipc_map_cache ( True , mc )

            lprint ( "Update {} map-cache entry with RLE {}" . format ( green ( mc . print_eid_tuple ( ) , False ) ,
                rloc . rle . print_rle ( False , True ) ) )
    return
if 95 - 95: Ii1I / OOooOOo + OOooOOo . II111iiii
if 13 - 13: I1IiiI
if 60 - 60: iII111i . o0oOOo0O0Ooo + iII111i
if 38 - 38: i11iIiiIii * I11i + Oo0Ooo - iIii1I11I1II1
if 75 - 75: i1IIi * iII111i - I11i * i11iIiiIii
if 75 - 75: I1IiiI . OoooooooOO + OOooOOo + IiII
if 37 - 37: iII111i + i1IIi % Oo0Ooo / o0oOOo0O0Ooo / iII111i
if 81 - 81: ooOoO0o
def lisp_process_map_notify ( lisp_sockets , orig_packet , source ) :
    """
    Process a received Map-Notify message: decode it, authenticate it
    against the configured map-server (when auth data is present), print
    each EID/RLOC record, forward multicast entries to the ITR process,
    and acknowledge with a Map-Notify-Ack.
    """
    map_notify = lisp_map_notify ( "" )
    packet = map_notify . decode ( orig_packet )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Notify packet" )
        return
    # if
    map_notify . print_notify ( )

    #
    # When the Map-Notify carries authentication, find the map-server by
    # source address and verify the auth data before going any further.
    #
    source_str = source . print_address ( )
    if ( map_notify . alg_id != 0 or map_notify . auth_len != 0 ) :
        ms = None
        for ms_key in lisp_map_servers_list :
            if ( ms_key . find ( source_str ) == - 1 ) : continue
            ms = lisp_map_servers_list [ ms_key ]
        # for
        if ( ms == None ) :
            lprint ( ( " Could not find Map-Server {} to authenticate " + "Map-Notify" ) . format ( source_str ) )
            return
        # if

        ms . map_notifies_received += 1

        auth_good = lisp_verify_auth ( packet , map_notify . alg_id ,
            map_notify . auth_data , ms . password )

        lprint ( " Authentication {} for Map-Notify" . format ( "succeeded" if auth_good else "failed" ) )
        if ( auth_good == False ) : return
    else :
        # No auth present — build a placeholder map-server entry so the
        # ack path below has something to work with.
        ms = lisp_ms ( source_str , None , "" , 0 , "" , False , False , False , False , 0 , 0 , 0 ,
            None )
    # if/else

    #
    # A record-count of zero means there is nothing to inspect — just ack.
    #
    eid_records = map_notify . eid_records
    if ( map_notify . record_count == 0 ) :
        lisp_send_map_notify_ack ( lisp_sockets , eid_records , map_notify , ms )
        return
    # if

    #
    # Decode and print the first (only) EID-record and its RLOC-records.
    #
    eid_record = lisp_eid_record ( )
    packet = eid_record . decode ( eid_records )
    if ( packet == None ) : return

    eid_record . print_record ( " " , False )

    for i in range ( eid_record . rloc_count ) :
        rloc_record = lisp_rloc_record ( )
        packet = rloc_record . decode ( packet , None )
        if ( packet == None ) :
            lprint ( " Could not decode RLOC-record in Map-Notify packet" )
            return
        # if
        rloc_record . print_record ( " " )
    # for

    #
    # Multicast (non-null group) entries are of interest to the ITR
    # process — pass the original packet over IPC.
    #
    if ( eid_record . group . is_null ( ) == False ) :
        lprint ( "Send {} Map-Notify IPC message to ITR process" . format ( green ( eid_record . print_eid_tuple ( ) , False ) ) )

        ipc = lisp_control_packet_ipc ( orig_packet , source_str , "lisp-itr" , 0 )
        lisp_ipc ( ipc , lisp_sockets [ 2 ] , "lisp-core-pkt" )
    # if

    #
    # Acknowledge receipt to the sender.
    #
    lisp_send_map_notify_ack ( lisp_sockets , eid_records , map_notify , ms )
    return
if 82 - 82: IiII * ooOoO0o / OOooOOo + OoOoOO00
if 32 - 32: IiII
if 90 - 90: I1ii11iIi11i / I11i * o0oOOo0O0Ooo % O0 * i11iIiiIii
if 68 - 68: I11i . Ii1I + I11i / IiII . I11i / iIii1I11I1II1
if 96 - 96: O0
if 2 - 2: OoO0O00 / iII111i + o0oOOo0O0Ooo
if 27 - 27: I11i - OoOoOO00 - ooOoO0o - I1IiiI
if 51 - 51: I11i + I11i + O0 + O0 * I1Ii111
def lisp_process_map_notify_ack ( packet , source ) :
    """
    Process a received Map-Notify-Ack: decode it, authenticate against the
    registered site's key (when auth data is present), and dequeue the
    matching pending Map-Notify from the retransmit queue.
    """
    map_notify = lisp_map_notify ( "" )
    packet = map_notify . decode ( packet )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Notify-Ack packet" )
        return
    # if

    map_notify . print_notify ( )

    #
    # The EID-record identifies the site whose key authenticates the ack.
    #
    if ( map_notify . record_count < 1 ) :
        lprint ( "No EID-prefix found, cannot authenticate Map-Notify-Ack" )
        return
    # if

    eid_record = lisp_eid_record ( )

    if ( eid_record . decode ( map_notify . eid_records ) == None ) :
        lprint ( "Could not decode EID-record, cannot authenticate " +
            "Map-Notify-Ack" )
        return
    # if
    eid_record . print_record ( " " , False )

    eid_str = eid_record . print_eid_tuple ( )

    #
    # Verify authentication when present, using the site's configured key.
    #
    if ( map_notify . alg_id != LISP_NONE_ALG_ID and map_notify . auth_len != 0 ) :
        site_eid = lisp_sites_by_eid . lookup_cache ( eid_record . eid , True )
        if ( site_eid == None ) :
            not_found = bold ( "Site not found" , False )
            lprint ( ( "{} for EID {}, cannot authenticate Map-Notify-Ack" ) . format ( not_found , green ( eid_str , False ) ) )
            return
        # if
        site = site_eid . site

        site . map_notify_acks_received += 1

        key_id = map_notify . key_id
        if ( key_id in site . auth_key ) :
            password = site . auth_key [ key_id ]
        else :
            password = ""
        # if/else

        auth_good = lisp_verify_auth ( packet , map_notify . alg_id ,
            map_notify . auth_data , password )

        key_id_str = "key-id {}" . format ( key_id ) if key_id == map_notify . key_id else "bad key-id {}" . format ( map_notify . key_id )

        lprint ( " Authentication {} for Map-Notify-Ack, {}" . format ( "succeeded" if auth_good else "failed" , key_id_str ) )
        if ( auth_good == False ) : return
    # if

    #
    # Stop retransmitting and remove the queued Map-Notify state.
    #
    if ( map_notify . retransmit_timer ) : map_notify . retransmit_timer . cancel ( )

    source_str = source . print_address ( )
    queue_key = map_notify . nonce_key

    if ( queue_key in lisp_map_notify_queue ) :
        map_notify = lisp_map_notify_queue . pop ( queue_key )
        if ( map_notify . retransmit_timer ) : map_notify . retransmit_timer . cancel ( )
        lprint ( "Dequeue Map-Notify from retransmit queue, key is: {}" . format ( queue_key ) )
    else :
        lprint ( "Map-Notify with nonce 0x{} queue entry not found for {}" . format ( map_notify . nonce_key , red ( source_str , False ) ) )
    # if/else
    return
if 53 - 53: OoO0O00 . I11i - ooOoO0o
if 11 - 11: I11i + i11iIiiIii / oO0o % oO0o * o0oOOo0O0Ooo / OoOoOO00
if 74 - 74: oO0o . I1Ii111 . II111iiii
if 92 - 92: I1Ii111 % OoooooooOO * I1Ii111
if 78 - 78: Oo0Ooo . I11i . oO0o + O0 / O0
if 41 - 41: iII111i * OoO0O00 - OoO0O00
if 72 - 72: o0oOOo0O0Ooo + oO0o . I1ii11iIi11i + OoO0O00 / I1Ii111
if 58 - 58: Oo0Ooo / II111iiii % OoooooooOO % II111iiii
def lisp_map_referral_loop ( mr , eid , group , action , s ) :
    """
    Detect a DDT referral loop. Returns True when the referral's EID
    prefix is not more-specific than the prefix already cached on the
    pending map-request 'mr' (i.e. we are not making forward progress).
    Only node- and ms-referrals are checked; other actions never loop.
    """
    if ( action not in ( LISP_DDT_ACTION_NODE_REFERRAL ,
        LISP_DDT_ACTION_MS_REFERRAL ) ) : return ( False )

    # Nothing cached yet — cannot be a loop.
    if ( mr . last_cached_prefix [ 0 ] == None ) : return ( False )

    #
    # Check the group prefix first (multicast case), then the EID prefix.
    #
    looped = False
    if ( group . is_null ( ) == False ) :
        looped = mr . last_cached_prefix [ 1 ] . is_more_specific ( group )
    # if
    if ( looped == False ) :
        looped = mr . last_cached_prefix [ 0 ] . is_more_specific ( eid )
    # if

    if ( looped ) :
        new_prefix = lisp_print_eid_tuple ( eid , group )
        cached_prefix = lisp_print_eid_tuple ( mr . last_cached_prefix [ 0 ] ,
            mr . last_cached_prefix [ 1 ] )

        lprint ( ( "Map-Referral prefix {} from {} is not more-specific " + "than cached prefix {}" ) . format ( green ( new_prefix , False ) , s ,
            cached_prefix ) )
    # if
    return ( looped )
if 86 - 86: IiII + OoOoOO00 * IiII
if 44 - 44: OOooOOo * iIii1I11I1II1 * IiII + Oo0Ooo
if 60 - 60: I1Ii111
if 52 - 52: ooOoO0o . I1IiiI . i11iIiiIii . Ii1I - O0 - I1IiiI
if 53 - 53: i1IIi * OOooOOo - IiII * Oo0Ooo / OoooooooOO + OoooooooOO
if 10 - 10: oO0o - O0 / Ii1I - OOooOOo - I1Ii111
if 41 - 41: O0 / I1IiiI - I1ii11iIi11i - i11iIiiIii
def lisp_process_map_referral ( lisp_sockets , packet , source ) :
    """
    Process a received Map-Referral. For each EID-record: match it to a
    queued DDT map-request by nonce, run loop detection, add or update the
    referral-cache entry (merging the RLOC-record referral-set), and then
    act on the referral action code (send negative map-replies, re-issue
    DDT map-requests, or dequeue the finished request).
    """
    map_referral = lisp_map_referral ( )
    packet = map_referral . decode ( packet )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Referral packet" )
        return
    # if
    map_referral . print_map_referral ( )

    source_str = source . print_address ( )
    nonce = map_referral . nonce

    #
    # Walk each EID-record in the Map-Referral.
    #
    for i in range ( map_referral . record_count ) :
        eid_record = lisp_eid_record ( )
        packet = eid_record . decode ( packet )
        if ( packet == None ) :
            lprint ( "Could not decode EID-record in Map-Referral packet" )
            return
        # if
        eid_record . print_record ( " " , True )

        #
        # Match to the pending map-request queued under this nonce.
        #
        queue_key = str ( nonce )
        if ( queue_key not in lisp_ddt_map_requestQ ) :
            lprint ( ( "Map-Referral nonce 0x{} from {} not found in " + "Map-Request queue, EID-record ignored" ) . format ( lisp_hex_string ( nonce ) , source_str ) )
            continue
        # if
        mr = lisp_ddt_map_requestQ [ queue_key ]
        if ( mr == None ) :
            lprint ( ( "No Map-Request queue entry found for Map-Referral " +
                "nonce 0x{} from {}, EID-record ignored" ) . format ( lisp_hex_string ( nonce ) , source_str ) )
            continue
        # if

        #
        # Loop detection — a referral that is not more-specific than what
        # we last cached means no forward progress; give up on the request.
        #
        if ( lisp_map_referral_loop ( mr , eid_record . eid , eid_record . group ,
            eid_record . action , source_str ) ) :
            mr . dequeue_map_request ( )
            continue
        # if

        mr . last_cached_prefix [ 0 ] = eid_record . eid
        mr . last_cached_prefix [ 1 ] = eid_record . group

        #
        # Find or create the referral-cache entry. Static entries (no
        # referral source set) are never replaced.
        #
        added = False
        referral = lisp_referral_cache_lookup ( eid_record . eid , eid_record . group ,
            True )
        if ( referral == None ) :
            added = True
            referral = lisp_referral ( )
            referral . eid = eid_record . eid
            referral . group = eid_record . group
            if ( eid_record . ddt_incomplete == False ) : referral . add_cache ( )
        elif ( referral . referral_source . not_set ( ) ) :
            lprint ( "Do not replace static referral entry {}" . format ( green ( referral . print_eid_tuple ( ) , False ) ) )
            mr . dequeue_map_request ( )
            continue
        # if/elif

        action = eid_record . action
        referral . referral_source = source
        referral . referral_type = action
        ttl = eid_record . store_ttl ( )
        referral . referral_ttl = ttl
        referral . expires = lisp_set_timestamp ( ttl )

        #
        # Adjust up/down state of the referral-node we heard from, based
        # on whether this is a negative referral.
        #
        negative = referral . is_referral_negative ( )
        if ( source_str in referral . referral_set ) :
            ref_node = referral . referral_set [ source_str ]

            if ( ref_node . updown == False and negative == False ) :
                ref_node . updown = True
                lprint ( "Change up/down status for referral-node {} to up" . format ( source_str ) )
            elif ( ref_node . updown == True and negative == True ) :
                ref_node . updown = False
                lprint ( ( "Change up/down status for referral-node {} " + "to down, received negative referral" ) . format ( source_str ) )
            # if/elif
        # if

        #
        # Start with every existing referral-node marked for deletion;
        # nodes re-announced in the RLOC-records below are unmarked.
        #
        delete_set = { }
        for node_key in referral . referral_set : delete_set [ node_key ] = None

        #
        # Merge each RLOC-record into the referral-set.
        #
        for j in range ( eid_record . rloc_count ) :
            rloc_record = lisp_rloc_record ( )
            packet = rloc_record . decode ( packet , None )
            if ( packet == None ) :
                lprint ( "Could not decode RLOC-record in Map-Referral packet" )
                return
            # if
            rloc_record . print_record ( " " )

            addr_str = rloc_record . rloc . print_address ( )
            if ( addr_str not in referral . referral_set ) :
                ref_node = lisp_referral_node ( )
                ref_node . referral_address . copy_address ( rloc_record . rloc )
                referral . referral_set [ addr_str ] = ref_node
                if ( source_str == addr_str and negative ) : ref_node . updown = False
            else :
                ref_node = referral . referral_set [ addr_str ]
                if ( addr_str in delete_set ) : delete_set . pop ( addr_str )
            # if/else
            ref_node . priority = rloc_record . priority
            ref_node . weight = rloc_record . weight
        # for

        #
        # Remove referral-nodes that were not re-announced.
        #
        for node_key in delete_set : referral . referral_set . pop ( node_key )

        eid_str = referral . print_eid_tuple ( )

        if ( added ) :
            if ( eid_record . ddt_incomplete ) :
                lprint ( "Suppress add {} to referral-cache" . format ( green ( eid_str , False ) ) )
            else :
                lprint ( "Add {}, referral-count {} to referral-cache" . format ( green ( eid_str , False ) , eid_record . rloc_count ) )
            # if/else
        else :
            lprint ( "Replace {}, referral-count: {} in referral-cache" . format ( green ( eid_str , False ) , eid_record . rloc_count ) )
        # if/else

        #
        # Act on the referral action code.
        #
        if ( action == LISP_DDT_ACTION_DELEGATION_HOLE ) :
            lisp_send_negative_map_reply ( mr . lisp_sockets , referral . eid ,
                referral . group , mr . nonce , mr . itr , mr . sport , 15 , None , False )
            mr . dequeue_map_request ( )
        # if

        if ( action == LISP_DDT_ACTION_NOT_AUTH ) :
            if ( mr . tried_root ) :
                lisp_send_negative_map_reply ( mr . lisp_sockets , referral . eid ,
                    referral . group , mr . nonce , mr . itr , mr . sport , 0 , None , False )
                mr . dequeue_map_request ( )
            else :
                lisp_send_ddt_map_request ( mr , True )
            # if/else
        # if

        if ( action == LISP_DDT_ACTION_MS_NOT_REG ) :
            if ( source_str in referral . referral_set ) :
                ref_node = referral . referral_set [ source_str ]
                ref_node . updown = False
            # if
            if ( len ( referral . referral_set ) == 0 ) :
                mr . dequeue_map_request ( )
            else :
                lisp_send_ddt_map_request ( mr , False )
            # if/else
        # if

        if ( action in ( LISP_DDT_ACTION_NODE_REFERRAL ,
            LISP_DDT_ACTION_MS_REFERRAL ) ) :
            if ( mr . eid . is_exact_match ( eid_record . eid ) ) :
                if ( not mr . tried_root ) :
                    lisp_send_ddt_map_request ( mr , True )
                else :
                    lisp_send_negative_map_reply ( mr . lisp_sockets ,
                        referral . eid , referral . group , mr . nonce , mr . itr ,
                        mr . sport , 15 , None , False )
                    mr . dequeue_map_request ( )
                # if/else
            else :
                lisp_send_ddt_map_request ( mr , False )
            # if/else
        # if

        if ( action == LISP_DDT_ACTION_MS_ACK ) : mr . dequeue_map_request ( )
    # for
    return
if 18 - 18: II111iiii . OoOoOO00 + I1ii11iIi11i * oO0o + OoooooooOO
if 39 - 39: I1IiiI * ooOoO0o / i11iIiiIii - oO0o - oO0o + O0
if 73 - 73: OOooOOo
if 44 - 44: I1ii11iIi11i * i1IIi - iIii1I11I1II1 - oO0o - oO0o * II111iiii
if 98 - 98: Oo0Ooo + ooOoO0o / OOooOOo . iIii1I11I1II1 . I1IiiI . OoOoOO00
if 92 - 92: i1IIi + OoOoOO00 * i1IIi / IiII
if 4 - 4: oO0o % OoO0O00 + IiII + o0oOOo0O0Ooo
if 82 - 82: O0 / I1Ii111 + OOooOOo . IiII + Ii1I
def lisp_process_ecm ( lisp_sockets , packet , source , ecm_port ) :
    """
    Process an Encapsulated-Control-Message: strip the ECM header, verify
    the inner message is a Map-Request, and hand it to
    lisp_process_map_request() with the inner source/port preserved.
    """
    ecm = lisp_ecm ( 0 )
    packet = ecm . decode ( packet )
    if ( packet == None ) :
        lprint ( "Could not decode ECM packet" )
        return
    # if

    ecm . print_ecm ( )

    header = lisp_control_header ( )
    if ( header . decode ( packet ) == None ) :
        lprint ( "Could not decode control header" )
        return
    # if

    packet_type = header . type
    del ( header )

    # Only Map-Requests are valid inside an ECM.
    if ( packet_type != LISP_MAP_REQUEST ) :
        lprint ( "Received ECM without Map-Request inside" )
        return
    # if

    #
    # Pass the inner Map-Request along with its inner UDP source port and
    # an arrival timestamp.
    #
    sport = ecm . udp_sport
    timestamp = time . time ( )
    lisp_process_map_request ( lisp_sockets , packet , source , ecm_port ,
        ecm . source , sport , ecm . ddt , - 1 , timestamp )
    return
if 44 - 44: O0 . iIii1I11I1II1 - Ii1I . Oo0Ooo % IiII * Ii1I
if 54 - 54: OoooooooOO + iII111i - i11iIiiIii - OoO0O00
if 25 - 25: iII111i . O0 / oO0o + OoO0O00 / iII111i . IiII
if 24 - 24: Oo0Ooo . oO0o / OoOoOO00 + I1IiiI
if 47 - 47: O0 / OOooOOo . i1IIi / OoooooooOO . IiII
if 34 - 34: OoO0O00 * II111iiii + I1Ii111
if 20 - 20: iIii1I11I1II1 . OoO0O00 . II111iiii / Ii1I - iIii1I11I1II1 / OOooOOo
if 20 - 20: i11iIiiIii * oO0o * ooOoO0o
if 65 - 65: I1ii11iIi11i / Oo0Ooo / I1IiiI + IiII
if 71 - 71: OoO0O00 . I1Ii111 + OoooooooOO
def lisp_send_map_register ( lisp_sockets , packet , map_register , ms ) :
    """
    Finish and transmit a Map-Register to map-server 'ms': handle the
    decent-push bootstrap case (first register goes to loopback), compute
    the authentication data, optionally ChaCha-encrypt the payload, and
    send on the control port.
    """

    #
    # Decent-push bootstrap: for a multicast map-server address, the very
    # first Map-Register is sent to 127.0.0.1 so the local mapping system
    # can bootstrap the peer-group.
    #
    dest = ms . map_server
    if ( lisp_decent_push_configured and dest . is_multicast_address ( ) and
        ( ms . map_registers_multicast_sent == 1 or ms . map_registers_sent == 1 ) ) :
        dest = copy . deepcopy ( dest )
        dest . address = 0x7f000001
        tag = bold ( "Bootstrap" , False )
        group_str = ms . map_server . print_address_no_iid ( )
        lprint ( "{} mapping system for peer-group {}" . format ( tag , group_str ) )
    # if

    #
    # Fill in authentication over the finished packet.
    #
    packet = lisp_compute_auth ( packet , map_register , ms . password )

    #
    # Optionally encrypt everything after the 4-byte header with ChaCha.
    #
    if ( ms . ekey != None ) :
        ekey = ms . ekey . zfill ( 32 )
        iv = "0" * 8
        ciphertext = chacha . ChaCha ( ekey , iv , 20 ) . encrypt ( packet [ 4 : : ] )
        packet = packet [ 0 : 4 ] + ciphertext
        tag = bold ( "Encrypt" , False )
        lprint ( "{} Map-Register with key-id {}" . format ( tag , ms . ekey_id ) )
    # if

    decent_str = ""
    if ( lisp_decent_pull_xtr_configured ( ) ) :
        decent_str = ", decent-index {}" . format ( bold ( ms . dns_name , False ) )
    # if

    lprint ( "Send Map-Register to map-server {}{}{}" . format ( dest . print_address ( ) , ", ms-name '{}'" . format ( ms . ms_name ) , decent_str ) )

    lisp_send ( lisp_sockets , dest , LISP_CTRL_PORT , packet )
    return
if 31 - 31: oO0o * oO0o % o0oOOo0O0Ooo . O0 + iII111i
if 52 - 52: i11iIiiIii
if 1 - 1: i1IIi * iIii1I11I1II1
if 29 - 29: I11i
if 12 - 12: oO0o % i1IIi - oO0o / ooOoO0o * II111iiii % ooOoO0o
if 6 - 6: IiII / OoO0O00
if 83 - 83: IiII - iIii1I11I1II1 * ooOoO0o - oO0o
if 77 - 77: Ii1I
def lisp_send_ipc_to_core ( lisp_socket , packet , dest , port ) :
    """
    Wrap a control packet in a control-packet IPC message and hand it to
    the lisp-core process for transmission to 'dest'/'port'.
    """
    ipc_source = lisp_socket . getsockname ( )
    dest = dest . print_address_no_iid ( )

    lprint ( "Send IPC {} bytes to {} {}, control-packet: {}" . format ( len ( packet ) , dest , port , lisp_format_packet ( packet ) ) )

    packet = lisp_control_packet_ipc ( packet , ipc_source , dest , port )
    lisp_ipc ( packet , lisp_socket , "lisp-core-pkt" )
    return
if 33 - 33: i11iIiiIii / I1Ii111 + IiII / II111iiii + I11i
if 13 - 13: i1IIi % iII111i + OoOoOO00 / Ii1I . Ii1I + II111iiii
if 44 - 44: OoOoOO00 / OoooooooOO % O0 * Ii1I * IiII
if 84 - 84: o0oOOo0O0Ooo * IiII * OOooOOo * iII111i
if 56 - 56: iII111i * II111iiii . OoooooooOO . I11i
if 25 - 25: ooOoO0o % o0oOOo0O0Ooo - i11iIiiIii
if 79 - 79: iII111i - I1IiiI % O0 / Oo0Ooo + OoOoOO00 . Oo0Ooo
if 59 - 59: I1ii11iIi11i * OoOoOO00 / Ii1I
def lisp_send_map_reply ( lisp_sockets , packet , dest , port ) :
    """Send a Map-Reply to 'dest' via the lisp-core IPC socket."""
    lprint ( "Send Map-Reply to {}" . format ( dest . print_address_no_iid ( ) ) )
    lisp_send_ipc_to_core ( lisp_sockets [ 2 ] , packet , dest , port )
    return
if 80 - 80: IiII - ooOoO0o / OoOoOO00 / I11i * O0 + oO0o
if 77 - 77: ooOoO0o + I1ii11iIi11i * o0oOOo0O0Ooo / i1IIi * I11i
if 70 - 70: oO0o / iII111i * i1IIi / II111iiii / OoOoOO00 + oO0o
if 30 - 30: i1IIi - iII111i - i11iIiiIii . OoOoOO00 . o0oOOo0O0Ooo
if 74 - 74: i11iIiiIii / II111iiii
if 62 - 62: O0
if 63 - 63: Oo0Ooo + Oo0Ooo
if 48 - 48: Oo0Ooo * I1ii11iIi11i % II111iiii
def lisp_send_map_referral ( lisp_sockets , packet , dest , port ) :
    """Send a Map-Referral to 'dest' via the lisp-core IPC socket."""
    lprint ( "Send Map-Referral to {}" . format ( dest . print_address ( ) ) )
    lisp_send_ipc_to_core ( lisp_sockets [ 2 ] , packet , dest , port )
    return
if 42 - 42: I1Ii111 - ooOoO0o % o0oOOo0O0Ooo * I1IiiI . o0oOOo0O0Ooo
if 84 - 84: iIii1I11I1II1
if 39 - 39: Ii1I . II111iiii / I1IiiI
if 44 - 44: Ii1I / Ii1I / OoO0O00 % ooOoO0o / I11i . I1ii11iIi11i
if 41 - 41: I1ii11iIi11i * ooOoO0o * I11i + O0 * O0 - O0
if 81 - 81: I1Ii111 % OoO0O00 / O0
if 55 - 55: i1IIi - I1Ii111 + I11i
if 93 - 93: I1IiiI % IiII . OoOoOO00 + iII111i
def lisp_send_map_notify ( lisp_sockets , packet , dest , port ) :
    """Send a Map-Notify to an xTR via the lisp-core IPC socket."""
    lprint ( "Send Map-Notify to xTR {}" . format ( dest . print_address ( ) ) )
    lisp_send_ipc_to_core ( lisp_sockets [ 2 ] , packet , dest , port )
    return
if 81 - 81: ooOoO0o / I1Ii111 + OOooOOo / Oo0Ooo / OoOoOO00
if 34 - 34: ooOoO0o * iIii1I11I1II1 % i11iIiiIii * OOooOOo - OOooOOo
if 63 - 63: Oo0Ooo / oO0o + iII111i % OoooooooOO * I11i
if 34 - 34: I1IiiI + I1Ii111 % ooOoO0o
if 24 - 24: Ii1I % II111iiii - i11iIiiIii
if 52 - 52: OoO0O00
if 76 - 76: ooOoO0o - iII111i % ooOoO0o / oO0o . OOooOOo
def lisp_send_ecm ( lisp_sockets , packet , inner_source , inner_sport , inner_dest ,
    outer_dest , to_etr = False , to_ms = False , ddt = False ) :
    """
    Encapsulate 'packet' in an ECM header and send it to 'outer_dest' on
    the control port. The to-etr/to-ms/ddt flag bits are set only when
    the corresponding local process is actually running.
    """
    if ( inner_source == None or inner_source . is_null ( ) ) :
        inner_source = inner_dest
    # if

    #
    # Behind a NAT, use the translated port as the inner source port so
    # replies come back through the translation.
    #
    if ( lisp_nat_traversal ) :
        translated_port = lisp_get_any_translated_port ( )
        if ( translated_port != None ) : inner_sport = translated_port
    # if
    ecm = lisp_ecm ( inner_sport )

    ecm . to_etr = to_etr if lisp_is_running ( "lisp-etr" ) else False
    ecm . to_ms = to_ms if lisp_is_running ( "lisp-ms" ) else False
    ecm . ddt = ddt
    ecm_header = ecm . encode ( packet , inner_source , inner_dest )
    if ( ecm_header == None ) :
        lprint ( "Could not encode ECM message" )
        return
    # if
    ecm . print_ecm ( )

    packet = ecm_header + packet

    addr_str = outer_dest . print_address_no_iid ( )
    lprint ( "Send Encapsulated-Control-Message to {}" . format ( addr_str ) )
    dest = lisp_convert_4to6 ( addr_str )
    lisp_send ( lisp_sockets , dest , LISP_CTRL_PORT , packet )
    return
if 12 - 12: O0 - iII111i * IiII . i11iIiiIii
if 25 - 25: Ii1I % i1IIi * I11i * Ii1I - IiII . i11iIiiIii
if 40 - 40: OOooOOo - OoooooooOO
if 36 - 36: i1IIi % OoOoOO00 - i1IIi
if 5 - 5: I1IiiI . I1IiiI % II111iiii - I1Ii111
if 97 - 97: I11i . ooOoO0o
if 87 - 87: oO0o / iIii1I11I1II1 - I11i + OoooooooOO
# Address-family identifiers used in LISP address encodings. Negative
# values are internal pseudo-AFIs (not on the wire); positive values are
# IANA AFI numbers, with LISP_AFI_LCAF (16387) introducing LCAF encodings.
LISP_AFI_GEO_COORD = - 3
LISP_AFI_IID_RANGE = - 2
LISP_AFI_ULTIMATE_ROOT = - 1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
if 79 - 79: I1ii11iIi11i * IiII . I1ii11iIi11i
# RLOC reachability / administrative states.
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
if 65 - 65: iII111i - Ii1I - II111iiii * O0 + I1ii11iIi11i . iIii1I11I1II1
# Authentication algorithm identifiers for Map-Register/Map-Notify.
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
if 76 - 76: OoO0O00 * ooOoO0o
if 32 - 32: O0 . oO0o * o0oOOo0O0Ooo . Ii1I + IiII
if 98 - 98: iII111i . II111iiii % O0
if 43 - 43: OOooOOo % I1Ii111 . IiII % OoO0O00 + I1Ii111 % OoooooooOO
if 17 - 17: OoooooooOO - i1IIi * I11i
if 33 - 33: i1IIi . Oo0Ooo + I11i
if 97 - 97: OOooOOo / IiII / ooOoO0o / OoooooooOO
# Host mask lengths (in bits) for each supported address family.
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
if 78 - 78: I1Ii111 + I1Ii111
if 43 - 43: I1Ii111 * o0oOOo0O0Ooo + i1IIi
if 19 - 19: Ii1I
if 51 - 51: oO0o
if 57 - 57: i11iIiiIii - Oo0Ooo + I1Ii111 * OoO0O00
if 35 - 35: o0oOOo0O0Ooo % II111iiii + O0
def byte_swap_64 ( address ) :
    """
    Return the low 64 bits of 'address' with byte order reversed
    (e.g. 0x0102030405060708 -> 0x0807060504030201). Bits above the low
    64 are ignored, matching a 64-bit byte swap.
    """
    swapped = 0
    for byte_index in range ( 8 ) :
        # Peel off the next least-significant byte and push it toward
        # the most-significant end of the result.
        swapped = ( swapped << 8 ) | ( ( address >> ( 8 * byte_index ) ) & 0xff )
    # for
    return ( swapped )
if 14 - 14: IiII . I11i
if 13 - 13: OoOoOO00 - I11i . OOooOOo % OoO0O00
if 79 - 79: iII111i / Ii1I % i11iIiiIii . I1IiiI % OoO0O00 / i11iIiiIii
if 100 - 100: OOooOOo + Oo0Ooo . iIii1I11I1II1 . ooOoO0o * Oo0Ooo
if 16 - 16: Oo0Ooo % OoOoOO00 + I1Ii111 % I1Ii111
if 12 - 12: I1Ii111 . Ii1I / iIii1I11I1II1 + i1IIi
if 9 - 9: iIii1I11I1II1
if 75 - 75: I11i . II111iiii * I1IiiI * IiII
if 36 - 36: OOooOOo / I1ii11iIi11i / oO0o / ooOoO0o / I11i
if 7 - 7: OoO0O00 - I11i - o0oOOo0O0Ooo / o0oOOo0O0Ooo + i11iIiiIii
if 28 - 28: OoOoOO00 % ooOoO0o . I1IiiI + II111iiii
if 34 - 34: iIii1I11I1II1
if 65 - 65: II111iiii - iII111i / o0oOOo0O0Ooo
if 35 - 35: i11iIiiIii - Oo0Ooo . I1ii11iIi11i % OoOoOO00
if 20 - 20: OoO0O00
class lisp_cache_entries ( object ) :
    """
    One mask-length bucket of a lisp_cache: a dict of entries keyed by the
    cache key string, plus a sorted list of those keys.
    """
    def __init__ ( self ) :
        self . entries = { }          # key-string -> cached entry
        self . entries_sorted = [ ]   # keys kept in sorted order
if 96 - 96: o0oOOo0O0Ooo . II111iiii
class lisp_cache(object):
    """Generic two-level longest-match cache used by the LISP lookup tables.

    Entries are bucketed by a numeric sort key derived from the prefix
    mask length (kept ascending in self.cache_sorted) and, within a
    bucket, stored in a dict keyed by a string built from the prefix
    instance-id, AFI and address.

    Improvements over the original: dead obfuscation statements removed,
    generated local names replaced with meaningful ones, the hidden
    dependency on past.utils.old_div dropped in favor of //, and the
    hand-rolled binary search replaced with stdlib bisect.
    """

    def __init__(self):
        self.cache = {}          # sort-key -> lisp_cache_entries bucket
        self.cache_sorted = []   # sort keys, ascending
        self.cache_count = 0     # total number of stored entries

    def cache_size(self):
        """Return the total number of entries in the cache."""
        return (self.cache_count)

    def build_key(self, prefix):
        """Return [sort_key, lookup_key] for a prefix.

        The sort key biases regular AFIs by 48 so they order after the
        special ultimate-root/IID-range keys.
        """
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
            sort_key = 0
        elif (prefix.afi == LISP_AFI_IID_RANGE):
            sort_key = prefix.mask_len
        else:
            sort_key = prefix.mask_len + 48

        iid = lisp_hex_string(prefix.instance_id).zfill(8)
        afi = lisp_hex_string(prefix.afi).zfill(4)

        if (prefix.afi > 0):
            if (prefix.is_binary()):
                length = prefix.addr_length() * 2
                addr = lisp_hex_string(prefix.address).zfill(length)
            else:
                addr = prefix.address
        elif (prefix.afi == LISP_AFI_GEO_COORD):
            afi = "8003"
            addr = prefix.address.print_geo()
        else:
            afi = ""
            addr = ""

        return ([sort_key, iid + afi + addr])

    def add_cache(self, prefix, entry):
        """Insert (or overwrite) the entry stored under prefix."""
        if (prefix.is_binary()): prefix.zero_host_bits()
        sort_key, key = self.build_key(prefix)
        if (sort_key not in self.cache):
            self.cache[sort_key] = lisp_cache_entries()
            self.cache_sorted = self.sort_in_entry(self.cache_sorted,
                sort_key)
        if (key not in self.cache[sort_key].entries):
            self.cache_count += 1
        self.cache[sort_key].entries[key] = entry

    def lookup_cache(self, prefix, exact):
        """Return the entry for prefix, or None.

        When exact is False this is a longest-match walk: among all
        stored entries that cover `prefix`, the most specific wins.
        """
        sort_key, key = self.build_key(prefix)
        if (exact):
            if (sort_key not in self.cache): return (None)
            if (key not in self.cache[sort_key].entries): return (None)
            return (self.cache[sort_key].entries[key])

        best = None
        for mask_key in self.cache_sorted:
            # Buckets are ascending; nothing past our own key can match.
            if (sort_key < mask_key): return (best)
            for entry in list(self.cache[mask_key].entries.values()):
                if (prefix.is_more_specific(entry.eid)):
                    if (best == None or
                        entry.eid.is_more_specific(best.eid)): best = entry
        return (best)

    def delete_cache(self, prefix):
        """Remove the entry stored under prefix, if present."""
        sort_key, key = self.build_key(prefix)
        if (sort_key not in self.cache): return
        if (key not in self.cache[sort_key].entries): return
        self.cache[sort_key].entries.pop(key)
        self.cache_count -= 1

    def walk_cache(self, function, parms):
        """Call function(entry, parms) for each entry in sort order.

        The callback returns (keep_going, parms); walking stops early
        when keep_going is False. Returns the final parms.
        """
        for sort_key in self.cache_sorted:
            for entry in list(self.cache[sort_key].entries.values()):
                keep_going, parms = function(entry, parms)
                if (keep_going == False): return (parms)
        return (parms)

    def sort_in_entry(self, table, value):
        """Return `table` with `value` insertion-sorted in.

        Returns the table unchanged when the value is already present.
        (Replaces a manual binary search that needed past.utils.old_div.)
        """
        import bisect
        index = bisect.bisect_left(table, value)
        if (index < len(table) and table[index] == value): return (table)
        return (table[0:index] + [value] + table[index::])

    def print_cache(self):
        """Log every cache entry via lprint(), for debugging."""
        lprint("Printing contents of {}: ".format(self))
        if (self.cache_size() == 0):
            lprint(" Cache is empty")
            return

        for sort_key in self.cache_sorted:
            for key in self.cache[sort_key].entries:
                entry = self.cache[sort_key].entries[key]
                lprint(" Mask-length: {}, key: {}, entry: {}".format(
                    sort_key, key, entry))
if 8 - 8: i11iIiiIii . O0 + o0oOOo0O0Ooo * oO0o + II111iiii
if 61 - 61: ooOoO0o / ooOoO0o
if 51 - 51: iIii1I11I1II1 / oO0o * I1Ii111 + i1IIi
if 96 - 96: Oo0Ooo + oO0o - Oo0Ooo - OoOoOO00 % OOooOOo . iIii1I11I1II1
if 93 - 93: iIii1I11I1II1 % OoooooooOO
if 6 - 6: II111iiii / oO0o - OOooOOo . O0 - o0oOOo0O0Ooo
if 72 - 72: iIii1I11I1II1 / OoooooooOO * ooOoO0o / ooOoO0o % O0 + IiII
if 96 - 96: iII111i / i11iIiiIii + Oo0Ooo . I1IiiI + iII111i % OoOoOO00
# Global lookup tables, all instances of the generic lisp_cache.
# They are consulted by the lisp_*_lookup() helpers defined below.
lisp_referral_cache = lisp_cache ( )
lisp_ddt_cache = lisp_cache ( )
lisp_sites_by_eid = lisp_cache ( )
lisp_map_cache = lisp_cache ( )
lisp_db_for_lookups = lisp_cache ( )
if 19 - 19: i11iIiiIii . Oo0Ooo . OoOoOO00 - I1IiiI
if 85 - 85: I11i - OoO0O00 % iIii1I11I1II1 . iII111i + ooOoO0o . Oo0Ooo
if 87 - 87: iII111i
if 86 - 86: IiII - I11i
if 99 - 99: i1IIi + I1ii11iIi11i
if 24 - 24: ooOoO0o / OoooooooOO % I1ii11iIi11i * ooOoO0o
if 14 - 14: I1ii11iIi11i + OoO0O00 - I1IiiI - Oo0Ooo
def lisp_map_cache_lookup(source, dest):
    """Longest-match lookup of dest (and source, for multicast) in
    lisp_map_cache. Returns the map-cache entry or None; logs the
    outcome via dprint() either way."""
    multicast = dest.is_multicast_address()

    entry = lisp_map_cache.lookup_cache(dest, False)
    if (entry == None):
        eid_str = source.print_sg(dest) if multicast else dest.print_address()
        eid_str = green(eid_str, False)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return (None)

    # Unicast: the destination entry is the answer.
    if (multicast == False):
        prefix_str = green(entry.eid.print_prefix(), False)
        dprint("Lookup for EID {} found map-cache entry {}".format(
            green(dest.print_address(), False), prefix_str))
        return (entry)

    # Multicast: descend into the (S,G) source-specific cache.
    entry = entry.lookup_source_cache(source, False)
    if (entry == None):
        dprint("Lookup for EID {} not found in map-cache".format(
            source.print_sg(dest)))
        return (None)

    prefix_str = green(entry.print_eid_tuple(), False)
    dprint("Lookup for EID {} found map-cache entry {}".format(
        green(source.print_sg(dest), False), prefix_str))
    return (entry)
if 66 - 66: iII111i + i1IIi
if 24 - 24: O0 / OoooooooOO - OoOoOO00
if 51 - 51: OoO0O00 + o0oOOo0O0Ooo - II111iiii * I11i + Ii1I
if 16 - 16: I1Ii111 * i1IIi . I1IiiI . OOooOOo % Ii1I - o0oOOo0O0Ooo
if 89 - 89: Ii1I * I1ii11iIi11i * I1IiiI % iII111i % Ii1I + O0
if 53 - 53: i11iIiiIii % I1ii11iIi11i
if 59 - 59: OOooOOo
def lisp_referral_cache_lookup(eid, group, exact):
    """Look up a referral entry for eid (unicast) or (eid, group).
    Returns the referral entry or None."""
    # Unicast case: no group supplied.
    if (group and group.is_null()):
        return (lisp_referral_cache.lookup_cache(eid, exact))

    if (eid == None or eid.is_null()): return (None)

    # (S,G) case: find the group entry first, then the source in it.
    group_entry = lisp_referral_cache.lookup_cache(group, exact)
    if (group_entry == None): return (None)

    source_entry = group_entry.lookup_source_cache(eid, exact)
    if (source_entry): return (source_entry)

    # No source-specific entry: an exact lookup fails, a longest-match
    # lookup falls back to the group entry.
    return (None if exact else group_entry)
if 40 - 40: OOooOOo % iII111i - oO0o
if 68 - 68: iII111i - O0 / Ii1I
if 15 - 15: I1Ii111 / I1ii11iIi11i / I1IiiI % i11iIiiIii + II111iiii . ooOoO0o
if 74 - 74: o0oOOo0O0Ooo
if 4 - 4: I1ii11iIi11i * II111iiii - Oo0Ooo % i1IIi % O0 * i11iIiiIii
if 62 - 62: OoO0O00 * I1Ii111 * Ii1I / ooOoO0o
if 27 - 27: oO0o . iII111i . oO0o
def lisp_ddt_cache_lookup(eid, group, exact):
    """Look up a DDT entry for eid (unicast) or (eid, group).
    Returns the DDT cache entry or None."""
    # Unicast case.
    if (group.is_null()):
        return (lisp_ddt_cache.lookup_cache(eid, exact))

    if (eid.is_null()): return (None)

    # (S,G) case: group entry first, then the source within it.
    group_entry = lisp_ddt_cache.lookup_cache(group, exact)
    if (group_entry == None): return (None)

    source_entry = group_entry.lookup_source_cache(eid, exact)
    if (source_entry): return (source_entry)

    # Exact lookups require the source entry; longest-match falls
    # back to the group entry.
    return (None if exact else group_entry)
if 48 - 48: OoOoOO00 / OoO0O00 % II111iiii / O0
if 35 - 35: i11iIiiIii % OoooooooOO % OoooooooOO + i1IIi
if 13 - 13: o0oOOo0O0Ooo / i1IIi
if 73 - 73: ooOoO0o
if 37 - 37: OOooOOo % OoOoOO00 - II111iiii * o0oOOo0O0Ooo . I1IiiI . OoOoOO00
if 92 - 92: I11i + OoO0O00 . OoooooooOO
if 3 - 3: OoO0O00 % iIii1I11I1II1
def lisp_site_eid_lookup(eid, group, exact):
    """Look up a registered site entry for eid (unicast) or (eid, group).
    Returns the site-EID entry or None."""
    # Unicast case.
    if (group.is_null()):
        return (lisp_sites_by_eid.lookup_cache(eid, exact))

    if (eid.is_null()): return (None)

    # (S,G) case: locate the group entry, then the source within it.
    site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
    if (site_eid == None): return (None)

    source_entry = site_eid.lookup_source_cache(eid, exact)
    if (source_entry): return (source_entry)

    if (exact): return (None)

    # Longest match found no source-specific state; fall back to a
    # parent entry that accepts more-specific registrations.
    parent = site_eid.parent_for_more_specifics
    if (parent and parent.accept_more_specifics):
        if (group.is_more_specific(parent.group)): site_eid = parent
    return (site_eid)
if 88 - 88: i11iIiiIii
if 59 - 59: oO0o - OoooooooOO % ooOoO0o
if 90 - 90: OoOoOO00
if 96 - 96: II111iiii % Ii1I
if 84 - 84: I1IiiI . I1IiiI
if 82 - 82: OoO0O00 - iIii1I11I1II1 . iIii1I11I1II1 + I1ii11iIi11i
if 45 - 45: iII111i . oO0o * iII111i
if 3 - 3: OoOoOO00 / Oo0Ooo - Oo0Ooo
if 54 - 54: Oo0Ooo . OoO0O00 * I1IiiI % IiII
if 97 - 97: o0oOOo0O0Ooo + Ii1I
if 77 - 77: I11i - oO0o . Ii1I
if 75 - 75: I11i * OoooooooOO % OoOoOO00 . i1IIi - Ii1I + iIii1I11I1II1
if 74 - 74: ooOoO0o
if 18 - 18: iIii1I11I1II1 - I11i - oO0o
if 12 - 12: O0 + O0 + ooOoO0o . I1IiiI * II111iiii
if 47 - 47: i11iIiiIii % OOooOOo / ooOoO0o . IiII - I1IiiI
if 10 - 10: Oo0Ooo / ooOoO0o / I1ii11iIi11i
if 98 - 98: O0 - I1Ii111 - i11iIiiIii
if 85 - 85: II111iiii - I1ii11iIi11i % I1IiiI . I1IiiI - OoooooooOO - I11i
if 38 - 38: i1IIi + oO0o * ooOoO0o % Ii1I % ooOoO0o
if 80 - 80: OoO0O00 + OoOoOO00 % iII111i % OoooooooOO - ooOoO0o
if 25 - 25: OoOoOO00 % i11iIiiIii - I1IiiI * iIii1I11I1II1 - Oo0Ooo . O0
if 48 - 48: I1IiiI + oO0o % i11iIiiIii % iIii1I11I1II1
if 14 - 14: iIii1I11I1II1
if 78 - 78: I1Ii111 / Oo0Ooo - I1Ii111
if 1 - 1: OoO0O00 - I1IiiI * o0oOOo0O0Ooo
class lisp_address ( object ) :
def __init__ ( self , afi , addr_str , mask_len , iid ) :
self . afi = afi
self . mask_len = mask_len
self . instance_id = iid
self . iid_list = [ ]
self . address = 0
if ( addr_str != "" ) : self . store_address ( addr_str )
if 84 - 84: OoO0O00 % OoooooooOO
if 66 - 66: OoOoOO00 . iII111i
def copy_address ( self , addr ) :
if ( addr == None ) : return
self . afi = addr . afi
self . address = addr . address
self . mask_len = addr . mask_len
self . instance_id = addr . instance_id
self . iid_list = addr . iid_list
if 1 - 1: iII111i * i1IIi . iIii1I11I1II1 % O0 - OoooooooOO
if 87 - 87: iII111i . Oo0Ooo * i11iIiiIii % o0oOOo0O0Ooo + Ii1I
def make_default_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
self . mask_len = 0
self . address = 0
if 72 - 72: Ii1I / II111iiii + o0oOOo0O0Ooo
if 33 - 33: I1Ii111 * OoOoOO00 - OoooooooOO
def make_default_multicast_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
if ( self . afi == LISP_AFI_IPV4 ) :
self . address = 0xe0000000
self . mask_len = 4
if 11 - 11: I1Ii111 - Oo0Ooo / iIii1I11I1II1 - OoooooooOO
if ( self . afi == LISP_AFI_IPV6 ) :
self . address = 0xff << 120
self . mask_len = 8
if 71 - 71: Oo0Ooo + Ii1I - OoooooooOO + I11i - iIii1I11I1II1 / O0
if ( self . afi == LISP_AFI_MAC ) :
self . address = 0xffffffffffff
self . mask_len = 48
if 76 - 76: i11iIiiIii % o0oOOo0O0Ooo . O0 * I11i
if 90 - 90: II111iiii + OOooOOo % I1Ii111 * iIii1I11I1II1 % iIii1I11I1II1
if 55 - 55: II111iiii % O0 * O0 - II111iiii * I1IiiI % Oo0Ooo
def not_set ( self ) :
return ( self . afi == LISP_AFI_NONE )
if 48 - 48: I1ii11iIi11i + OoooooooOO % i1IIi
if 46 - 46: OoOoOO00
def is_private_address ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
IiI = self . address
if ( ( ( IiI & 0xff000000 ) >> 24 ) == 10 ) : return ( True )
if ( ( ( IiI & 0xff000000 ) >> 24 ) == 172 ) :
oO00o = ( IiI & 0x00ff0000 ) >> 16
if ( oO00o >= 16 and oO00o <= 31 ) : return ( True )
if 98 - 98: ooOoO0o + I11i % OoooooooOO
if ( ( ( IiI & 0xffff0000 ) >> 16 ) == 0xc0a8 ) : return ( True )
return ( False )
if 100 - 100: iIii1I11I1II1
if 30 - 30: ooOoO0o / O0
def is_multicast_address ( self ) :
if ( self . is_ipv4 ( ) ) : return ( self . is_ipv4_multicast ( ) )
if ( self . is_ipv6 ( ) ) : return ( self . is_ipv6_multicast ( ) )
if ( self . is_mac ( ) ) : return ( self . is_mac_multicast ( ) )
return ( False )
if 100 - 100: OOooOOo * OoooooooOO
if 80 - 80: O0 + oO0o - OoooooooOO - O0 . ooOoO0o . OoooooooOO
def host_mask_len ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( LISP_IPV4_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_IPV6 ) : return ( LISP_IPV6_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_MAC ) : return ( LISP_MAC_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_E164 ) : return ( LISP_E164_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_NAME ) : return ( len ( self . address ) * 8 )
if ( self . afi == LISP_AFI_GEO_COORD ) :
return ( len ( self . address . print_geo ( ) ) * 8 )
if 76 - 76: Ii1I
return ( 0 )
if 62 - 62: O0 / OoO0O00 % i11iIiiIii / OOooOOo * iIii1I11I1II1
if 78 - 78: OOooOOo % O0 * O0
def is_iana_eid ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
IiI = self . address >> 96
return ( IiI == 0x20010005 )
if 62 - 62: ooOoO0o
if 77 - 77: I1IiiI . i11iIiiIii - I1ii11iIi11i
def addr_length ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( 4 )
if ( self . afi == LISP_AFI_IPV6 ) : return ( 16 )
if ( self . afi == LISP_AFI_MAC ) : return ( 6 )
if ( self . afi == LISP_AFI_E164 ) : return ( 8 )
if ( self . afi == LISP_AFI_LCAF ) : return ( 0 )
if ( self . afi == LISP_AFI_NAME ) : return ( len ( self . address ) + 1 )
if ( self . afi == LISP_AFI_IID_RANGE ) : return ( 4 )
if ( self . afi == LISP_AFI_GEO_COORD ) :
return ( len ( self . address . print_geo ( ) ) )
if 83 - 83: OoO0O00 - i11iIiiIii + I1ii11iIi11i - OOooOOo / OoOoOO00 / I11i
return ( 0 )
if 53 - 53: I11i * I1IiiI . I1IiiI / o0oOOo0O0Ooo - I1Ii111
if 50 - 50: I11i - OoOoOO00 + I1IiiI % Oo0Ooo / OoooooooOO - I1ii11iIi11i
def afi_to_version ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( 4 )
if ( self . afi == LISP_AFI_IPV6 ) : return ( 6 )
return ( 0 )
if 26 - 26: IiII . Ii1I
if 35 - 35: I1ii11iIi11i + OOooOOo
def packet_format ( self ) :
if 88 - 88: O0
if 4 - 4: OoOoOO00 % iIii1I11I1II1 % OoooooooOO . oO0o
if 27 - 27: II111iiii - OoOoOO00
if 81 - 81: o0oOOo0O0Ooo - Oo0Ooo % IiII - ooOoO0o / O0
if 27 - 27: Oo0Ooo
if ( self . afi == LISP_AFI_IPV4 ) : return ( "I" )
if ( self . afi == LISP_AFI_IPV6 ) : return ( "QQ" )
if ( self . afi == LISP_AFI_MAC ) : return ( "HHH" )
if ( self . afi == LISP_AFI_E164 ) : return ( "II" )
if ( self . afi == LISP_AFI_LCAF ) : return ( "I" )
return ( "" )
if 15 - 15: iIii1I11I1II1 . OoOoOO00 % Ii1I / i1IIi . o0oOOo0O0Ooo
if 45 - 45: iIii1I11I1II1 - i1IIi % I1IiiI - I1Ii111 + oO0o
def pack_address ( self ) :
II111I11iI = self . packet_format ( )
Oo00oo = b""
if ( self . is_ipv4 ( ) ) :
Oo00oo = struct . pack ( II111I11iI , socket . htonl ( self . address ) )
elif ( self . is_ipv6 ( ) ) :
IiIiI = byte_swap_64 ( self . address >> 64 )
iI1Ii11 = byte_swap_64 ( self . address & 0xffffffffffffffff )
Oo00oo = struct . pack ( II111I11iI , IiIiI , iI1Ii11 )
elif ( self . is_mac ( ) ) :
IiI = self . address
IiIiI = ( IiI >> 32 ) & 0xffff
iI1Ii11 = ( IiI >> 16 ) & 0xffff
iiii111I = IiI & 0xffff
Oo00oo = struct . pack ( II111I11iI , IiIiI , iI1Ii11 , iiii111I )
elif ( self . is_e164 ( ) ) :
IiI = self . address
IiIiI = ( IiI >> 32 ) & 0xffffffff
iI1Ii11 = ( IiI & 0xffffffff )
Oo00oo = struct . pack ( II111I11iI , IiIiI , iI1Ii11 )
elif ( self . is_dist_name ( ) ) :
Oo00oo += ( self . address + "\0" ) . encode ( )
if 87 - 87: ooOoO0o * OoOoOO00
return ( Oo00oo )
if 3 - 3: i1IIi - Oo0Ooo + OoOoOO00 . I1Ii111 * iII111i - O0
if 66 - 66: o0oOOo0O0Ooo * I1Ii111 . O0 - iII111i
def unpack_address ( self , packet ) :
II111I11iI = self . packet_format ( )
oO000 = struct . calcsize ( II111I11iI )
if ( len ( packet ) < oO000 ) : return ( None )
if 22 - 22: OoO0O00 / I1IiiI - I1IiiI - i11iIiiIii . I1IiiI - OOooOOo
IiI = struct . unpack ( II111I11iI , packet [ : oO000 ] )
if 27 - 27: ooOoO0o
if ( self . is_ipv4 ( ) ) :
self . address = socket . ntohl ( IiI [ 0 ] )
if 34 - 34: OoooooooOO - I1Ii111 + I1Ii111 % IiII % OoooooooOO
elif ( self . is_ipv6 ( ) ) :
if 24 - 24: I1Ii111 . Oo0Ooo / ooOoO0o * O0
if 85 - 85: I1IiiI - OOooOOo
if 7 - 7: i1IIi % II111iiii
if 33 - 33: iIii1I11I1II1 . O0 . oO0o
if 69 - 69: II111iiii * O0 . ooOoO0o * IiII
if 25 - 25: I11i - I1ii11iIi11i . I1Ii111 . OoooooooOO
if 4 - 4: IiII * OoO0O00 % I1ii11iIi11i * Ii1I . iII111i
if 41 - 41: OoooooooOO % I11i . O0 + I1Ii111
if ( IiI [ 0 ] <= 0xffff and ( IiI [ 0 ] & 0xff ) == 0 ) :
OOo0oOoOOO0oo = ( IiI [ 0 ] << 48 ) << 64
else :
OOo0oOoOOO0oo = byte_swap_64 ( IiI [ 0 ] ) << 64
if 34 - 34: I1IiiI . II111iiii
oOooO = byte_swap_64 ( IiI [ 1 ] )
self . address = OOo0oOoOOO0oo | oOooO
if 33 - 33: i1IIi / o0oOOo0O0Ooo . OoooooooOO
elif ( self . is_mac ( ) ) :
Ii11i1Iiii11 = IiI [ 0 ]
OOo0oo0O0o0 = IiI [ 1 ]
OoOO0O00OO0OO = IiI [ 2 ]
self . address = ( Ii11i1Iiii11 << 32 ) + ( OOo0oo0O0o0 << 16 ) + OoOO0O00OO0OO
if 58 - 58: i11iIiiIii
elif ( self . is_e164 ( ) ) :
self . address = ( IiI [ 0 ] << 32 ) + IiI [ 1 ]
if 64 - 64: IiII % I1IiiI / ooOoO0o
elif ( self . is_dist_name ( ) ) :
packet , self . address = lisp_decode_dist_name ( packet )
self . mask_len = len ( self . address ) * 8
oO000 = 0
if 74 - 74: OoooooooOO
packet = packet [ oO000 : : ]
return ( packet )
if 22 - 22: II111iiii . O0 * I1Ii111 % OoO0O00 / OoooooooOO + I1Ii111
if 71 - 71: ooOoO0o . oO0o * OoooooooOO + iII111i - I1Ii111 . I1ii11iIi11i
def is_ipv4 ( self ) :
return ( True if ( self . afi == LISP_AFI_IPV4 ) else False )
if 100 - 100: I11i + O0 - o0oOOo0O0Ooo * I1ii11iIi11i
if 94 - 94: Oo0Ooo . IiII / Ii1I / oO0o - I1IiiI
def is_ipv4_link_local ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( ( ( self . address >> 16 ) & 0xffff ) == 0xa9fe )
if 77 - 77: i11iIiiIii . Ii1I - Ii1I
if 47 - 47: iII111i % OOooOOo . I1ii11iIi11i + I1ii11iIi11i . I1Ii111
def is_ipv4_loopback ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( self . address == 0x7f000001 )
if 20 - 20: oO0o - o0oOOo0O0Ooo + I1IiiI % OoOoOO00
if 41 - 41: oO0o . ooOoO0o
def is_ipv4_multicast ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( ( ( self . address >> 24 ) & 0xf0 ) == 0xe0 )
if 59 - 59: iIii1I11I1II1 - I1IiiI . ooOoO0o
if 58 - 58: I1IiiI * I1Ii111 + iII111i + iIii1I11I1II1 + I1IiiI
def is_ipv4_string ( self , addr_str ) :
return ( addr_str . find ( "." ) != - 1 )
if 78 - 78: Oo0Ooo + ooOoO0o
if 56 - 56: OoO0O00 / i1IIi + ooOoO0o . ooOoO0o . iII111i
def is_ipv6 ( self ) :
return ( True if ( self . afi == LISP_AFI_IPV6 ) else False )
if 37 - 37: iIii1I11I1II1 * OoOoOO00 . OoOoOO00 + OoooooooOO + OoO0O00
if 25 - 25: I1IiiI / IiII . OOooOOo . I1ii11iIi11i % i1IIi
def is_ipv6_link_local ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( ( ( self . address >> 112 ) & 0xffff ) == 0xfe80 )
if 12 - 12: O0 % O0
if 9 - 9: O0 . I1IiiI + I1ii11iIi11i / OOooOOo * I1ii11iIi11i
def is_ipv6_string_link_local ( self , addr_str ) :
return ( addr_str . find ( "fe80::" ) != - 1 )
if 10 - 10: IiII % o0oOOo0O0Ooo / O0 / II111iiii
if 81 - 81: Ii1I / o0oOOo0O0Ooo % OoOoOO00 . I1ii11iIi11i
def is_ipv6_loopback ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( self . address == 1 )
if 47 - 47: II111iiii + OOooOOo / II111iiii . OOooOOo
if 68 - 68: OoooooooOO
def is_ipv6_multicast ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( ( ( self . address >> 120 ) & 0xff ) == 0xff )
if 63 - 63: I1IiiI
if 80 - 80: oO0o + iIii1I11I1II1
def is_ipv6_string ( self , addr_str ) :
return ( addr_str . find ( ":" ) != - 1 )
if 87 - 87: I1ii11iIi11i % Ii1I . Ii1I
if 71 - 71: OoO0O00 - IiII . i1IIi * I1IiiI % I11i
def is_mac ( self ) :
return ( True if ( self . afi == LISP_AFI_MAC ) else False )
if 36 - 36: IiII * OoooooooOO . i11iIiiIii * i1IIi
if 52 - 52: IiII + ooOoO0o - II111iiii - OoooooooOO * OoO0O00 - iIii1I11I1II1
def is_mac_multicast ( self ) :
if ( self . is_mac ( ) == False ) : return ( False )
return ( ( self . address & 0x010000000000 ) != 0 )
if 38 - 38: II111iiii % iIii1I11I1II1 * IiII * OoOoOO00 % II111iiii . I1IiiI
if 35 - 35: OoooooooOO - i11iIiiIii * i11iIiiIii % Ii1I - OOooOOo . iIii1I11I1II1
def is_mac_broadcast ( self ) :
if ( self . is_mac ( ) == False ) : return ( False )
return ( self . address == 0xffffffffffff )
if 96 - 96: OOooOOo
if 18 - 18: oO0o . I1ii11iIi11i % oO0o
def is_mac_string ( self , addr_str ) :
return ( len ( addr_str ) == 15 and addr_str . find ( "-" ) != - 1 )
if 43 - 43: oO0o / ooOoO0o . o0oOOo0O0Ooo . iIii1I11I1II1
if 63 - 63: iII111i * iII111i
def is_link_local_multicast ( self ) :
if ( self . is_ipv4 ( ) ) :
return ( ( 0xe0ffff00 & self . address ) == 0xe0000000 )
if 78 - 78: iIii1I11I1II1 % iIii1I11I1II1 . iIii1I11I1II1 / Ii1I . O0 + i1IIi
if ( self . is_ipv6 ( ) ) :
return ( ( self . address >> 112 ) & 0xffff == 0xff02 )
if 53 - 53: Ii1I . I1ii11iIi11i - OOooOOo - ooOoO0o
return ( False )
if 17 - 17: OoooooooOO / I1IiiI * ooOoO0o % I1ii11iIi11i . OoO0O00
if 5 - 5: OoO0O00 % I1Ii111 . oO0o . Ii1I + I1IiiI
def is_null ( self ) :
return ( True if ( self . afi == LISP_AFI_NONE ) else False )
if 95 - 95: II111iiii . iII111i - iIii1I11I1II1 / I11i + ooOoO0o * I1Ii111
if 92 - 92: iII111i * OoooooooOO % I1IiiI / OOooOOo
def is_ultimate_root ( self ) :
return ( True if self . afi == LISP_AFI_ULTIMATE_ROOT else False )
if 46 - 46: OoOoOO00
if 52 - 52: o0oOOo0O0Ooo - OoO0O00 % i1IIi / Ii1I % IiII
def is_iid_range ( self ) :
return ( True if self . afi == LISP_AFI_IID_RANGE else False )
if 100 - 100: oO0o . i11iIiiIii - ooOoO0o
if 49 - 49: Oo0Ooo % ooOoO0o % o0oOOo0O0Ooo + ooOoO0o * I1Ii111 % I1IiiI
def is_e164 ( self ) :
return ( True if ( self . afi == LISP_AFI_E164 ) else False )
if 85 - 85: i1IIi / i1IIi
if 77 - 77: i1IIi . ooOoO0o % ooOoO0o - Ii1I
def is_dist_name ( self ) :
return ( True if ( self . afi == LISP_AFI_NAME ) else False )
if 6 - 6: OOooOOo % Ii1I + ooOoO0o
if 17 - 17: iIii1I11I1II1 * I1Ii111 % oO0o + o0oOOo0O0Ooo . Ii1I * Oo0Ooo
def is_geo_prefix ( self ) :
return ( True if ( self . afi == LISP_AFI_GEO_COORD ) else False )
if 16 - 16: I1IiiI % OoO0O00 . ooOoO0o / OoooooooOO
if 8 - 8: I1Ii111 % OoO0O00 . I1IiiI - OoOoOO00 + i1IIi / iIii1I11I1II1
def is_binary ( self ) :
if ( self . is_dist_name ( ) ) : return ( False )
if ( self . is_geo_prefix ( ) ) : return ( False )
return ( True )
if 89 - 89: II111iiii / Ii1I % Ii1I
if 57 - 57: I11i
def store_address ( self , addr_str ) :
if ( self . afi == LISP_AFI_NONE ) : self . string_to_afi ( addr_str )
if 95 - 95: OoOoOO00 + I11i * i1IIi - ooOoO0o % ooOoO0o
if 58 - 58: OOooOOo
if 74 - 74: i1IIi . IiII / ooOoO0o + I11i % i11iIiiIii % iII111i
if 62 - 62: i1IIi % I1Ii111
iIi1iIIIiIiI = addr_str . find ( "[" )
I11ii1IiI1Ii = addr_str . find ( "]" )
if ( iIi1iIIIiIiI != - 1 and I11ii1IiI1Ii != - 1 ) :
self . instance_id = int ( addr_str [ iIi1iIIIiIiI + 1 : I11ii1IiI1Ii ] )
addr_str = addr_str [ I11ii1IiI1Ii + 1 : : ]
if ( self . is_dist_name ( ) == False ) :
addr_str = addr_str . replace ( " " , "" )
if 94 - 94: i1IIi + iII111i
if 25 - 25: I1Ii111 . Ii1I - Ii1I . o0oOOo0O0Ooo - IiII
if 91 - 91: o0oOOo0O0Ooo % I1ii11iIi11i % OoOoOO00 * iIii1I11I1II1
if 18 - 18: OoOoOO00 * I1ii11iIi11i . i1IIi * iII111i
if 67 - 67: IiII + i11iIiiIii . II111iiii / OoOoOO00 + OoooooooOO + i11iIiiIii
if 23 - 23: Oo0Ooo
if ( self . is_ipv4 ( ) ) :
IIiI1 = addr_str . split ( "." )
oOO0 = int ( IIiI1 [ 0 ] ) << 24
oOO0 += int ( IIiI1 [ 1 ] ) << 16
oOO0 += int ( IIiI1 [ 2 ] ) << 8
oOO0 += int ( IIiI1 [ 3 ] )
self . address = oOO0
elif ( self . is_ipv6 ( ) ) :
if 95 - 95: iII111i * II111iiii / o0oOOo0O0Ooo . iIii1I11I1II1 . II111iiii
if 17 - 17: I1IiiI / i11iIiiIii + o0oOOo0O0Ooo . OoOoOO00 . I1IiiI
if 31 - 31: OoooooooOO . I1Ii111 % OoooooooOO * iII111i % OOooOOo . iII111i
if 17 - 17: I1Ii111 % i1IIi % I11i * O0 / Oo0Ooo
if 96 - 96: OoOoOO00 . Ii1I
if 80 - 80: OoOoOO00 + o0oOOo0O0Ooo - II111iiii
if 3 - 3: ooOoO0o * I1Ii111
if 34 - 34: Ii1I / Oo0Ooo . II111iiii - ooOoO0o - I1ii11iIi11i % OoOoOO00
if 43 - 43: Ii1I * oO0o
if 57 - 57: OoooooooOO + I1IiiI % I1ii11iIi11i % ooOoO0o * I1Ii111
if 9 - 9: i11iIiiIii
if 85 - 85: IiII / o0oOOo0O0Ooo * ooOoO0o
if 74 - 74: O0 - o0oOOo0O0Ooo
if 68 - 68: I1Ii111
if 19 - 19: o0oOOo0O0Ooo
if 63 - 63: OoooooooOO % ooOoO0o
if 26 - 26: OOooOOo + Oo0Ooo
O0O000o000O0 = ( addr_str [ 2 : 4 ] == "::" )
try :
addr_str = socket . inet_pton ( socket . AF_INET6 , addr_str )
except :
addr_str = socket . inet_pton ( socket . AF_INET6 , "0::0" )
if 46 - 46: I1Ii111
addr_str = binascii . hexlify ( addr_str )
if 56 - 56: I11i % i11iIiiIii . Ii1I
if ( O0O000o000O0 ) :
addr_str = addr_str [ 2 : 4 ] + addr_str [ 0 : 2 ] + addr_str [ 4 : : ]
if 13 - 13: o0oOOo0O0Ooo - OoOoOO00 . O0
self . address = int ( addr_str , 16 )
if 57 - 57: IiII % iII111i
elif ( self . is_geo_prefix ( ) ) :
O00o0o0O = lisp_geo ( None )
O00o0o0O . name = "geo-prefix-{}" . format ( O00o0o0O )
O00o0o0O . parse_geo_string ( addr_str )
self . address = O00o0o0O
elif ( self . is_mac ( ) ) :
addr_str = addr_str . replace ( "-" , "" )
oOO0 = int ( addr_str , 16 )
self . address = oOO0
elif ( self . is_e164 ( ) ) :
addr_str = addr_str [ 1 : : ]
oOO0 = int ( addr_str , 16 )
self . address = oOO0 << 4
elif ( self . is_dist_name ( ) ) :
self . address = addr_str . replace ( "'" , "" )
if 21 - 21: OoOoOO00
self . mask_len = self . host_mask_len ( )
if 86 - 86: O0 . O0 - I1Ii111
if 95 - 95: Ii1I / Ii1I * OoO0O00 . OoooooooOO . OoooooooOO * I11i
def store_prefix ( self , prefix_str ) :
if ( self . is_geo_string ( prefix_str ) ) :
OOOooo0OooOoO = prefix_str . find ( "]" )
oOo = len ( prefix_str [ OOOooo0OooOoO + 1 : : ] ) * 8
elif ( prefix_str . find ( "/" ) != - 1 ) :
prefix_str , oOo = prefix_str . split ( "/" )
else :
iIi1I1 = prefix_str . find ( "'" )
if ( iIi1I1 == - 1 ) : return
II = prefix_str . find ( "'" , iIi1I1 + 1 )
if ( II == - 1 ) : return
oOo = len ( prefix_str [ iIi1I1 + 1 : II ] ) * 8
if 76 - 76: OoooooooOO - Ii1I + IiII % OoOoOO00 / OoooooooOO
if 55 - 55: i11iIiiIii - IiII * OOooOOo + II111iiii . I1ii11iIi11i / O0
self . string_to_afi ( prefix_str )
self . store_address ( prefix_str )
self . mask_len = int ( oOo )
if 16 - 16: II111iiii . Oo0Ooo * I1Ii111 + o0oOOo0O0Ooo - i11iIiiIii
if 98 - 98: II111iiii - i1IIi - ooOoO0o
def zero_host_bits ( self ) :
if ( self . mask_len < 0 ) : return
i1I1Ii = ( 2 ** self . mask_len ) - 1
OOO0oooOo0 = self . addr_length ( ) * 8 - self . mask_len
i1I1Ii <<= OOO0oooOo0
self . address &= i1I1Ii
if 28 - 28: i1IIi + O0 - i11iIiiIii - I1Ii111
if 54 - 54: iII111i + i1IIi - I1Ii111 / iII111i . Oo0Ooo
def is_geo_string(self, addr_str):
    """Return True when addr_str is a printable geo-coordinate.

    Expected shape (after an optional leading "[iid]" and before an
    optional "/radius"): lat-mins-secs-<N|S>-long-mins-secs-<W|E> with
    an optional 9th altitude token; all other tokens must be digits.
    """
    bracket = addr_str.find("]")
    if bracket != -1:
        addr_str = addr_str[bracket + 1:]

    pieces = addr_str.split("/")
    if len(pieces) == 2 and pieces[1].isdigit() == False:
        return False

    tokens = pieces[0].split("-")
    count = len(tokens)
    if count < 8 or count > 9:
        return False

    for position, token in enumerate(tokens):
        if position == 3:
            # Latitude hemisphere.
            if token not in ["N", "S"]:
                return False
        elif position == 7:
            # Longitude hemisphere.
            if token not in ["W", "E"]:
                return False
        elif token.isdigit() == False:
            return False
    return True
if 31 - 31: I11i
if 60 - 60: Oo0Ooo - iII111i . II111iiii % ooOoO0o / OoooooooOO / iIii1I11I1II1
def string_to_afi(self, addr_str):
    """Classify a printable address string and set self.afi accordingly."""
    # A pair of single-quotes marks a distinguished-name.
    if addr_str.count("'") == 2:
        self.afi = LISP_AFI_NAME
        return

    if ":" in addr_str:
        self.afi = LISP_AFI_IPV6
    elif "." in addr_str:
        self.afi = LISP_AFI_IPV4
    elif "+" in addr_str:
        self.afi = LISP_AFI_E164
    elif self.is_geo_string(addr_str):
        self.afi = LISP_AFI_GEO_COORD
    elif "-" in addr_str:
        self.afi = LISP_AFI_MAC
    else:
        self.afi = LISP_AFI_NONE
if 60 - 60: O0 * I1IiiI + o0oOOo0O0Ooo * OoO0O00 + o0oOOo0O0Ooo / i11iIiiIii
if 54 - 54: i11iIiiIii . iII111i * i1IIi
def print_address(self):
    """Return the address prefixed with its '[iid,...]' bracket block."""
    address_part = self.print_address_no_iid()
    iids = [str(self.instance_id)] + [str(iid) for iid in self.iid_list]
    return "[" + ",".join(iids) + "]" + address_part
if 68 - 68: Oo0Ooo
if 20 - 20: IiII + i11iIiiIii * OOooOOo
def print_address_no_iid(self):
    """Return the printable address WITHOUT any '[iid]' prefix."""
    if self.is_ipv4():
        # Address stored as a 32-bit integer; print dotted-quad.
        top = self.address >> 24
        mid = (self.address >> 16) & 0xff
        low = (self.address >> 8) & 0xff
        last = self.address & 0xff
        return "{}.{}.{}.{}".format(top, mid, low, last)
    if self.is_ipv6():
        # 128-bit integer -> 32 hex chars -> canonical form via inet_ntop().
        packed = binascii.unhexlify(lisp_hex_string(self.address).zfill(32))
        return "{}".format(socket.inet_ntop(socket.AF_INET6, packed))
    if self.is_geo_prefix():
        return "{}".format(self.address.print_geo())
    if self.is_mac():
        # 48-bit integer printed as xxxx-xxxx-xxxx.
        hex_str = lisp_hex_string(self.address).zfill(12)
        return "{}-{}-{}".format(hex_str[0:4], hex_str[4:8], hex_str[8:12])
    if self.is_e164():
        # E.164 phone number, 15 digits with a leading "+".
        return "+{}".format(lisp_hex_string(self.address).zfill(15))
    if self.is_dist_name():
        return "'{}'".format(self.address)
    if self.is_null():
        return "no-address"
    return "unknown-afi:{}".format(self.afi)
if 72 - 72: I11i * ooOoO0o / iII111i . OoooooooOO + I1Ii111 + I1Ii111
if 35 - 35: Oo0Ooo + oO0o * o0oOOo0O0Ooo - iIii1I11I1II1 % I1ii11iIi11i * i11iIiiIii
def print_prefix(self):
    """Return the printable '[iid]address/mask-len' form of this prefix.

    Special cases: the ultimate root prints '[*]', iid-ranges print
    '[low-high]', and dist-name/geo prefixes carry no mask suffix.
    """
    if self.is_ultimate_root():
        return "[*]"
    if self.is_iid_range():
        if self.mask_len == 32:
            return "[{}]".format(self.instance_id)
        high = self.instance_id + (2 ** (32 - self.mask_len) - 1)
        return "[{}-{}]".format(self.instance_id, high)

    addr = self.print_address()
    if self.is_dist_name() or self.is_geo_prefix():
        return addr

    marker = addr.find("no-address")
    if marker == -1:
        return "{}/{}".format(addr, str(self.mask_len))
    # Strip the "no-address" suffix, leaving just the "[iid]" part.
    return addr[0:marker]
if 97 - 97: I1Ii111 . Oo0Ooo
if 44 - 44: OoO0O00 + OOooOOo
def print_prefix_no_iid(self):
    """Return the printable prefix without the instance-id block."""
    addr = self.print_address_no_iid()
    if self.is_dist_name() or self.is_geo_prefix():
        return addr
    return "{}/{}".format(addr, str(self.mask_len))
if 9 - 9: iII111i . i11iIiiIii * IiII . I11i
if 40 - 40: i11iIiiIii + iII111i % I1IiiI % I11i - Oo0Ooo * ooOoO0o
def print_prefix_url(self):
    """Return a URL-safe '<iid>-<address>[-<mask-len>]' prefix form."""
    if self.is_ultimate_root():
        return "0--0"
    addr = self.print_address()
    bracket = addr.find("]")
    if bracket != -1:
        addr = addr[bracket + 1:]
    if self.is_geo_prefix():
        # Geo strings may contain "/radius"; make it URL safe.
        return "{}-{}".format(self.instance_id, addr.replace("/", "-"))
    return "{}-{}-{}".format(self.instance_id, addr, self.mask_len)
if 14 - 14: ooOoO0o . OoOoOO00 + ooOoO0o * OoOoOO00 . OoOoOO00 * Oo0Ooo
if 40 - 40: OoooooooOO
def print_sg(self, g):
    """Return '[iid](S-prefix, G-prefix)' for this (S, G) pair."""
    source_str = self.print_prefix()
    source_str = source_str[source_str.find("]") + 1:]
    group_str = g.print_prefix()
    group_str = group_str[group_str.find("]") + 1:]
    return "[{}]({}, {})".format(self.instance_id, source_str, group_str)
if 58 - 58: OoOoOO00 / I1Ii111 % O0
if 14 - 14: I1IiiI . OOooOOo
def hash_address(self, addr):
    # XOR-fold this address with addr to produce an integer hash value.
    IiIiI = self.address
    iI1Ii11 = addr.address
    if 28 - 28: iII111i / oO0o / iII111i
    # Geo-prefixes hash on their printable string form.
    if (self.is_geo_prefix()): IiIiI = self.address.print_geo()
    if (addr.is_geo_prefix()): iI1Ii11 = addr.address.print_geo()
    if 97 - 97: II111iiii + Oo0Ooo
    # String addresses fold only their first character into an int.
    # NOTE(review): binascii.hexlify() requires bytes and int() here
    # parses base-10 — looks like Python-2 era code; on Python 3 this
    # raises for str input or hex digits a-f. Confirm before relying on it.
    if (type(IiIiI) == str):
        IiIiI = int(binascii.hexlify(IiIiI[0:1]))
    if 57 - 57: o0oOOo0O0Ooo % OoooooooOO - oO0o * IiII + OoooooooOO
    if (type(iI1Ii11) == str):
        iI1Ii11 = int(binascii.hexlify(iI1Ii11[0:1]))
    if 65 - 65: OoooooooOO + OOooOOo - I1Ii111
    return (IiIiI ^ iI1Ii11)
if 78 - 78: Oo0Ooo * OOooOOo + i11iIiiIii
if 15 - 15: I1ii11iIi11i % I1Ii111 . I1ii11iIi11i - iIii1I11I1II1
if 20 - 20: i1IIi - Ii1I . II111iiii + O0 % oO0o % II111iiii
if 26 - 26: iIii1I11I1II1 - Ii1I / iIii1I11I1II1 . i1IIi - o0oOOo0O0Ooo
if 48 - 48: iII111i . i11iIiiIii - iIii1I11I1II1 / iIii1I11I1II1
if 92 - 92: II111iiii . oO0o - O0 + o0oOOo0O0Ooo * I1ii11iIi11i
def is_more_specific(self, prefix):
    # Return True when self is equal to, or more specific than, the
    # supplied EID-prefix (i.e. self falls inside prefix).
    if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return (True)
    if 32 - 32: I1IiiI % OoO0O00
    oOo = prefix.mask_len
    # IID-range pseudo-prefixes match on instance-id only; the mask-len
    # sizes the range of instance-ids covered.
    if (prefix.afi == LISP_AFI_IID_RANGE):
        oooo0Oo0 = 2 ** (32 - oOo)
        iI11IIi11i = prefix.instance_id
        oo00O0 = iI11IIi11i + oooo0Oo0
        return (self.instance_id in range(iI11IIi11i, oo00O0))
    if 52 - 52: I1IiiI % Ii1I - Ii1I
    if 73 - 73: I1ii11iIi11i - IiII * IiII . O0 - i11iIiiIii + I1IiiI
    if (self.instance_id != prefix.instance_id): return (False)
    # Differing AFIs only match when the prefix is AFI-agnostic.
    if (self.afi != prefix.afi):
        if (prefix.afi != LISP_AFI_NONE): return (False)
    if 20 - 20: I1Ii111
    if 40 - 40: OoooooooOO / o0oOOo0O0Ooo + OoOoOO00
    if 73 - 73: OOooOOo / Oo0Ooo
    if 80 - 80: OoO0O00 + I1IiiI % i1IIi / I11i % i1IIi * i11iIiiIii
    if 27 - 27: OoOoOO00 / I1Ii111 * O0 / I1IiiI - IiII / o0oOOo0O0Ooo
    # Non-binary AFIs (dist-name, geo) compare as longest-prefix strings.
    if (self.is_binary() == False):
        if (prefix.afi == LISP_AFI_NONE): return (True)
        if (type(self.address) != type(prefix.address)): return (False)
        IiI = self.address
        ooo00o0oo = prefix.address
        if (self.is_geo_prefix()):
            IiI = self.address.print_geo()
            ooo00o0oo = prefix.address.print_geo()
        if 77 - 77: I1ii11iIi11i . i1IIi * OOooOOo / iII111i
        # More-specific means prefix's string is a leading substring.
        if (len(IiI) < len(ooo00o0oo)): return (False)
        return (IiI.find(ooo00o0oo) == 0)
    if 70 - 70: o0oOOo0O0Ooo
    if 49 - 49: OOooOOo - I1IiiI + OoooooooOO % iII111i + o0oOOo0O0Ooo + OoOoOO00
    if 37 - 37: II111iiii % I1ii11iIi11i * OoOoOO00
    if 35 - 35: i1IIi
    if 81 - 81: OoO0O00
    # Binary AFIs: self must be at least as long, and self's address
    # masked down to the prefix's mask-len must equal the prefix.
    if (self.mask_len < oOo): return (False)
    if 45 - 45: OoooooooOO . O0 * oO0o + IiII
    OOO0oooOo0 = (prefix.addr_length() * 8) - oOo
    i1I1Ii = (2 ** oOo - 1) << OOO0oooOo0
    return ((self.address & i1I1Ii) == prefix.address)
if 18 - 18: II111iiii . O0 - I11i / I11i
if 71 - 71: OoOoOO00 + iIii1I11I1II1 - II111iiii / i1IIi
def mask_address(self, mask_len):
    """Zero the host bits of self.address below the given mask length."""
    host_bits = self.addr_length() * 8 - mask_len
    netmask = (2 ** mask_len - 1) << host_bits
    self.address = self.address & netmask
if 39 - 39: Ii1I + I1Ii111 * Oo0Ooo + OoOoOO00 / I1Ii111 - ooOoO0o
if 66 - 66: I11i * OoO0O00
def is_exact_match(self, prefix):
    """True when prefix has the same instance-id and identical print form."""
    if self.instance_id != prefix.instance_id:
        return False
    other_str = prefix.print_prefix() if prefix else ""
    return self.print_prefix() == other_str
if 64 - 64: Ii1I % ooOoO0o * I1Ii111 * OOooOOo
if 68 - 68: IiII / o0oOOo0O0Ooo * OoO0O00 % iIii1I11I1II1 + I1IiiI . I1IiiI
def is_local(self):
    """True when this address equals one of our own local RLOCs.

    lisp_myrlocs holds our IPv4 RLOC at index 0 and IPv6 at index 1.
    """
    if self.is_ipv4():
        local = lisp_myrlocs[0]
    elif self.is_ipv6():
        local = lisp_myrlocs[1]
    else:
        return False
    if local == None:
        return False
    return self.print_address_no_iid() == local.print_address_no_iid()
if 51 - 51: I1Ii111 - I11i % o0oOOo0O0Ooo * Oo0Ooo - oO0o + II111iiii
if 7 - 7: oO0o
def store_iid_range(self, iid, mask_len):
    """Store an instance-id range starting at iid, sized by mask_len."""
    if self.afi == LISP_AFI_NONE:
        # [0]/0 is the ultimate root; anything else is an iid-range.
        is_root = (iid == 0 and mask_len == 0)
        self.afi = LISP_AFI_ULTIMATE_ROOT if is_root else LISP_AFI_IID_RANGE
    self.instance_id = iid
    self.mask_len = mask_len
if 79 - 79: oO0o % I11i * I11i . OOooOOo % OoooooooOO
if 71 - 71: iII111i
def lcaf_length(self, lcaf_type):
    """Return the LCAF payload length for this address and lcaf_type.

    Base is the packed address length plus its 2-byte AFI; each LCAF
    type adds its fixed header overhead. Multicast-Info carries two
    addresses (source and group) plus an 8-byte header.
    """
    base = self.addr_length() + 2
    if lcaf_type == LISP_LCAF_MCAST_INFO_TYPE:
        return base * 2 + 8

    overhead = {
        LISP_LCAF_AFI_LIST_TYPE: 4,
        LISP_LCAF_INSTANCE_ID_TYPE: 4,
        LISP_LCAF_ASN_TYPE: 4,
        LISP_LCAF_APP_DATA_TYPE: 8,
        LISP_LCAF_GEO_COORD_TYPE: 12,
        LISP_LCAF_OPAQUE_TYPE: 0,
        LISP_LCAF_NAT_TYPE: 4,
        LISP_LCAF_NONCE_LOC_TYPE: 4,
        LISP_LCAF_ELP_TYPE: 0,
        LISP_LCAF_SECURITY_TYPE: 6,
        LISP_LCAF_SOURCE_DEST_TYPE: 4,
        LISP_LCAF_RLE_TYPE: 4,
    }
    return base + overhead.get(lcaf_type, 0)
if 48 - 48: OoOoOO00 + oO0o
if 15 - 15: i11iIiiIii / IiII * I1ii11iIi11i - O0 % II111iiii + Ii1I
if 100 - 100: Ii1I + O0 . iII111i - Ii1I + O0 . OOooOOo
if 77 - 77: OOooOOo * OoOoOO00 - i1IIi * I1IiiI . I1Ii111
if 37 - 37: i1IIi - O0
if 36 - 36: I1Ii111 . OoooooooOO - i1IIi % iII111i - II111iiii * i11iIiiIii
if 90 - 90: OoOoOO00 % iII111i - Oo0Ooo
if 13 - 13: o0oOOo0O0Ooo / O0 . I1Ii111 * I1Ii111
if 76 - 76: Ii1I - iII111i
if 79 - 79: o0oOOo0O0Ooo + IiII / o0oOOo0O0Ooo - I1IiiI / OoooooooOO
if 17 - 17: OOooOOo * I1ii11iIi11i . Ii1I . iIii1I11I1II1 * OoooooooOO
if 60 - 60: II111iiii % Oo0Ooo * I11i * OoO0O00 - OoOoOO00
if 65 - 65: iII111i
if 86 - 86: OoO0O00 / II111iiii % OoOoOO00 * OOooOOo . I1IiiI / IiII
if 100 - 100: i1IIi / I1IiiI * I1ii11iIi11i % ooOoO0o + OoO0O00 * oO0o
if 51 - 51: I1Ii111 - OoooooooOO / iII111i / I1IiiI % ooOoO0o / OoO0O00
if 45 - 45: i11iIiiIii - II111iiii / i1IIi * OoOoOO00
def lcaf_encode_iid(self):
    # Encode this EID as an LCAF Instance-ID (type 2) payload.
    IIiiIIi1II11 = LISP_LCAF_INSTANCE_ID_TYPE
    I1Ii11iI11ii = socket.htons(self.lcaf_length(IIiiIIi1II11))
    oooo = self.instance_id
    Oooo000 = self.afi
    OOO00o00Oo0 = 0
    # Negative AFIs are internal pseudo-AFIs (root/iid-range/geo).
    if (Oooo000 < 0):
        if (self.afi == LISP_AFI_GEO_COORD):
            # Geo-coords nest a Geo-Coord LCAF after the Instance-ID LCAF.
            Oooo000 = LISP_AFI_LCAF
            OOO00o00Oo0 = 0
        else:
            # AFI 0 with the iid mask-len carried in the LCAF header byte.
            Oooo000 = 0
            OOO00o00Oo0 = self.mask_len
    if 1 - 1: OOooOOo + I1IiiI + Ii1I . iII111i
    if 89 - 89: I1Ii111 * I1IiiI . i1IIi - iIii1I11I1II1 * I1Ii111
    if 5 - 5: OoOoOO00 % i1IIi
    IIiiiIIIIIi1 = struct.pack("BBBBH", 0, 0, IIiiIIi1II11, OOO00o00Oo0, I1Ii11iI11ii)
    IIiiiIIIIIi1 += struct.pack("IH", socket.htonl(oooo), socket.htons(Oooo000))
    # AFI 0 means no address body follows.
    if (Oooo000 == 0): return (IIiiiIIIIIi1)
    if 41 - 41: oO0o + O0 / I1ii11iIi11i
    if (self.afi == LISP_AFI_GEO_COORD):
        # Replace the trailing 2-byte AFI with the nested Geo-Coord LCAF.
        IIiiiIIIIIi1 = IIiiiIIIIIi1[0:-2]
        IIiiiIIIIIi1 += self.address.encode_geo()
        return (IIiiiIIIIIi1)
    if 55 - 55: iIii1I11I1II1 * oO0o / iII111i / i1IIi % Oo0Ooo . OoOoOO00
    if 50 - 50: IiII / o0oOOo0O0Ooo
    IIiiiIIIIIi1 += self.pack_address()
    return (IIiiiIIIIIi1)
if 9 - 9: Oo0Ooo - OoO0O00 + iII111i / OoooooooOO
if 52 - 52: O0
def lcaf_decode_iid(self, packet):
    # Decode an LCAF Instance-ID (type 2) encoded EID into self.
    # Returns the remaining packet buffer, or None when malformed/short.
    II111I11iI = "BBBBH"
    oO000 = struct.calcsize(II111I11iI)
    if (len(packet) < oO000): return (None)
    if 34 - 34: OoooooooOO + OoOoOO00 - Oo0Ooo . OOooOOo * iIii1I11I1II1
    # rsvd1, flags, lcaf-type, iid-mask-len, lcaf-length.
    iIiiiI1, II11iiiII1Ii, IIiiIIi1II11, Oo0Oo, i1 = struct.unpack(II111I11iI,
        packet[:oO000])
    packet = packet[oO000::]
    if 50 - 50: I1Ii111 * OOooOOo + iII111i
    if (IIiiIIi1II11 != LISP_LCAF_INSTANCE_ID_TYPE): return (None)
    if 72 - 72: OoOoOO00 * I1IiiI % i1IIi / OoO0O00 * i1IIi % I1IiiI
    II111I11iI = "IH"
    oO000 = struct.calcsize(II111I11iI)
    if (len(packet) < oO000): return (None)
    if 61 - 61: I1IiiI / OoooooooOO - ooOoO0o
    oooo, Oooo000 = struct.unpack(II111I11iI, packet[:oO000])
    packet = packet[oO000::]
    if 22 - 22: iII111i
    i1 = socket.ntohs(i1)
    self.instance_id = socket.ntohl(oooo)
    Oooo000 = socket.ntohs(Oooo000)
    self.afi = Oooo000
    # AFI 0 with a nonzero mask-len byte is an iid-range pseudo-prefix;
    # AFI 0 with mask-len 0 is the ultimate root.
    if (Oo0Oo != 0 and Oooo000 == 0): self.mask_len = Oo0Oo
    if (Oooo000 == 0):
        self.afi = LISP_AFI_IID_RANGE if Oo0Oo else LISP_AFI_ULTIMATE_ROOT
    if 30 - 30: OoO0O00 + I11i + Oo0Ooo
    if 77 - 77: II111iiii
    if 92 - 92: I1Ii111 / I1IiiI / I1ii11iIi11i + I11i + Ii1I
    if 51 - 51: OOooOOo
    if 85 - 85: II111iiii
    # No address body follows when AFI is 0.
    if (Oooo000 == 0): return (packet)
    if 60 - 60: Ii1I * OOooOOo - o0oOOo0O0Ooo - Ii1I / Oo0Ooo . OOooOOo
    if 43 - 43: II111iiii * o0oOOo0O0Ooo % o0oOOo0O0Ooo + iIii1I11I1II1 + OoOoOO00
    if 54 - 54: II111iiii + OOooOOo * Oo0Ooo * I1Ii111 - o0oOOo0O0Ooo % Ii1I
    if 69 - 69: I11i + OoOoOO00 - i11iIiiIii * O0 % O0
    # Distinguished-name EID follows the AFI as a null-terminated string.
    if (self.is_dist_name()):
        packet, self.address = lisp_decode_dist_name(packet)
        self.mask_len = len(self.address) * 8
        return (packet)
    if 81 - 81: I11i - o0oOOo0O0Ooo % Ii1I / I1Ii111 * II111iiii
    if 40 - 40: OoO0O00 . i11iIiiIii
    if 36 - 36: o0oOOo0O0Ooo * iII111i / I1ii11iIi11i % i1IIi % I1ii11iIi11i + i11iIiiIii
    if 24 - 24: I1Ii111 / ooOoO0o - i11iIiiIii
    if 32 - 32: II111iiii * Ii1I . ooOoO0o * Oo0Ooo - I1ii11iIi11i % I11i
    # Nested LCAF: only a Geo-Coord LCAF is supported here.
    if (Oooo000 == LISP_AFI_LCAF):
        II111I11iI = "BBBBH"
        oO000 = struct.calcsize(II111I11iI)
        if (len(packet) < oO000): return (None)
        if 96 - 96: Ii1I / OOooOOo / O0
        iI1i1II11I, OoO0o0oOOoOoo, IIiiIIi1II11, I1iIiiiI1II1, ii111iIii1 = struct.unpack(II111I11iI, packet[:oO000])
        if 8 - 8: iII111i + OOooOOo / I1ii11iIi11i . iII111i
        if 45 - 45: i1IIi
        if (IIiiIIi1II11 != LISP_LCAF_GEO_COORD_TYPE): return (None)
        if 28 - 28: iII111i
        ii111iIii1 = socket.ntohs(ii111iIii1)
        packet = packet[oO000::]
        if (ii111iIii1 > len(packet)): return (None)
        if 28 - 28: i1IIi - iII111i + o0oOOo0O0Ooo / Oo0Ooo * oO0o
        O00o0o0O = lisp_geo("")
        self.afi = LISP_AFI_GEO_COORD
        self.address = O00o0o0O
        packet = O00o0o0O.decode_geo(packet, ii111iIii1, I1iIiiiI1II1)
        self.mask_len = self.host_mask_len()
        return (packet)
    if 8 - 8: ooOoO0o + OOooOOo * ooOoO0o / i1IIi . I1ii11iIi11i
    if 4 - 4: Ii1I - Oo0Ooo . i1IIi + iIii1I11I1II1
    # Fixed-length binary address (IPv4/IPv6/MAC/E.164).
    I1Ii11iI11ii = self.addr_length()
    if (len(packet) < I1Ii11iI11ii): return (None)
    if 28 - 28: O0 / ooOoO0o / IiII - I11i + IiII + OoO0O00
    packet = self.unpack_address(packet)
    return (packet)
if 84 - 84: Oo0Ooo + OoOoOO00 / iII111i . I1ii11iIi11i
if 26 - 26: Oo0Ooo
if 61 - 61: Ii1I * oO0o * i11iIiiIii + OoO0O00
if 43 - 43: OoO0O00 * OoO0O00 * oO0o
if 24 - 24: oO0o
if 77 - 77: i11iIiiIii - I1Ii111 - I1ii11iIi11i * Oo0Ooo / i11iIiiIii
if 79 - 79: Oo0Ooo % Oo0Ooo . oO0o + ooOoO0o * iII111i * I11i
if 87 - 87: o0oOOo0O0Ooo + OoOoOO00 % o0oOOo0O0Ooo + I1IiiI
if 89 - 89: II111iiii
if 41 - 41: iIii1I11I1II1
if 26 - 26: Oo0Ooo / i1IIi + Oo0Ooo
if 76 - 76: I1ii11iIi11i * i1IIi % oO0o
if 80 - 80: i1IIi * II111iiii . O0 % I1ii11iIi11i / ooOoO0o
if 58 - 58: I1IiiI * I1ii11iIi11i - i1IIi % I1Ii111 % O0
if 24 - 24: I11i + I11i % I11i
if 63 - 63: i11iIiiIii + iIii1I11I1II1 / oO0o % IiII - O0
if 21 - 21: II111iiii
if 89 - 89: OOooOOo % i11iIiiIii * OoOoOO00 % oO0o / O0 * i1IIi
if 16 - 16: IiII
if 42 - 42: i1IIi / Ii1I * I1ii11iIi11i
if 9 - 9: I11i % i1IIi / i1IIi / OoO0O00
def lcaf_encode_sg(self, group):
    # Encode (self=source, group) as an LCAF Multicast-Info (type 9)
    # payload: header, then source AFI+address, then group AFI+address.
    IIiiIIi1II11 = LISP_LCAF_MCAST_INFO_TYPE
    oooo = socket.htonl(self.instance_id)
    I1Ii11iI11ii = socket.htons(self.lcaf_length(IIiiIIi1II11))
    IIiiiIIIIIi1 = struct.pack("BBBBHIHBB", 0, 0, IIiiIIi1II11, 0, I1Ii11iI11ii, oooo,
        0, self.mask_len, group.mask_len)
    if 46 - 46: I1Ii111 * II111iiii + II111iiii * O0 % II111iiii
    IIiiiIIIIIi1 += struct.pack("H", socket.htons(self.afi))
    IIiiiIIIIIi1 += self.pack_address()
    IIiiiIIIIIi1 += struct.pack("H", socket.htons(group.afi))
    IIiiiIIIIIi1 += group.pack_address()
    return (IIiiiIIIIIi1)
if 37 - 37: OOooOOo . iIii1I11I1II1 / O0 . ooOoO0o + OOooOOo - OoooooooOO
if 96 - 96: I1Ii111 / oO0o . I1ii11iIi11i % I1IiiI * OOooOOo
def lcaf_decode_sg(self, packet):
    # Decode an LCAF Multicast-Info (type 9) payload. Source goes into
    # self; returns [remaining-packet, group-address], or [None, None]
    # when the packet is malformed or too short.
    II111I11iI = "BBBBHIHBB"
    oO000 = struct.calcsize(II111I11iI)
    if (len(packet) < oO000): return ([None, None])
    if 99 - 99: i11iIiiIii - I1Ii111
    # rsvd, flags, type, rsvd, lcaf-len, iid, rsvd, source-ml, group-ml.
    iIiiiI1, II11iiiII1Ii, IIiiIIi1II11, oo00O0OO0oo0O, i1, oooo, IIiiii1I1, O0O0o, oOO = struct.unpack(II111I11iI, packet[:oO000])
    if 31 - 31: I1IiiI / iIii1I11I1II1 + I1IiiI - o0oOOo0O0Ooo % OoOoOO00 - OoOoOO00
    packet = packet[oO000::]
    if 16 - 16: oO0o - oO0o . I1Ii111 + I1ii11iIi11i
    if (IIiiIIi1II11 != LISP_LCAF_MCAST_INFO_TYPE): return ([None, None])
    if 10 - 10: I1ii11iIi11i / Ii1I
    self.instance_id = socket.ntohl(oooo)
    # Remaining LCAF length after the fixed 8-byte multicast-info header.
    i1 = socket.ntohs(i1) - 8
    if 71 - 71: ooOoO0o * I1Ii111
    if 3 - 3: IiII . i11iIiiIii . Oo0Ooo - I1Ii111 . Ii1I
    if 43 - 43: O0 / Ii1I - OoO0O00 + OOooOOo
    if 54 - 54: I1Ii111 % OoO0O00 - OoooooooOO
    if 96 - 96: IiII
    # Source AFI and address.
    II111I11iI = "H"
    oO000 = struct.calcsize(II111I11iI)
    if (len(packet) < oO000): return ([None, None])
    if (i1 < oO000): return ([None, None])
    if 31 - 31: Ii1I + O0 - OOooOOo * O0 * I11i
    Oooo000 = struct.unpack(II111I11iI, packet[:oO000])[0]
    packet = packet[oO000::]
    i1 -= oO000
    self.afi = socket.ntohs(Oooo000)
    self.mask_len = O0O0o
    I1Ii11iI11ii = self.addr_length()
    if (i1 < I1Ii11iI11ii): return ([None, None])
    if 53 - 53: I1ii11iIi11i + i11iIiiIii / iIii1I11I1II1 + OoooooooOO + IiII * I1IiiI
    packet = self.unpack_address(packet)
    if (packet == None): return ([None, None])
    if 16 - 16: i11iIiiIii - oO0o . i11iIiiIii + OoO0O00 + i11iIiiIii
    i1 -= I1Ii11iI11ii
    if 85 - 85: I1ii11iIi11i - ooOoO0o + I1Ii111 + I1Ii111
    if 13 - 13: II111iiii
    if 22 - 22: o0oOOo0O0Ooo
    if 45 - 45: I1Ii111 + OoooooooOO + o0oOOo0O0Ooo * II111iiii
    if 12 - 12: I1ii11iIi11i / O0
    # Group AFI and address.
    II111I11iI = "H"
    oO000 = struct.calcsize(II111I11iI)
    if (len(packet) < oO000): return ([None, None])
    if (i1 < oO000): return ([None, None])
    if 18 - 18: OoOoOO00 . i11iIiiIii + i1IIi / OoooooooOO - IiII % OoO0O00
    Oooo000 = struct.unpack(II111I11iI, packet[:oO000])[0]
    packet = packet[oO000::]
    i1 -= oO000
    o0o0Oo0o0oOo = lisp_address(LISP_AFI_NONE, "", 0, 0)
    o0o0Oo0o0oOo.afi = socket.ntohs(Oooo000)
    o0o0Oo0o0oOo.mask_len = oOO
    o0o0Oo0o0oOo.instance_id = self.instance_id
    # NOTE(review): length check uses self.addr_length(), not the
    # group's — equivalent when source and group AFIs match; confirm
    # that mixed-AFI (S,G) encodings are not expected here.
    I1Ii11iI11ii = self.addr_length()
    if (i1 < I1Ii11iI11ii): return ([None, None])
    if 47 - 47: iII111i % IiII + I1Ii111 * o0oOOo0O0Ooo * OoooooooOO
    packet = o0o0Oo0o0oOo.unpack_address(packet)
    if (packet == None): return ([None, None])
    if 100 - 100: Oo0Ooo / I1IiiI / iII111i / I1Ii111 / oO0o % o0oOOo0O0Ooo
    return ([packet, o0o0Oo0o0oOo])
if 16 - 16: I1IiiI + I11i
if 66 - 66: OoooooooOO % II111iiii / I1Ii111 . i11iIiiIii
def lcaf_decode_eid(self, packet):
    # Peek at the LCAF type byte and dispatch to the matching decoder.
    # Returns [remaining-packet, group-or-None]; group is only produced
    # for Multicast-Info encodings.
    II111I11iI = "BBB"
    oO000 = struct.calcsize(II111I11iI)
    if (len(packet) < oO000): return ([None, None])
    if 67 - 67: Ii1I + Oo0Ooo - I1IiiI - IiII + oO0o + Oo0Ooo
    if 84 - 84: I1ii11iIi11i % oO0o - OOooOOo * Ii1I
    if 78 - 78: i1IIi / ooOoO0o / oO0o
    if 21 - 21: IiII % Ii1I + OOooOOo + IiII
    if 90 - 90: o0oOOo0O0Ooo
    # Peek only — packet is not advanced; the type-specific decoders
    # re-parse the full LCAF header themselves.
    oo00O0OO0oo0O, OoO0o0oOOoOoo, IIiiIIi1II11 = struct.unpack(II111I11iI,
        packet[:oO000])
    if 38 - 38: OoOoOO00 / OOooOOo % OoooooooOO * I1ii11iIi11i
    if (IIiiIIi1II11 == LISP_LCAF_INSTANCE_ID_TYPE):
        return ([self.lcaf_decode_iid(packet), None])
    elif (IIiiIIi1II11 == LISP_LCAF_MCAST_INFO_TYPE):
        packet, o0o0Oo0o0oOo = self.lcaf_decode_sg(packet)
        return ([packet, o0o0Oo0o0oOo])
    elif (IIiiIIi1II11 == LISP_LCAF_GEO_COORD_TYPE):
        II111I11iI = "BBBBH"
        oO000 = struct.calcsize(II111I11iI)
        # NOTE(review): error paths in this branch return a bare None
        # while the other branches return [None, None] — confirm callers
        # tolerate both shapes.
        if (len(packet) < oO000): return (None)
        if 7 - 7: I11i * O0 + Oo0Ooo / O0 * oO0o + i11iIiiIii
        iI1i1II11I, OoO0o0oOOoOoo, IIiiIIi1II11, I1iIiiiI1II1, ii111iIii1 = struct.unpack(II111I11iI, packet[:oO000])
        if 74 - 74: OoOoOO00
        if 91 - 91: i11iIiiIii / Ii1I % OOooOOo % O0 - I11i . I11i
        if (IIiiIIi1II11 != LISP_LCAF_GEO_COORD_TYPE): return (None)
        if 78 - 78: i1IIi + I11i % OoooooooOO + i1IIi + iII111i % Ii1I
        ii111iIii1 = socket.ntohs(ii111iIii1)
        packet = packet[oO000::]
        if (ii111iIii1 > len(packet)): return (None)
        if 87 - 87: ooOoO0o . iIii1I11I1II1
        O00o0o0O = lisp_geo("")
        self.instance_id = 0
        self.afi = LISP_AFI_GEO_COORD
        self.address = O00o0o0O
        packet = O00o0o0O.decode_geo(packet, ii111iIii1, I1iIiiiI1II1)
        self.mask_len = self.host_mask_len()
        if 99 - 99: Ii1I + OoooooooOO * IiII * i11iIiiIii - iIii1I11I1II1
        return ([packet, None])
if 58 - 58: IiII % i1IIi . i11iIiiIii
if 5 - 5: OoOoOO00
if 75 - 75: OOooOOo
if 60 - 60: ooOoO0o - II111iiii - iIii1I11I1II1
if 23 - 23: I1ii11iIi11i
if 68 - 68: OoO0O00 . oO0o / IiII - II111iiii % Oo0Ooo
class lisp_elp_node(object):
    """One hop (re-encapsulation node) of an Explicit Locator Path."""

    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)  # hop address
        self.probe = False        # RLOC-probe this hop (P-bit)
        self.strict = False       # strict ordering required (S-bit)
        self.eid = False          # hop address is an EID, not an RLOC
        self.we_are_last = False  # True when this hop is the local node

    def copy_elp_node(self):
        """Return a copy of this ELP node (address deep-copied)."""
        i11I1iI1I = lisp_elp_node()
        # Bug fix: copy_address() is a lisp_address method, not a
        # lisp_elp_node method — the original called it on the node
        # itself, which would raise AttributeError at runtime.
        i11I1iI1I.address.copy_address(self.address)
        i11I1iI1I.probe = self.probe
        i11I1iI1I.strict = self.strict
        i11I1iI1I.eid = self.eid
        i11I1iI1I.we_are_last = self.we_are_last
        return (i11I1iI1I)
if 83 - 83: I11i % Ii1I + IiII % I11i / i1IIi . oO0o
if 56 - 56: I1Ii111 - OOooOOo % o0oOOo0O0Ooo
if 30 - 30: I1Ii111 % i1IIi
class lisp_elp(object):
    # An Explicit Locator Path: a named, ordered list of lisp_elp_node
    # hops, plus the hop currently selected for forwarding.
    def __init__(self, name):
        self.elp_name = name
        self.elp_nodes = []
        self.use_elp_node = None   # next-hop node chosen by select_elp_node()
        self.we_are_last = False   # True when we are the final ELP hop
    if 98 - 98: oO0o . i11iIiiIii / Ii1I - Ii1I
    if 23 - 23: iIii1I11I1II1
    def copy_elp(self):
        # Return a copy of this ELP with per-node copies of each hop.
        OOO00O = lisp_elp(self.elp_name)
        OOO00O.use_elp_node = self.use_elp_node
        OOO00O.we_are_last = self.we_are_last
        for i11I1iI1I in self.elp_nodes:
            OOO00O.elp_nodes.append(i11I1iI1I.copy_elp_node())
        if 30 - 30: I1ii11iIi11i + OoO0O00 - O0
        return (OOO00O)
    if 42 - 42: I11i - I1Ii111
    if 24 - 24: i1IIi
    def print_elp(self, want_marker):
        # Printable comma-separated hop list. With want_marker, "*" marks
        # the selected next-hop and "x" marks the local (last) hop. The
        # r/R, P/p, S/s letters show the eid, probe and strict flags.
        iIii1 = ""
        for i11I1iI1I in self.elp_nodes:
            OOOO0oooO = ""
            if (want_marker):
                if (i11I1iI1I == self.use_elp_node):
                    OOOO0oooO = "*"
                elif (i11I1iI1I.we_are_last):
                    OOOO0oooO = "x"
                if 56 - 56: I1Ii111 . I1ii11iIi11i - o0oOOo0O0Ooo / i11iIiiIii * iII111i / iIii1I11I1II1
            if 49 - 49: I1IiiI / iIii1I11I1II1
            iIii1 += "{}{}({}{}{}), ".format(OOOO0oooO,
                i11I1iI1I.address.print_address_no_iid(),
                "r" if i11I1iI1I.eid else "R", "P" if i11I1iI1I.probe else "p",
                "S" if i11I1iI1I.strict else "s")
        if 31 - 31: i1IIi % I11i * o0oOOo0O0Ooo % i1IIi / IiII
        # Trim the trailing ", " separator.
        return (iIii1[0:-2] if iIii1 != "" else "")
    if 20 - 20: iIii1I11I1II1 . O0
    if 61 - 61: OoOoOO00 * OOooOOo
    def select_elp_node(self):
        # Find our own RLOC in the path (lisp_myrlocs is [v4, v6, device])
        # and select the hop AFTER us as the forwarding next-hop.
        iiiI1i, ooiii1iiI1, ooO000OO = lisp_myrlocs
        OOOooo0OooOoO = None
        if 48 - 48: II111iiii % I1ii11iIi11i - II111iiii
        for i11I1iI1I in self.elp_nodes:
            if (iiiI1i and i11I1iI1I.address.is_exact_match(iiiI1i)):
                OOOooo0OooOoO = self.elp_nodes.index(i11I1iI1I)
                break
            if 29 - 29: I1Ii111 - I1Ii111 - I11i * iIii1I11I1II1 % OoO0O00 % IiII
            if (ooiii1iiI1 and i11I1iI1I.address.is_exact_match(ooiii1iiI1)):
                OOOooo0OooOoO = self.elp_nodes.index(i11I1iI1I)
                break
            if 73 - 73: i1IIi . OoooooooOO / OoOoOO00 % Ii1I / Ii1I / Ii1I
        if 40 - 40: I1Ii111 - iIii1I11I1II1
        if 88 - 88: OOooOOo * O0 * OoOoOO00
        if 26 - 26: Ii1I
        if 65 - 65: iII111i / iIii1I11I1II1 + I11i - iIii1I11I1II1 - Ii1I . I1Ii111
        if 77 - 77: OoOoOO00 / I1IiiI + IiII
        if 66 - 66: i11iIiiIii * OoooooooOO + iII111i / Ii1I
        # We are not in the path: forward to the first hop.
        # NOTE(review): i11I1iI1I below is the for-loop variable; with an
        # empty elp_nodes list this raises NameError — confirm callers
        # guarantee a non-empty path.
        if (OOOooo0OooOoO == None):
            self.use_elp_node = self.elp_nodes[0]
            i11I1iI1I.we_are_last = False
            return
        if 42 - 42: Ii1I / iIii1I11I1II1 / Oo0Ooo . O0 . oO0o * I1IiiI
        if 21 - 21: OoooooooOO
        if 76 - 76: i1IIi * i11iIiiIii / OOooOOo + I1Ii111
        if 50 - 50: oO0o % OoOoOO00 + I1IiiI
        if 15 - 15: II111iiii - iII111i / I1ii11iIi11i
        if 81 - 81: Ii1I - i1IIi % oO0o * Oo0Ooo * OoOoOO00
        # We are the last hop: nothing to forward to.
        if (self.elp_nodes[-1] == self.elp_nodes[OOOooo0OooOoO]):
            self.use_elp_node = None
            i11I1iI1I.we_are_last = True
            return
        if 79 - 79: oO0o + I1IiiI % iII111i + II111iiii % OoO0O00 % iII111i
        if 46 - 46: o0oOOo0O0Ooo
        if 61 - 61: OoO0O00 . O0 + I1ii11iIi11i + OoO0O00
        if 44 - 44: I11i . oO0o
        if 65 - 65: I1ii11iIi11i * II111iiii % I11i + II111iiii . i1IIi / ooOoO0o
        # Otherwise forward to the hop that follows us in the path.
        self.use_elp_node = self.elp_nodes[OOOooo0OooOoO + 1]
        return
if 74 - 74: OoOoOO00 % OoO0O00 . OoOoOO00
if 16 - 16: OoO0O00 / Ii1I * i11iIiiIii / o0oOOo0O0Ooo + I1Ii111
if 21 - 21: I11i % I1ii11iIi11i
class lisp_geo ( object ) :
def __init__(self, name):
    # A geo-coordinate in degrees/minutes/seconds form.
    # 0xffffffff latitude/longitude means "not yet parsed".
    self.geo_name = name
    self.latitude = 0xffffffff   # degrees; negative value encodes "N" (see print_geo)
    self.lat_mins = 0
    self.lat_secs = 0
    self.longitude = 0xffffffff  # degrees; negative value encodes "E" (see print_geo)
    self.long_mins = 0
    self.long_secs = 0
    self.altitude = -1           # -1 means no altitude configured
    self.radius = 0              # geo-prefix circle radius; 0 means a point
if 8 - 8: OOooOOo % OoO0O00 + O0 - o0oOOo0O0Ooo
if 46 - 46: Oo0Ooo . ooOoO0o + OoOoOO00 - I11i / i11iIiiIii . iII111i
def copy_geo(self):
    """Return a field-by-field copy of this lisp_geo."""
    replica = lisp_geo(self.geo_name)
    for field in ("latitude", "lat_mins", "lat_secs",
                  "longitude", "long_mins", "long_secs",
                  "altitude", "radius"):
        setattr(replica, field, getattr(self, field))
    return replica
if 80 - 80: II111iiii + OoO0O00 % ooOoO0o + i11iIiiIii
if 30 - 30: Ii1I / I1ii11iIi11i % IiII - Oo0Ooo
def no_geo_altitude ( self ) :
return ( self . altitude == - 1 )
if 100 - 100: IiII . I1Ii111 * oO0o % OoO0O00 . iIii1I11I1II1 * Oo0Ooo
if 100 - 100: IiII - OoOoOO00 % iII111i
def parse_geo_string ( self , geo_str ) :
OOOooo0OooOoO = geo_str . find ( "]" )
if ( OOOooo0OooOoO != - 1 ) : geo_str = geo_str [ OOOooo0OooOoO + 1 : : ]
if 24 - 24: Oo0Ooo / OoO0O00 + i11iIiiIii
if 81 - 81: i11iIiiIii . iIii1I11I1II1 - OoooooooOO
if 52 - 52: O0 - I1Ii111 + oO0o % ooOoO0o . oO0o
if 60 - 60: oO0o + o0oOOo0O0Ooo - OOooOOo % o0oOOo0O0Ooo . I11i + OoO0O00
if 27 - 27: i11iIiiIii - I1ii11iIi11i * I1Ii111 . I1IiiI / OoO0O00 * ooOoO0o
if ( geo_str . find ( "/" ) != - 1 ) :
geo_str , iIIIiI11ii = geo_str . split ( "/" )
self . radius = int ( iIIIiI11ii )
if 82 - 82: OoooooooOO % ooOoO0o
if 68 - 68: I1ii11iIi11i . Ii1I . O0 * OoO0O00
geo_str = geo_str . split ( "-" )
if ( len ( geo_str ) < 8 ) : return ( False )
if 26 - 26: ooOoO0o + OoO0O00 / I1ii11iIi11i * ooOoO0o
O0o0oOoOO0 = geo_str [ 0 : 4 ]
i1IoO0oOOO = geo_str [ 4 : 8 ]
if 88 - 88: I1Ii111
if 3 - 3: OoO0O00
if 48 - 48: i11iIiiIii * i11iIiiIii / oO0o
if 25 - 25: iIii1I11I1II1 / iIii1I11I1II1 - OoooooooOO + I1IiiI . OoooooooOO
if ( len ( geo_str ) > 8 ) : self . altitude = int ( geo_str [ 8 ] )
if 26 - 26: OoooooooOO % iIii1I11I1II1 - IiII
if 3 - 3: oO0o * II111iiii . O0
if 19 - 19: I1IiiI / I1IiiI / Oo0Ooo + oO0o + i1IIi
if 31 - 31: iII111i / OoooooooOO - I1Ii111 . iII111i
self . latitude = int ( O0o0oOoOO0 [ 0 ] )
self . lat_mins = int ( O0o0oOoOO0 [ 1 ] )
self . lat_secs = int ( O0o0oOoOO0 [ 2 ] )
if ( O0o0oOoOO0 [ 3 ] == "N" ) : self . latitude = - self . latitude
if 38 - 38: ooOoO0o . OoooooooOO - II111iiii * i11iIiiIii / i1IIi . OoooooooOO
if 51 - 51: oO0o - I1ii11iIi11i + I1ii11iIi11i
if 100 - 100: I11i - I1ii11iIi11i . i1IIi
if 85 - 85: II111iiii
self . longitude = int ( i1IoO0oOOO [ 0 ] )
self . long_mins = int ( i1IoO0oOOO [ 1 ] )
self . long_secs = int ( i1IoO0oOOO [ 2 ] )
if ( i1IoO0oOOO [ 3 ] == "E" ) : self . longitude = - self . longitude
return ( True )
if 58 - 58: i1IIi - OoO0O00 + ooOoO0o
if 6 - 6: IiII % I1IiiI + OoooooooOO * oO0o . iII111i + oO0o
def print_geo(self):
    """Return 'lat-m-s-<N|S>-long-m-s-<W|E>[-alt][/radius]' for this point.

    Negative stored latitude means north, negative longitude means east
    (the inverse of parse_geo_string()).
    """
    lat_hemi = "N" if self.latitude < 0 else "S"
    long_hemi = "E" if self.longitude < 0 else "W"

    fields = [abs(self.latitude), self.lat_mins, self.lat_secs, lat_hemi,
              abs(self.longitude), self.long_mins, self.long_secs, long_hemi]
    geo_str = "-".join(str(field) for field in fields)

    if not self.no_geo_altitude():
        geo_str += "-" + str(self.altitude)

    # A nonzero radius makes this a geo-prefix circle.
    if self.radius != 0:
        geo_str += "/{}".format(self.radius)
    return geo_str
if 33 - 33: ooOoO0o
if 19 - 19: I1Ii111 % IiII
def geo_url(self):
    """Return a Google static-maps URL centered on this geo-coordinate.

    The zoom level comes from the LISP_GEO_ZOOM_LEVEL environment
    variable and defaults to "10" when unset or not a decimal number.
    """
    # Bug fix: os.getenv() with no default returns None when the
    # variable is unset, and the original then crashed on
    # None.isdigit(). Default to "" so the fallback path is taken.
    O00OOOoOoooo = os.getenv("LISP_GEO_ZOOM_LEVEL", "")
    O00OOOoOoooo = "10" if (O00OOOoOoooo == "" or O00OOOoOoooo.isdigit() == False) else O00OOOoOoooo
    o0oOO0OOoO, ooO0O = self.dms_to_decimal()
    OOOO0o0o = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" +
        "&markers=color:blue%7Clabel:lisp%7C{},{}" +
        "&zoom={}&size=1024x1024&sensor=false").format(o0oOO0OOoO, ooO0O,
        o0oOO0OOoO, ooO0O, O00OOOoOoooo)
    return (OOOO0o0o)
if 100 - 100: IiII - Ii1I * iIii1I11I1II1 . iII111i . i1IIi % Oo0Ooo
if 11 - 11: I11i + oO0o % Ii1I
def print_geo_url(self):
    """Return an HTML anchor for this geo-point.

    A plain point links straight to the static-map URL; a geo-prefix
    (nonzero radius) links to the internal /lisp/geo-map/ page.
    """
    geo_str = self.print_geo()
    if self.radius == 0:
        return "<a href='{}'>{}</a>".format(self.geo_url(), geo_str)
    map_path = geo_str.replace("/", "-")
    return "<a href='/lisp/geo-map/{}'>{}</a>".format(map_path, geo_str)
if 83 - 83: OOooOOo - i11iIiiIii - i1IIi / oO0o
if 33 - 33: OoO0O00 + OOooOOo
def dms_to_decimal ( self ) :
IIII11111Iii1I , OoOO000O , iiIIi1i1IIi = self . latitude , self . lat_mins , self . lat_secs
IiI11ii = float ( abs ( IIII11111Iii1I ) )
IiI11ii += float ( OoOO000O * 60 + iiIIi1i1IIi ) / 3600
if ( IIII11111Iii1I > 0 ) : IiI11ii = - IiI11ii
oO0O = IiI11ii
if 35 - 35: Oo0Ooo - iIii1I11I1II1 - I1Ii111 % OOooOOo
IIII11111Iii1I , OoOO000O , iiIIi1i1IIi = self . longitude , self . long_mins , self . long_secs
IiI11ii = float ( abs ( IIII11111Iii1I ) )
IiI11ii += float ( OoOO000O * 60 + iiIIi1i1IIi ) / 3600
if ( IIII11111Iii1I > 0 ) : IiI11ii = - IiI11ii
oO0oIiI1i1II = IiI11ii
return ( ( oO0O , oO0oIiI1i1II ) )
if 18 - 18: OOooOOo
if 12 - 12: I1Ii111 % II111iiii / o0oOOo0O0Ooo - iIii1I11I1II1 + II111iiii
def get_distance(self, geo_point):
    # Return the geodesic distance in kilometers between this point and
    # geo_point (third-party geopy does the distance computation).
    ii1iii1II1 = self.dms_to_decimal()
    IIii1II1i = geo_point.dms_to_decimal()
    i1iioO = geopy.distance.distance(ii1iii1II1, IIii1II1i)
    return (i1iioO.km)
if 96 - 96: IiII * OOooOOo / Oo0Ooo / Oo0Ooo / OoooooooOO . i11iIiiIii
if 24 - 24: OoO0O00 - OoO0O00 * Oo0Ooo + oO0o + o0oOOo0O0Ooo % OOooOOo
def point_in_circle(self, geo_point):
    """True when geo_point lies within this point's radius (km)."""
    return (self.get_distance(geo_point) <= self.radius)
if 82 - 82: I1IiiI
if 12 - 12: O0 * OoooooooOO - i1IIi % oO0o
def encode_geo ( self ) :
# Pack this geo-point into a LISP LCAF Geo-Coordinates address record
# and return the resulting byte string. Layout: a 6-byte LCAF header
# (AFI=LCAF, type=GEO_COORD) followed by a "BBHBBHBBHIHHH" payload.
O0oooOoOO0O = socket . htons ( LISP_AFI_LCAF )
i11IIi = socket . htons ( 20 + 2 )
OoO0o0oOOoOoo = 0
if 27 - 27: i1IIi - OOooOOo / Oo0Ooo
# Latitude: magnitude in whole degrees plus minutes/seconds scaled to
# milliseconds; flag bit 0x40 marks a negative latitude.
o0oOO0OOoO = abs ( self . latitude )
IIiiiiII = ( ( self . lat_mins * 60 ) + self . lat_secs ) * 1000
if ( self . latitude < 0 ) : OoO0o0oOOoOoo |= 0x40
if 86 - 86: OOooOOo * OoOoOO00 % i1IIi * IiII . I1ii11iIi11i
# Longitude: same encoding; flag bit 0x20 marks a negative longitude.
ooO0O = abs ( self . longitude )
Ooo00oOOo = ( ( self . long_mins * 60 ) + self . long_secs ) * 1000
if ( self . longitude < 0 ) : OoO0o0oOOoOoo |= 0x20
if 3 - 3: iIii1I11I1II1 % I1Ii111 + i11iIiiIii
# Altitude is encoded only when configured; flag bit 0x10 says it is
# present.
IIii1IIi1 = 0
if ( self . no_geo_altitude ( ) == False ) :
IIii1IIi1 = socket . htonl ( self . altitude )
OoO0o0oOOoOoo |= 0x10
if 63 - 63: OoooooooOO % I1Ii111 + IiII / OoooooooOO
# A non-zero radius sets both radius flag bits (0x06); decode_geo()
# reads 0x02 as "radius already in km".
iIIIiI11ii = socket . htons ( self . radius )
if ( iIIIiI11ii != 0 ) : OoO0o0oOOoOoo |= 0x06
if 60 - 60: II111iiii + II111iiii
# LCAF header, then the payload. The 24-bit millisecond values are
# split into a high byte and a network-order 16-bit low word.
i1III = struct . pack ( "HBBBBH" , O0oooOoOO0O , 0 , 0 , LISP_LCAF_GEO_COORD_TYPE ,
0 , i11IIi )
i1III += struct . pack ( "BBHBBHBBHIHHH" , OoO0o0oOOoOoo , 0 , 0 , o0oOO0OOoO , IIiiiiII >> 16 ,
socket . htons ( IIiiiiII & 0x0ffff ) , ooO0O , Ooo00oOOo >> 16 ,
socket . htons ( Ooo00oOOo & 0xffff ) , IIii1IIi1 , iIIIiI11ii , 0 , 0 )
if 88 - 88: OoO0O00
return ( i1III )
if 25 - 25: OoooooooOO . Oo0Ooo + OOooOOo + Oo0Ooo * O0 % i1IIi
if 71 - 71: II111iiii / Ii1I + i1IIi - OoOoOO00 + Ii1I
def decode_geo ( self , packet , lcaf_len , radius_hi ) :
# Parse a LISP LCAF Geo-Coordinates payload from 'packet' into this
# object's latitude/longitude/altitude/radius fields and return the
# remaining packet bytes, or None when the payload is malformed.
# NOTE(review): the 'radius_hi' parameter is not used in this body.
II111I11iI = "BBHBBHBBHIHHH"
oO000 = struct . calcsize ( II111I11iI )
if ( lcaf_len < oO000 ) : return ( None )
if 31 - 31: OoooooooOO * Ii1I - iII111i . oO0o % Ii1I
# Fields: flags, 2 reserved, lat degrees, lat-msec hi/lo, lon degrees,
# lon-msec hi/lo, altitude, radius, reserved, trailing AFI.
OoO0o0oOOoOoo , oOOoo0O0OOO , Iii1IIiIIii1 , o0oOO0OOoO , oo00OooOoO0O0 , IIiiiiII , ooO0O , O0OOo0ooOoo , Ooo00oOOo , IIii1IIi1 , iIIIiI11ii , iIIi1Iii1Ii , Oooo000 = struct . unpack ( II111I11iI ,
# ooOoO0o
packet [ : oO000 ] )
if 41 - 41: O0 % o0oOOo0O0Ooo % Oo0Ooo / i1IIi . II111iiii
if 23 - 23: I1ii11iIi11i . Oo0Ooo . iII111i % i1IIi
if 56 - 56: iIii1I11I1II1 * i11iIiiIii % O0 * Ii1I % I1Ii111 % I11i
if 65 - 65: I1ii11iIi11i . I1IiiI . II111iiii . ooOoO0o - o0oOOo0O0Ooo
# A nested LCAF in the trailing AFI is not supported.
Oooo000 = socket . ntohs ( Oooo000 )
if ( Oooo000 == LISP_AFI_LCAF ) : return ( None )
if 34 - 34: OoooooooOO - iII111i * iIii1I11I1II1 . OoO0O00
# Flag 0x40: negative latitude. Milliseconds are rebuilt from the
# high byte and network-order low word, then split back into
# minutes and seconds.
if ( OoO0o0oOOoOoo & 0x40 ) : o0oOO0OOoO = - o0oOO0OOoO
self . latitude = o0oOO0OOoO
oo0O0o0oO = old_div ( ( ( oo00OooOoO0O0 << 16 ) | socket . ntohs ( IIiiiiII ) ) , 1000 )
self . lat_mins = old_div ( oo0O0o0oO , 60 )
self . lat_secs = oo0O0o0oO % 60
if 61 - 61: IiII * IiII - OoOoOO00 % Ii1I . Oo0Ooo * II111iiii
# Flag 0x20: negative longitude; same millisecond decoding.
if ( OoO0o0oOOoOoo & 0x20 ) : ooO0O = - ooO0O
self . longitude = ooO0O
O00ooo0OO0ooO = old_div ( ( ( O0OOo0ooOoo << 16 ) | socket . ntohs ( Ooo00oOOo ) ) , 1000 )
self . long_mins = old_div ( O00ooo0OO0ooO , 60 )
self . long_secs = O00ooo0OO0ooO % 60
if 29 - 29: II111iiii
# Altitude only when flag 0x10 is set; -1 means "no altitude".
self . altitude = socket . ntohl ( IIii1IIi1 ) if ( OoO0o0oOOoOoo & 0x10 ) else - 1
iIIIiI11ii = socket . ntohs ( iIIIiI11ii )
# Flag 0x02 means the radius is already in km, otherwise scale by 1000.
self . radius = iIIIiI11ii if ( OoO0o0oOOoOoo & 0x02 ) else iIIIiI11ii * 1000
if 15 - 15: I11i + I1IiiI / I11i + iIii1I11I1II1 * Oo0Ooo / I1ii11iIi11i
self . geo_name = None
packet = packet [ oO000 : : ]
if 8 - 8: ooOoO0o . O0 / OoO0O00
# A non-zero trailing AFI carries an RLOC address after the payload.
if ( Oooo000 != 0 ) :
self . rloc . afi = Oooo000
packet = self . rloc . unpack_address ( packet )
self . rloc . mask_len = self . rloc . host_mask_len ( )
if 50 - 50: Ii1I . OoOoOO00 * o0oOOo0O0Ooo
return ( packet )
if 68 - 68: IiII * oO0o / OoOoOO00 / I1Ii111
if 72 - 72: I1ii11iIi11i
if 74 - 74: I1Ii111 * iIii1I11I1II1 / oO0o - IiII - I1IiiI
if 84 - 84: iIii1I11I1II1 % Oo0Ooo / I1ii11iIi11i + o0oOOo0O0Ooo * II111iiii
if 81 - 81: I1IiiI / I1ii11iIi11i / OOooOOo
if 89 - 89: Oo0Ooo % IiII
class lisp_rle_node(object):
    """One node of a Replication List Entry (RLE): an RLOC address with
    its replication level and optional NAT translation state."""

    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.level = 0
        self.translated_port = 0
        self.rloc_name = None

    def copy_rle_node(self):
        """Return a copy of this RLE node."""
        duplicate = lisp_rle_node()
        duplicate.address.copy_address(self.address)
        duplicate.level = self.level
        duplicate.translated_port = self.translated_port
        duplicate.rloc_name = self.rloc_name
        return (duplicate)

    def store_translated_rloc(self, rloc, port):
        """Overwrite this node's address and port with NAT-translated
        values."""
        self.address.copy_address(rloc)
        self.translated_port = port

    def get_encap_keys(self):
        """Look up encap crypto keys for this node's "addr:port".

        Returns (encrypt_key, icv_key) when key slot 1 exists, else
        (None, None). Port 0 means the default data port "4341".
        """
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.address.print_address_no_iid() + ":" + port
        try:
            key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
            if key: return (key.encrypt_key, key.icv_key)
            return (None, None)
        except:    # NOTE: bare except kept deliberately — missing key state
            return (None, None)
if 28 - 28: i11iIiiIii . Oo0Ooo . Ii1I
if 19 - 19: OoO0O00 - Ii1I + ooOoO0o + OOooOOo
if 84 - 84: iII111i / Oo0Ooo
if 21 - 21: OoO0O00 . I1IiiI - OoO0O00
class lisp_rle ( object ) :
# A named Replication List Entry: the full list of RLE nodes plus the
# subset currently selected for packet replication.
def __init__ ( self , name ) :
self . rle_name = name
self . rle_nodes = [ ]
self . rle_forwarding_list = [ ]
if 51 - 51: iIii1I11I1II1
if 5 - 5: oO0o - OoOoOO00 . ooOoO0o
# Return a copy of this RLE with copied nodes and a freshly built
# forwarding list.
def copy_rle ( self ) :
ooo0o0O = lisp_rle ( self . rle_name )
for iI11i1ii11i11 in self . rle_nodes :
ooo0o0O . rle_nodes . append ( iI11i1ii11i11 . copy_rle_node ( ) )
if 97 - 97: I11i - ooOoO0o + oO0o . I1Ii111
ooo0o0O . build_forwarding_list ( )
return ( ooo0o0O )
if 22 - 22: Ii1I - II111iiii % Oo0Ooo * OoOoOO00 + iIii1I11I1II1
if 5 - 5: Oo0Ooo % o0oOOo0O0Ooo * I1Ii111
# Render the node list as "addr[:port](name), ..." with optional
# color formatting (local addresses in red, names in blue).
def print_rle ( self , html , do_formatting ) :
I1i1iI1i1i1 = ""
for iI11i1ii11i11 in self . rle_nodes :
I1I = iI11i1ii11i11 . translated_port
if 6 - 6: OOooOOo + o0oOOo0O0Ooo
Iiii1i = ""
if ( iI11i1ii11i11 . rloc_name != None ) :
Iiii1i = iI11i1ii11i11 . rloc_name
if ( do_formatting ) : Iiii1i = blue ( Iiii1i , html )
Iiii1i = "({})" . format ( Iiii1i )
if 77 - 77: OOooOOo + oO0o * iIii1I11I1II1 / oO0o / OOooOOo . i11iIiiIii
if 92 - 92: Oo0Ooo . o0oOOo0O0Ooo % OoooooooOO * i11iIiiIii * OoO0O00 * o0oOOo0O0Ooo
O0O0 = iI11i1ii11i11 . address . print_address_no_iid ( )
if ( iI11i1ii11i11 . address . is_local ( ) ) : O0O0 = red ( O0O0 , html )
I1i1iI1i1i1 += "{}{}{}, " . format ( O0O0 , "" if I1I == 0 else ":" + str ( I1I ) , Iiii1i )
if 48 - 48: iII111i * I1ii11iIi11i * oO0o % O0 . OoO0O00
if 11 - 11: OOooOOo / o0oOOo0O0Ooo
# Strip the trailing ", " separator.
return ( I1i1iI1i1i1 [ 0 : - 2 ] if I1i1iI1i1i1 != "" else "" )
if 98 - 98: oO0o + I11i . oO0o
if 10 - 10: iII111i + i1IIi . I11i % ooOoO0o / ooOoO0o
# Select the replication level to forward at, then collect all nodes
# at that level into rle_forwarding_list.
def build_forwarding_list ( self ) :
# First pass: once a local node is found, pick the level of the
# first node whose level is greater than the local node's level.
# Assumes rle_nodes is ordered by level — TODO confirm.
O00OoO0 = - 1
for iI11i1ii11i11 in self . rle_nodes :
if ( O00OoO0 == - 1 ) :
if ( iI11i1ii11i11 . address . is_local ( ) ) : O00OoO0 = iI11i1ii11i11 . level
else :
if ( iI11i1ii11i11 . level > O00OoO0 ) : break
if 86 - 86: Oo0Ooo
if 7 - 7: iIii1I11I1II1
# No local node: forward at level 0. Otherwise use the level of
# the node where the loop stopped (the lazily-evaluated else arm
# is only reached when the node list is non-empty).
O00OoO0 = 0 if O00OoO0 == - 1 else iI11i1ii11i11 . level
if 86 - 86: IiII + iII111i * II111iiii - IiII - o0oOOo0O0Ooo
# Second pass: keep nodes at the chosen level (level 128 also
# qualifies when forwarding at level 0); on a non-RTR, skip local
# RLOCs so we do not replicate to ourselves.
self . rle_forwarding_list = [ ]
for iI11i1ii11i11 in self . rle_nodes :
if ( iI11i1ii11i11 . level == O00OoO0 or ( O00OoO0 == 0 and
iI11i1ii11i11 . level == 128 ) ) :
if ( lisp_i_am_rtr == False and iI11i1ii11i11 . address . is_local ( ) ) :
O0O0 = iI11i1ii11i11 . address . print_address_no_iid ( )
lprint ( "Exclude local RLE RLOC {}" . format ( O0O0 ) )
continue
if 8 - 8: OOooOOo . Ii1I
self . rle_forwarding_list . append ( iI11i1ii11i11 )
if 15 - 15: ooOoO0o / OOooOOo + i1IIi / Ii1I / OOooOOo
if 47 - 47: Oo0Ooo + oO0o % OoooooooOO
if 47 - 47: Oo0Ooo + oO0o % OoooooooOO
if 23 - 23: I1Ii111 / i11iIiiIii - ooOoO0o * iII111i - Ii1I . iIii1I11I1II1
if 11 - 11: I11i % OoOoOO00 * Oo0Ooo
if 48 - 48: OOooOOo
class lisp_json ( object ) :
# A named JSON string stored in the global lisp_json_list, optionally
# ChaCha-encrypted per-value with a map-server JSON key.
def __init__ ( self , name , string , encrypted = False , ms_encrypt = False ) :
if 66 - 66: iII111i - I1Ii111 - i11iIiiIii . o0oOOo0O0Ooo + Oo0Ooo
if 90 - 90: O0 - i11iIiiIii * ooOoO0o . I1ii11iIi11i . Ii1I - OoooooooOO
if 23 - 23: o0oOOo0O0Ooo
if 88 - 88: I1Ii111 + iIii1I11I1II1 / o0oOOo0O0Ooo
if ( type ( string ) == bytes ) : string = string . decode ( )
if 93 - 93: ooOoO0o % iIii1I11I1II1 - OOooOOo . IiII + ooOoO0o
self . json_name = name
self . json_encrypted = False
# Validate the JSON; an unparsable string is replaced with a
# '{ "?" : "?" }' placeholder so later loads cannot fail.
try :
json . loads ( string )
except :
lprint ( "Invalid JSON string: '{}'" . format ( string ) )
string = '{ "?" : "?" }'
if 63 - 63: I1ii11iIi11i / OOooOOo
self . json_string = string
if 28 - 28: I11i / I1Ii111 + IiII * OoooooooOO - iIii1I11I1II1
if 6 - 6: I11i % o0oOOo0O0Ooo / OoooooooOO . I1Ii111
if 17 - 17: I1ii11iIi11i + OoooooooOO / iIii1I11I1II1 . II111iiii + Oo0Ooo
if 7 - 7: O0 - I1ii11iIi11i - iIii1I11I1II1
if 96 - 96: OoOoOO00 . I1IiiI . I11i * OoooooooOO + OoooooooOO * O0
if 90 - 90: I11i + I1ii11iIi11i + OoooooooOO + OoOoOO00 + IiII / iII111i
if 75 - 75: i11iIiiIii
if 27 - 27: I11i - IiII - I1Ii111
if 90 - 90: OoO0O00 . oO0o * O0 / I11i % O0 + I1Ii111
if 48 - 48: iIii1I11I1II1 . i11iIiiIii / OoooooooOO . i1IIi . o0oOOo0O0Ooo
# With map-server JSON keys configured, encrypt only when the
# caller asked for it (ms_encrypt); uses the first configured key.
if ( len ( lisp_ms_json_keys ) != 0 ) :
if ( ms_encrypt == False ) : return
self . json_key_id = list ( lisp_ms_json_keys . keys ( ) ) [ 0 ]
self . json_key = lisp_ms_json_keys [ self . json_key_id ]
self . encrypt_json ( )
if 84 - 84: Ii1I
if 92 - 92: I11i
# In a "lig" process handling encrypted JSON, the key comes from
# the LISP_JSON_KEY environment variable, optionally prefixed with
# "[<key-id>]"; then decrypt in place.
if ( lisp_log_id == "lig" and encrypted ) :
Ooo00o000o = os . getenv ( "LISP_JSON_KEY" )
if ( Ooo00o000o != None ) :
OOOooo0OooOoO = - 1
if ( Ooo00o000o [ 0 ] == "[" and "]" in Ooo00o000o ) :
OOOooo0OooOoO = Ooo00o000o . find ( "]" )
self . json_key_id = int ( Ooo00o000o [ 1 : OOOooo0OooOoO ] )
if 64 - 64: iII111i / iII111i * iII111i % O0 / IiII . I1ii11iIi11i
self . json_key = Ooo00o000o [ OOOooo0OooOoO + 1 : : ]
if 23 - 23: i1IIi / I1ii11iIi11i + o0oOOo0O0Ooo
self . decrypt_json ( )
if 82 - 82: O0 * ooOoO0o * iIii1I11I1II1 . i1IIi
if 47 - 47: I11i * I11i . OoOoOO00
if 68 - 68: OoooooooOO + OoOoOO00 + i11iIiiIii
if 89 - 89: Oo0Ooo + Ii1I * O0 - I1Ii111
# Register this object in the global list, replacing any previous
# entry of the same name.
def add ( self ) :
self . delete ( )
lisp_json_list [ self . json_name ] = self
if 33 - 33: iIii1I11I1II1 . I11i
if 63 - 63: oO0o - iII111i
# Remove this name from the global list. NOTE(review): after the
# del, the code stores None back under the same key, so the key is
# reintroduced with a None value.
def delete ( self ) :
if ( self . json_name in lisp_json_list ) :
del ( lisp_json_list [ self . json_name ] )
lisp_json_list [ self . json_name ] = None
if 13 - 13: I1Ii111 / i1IIi % OoooooooOO / I11i
if 66 - 66: I1Ii111 % o0oOOo0O0Ooo . iII111i . ooOoO0o + OOooOOo * II111iiii
if 33 - 33: oO0o
# Return the JSON string; an invalid string is wrapped in "***"
# markers (red when html formatting is requested).
def print_json ( self , html ) :
OO0Oo000OO = self . json_string
I1iIi = "***"
if ( html ) : I1iIi = red ( I1iIi , html )
iI1iiiiI = I1iIi + self . json_string + I1iIi
if ( self . valid_json ( ) ) : return ( OO0Oo000OO )
return ( iI1iiiiI )
if 81 - 81: I1Ii111 . Ii1I * ooOoO0o . IiII - OoOoOO00
if 79 - 79: ooOoO0o - O0
# True when json_string parses as JSON.
def valid_json ( self ) :
try :
json . loads ( self . json_string )
except :
return ( False )
if 56 - 56: ooOoO0o
return ( True )
if 89 - 89: O0 % iIii1I11I1II1 / OoOoOO00 - I1Ii111 - I1IiiI
if 60 - 60: IiII % i11iIiiIii / OOooOOo
# Encrypt each top-level value with ChaCha using the stored key
# (zero-padded to 32) and a fixed all-"0" nonce, hexlifying the
# result. NOTE(review): binascii.hexlify returns bytes on Python 3;
# confirm json.dumps accepts what chacha.ChaCha().encrypt returns.
def encrypt_json ( self ) :
iI1II1I1i1 = self . json_key . zfill ( 32 )
ii = "0" * 8
if 43 - 43: i11iIiiIii * II111iiii + ooOoO0o - OoooooooOO * II111iiii / OoO0O00
oo0000OoO = json . loads ( self . json_string )
for Ooo00o000o in oo0000OoO :
oOO0 = oo0000OoO [ Ooo00o000o ]
if ( type ( oOO0 ) != str ) : oOO0 = str ( oOO0 )
oOO0 = chacha . ChaCha ( iI1II1I1i1 , ii ) . encrypt ( oOO0 )
oo0000OoO [ Ooo00o000o ] = binascii . hexlify ( oOO0 )
if 78 - 78: o0oOOo0O0Ooo / i1IIi - I11i
self . json_string = json . dumps ( oo0000OoO )
self . json_encrypted = True
if 97 - 97: IiII - iII111i
if 37 - 37: i1IIi * I1Ii111 / I11i * II111iiii + OoooooooOO . OoO0O00
# Reverse of encrypt_json(): unhexlify each value and run ChaCha
# again (stream cipher — encrypt is its own inverse). A dumps
# failure is silently ignored, leaving the string encrypted.
def decrypt_json ( self ) :
iI1II1I1i1 = self . json_key . zfill ( 32 )
ii = "0" * 8
if 22 - 22: OoOoOO00 + OoooooooOO - I1Ii111
oo0000OoO = json . loads ( self . json_string )
for Ooo00o000o in oo0000OoO :
oOO0 = binascii . unhexlify ( oo0000OoO [ Ooo00o000o ] )
oo0000OoO [ Ooo00o000o ] = chacha . ChaCha ( iI1II1I1i1 , ii ) . encrypt ( oOO0 )
if 82 - 82: Ii1I % I1Ii111 / ooOoO0o
try :
self . json_string = json . dumps ( oo0000OoO )
self . json_encrypted = False
except :
pass
if 86 - 86: II111iiii - iIii1I11I1II1 + oO0o + I1IiiI
if 29 - 29: Ii1I % OoooooooOO * II111iiii
if 88 - 88: I1Ii111 + I11i + I1Ii111 % OoO0O00 / I1ii11iIi11i - I11i
if 15 - 15: Oo0Ooo - i1IIi
if 87 - 87: O0 . o0oOOo0O0Ooo % OOooOOo / I11i - I1Ii111 % i11iIiiIii
if 3 - 3: oO0o + iII111i + OOooOOo
if 54 - 54: i11iIiiIii + OoO0O00 - IiII - iII111i / I11i
class lisp_stats(object):
    """Packet/byte counters with rate computation for display output."""

    def __init__(self):
        self.packet_count = 0
        self.byte_count = 0
        self.last_rate_check = 0
        self.last_packet_count = 0
        self.last_byte_count = 0
        self.last_increment = None

    def increment(self, octets):
        """Count one packet of 'octets' bytes and timestamp it."""
        self.packet_count += 1
        self.byte_count += octets
        self.last_increment = lisp_get_timestamp()

    def recent_packet_sec(self):
        """True when a packet was counted within the last second."""
        if self.last_increment == None: return (False)
        return (time.time() - self.last_increment <= 1)

    def recent_packet_min(self):
        """True when a packet was counted within the last minute."""
        if self.last_increment == None: return (False)
        return (time.time() - self.last_increment <= 60)

    def stat_colors(self, c1, c2, html):
        """Color the two count strings by how recently traffic flowed."""
        if self.recent_packet_sec():
            return (green_last_sec(c1), green_last_sec(c2))
        if self.recent_packet_min():
            return (green_last_min(c1), green_last_min(c2))
        return (c1, c2)

    def normalize(self, count):
        """Abbreviate a counter with a T/B/M suffix for display."""
        digits = str(count)
        size = len(digits)
        if size > 12:
            return (digits[0:-10] + "." + digits[-10:-7] + "T")
        if size > 9:
            return (digits[0:-9] + "." + digits[-9:-7] + "B")
        if size > 6:
            return (digits[0:-6] + "." + digits[-6] + "M")
        return (digits)

    def get_stats(self, summary, html):
        """Return a display string of counts and computed rates.

        Side effect: snapshots the current counters/timestamp into the
        last_* fields so the next call measures rates over the
        interval between calls.
        """
        prev_check = self.last_rate_check
        prev_packets = self.last_packet_count
        prev_bytes = self.last_byte_count
        self.last_rate_check = lisp_get_timestamp()
        self.last_packet_count = self.packet_count
        self.last_byte_count = self.byte_count

        elapsed = self.last_rate_check - prev_check
        if elapsed == 0:
            packet_rate = 0
            bit_rate = 0
        else:
            packet_rate = int(old_div((self.packet_count - prev_packets),
                elapsed))
            bit_rate = old_div((self.byte_count - prev_bytes), elapsed)
            bit_rate = old_div((bit_rate * 8), 1000000)
            bit_rate = round(bit_rate, 2)

        pc_str = self.normalize(self.packet_count)
        bc_str = self.normalize(self.byte_count)

        if summary:
            # Summary form: rates, with the counts available as a
            # hover span when html formatting is on.
            newline = "<br>" if html else ""
            pc_str, bc_str = self.stat_colors(pc_str, bc_str, html)
            hover = "packet-count: {}{}byte-count: {}".format(pc_str,
                newline, bc_str)
            output = "packet-rate: {} pps\nbit-rate: {} Mbps".format(
                packet_rate, bit_rate)
            if html != "":
                output = lisp_span(hover, output)
        else:
            rate_str = str(packet_rate)
            bits_str = str(bit_rate)
            if html:
                pc_str = lisp_print_cour(pc_str)
                rate_str = lisp_print_cour(rate_str)
                bc_str = lisp_print_cour(bc_str)
                bits_str = lisp_print_cour(bits_str)
            newline = "<br>" if html else ", "
            output = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " +
                "{}{}bit-rate: {} mbps").format(pc_str, newline, rate_str,
                newline, bc_str, newline, bits_str)
        return (output)
if 5 - 5: Oo0Ooo / I1ii11iIi11i / ooOoO0o / o0oOOo0O0Ooo - i1IIi + IiII
if 25 - 25: OoOoOO00 / ooOoO0o
if 73 - 73: iII111i
if 34 - 34: o0oOOo0O0Ooo * I1ii11iIi11i
if 16 - 16: i1IIi
if 84 - 84: i11iIiiIii
if 92 - 92: o0oOOo0O0Ooo + Oo0Ooo * OoOoOO00 * o0oOOo0O0Ooo
if 33 - 33: I1IiiI + O0 - I11i
# Global decapsulation statistics: one independent lisp_stats counter
# per success/error category.
lisp_decap_stats = {
"good-packets" : lisp_stats ( ) , "ICV-error" : lisp_stats ( ) ,
"checksum-error" : lisp_stats ( ) , "lisp-header-error" : lisp_stats ( ) ,
"no-decrypt-key" : lisp_stats ( ) , "bad-inner-version" : lisp_stats ( ) ,
"outer-header-error" : lisp_stats ( )
}
if 90 - 90: I1Ii111 * OoooooooOO . iIii1I11I1II1 % OoO0O00 / I11i + iII111i
if 63 - 63: o0oOOo0O0Ooo . IiII . Oo0Ooo - iIii1I11I1II1 / I1Ii111
if 66 - 66: ooOoO0o * I1Ii111 - II111iiii
if 38 - 38: O0 % I1ii11iIi11i + O0
class lisp_rloc ( object ) :
def __init__ ( self , recurse = True ) :
# Initialize a locator with default parameters (up state, lowest
# priority 255, no translation, empty probe history).
self . rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . rloc_name = None
self . interface = None
self . translated_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . translated_port = 0
self . priority = 255
self . weight = 0
self . mpriority = 255
self . mweight = 0
self . uptime = lisp_get_timestamp ( )
self . state = LISP_RLOC_UP_STATE
self . last_state_change = None
self . rle_name = None
self . elp_name = None
self . geo_name = None
self . json_name = None
self . geo = None
self . elp = None
self . rle = None
self . json = None
self . stats = lisp_stats ( )
self . last_rloc_probe = None
self . last_rloc_probe_reply = None
self . rloc_probe_rtt = - 1
self . recent_rloc_probe_rtts = [ - 1 , - 1 , - 1 ]
self . rloc_probe_hops = "?/?"
self . recent_rloc_probe_hops = [ "?/?" , "?/?" , "?/?" ]
self . rloc_probe_latency = "?/?"
self . recent_rloc_probe_latencies = [ "?/?" , "?/?" , "?/?" ]
self . last_rloc_probe_nonce = 0
self . echo_nonce_capable = False
self . map_notify_requested = False
self . rloc_next_hop = None
self . next_rloc = None
self . multicast_rloc_probe_list = { }
if 37 - 37: Oo0Ooo / I1IiiI
if ( recurse == False ) : return
if 23 - 23: II111iiii / iII111i
if 55 - 55: i11iIiiIii - Ii1I % OoooooooOO * OoooooooOO
if 92 - 92: iIii1I11I1II1
if 47 - 47: Oo0Ooo + Oo0Ooo * ooOoO0o - OoOoOO00 + II111iiii
if 10 - 10: II111iiii / ooOoO0o . Ii1I / I1Ii111 / oO0o
if 8 - 8: OOooOOo / ooOoO0o * I11i + OOooOOo * i1IIi
# When there are multiple default-route next hops, build one RLOC
# clone per additional next hop and chain them via next_rloc.
iIiI1I1Ii = lisp_get_default_route_next_hops ( )
if ( iIiI1I1Ii == [ ] or len ( iIiI1I1Ii ) == 1 ) : return
if 16 - 16: OOooOOo % IiII . I1IiiI / Ii1I - OoOoOO00 . IiII
self . rloc_next_hop = iIiI1I1Ii [ 0 ]
IiIiIi = self
for IIi1I111I in iIiI1I1Ii [ 1 : : ] :
# NOTE(review): this fresh instance is immediately discarded by
# the deepcopy on the next line.
oOOo0 = lisp_rloc ( False )
oOOo0 = copy . deepcopy ( self )
oOOo0 . rloc_next_hop = IIi1I111I
IiIiIi . next_rloc = oOOo0
IiIiIi = oOOo0
if 95 - 95: OOooOOo / OoOoOO00 + I1ii11iIi11i
if 86 - 86: O0 / Ii1I . OoooooooOO . O0
if 87 - 87: Ii1I + o0oOOo0O0Ooo + OoooooooOO . Ii1I
def up_state(self):
    """True when this RLOC is in the up state."""
    return (self.state == LISP_RLOC_UP_STATE)
if 73 - 73: o0oOOo0O0Ooo + OoooooooOO - I1Ii111 . iIii1I11I1II1
if 25 - 25: OoooooooOO % I1ii11iIi11i % Oo0Ooo % i11iIiiIii
def unreach_state(self):
    """True when this RLOC is in the unreachable state."""
    return (self.state == LISP_RLOC_UNREACH_STATE)
if 8 - 8: O0 - O0 % Ii1I
if 22 - 22: OoOoOO00
def no_echoed_nonce_state(self):
    """True when this RLOC is in the no-echoed-nonce state."""
    return (self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
if 85 - 85: II111iiii - II111iiii
if 95 - 95: II111iiii + II111iiii + iII111i
def down_state(self):
    """True for either down state (operational or administrative)."""
    down_states = [LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE]
    return (self.state in down_states)
if 38 - 38: OoO0O00 * Ii1I * O0 / I1IiiI
if 99 - 99: Oo0Ooo + ooOoO0o - I1ii11iIi11i + I1Ii111 + Ii1I * I1IiiI
if 68 - 68: OoO0O00
def print_state(self):
    """Return the human-readable name of self.state.

    Bug fix: the original compared integer state constants with 'is',
    which depends on CPython small-int interning and is not guaranteed
    by the language; use value equality via a lookup table instead.
    """
    state_names = {
        LISP_RLOC_UNKNOWN_STATE: "unknown-state",
        LISP_RLOC_UP_STATE: "up-state",
        LISP_RLOC_DOWN_STATE: "down-state",
        LISP_RLOC_ADMIN_DOWN_STATE: "admin-down-state",
        LISP_RLOC_UNREACH_STATE: "unreach-state",
        LISP_RLOC_NO_ECHOED_NONCE_STATE: "no-echoed-nonce-state",
    }
    return (state_names.get(self.state, "invalid-state"))
if 79 - 79: Ii1I . IiII + OoOoOO00
if 10 - 10: OoooooooOO * iII111i * ooOoO0o . Ii1I % I1Ii111 / I1ii11iIi11i
def print_rloc(self, indent):
    """Log a one-line summary of this RLOC: address, uptime, state,
    and the priority/weight parameter set."""
    uptime = lisp_print_elapsed(self.uptime)
    parms = "{}/{}/{}/{}".format(self.priority, self.weight,
        self.mpriority, self.mweight)
    lprint("{}rloc {}, uptime {}, {}, parms {}".format(indent,
        red(self.rloc.print_address(), False), uptime,
        self.print_state(), parms))
if 71 - 71: Ii1I + IiII
if 10 - 10: II111iiii % o0oOOo0O0Ooo . o0oOOo0O0Ooo % iII111i
def print_rloc_name(self, cour=False):
    """Return "rloc-name: <name>" (blue, optionally courier-styled),
    or "" when this RLOC has no name."""
    if self.rloc_name == None: return ("")
    name = self.rloc_name
    if cour: name = lisp_print_cour(name)
    return ('rloc-name: {}'.format(blue(name, cour)))
if 2 - 2: OoooooooOO / IiII % Oo0Ooo % iIii1I11I1II1
if 62 - 62: oO0o
def store_rloc_from_record ( self , rloc_record , nonce , source ) :
# Populate this RLOC from an RLOC record: address/name, NAT-translated
# address and port when NAT state exists, geo/ELP/RLE/JSON parameters,
# priorities, and any encap security keys. Returns the encap port.
I1I = LISP_DATA_PORT
self . rloc . copy_address ( rloc_record . rloc )
if ( rloc_record . rloc_name != None ) :
self . rloc_name = rloc_record . rloc_name
if 47 - 47: I1IiiI - O0 - I1ii11iIi11i . OoOoOO00
if 98 - 98: o0oOOo0O0Ooo - OoO0O00 . I1ii11iIi11i / OOooOOo
if 43 - 43: I1IiiI + OOooOOo + o0oOOo0O0Ooo
if 44 - 44: o0oOOo0O0Ooo % OoO0O00 . OoooooooOO
if 21 - 21: Oo0Ooo * Oo0Ooo - iII111i - O0
# NAT traversal: if stored NAT state matches this RLOC address and
# rloc-name, prefer the translated address/port from that state.
I1Ii1i111I = self . rloc
if ( I1Ii1i111I . is_null ( ) == False ) :
O0O0O = lisp_get_nat_info ( I1Ii1i111I , self . rloc_name )
if ( O0O0O ) :
I1I = O0O0O . port
OOOII1Ii1Ii1II = lisp_nat_state_info [ self . rloc_name ] [ 0 ]
O0O0 = I1Ii1i111I . print_address_no_iid ( )
IIi11IiiiI11i = red ( O0O0 , False )
i1Iiii111 = "" if self . rloc_name == None else blue ( self . rloc_name , False )
if 29 - 29: OoooooooOO + OOooOOo
if 68 - 68: O0 + IiII / iII111i - OoOoOO00
if 5 - 5: I1IiiI * OoooooooOO - II111iiii
if 64 - 64: i1IIi
if 77 - 77: OOooOOo - i1IIi / II111iiii . I1Ii111 + O0
if 1 - 1: OoooooooOO % iIii1I11I1II1 * I1ii11iIi11i
# The matched entry timed out: fall back to the youngest NAT
# entry for this rloc-name (unless that is the same entry, or
# it has timed out as well).
if ( O0O0O . timed_out ( ) ) :
lprint ( ( " Matched stored NAT state timed out for " + "RLOC {}:{}, {}" ) . format ( IIi11IiiiI11i , I1I , i1Iiii111 ) )
if 17 - 17: Ii1I * i1IIi % OoO0O00
if 12 - 12: I1ii11iIi11i
O0O0O = None if ( O0O0O == OOOII1Ii1Ii1II ) else OOOII1Ii1Ii1II
if ( O0O0O and O0O0O . timed_out ( ) ) :
I1I = O0O0O . port
IIi11IiiiI11i = red ( O0O0O . address , False )
lprint ( ( " Youngest stored NAT state timed out " + " for RLOC {}:{}, {}" ) . format ( IIi11IiiiI11i , I1I ,
# I11i / iII111i . i11iIiiIii % Oo0Ooo + I1ii11iIi11i / i11iIiiIii
i1Iiii111 ) )
O0O0O = None
if 94 - 94: i1IIi * i1IIi / Ii1I
if 38 - 38: O0 % I11i - I11i / iIii1I11I1II1 - II111iiii
if 13 - 13: II111iiii * OoO0O00 - iIii1I11I1II1
if 30 - 30: O0 - O0 - I1Ii111
if 88 - 88: o0oOOo0O0Ooo % I1Ii111
if 4 - 4: i11iIiiIii + o0oOOo0O0Ooo % I11i - I1ii11iIi11i * I1ii11iIi11i
if 87 - 87: I1Ii111 % i11iIiiIii + O0
# Usable NAT state: warn on an address conflict, then replace
# the RLOC address with the translated one and remember the
# original address/port as the translation.
if ( O0O0O ) :
if ( O0O0O . address != O0O0 ) :
lprint ( "RLOC conflict, RLOC-record {}, NAT state {}" . format ( IIi11IiiiI11i , red ( O0O0O . address , False ) ) )
if 67 - 67: OoooooooOO / i1IIi / ooOoO0o . i1IIi - i11iIiiIii . i1IIi
self . rloc . store_address ( O0O0O . address )
if 41 - 41: i11iIiiIii / ooOoO0o - Ii1I + I11i
IIi11IiiiI11i = red ( O0O0O . address , False )
I1I = O0O0O . port
lprint ( " Use NAT translated RLOC {}:{} for {}" . format ( IIi11IiiiI11i , I1I , i1Iiii111 ) )
if 15 - 15: I1ii11iIi11i
self . store_translated_rloc ( I1Ii1i111I , I1I )
if 22 - 22: iIii1I11I1II1 - i1IIi - i11iIiiIii / I1IiiI + o0oOOo0O0Ooo
if 56 - 56: I1IiiI . ooOoO0o
if 35 - 35: iIii1I11I1II1 % Oo0Ooo + o0oOOo0O0Ooo * o0oOOo0O0Ooo % ooOoO0o
if 10 - 10: I1ii11iIi11i / II111iiii % II111iiii - OoooooooOO * o0oOOo0O0Ooo / ooOoO0o
# Carry locator parameter sets straight from the record.
self . geo = rloc_record . geo
self . elp = rloc_record . elp
self . json = rloc_record . json
if 26 - 26: OoO0O00 . O0 * iII111i % OoOoOO00 % iIii1I11I1II1
if 37 - 37: iII111i - ooOoO0o * Ii1I + II111iiii * i11iIiiIii
if 8 - 8: OoooooooOO % I11i - iII111i * OOooOOo . O0
if 40 - 40: I1Ii111 . oO0o + OoO0O00 % Oo0Ooo / II111iiii
# For each RLE node with NAT state, record its translated encap
# port on the node.
self . rle = rloc_record . rle
if ( self . rle ) :
for iI11i1ii11i11 in self . rle . rle_nodes :
OO000o = iI11i1ii11i11 . rloc_name
O0O0O = lisp_get_nat_info ( iI11i1ii11i11 . address , OO000o )
if ( O0O0O == None ) : continue
if 19 - 19: i11iIiiIii
I1I = O0O0O . port
I1Iii1i = OO000o
if ( I1Iii1i ) : I1Iii1i = blue ( OO000o , False )
if 20 - 20: i11iIiiIii . II111iiii - I1ii11iIi11i / ooOoO0o % i11iIiiIii
lprint ( ( " Store translated encap-port {} for RLE-" + "node {}, rloc-name '{}'" ) . format ( I1I ,
# o0oOOo0O0Ooo + iIii1I11I1II1 + I1ii11iIi11i
iI11i1ii11i11 . address . print_address_no_iid ( ) , I1Iii1i ) )
iI11i1ii11i11 . translated_port = I1I
if 40 - 40: I1Ii111
if 18 - 18: OoOoOO00 * Ii1I
if 81 - 81: IiII . i11iIiiIii - I1IiiI * i11iIiiIii + OoO0O00
self . priority = rloc_record . priority
self . mpriority = rloc_record . mpriority
self . weight = rloc_record . weight
self . mweight = rloc_record . mweight
# Mark up only for reachable, local, non-probe records.
if ( rloc_record . reach_bit and rloc_record . local_bit and
rloc_record . probe_bit == False ) : self . state = LISP_RLOC_UP_STATE
if 94 - 94: I1ii11iIi11i + OoO0O00 . II111iiii + oO0o . II111iiii
if 96 - 96: i11iIiiIii
if 66 - 66: ooOoO0o * iII111i - iII111i - O0 . o0oOOo0O0Ooo
if 23 - 23: iIii1I11I1II1 / I11i % OoOoOO00 . OoO0O00
# Store encap security keys only when the record's RLOC is exactly
# the source of the message that carried it.
Ooo000O0 = source . is_exact_match ( rloc_record . rloc ) if source != None else None
if 5 - 5: IiII
if ( rloc_record . keys != None and Ooo000O0 ) :
Ooo00o000o = rloc_record . keys [ 1 ]
if ( Ooo00o000o != None ) :
O0O0 = rloc_record . rloc . print_address_no_iid ( ) + ":" + str ( I1I )
if 91 - 91: OoooooooOO + i1IIi . iII111i / OOooOOo % OoO0O00
Ooo00o000o . add_key_by_rloc ( O0O0 , True )
lprint ( " Store encap-keys for nonce 0x{}, RLOC {}" . format ( lisp_hex_string ( nonce ) , red ( O0O0 , False ) ) )
if 41 - 41: O0 - iII111i - iIii1I11I1II1
if 93 - 93: I1Ii111 / oO0o + oO0o
if 48 - 48: IiII + OoOoOO00
return ( I1I )
if 42 - 42: Oo0Ooo + iII111i * ooOoO0o
if 72 - 72: iIii1I11I1II1 % I1Ii111
def store_translated_rloc(self, rloc, port):
    """Record a NAT-translated locator: overwrite the RLOC address and
    remember both the translated address and the translated port."""
    self.rloc.copy_address(rloc)
    self.translated_rloc.copy_address(rloc)
    self.translated_port = port
if 77 - 77: I1Ii111 * I1IiiI / iIii1I11I1II1 . II111iiii * Oo0Ooo
if 71 - 71: ooOoO0o / iIii1I11I1II1 % O0 / I1ii11iIi11i . I1Ii111 / i11iIiiIii
def is_rloc_translated(self):
    """True when a NAT-translated address has been stored."""
    null = self.translated_rloc.is_null()
    return (null == False)
if 6 - 6: oO0o . OoO0O00 - II111iiii . I1IiiI - o0oOOo0O0Ooo - i1IIi
if 42 - 42: Ii1I + i11iIiiIii
def rloc_exists(self):
    """True when this entry carries a usable RLOC address.

    A null address still counts as existing unless the entry is
    purely symbolic (a named RLE/geo/ELP/JSON with no address).
    """
    if self.rloc.is_null() == False: return (True)
    symbolic_only = (self.rle_name or self.geo_name or self.elp_name or
        self.json_name)
    return (False if symbolic_only else True)
if 66 - 66: II111iiii * iIii1I11I1II1 * ooOoO0o * I11i . II111iiii - ooOoO0o
if 15 - 15: I1ii11iIi11i - i11iIiiIii - Ii1I / Ii1I . iII111i
def is_rtr(self):
    """An RTR advertises the fixed parameter set 254/0/255/0
    (priority/weight/mpriority/mweight)."""
    return (self.priority == 254 and self.weight == 0 and
        self.mpriority == 255 and self.mweight == 0)
if 36 - 36: oO0o + Oo0Ooo * I1Ii111 % OOooOOo . Oo0Ooo . I1IiiI
if 81 - 81: o0oOOo0O0Ooo . OoOoOO00 . i11iIiiIii
if 13 - 13: i1IIi
def print_state_change(self, new_state):
    """Return an "old -> new" transition string; bolded when the RLOC
    is coming up from the unreachable state."""
    transition = "{} -> {}".format(self.print_state(), new_state)
    if new_state == "up" and self.unreach_state():
        transition = bold(transition, False)
    return (transition)
if 13 - 13: IiII
if 86 - 86: OoOoOO00 + iIii1I11I1II1 / OoOoOO00 + Oo0Ooo / Ii1I - II111iiii
def print_rloc_probe_rtt(self):
    """Return the last probe RTT, or "none" when never measured."""
    if self.rloc_probe_rtt == -1: return ("none")
    return (self.rloc_probe_rtt)
if 5 - 5: I1ii11iIi11i / Oo0Ooo
if 47 - 47: OOooOOo
def print_recent_rloc_probe_rtts(self):
    """Render the recent-RTT history, showing '?' for unmeasured (-1)
    entries."""
    return (str(self.recent_rloc_probe_rtts).replace("-1", "?"))
if 93 - 93: O0 * II111iiii / i11iIiiIii * O0 + I1Ii111
if 42 - 42: iIii1I11I1II1
def compute_rloc_probe_rtt(self):
    """Compute the probe RTT from the last probe/reply timestamps and
    push the previous RTT onto the recent-RTTs history.

    Leaves the RTT at -1 (and the history untouched) when either
    timestamp is missing.
    """
    previous = self.rloc_probe_rtt
    self.rloc_probe_rtt = -1
    if self.last_rloc_probe_reply == None: return
    if self.last_rloc_probe == None: return
    delta = self.last_rloc_probe_reply - self.last_rloc_probe
    self.rloc_probe_rtt = round(delta, 3)
    history = self.recent_rloc_probe_rtts
    self.recent_rloc_probe_rtts = [previous] + history[0:-1]
if 11 - 11: Oo0Ooo % i1IIi
if 70 - 70: II111iiii * Oo0Ooo * OOooOOo - I1IiiI + iIii1I11I1II1 + ooOoO0o
def print_rloc_probe_hops(self):
    """Return the "to/from" hop-count string from the last probe."""
    return (self.rloc_probe_hops)
if 27 - 27: I1ii11iIi11i - I1Ii111 * O0 % ooOoO0o / I1IiiI
if 53 - 53: i11iIiiIii * i11iIiiIii % O0 % IiII
def print_recent_rloc_probe_hops(self):
    """Render the recent hop-count history as a string."""
    return (str(self.recent_rloc_probe_hops))
if 75 - 75: i1IIi + I1IiiI - iIii1I11I1II1 + O0 . OoooooooOO
if 72 - 72: OoooooooOO % I1ii11iIi11i - OoO0O00 . OoooooooOO
def store_rloc_probe_hops(self, to_hops, from_ttl):
    """Convert probe TTL values into printable hop counts and push the
    previous "to/from" string onto the recent-hops history.

    "?" means unknown (zero), "!" means the TTL fell below half the
    initial probe TTL (suspiciously many hops).
    """
    if to_hops == 0:
        to_str = "?"
    elif to_hops < old_div(LISP_RLOC_PROBE_TTL, 2):
        to_str = "!"
    else:
        to_str = str(LISP_RLOC_PROBE_TTL - to_hops)

    if from_ttl < old_div(LISP_RLOC_PROBE_TTL, 2):
        from_str = "!"
    else:
        from_str = str(LISP_RLOC_PROBE_TTL - from_ttl)

    previous = self.rloc_probe_hops
    self.rloc_probe_hops = to_str + "/" + from_str
    history = self.recent_rloc_probe_hops
    self.recent_rloc_probe_hops = [previous] + history[0:-1]
if 80 - 80: iIii1I11I1II1 + o0oOOo0O0Ooo + iIii1I11I1II1
if 63 - 63: OoOoOO00 - o0oOOo0O0Ooo % II111iiii - Ii1I
def store_rloc_probe_latencies(self, json_telemetry):
    """Compute one-way latencies from probe telemetry timestamps and
    push the previous value onto the recent-latency history."""
    telemetry = lisp_decode_telemetry(json_telemetry)

    forward = round(float(telemetry["etr-in"]) -
        float(telemetry["itr-out"]), 3)
    reverse = round(float(telemetry["itr-in"]) -
        float(telemetry["etr-out"]), 3)

    previous = self.rloc_probe_latency
    self.rloc_probe_latency = str(forward) + "/" + str(reverse)
    history = self.recent_rloc_probe_latencies
    self.recent_rloc_probe_latencies = [previous] + history[0:-1]
if 52 - 52: o0oOOo0O0Ooo % I1Ii111 % OoO0O00 / iIii1I11I1II1
if 81 - 81: I1IiiI / ooOoO0o * IiII * Oo0Ooo - oO0o . OOooOOo
def print_rloc_probe_latency(self):
    """Return the "forward/reverse" latency string from the last probe."""
    return (self.rloc_probe_latency)
if 48 - 48: I1Ii111 * iII111i
if 93 - 93: I11i % iIii1I11I1II1 + Ii1I - I1IiiI + OoooooooOO . IiII
def print_recent_rloc_probe_latencies(self):
    """Render the recent latency history as a string."""
    return (str(self.recent_rloc_probe_latencies))
if 86 - 86: i1IIi . oO0o % OOooOOo
if 99 - 99: oO0o / I1Ii111 * oO0o * I11i
    def process_rloc_probe_reply ( self , ts , nonce , eid , group , hc , ttl , jt ) :
        """Process a received RLOC-probe Map-Reply for this rloc.

        ts    - timestamp the reply was received
        nonce - nonce from the reply, matched against our probe state
        eid, group - EID-prefix the probe was sent for
        hc    - hop-count from the reply ("to" hops)
        ttl   - IP TTL the reply arrived with ("from" hops)
        jt    - JSON telemetry string when the reply carried one, else None
        """
        #
        # Find the rloc entry (this one or a per-next-hop clone chained
        # via next_rloc) whose outstanding probe nonce matches the reply.
        #
        I1Ii1i111I = self
        while ( True ) :
            if ( I1Ii1i111I . last_rloc_probe_nonce == nonce ) : break
            I1Ii1i111I = I1Ii1i111I . next_rloc
            if ( I1Ii1i111I == None ) :
                lprint ( " No matching nonce state found for nonce 0x{}" . format ( lisp_hex_string ( nonce ) ) )
                if 38 - 38: o0oOOo0O0Ooo + OoOoOO00
                return
            if 24 - 24: Ii1I - OOooOOo - o0oOOo0O0Ooo - I1Ii111 / OoooooooOO
        if 17 - 17: OoO0O00
        if 79 - 79: Ii1I - II111iiii
        if 57 - 57: II111iiii / OoooooooOO
        if 4 - 4: I11i * OoOoOO00
        if 18 - 18: iIii1I11I1II1 % OOooOOo - I1ii11iIi11i * i1IIi + Oo0Ooo
        #
        # Record the reply, recompute RTT, and bring the rloc up if it was
        # not already up (updating RTR up/down state and rewriting the IPC
        # map-cache entry for the EID).
        #
        I1Ii1i111I . last_rloc_probe_reply = ts
        I1Ii1i111I . compute_rloc_probe_rtt ( )
        oOoOII11IIi1Ii1i = I1Ii1i111I . print_state_change ( "up" )
        if ( I1Ii1i111I . state != LISP_RLOC_UP_STATE ) :
            lisp_update_rtr_updown ( I1Ii1i111I . rloc , True )
            I1Ii1i111I . state = LISP_RLOC_UP_STATE
            I1Ii1i111I . last_state_change = lisp_get_timestamp ( )
            o0ooo0oOO0o = lisp_map_cache . lookup_cache ( eid , True )
            if ( o0ooo0oOO0o ) : lisp_write_ipc_map_cache ( True , o0ooo0oOO0o )
            if 88 - 88: iII111i - I1ii11iIi11i / OoOoOO00 + O0 % oO0o
        if 22 - 22: o0oOOo0O0Ooo * O0 % Oo0Ooo
        if 52 - 52: I1IiiI % I1Ii111 - i1IIi . o0oOOo0O0Ooo % I1ii11iIi11i
        if 34 - 34: o0oOOo0O0Ooo / OoOoOO00
        if 74 - 74: IiII + i1IIi . II111iiii
        # Record hop counts derived from the probe/reply TTLs.
        I1Ii1i111I . store_rloc_probe_hops ( hc , ttl )
        if 1 - 1: Ii1I - o0oOOo0O0Ooo / i11iIiiIii
        if 24 - 24: O0
        if 59 - 59: OoO0O00 % iII111i + oO0o * II111iiii . OOooOOo
        if 26 - 26: OOooOOo % OoooooooOO . Ii1I / iIii1I11I1II1 * I1IiiI
        # Record one-way latencies when telemetry was included.
        if ( jt ) : I1Ii1i111I . store_rloc_probe_latencies ( jt )
        if 85 - 85: IiII / Ii1I - I1ii11iIi11i * OOooOOo
        ooIiIII11IIIi1 = bold ( "RLOC-probe reply" , False )
        O0O0 = I1Ii1i111I . rloc . print_address_no_iid ( )
        ii1111Ii = bold ( str ( I1Ii1i111I . print_rloc_probe_rtt ( ) ) , False )
        iIIiiIi = ":{}" . format ( self . translated_port ) if self . translated_port != 0 else ""
        if 8 - 8: oO0o - iIii1I11I1II1 * iII111i
        IIi1I111I = ""
        if ( I1Ii1i111I . rloc_next_hop != None ) :
            IiI11I111 , Ii1i1Ii1Ii1i = I1Ii1i111I . rloc_next_hop
            IIi1I111I = ", nh {}({})" . format ( Ii1i1Ii1Ii1i , IiI11I111 )
        if 86 - 86: OoOoOO00 + I1ii11iIi11i - Ii1I . I1Ii111 + I11i
        if 6 - 6: O0 . OoooooooOO - I11i
        o0oOO0OOoO = bold ( I1Ii1i111I . print_rloc_probe_latency ( ) , False )
        o0oOO0OOoO = ", latency {}" . format ( o0oOO0OOoO ) if jt else ""
        if 3 - 3: II111iiii . OoOoOO00 / i1IIi . I1ii11iIi11i - Ii1I
        oO0ooOOO = green ( lisp_print_eid_tuple ( eid , group ) , False )
        if 20 - 20: I11i + IiII
        lprint ( ( " Received {} from {}{} for {}, {}, rtt {}{}, " + "to-ttl/from-ttl {}{}" ) . format ( ooIiIII11IIIi1 , red ( O0O0 , False ) , iIIiiIi , oO0ooOOO ,
        # OOooOOo + II111iiii . I11i
        oOoOII11IIi1Ii1i , ii1111Ii , IIi1I111I , str ( hc ) + "/" + str ( ttl ) , o0oOO0OOoO ) )
        if 7 - 7: IiII . iIii1I11I1II1 % o0oOOo0O0Ooo + iII111i . OOooOOo + I1IiiI
        # Only multihomed rlocs (per-next-hop chained clones) need the
        # host-route selection below.
        if ( I1Ii1i111I . rloc_next_hop == None ) : return
        if 64 - 64: iII111i - ooOoO0o % OoO0O00
        if 51 - 51: I1Ii111 . ooOoO0o
        if 100 - 100: o0oOOo0O0Ooo % iII111i
        if 44 - 44: IiII * OoOoOO00 - OoO0O00 - OoooooooOO - I1ii11iIi11i - II111iiii
        # Walk the chained rlocs, find the up rloc with the lowest probe
        # RTT, and point the host route for this RLOC at its next-hop.
        I1Ii1i111I = None
        I1iiI1iI1 = None
        while ( True ) :
            I1Ii1i111I = self if I1Ii1i111I == None else I1Ii1i111I . next_rloc
            if ( I1Ii1i111I == None ) : break
            if ( I1Ii1i111I . up_state ( ) == False ) : continue
            if ( I1Ii1i111I . rloc_probe_rtt == - 1 ) : continue
            if 27 - 27: I11i % Ii1I / iII111i . OoOoOO00
            if ( I1iiI1iI1 == None ) : I1iiI1iI1 = I1Ii1i111I
            if ( I1Ii1i111I . rloc_probe_rtt < I1iiI1iI1 . rloc_probe_rtt ) : I1iiI1iI1 = I1Ii1i111I
        if 88 - 88: iII111i - i11iIiiIii * I1Ii111 * i11iIiiIii - O0
        if 8 - 8: oO0o + O0
        if ( I1iiI1iI1 != None ) :
            IiI11I111 , Ii1i1Ii1Ii1i = I1iiI1iI1 . rloc_next_hop
            IIi1I111I = bold ( "nh {}({})" . format ( Ii1i1Ii1Ii1i , IiI11I111 ) , False )
            lprint ( " Install host-route via best {}" . format ( IIi1I111I ) )
            lisp_install_host_route ( O0O0 , None , False )
            lisp_install_host_route ( O0O0 , Ii1i1Ii1Ii1i , True )
if 52 - 52: I11i * OOooOOo - OoOoOO00 % iIii1I11I1II1 . II111iiii
if 1 - 1: OOooOOo / I1IiiI / Ii1I * iII111i
if 14 - 14: ooOoO0o . O0 * OOooOOo
    def add_to_rloc_probe_list ( self , eid , group ) :
        """Register this rloc in the global lisp_rloc_probe_list for
        (eid, group) so the periodic probe timer will probe it.

        The list is keyed by the RLOC address string, with ":port"
        appended when the RLOC is NAT-translated (translated_port != 0).
        """
        O0O0 = self . rloc . print_address_no_iid ( )
        I1I = self . translated_port
        if ( I1I != 0 ) : O0O0 += ":" + str ( I1I )
        if 34 - 34: I1ii11iIi11i . OOooOOo + OoO0O00 % o0oOOo0O0Ooo * O0 * I1IiiI
        if ( O0O0 not in lisp_rloc_probe_list ) :
            lisp_rloc_probe_list [ O0O0 ] = [ ]
        if 9 - 9: IiII / i11iIiiIii . o0oOOo0O0Ooo - OOooOOo % I1Ii111
        if 65 - 65: I1IiiI % OoOoOO00
        # A null group is normalized to instance-id 0 before comparing.
        if ( group . is_null ( ) ) : group . instance_id = 0
        # If this exact (rloc, eid, group) is already listed, return
        # without re-appending; otherwise drop a stale entry for the
        # same (eid, group) so it can be replaced below.
        for O00o00o00OO0 , oO0ooOOO , Oo in lisp_rloc_probe_list [ O0O0 ] :
            if ( oO0ooOOO . is_exact_match ( eid ) and Oo . is_exact_match ( group ) ) :
                if ( O00o00o00OO0 == self ) :
                    if ( lisp_rloc_probe_list [ O0O0 ] == [ ] ) :
                        lisp_rloc_probe_list . pop ( O0O0 )
                    if 45 - 45: o0oOOo0O0Ooo
                    return
                if 33 - 33: ooOoO0o % O0 % I1ii11iIi11i % o0oOOo0O0Ooo + i11iIiiIii . I1Ii111
                lisp_rloc_probe_list [ O0O0 ] . remove ( [ O00o00o00OO0 , oO0ooOOO , Oo ] )
                break
            if 21 - 21: I1Ii111 * I1ii11iIi11i * ooOoO0o
        if 73 - 73: OoOoOO00 * O0
        lisp_rloc_probe_list [ O0O0 ] . append ( [ self , eid , group ] )
        if 1 - 1: OOooOOo * OoooooooOO
        if 46 - 46: I1ii11iIi11i * I1Ii111 / OOooOOo / I1IiiI
        if 7 - 7: OOooOOo / OoOoOO00
        if 93 - 93: iIii1I11I1II1 * Ii1I - iII111i
        if 94 - 94: iIii1I11I1II1 * iIii1I11I1II1 * I11i % i11iIiiIii
        # Inherit unreachable state from the list head so a known-down
        # RLOC address is not treated as up for this new entry.
        I1Ii1i111I = lisp_rloc_probe_list [ O0O0 ] [ 0 ] [ 0 ]
        if ( I1Ii1i111I . state == LISP_RLOC_UNREACH_STATE ) :
            self . state = LISP_RLOC_UNREACH_STATE
            self . last_state_change = lisp_get_timestamp ( )
if 38 - 38: I1IiiI % I1ii11iIi11i * I1IiiI + OOooOOo - OoOoOO00
if 78 - 78: OOooOOo + I1Ii111
if 41 - 41: I11i + Oo0Ooo . Oo0Ooo / iII111i . OoOoOO00
def delete_from_rloc_probe_list ( self , eid , group ) :
O0O0 = self . rloc . print_address_no_iid ( )
I1I = self . translated_port
if ( I1I != 0 ) : O0O0 += ":" + str ( I1I )
if ( O0O0 not in lisp_rloc_probe_list ) : return
if 1 - 1: ooOoO0o + iII111i % i11iIiiIii / OoOoOO00
o000oO0O0ooo = [ ]
for oo0O00OOOOO in lisp_rloc_probe_list [ O0O0 ] :
if ( oo0O00OOOOO [ 0 ] != self ) : continue
if ( oo0O00OOOOO [ 1 ] . is_exact_match ( eid ) == False ) : continue
if ( oo0O00OOOOO [ 2 ] . is_exact_match ( group ) == False ) : continue
o000oO0O0ooo = oo0O00OOOOO
break
if 57 - 57: iII111i
if ( o000oO0O0ooo == [ ] ) : return
if 18 - 18: II111iiii % i11iIiiIii + I11i - OOooOOo
try :
lisp_rloc_probe_list [ O0O0 ] . remove ( o000oO0O0ooo )
if ( lisp_rloc_probe_list [ O0O0 ] == [ ] ) :
lisp_rloc_probe_list . pop ( O0O0 )
if 100 - 100: o0oOOo0O0Ooo / Ii1I - iIii1I11I1II1 / oO0o
except :
return
if 68 - 68: I11i / II111iiii * oO0o . II111iiii * OOooOOo
if 78 - 78: I11i * OoO0O00 / II111iiii
if 86 - 86: I1Ii111 % II111iiii
    def print_rloc_probe_state ( self , trailing_linefeed ) :
        """Return a display string with RLOC-probe request/reply times and
        RTT for this rloc and each chained per-next-hop rloc clone."""
        oOo0OOoooO = ""
        I1Ii1i111I = self
        while ( True ) :
            # A never-probed timestamp of None is shown as elapsed-from-0.
            oOO0IIiiIi11iii1 = I1Ii1i111I . last_rloc_probe
            if ( oOO0IIiiIi11iii1 == None ) : oOO0IIiiIi11iii1 = 0
            II1I1I = I1Ii1i111I . last_rloc_probe_reply
            if ( II1I1I == None ) : II1I1I = 0
            ii1111Ii = I1Ii1i111I . print_rloc_probe_rtt ( )
            I111 = space ( 4 )
            if 33 - 33: iIii1I11I1II1 . I1ii11iIi11i - O0 - IiII
            if ( I1Ii1i111I . rloc_next_hop == None ) :
                oOo0OOoooO += "RLOC-Probing:\n"
            else :
                IiI11I111 , Ii1i1Ii1Ii1i = I1Ii1i111I . rloc_next_hop
                oOo0OOoooO += "RLOC-Probing for nh {}({}):\n" . format ( Ii1i1Ii1Ii1i , IiI11I111 )
            if 51 - 51: OoooooooOO . I1IiiI . i11iIiiIii
            if 76 - 76: OoOoOO00 + iII111i . ooOoO0o + OoO0O00 + I1IiiI / IiII
            oOo0OOoooO += ( "{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + "received: {}, rtt {}" ) . format ( I111 , lisp_print_elapsed ( oOO0IIiiIi11iii1 ) ,
            # ooOoO0o % I1IiiI . o0oOOo0O0Ooo . I1IiiI % II111iiii
            I111 , lisp_print_elapsed ( II1I1I ) , ii1111Ii )
            if 7 - 7: O0 + IiII
            if ( trailing_linefeed ) : oOo0OOoooO += "\n"
            if 79 - 79: iIii1I11I1II1 * oO0o . iIii1I11I1II1 * O0
            # Advance to the chained per-next-hop clone, if any.
            I1Ii1i111I = I1Ii1i111I . next_rloc
            if ( I1Ii1i111I == None ) : break
            oOo0OOoooO += "\n"
        if 13 - 13: I1ii11iIi11i . IiII - I11i
        return ( oOo0OOoooO )
if 81 - 81: i11iIiiIii
if 7 - 7: IiII - OoOoOO00 * i1IIi
def get_encap_keys ( self ) :
I1I = "4341" if self . translated_port == 0 else str ( self . translated_port )
if 14 - 14: I1ii11iIi11i . OoO0O00
O0O0 = self . rloc . print_address_no_iid ( ) + ":" + I1I
if 26 - 26: iII111i / ooOoO0o / Oo0Ooo / Oo0Ooo . I1ii11iIi11i * OOooOOo
try :
iI1iiiiiii = lisp_crypto_keys_by_rloc_encap [ O0O0 ]
if ( iI1iiiiiii [ 1 ] ) : return ( iI1iiiiiii [ 1 ] . encrypt_key , iI1iiiiiii [ 1 ] . icv_key )
return ( None , None )
except :
return ( None , None )
if 25 - 25: IiII % I1IiiI / O0 % OOooOOo - OoooooooOO
if 29 - 29: O0 + iII111i
if 4 - 4: I11i * I11i - Ii1I * oO0o . I1ii11iIi11i % o0oOOo0O0Ooo
def rloc_recent_rekey ( self ) :
I1I = "4341" if self . translated_port == 0 else str ( self . translated_port )
if 33 - 33: Ii1I * i11iIiiIii / O0 . Oo0Ooo + i1IIi . OoOoOO00
O0O0 = self . rloc . print_address_no_iid ( ) + ":" + I1I
if 76 - 76: OoooooooOO - O0
try :
Ooo00o000o = lisp_crypto_keys_by_rloc_encap [ O0O0 ] [ 1 ]
if ( Ooo00o000o == None ) : return ( False )
if ( Ooo00o000o . last_rekey == None ) : return ( True )
return ( time . time ( ) - Ooo00o000o . last_rekey < 1 )
except :
return ( False )
if 17 - 17: Oo0Ooo % I1Ii111 . oO0o - O0
if 32 - 32: O0 % O0
if 66 - 66: iII111i / i1IIi - Oo0Ooo . Ii1I
if 65 - 65: I1ii11iIi11i % ooOoO0o - OoOoOO00 + ooOoO0o + Oo0Ooo
class lisp_mapping ( object ) :
    def __init__ ( self , eid , group , rloc_set ) :
        """Map-cache / database mapping for (eid, group) with its rloc-set.

        eid and group may be passed as "" and are then normalized to a
        null lisp_address.
        """
        self . eid = eid
        if ( eid == "" ) : self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        self . group = group
        if ( group == "" ) : self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        self . rloc_set = rloc_set
        self . best_rloc_set = [ ]
        self . build_best_rloc_set ( )
        self . uptime = lisp_get_timestamp ( )
        self . action = LISP_NO_ACTION
        self . expires = None
        self . map_cache_ttl = None   # in seconds; None means never times out
        self . register_ttl = LISP_REGISTER_TTL
        self . last_refresh_time = self . uptime
        self . source_cache = None   # per-group cache of (S,G) source entries
        self . map_replies_sent = 0
        self . mapping_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        self . use_mr_name = "all"
        self . use_ms_name = "all"
        self . stats = lisp_stats ( )
        self . dynamic_eids = None   # non-None when dynamic-EIDs are configured
        self . checkpoint_entry = False
        self . secondary_iid = None   # see star_secondary_iid()
        self . signature_eid = False
        self . gleaned = False
        self . recent_sources = { }   # source address string -> timestamp
        self . last_multicast_map_request = 0
        self . subscribed_eid = None
        self . subscribed_group = None
if 95 - 95: I1Ii111 * i11iIiiIii - I1IiiI - OoOoOO00 . ooOoO0o
if 34 - 34: OoooooooOO % I1ii11iIi11i + OoooooooOO % i11iIiiIii / IiII - ooOoO0o
    def print_mapping ( self , eid_indent , rloc_indent ) :
        """Log this mapping (EID, optional group, uptime, rloc count) and
        then each rloc in the rloc-set on its own indented line."""
        Oo0OO0000oooo = lisp_print_elapsed ( self . uptime )
        o0o0Oo0o0oOo = "" if self . group . is_null ( ) else ", group {}" . format ( self . group . print_prefix ( ) )
        if 74 - 74: iIii1I11I1II1 % II111iiii + IiII
        lprint ( "{}eid {}{}, uptime {}, {} rlocs:" . format ( eid_indent ,
        green ( self . eid . print_prefix ( ) , False ) , o0o0Oo0o0oOo , Oo0OO0000oooo ,
        len ( self . rloc_set ) ) )
        for I1Ii1i111I in self . rloc_set : I1Ii1i111I . print_rloc ( rloc_indent )
if 71 - 71: I1IiiI / O0 * i1IIi . i1IIi + Oo0Ooo
if 32 - 32: i1IIi * I1Ii111 % I1IiiI / IiII . I1Ii111
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
if 11 - 11: OOooOOo
if 25 - 25: i1IIi
def print_ttl ( self ) :
IiIi1iIIiII1i = self . map_cache_ttl
if ( IiIi1iIIiII1i == None ) : return ( "forever" )
if 99 - 99: OOooOOo + OoooooooOO . I1Ii111 * Oo0Ooo % oO0o
if ( IiIi1iIIiII1i >= 3600 ) :
if ( ( IiIi1iIIiII1i % 3600 ) == 0 ) :
IiIi1iIIiII1i = str ( old_div ( IiIi1iIIiII1i , 3600 ) ) + " hours"
else :
IiIi1iIIiII1i = str ( IiIi1iIIiII1i * 60 ) + " mins"
if 75 - 75: iII111i
elif ( IiIi1iIIiII1i >= 60 ) :
if ( ( IiIi1iIIiII1i % 60 ) == 0 ) :
IiIi1iIIiII1i = str ( old_div ( IiIi1iIIiII1i , 60 ) ) + " mins"
else :
IiIi1iIIiII1i = str ( IiIi1iIIiII1i ) + " secs"
if 8 - 8: I1ii11iIi11i . I11i / I1ii11iIi11i - i1IIi
else :
IiIi1iIIiII1i = str ( IiIi1iIIiII1i ) + " secs"
if 22 - 22: OOooOOo
return ( IiIi1iIIiII1i )
if 7 - 7: O0 - I1ii11iIi11i - OoO0O00 * I1Ii111
if 17 - 17: o0oOOo0O0Ooo % OoO0O00 - I11i * o0oOOo0O0Ooo - i1IIi / I1IiiI
def refresh ( self ) :
if ( self . group . is_null ( ) ) : return ( self . refresh_unicast ( ) )
return ( self . refresh_multicast ( ) )
if 100 - 100: OoO0O00 * i1IIi * o0oOOo0O0Ooo * Oo0Ooo - o0oOOo0O0Ooo
if 100 - 100: iII111i - i11iIiiIii + OoO0O00
def refresh_unicast ( self ) :
return ( self . is_active ( ) and self . has_ttl_elapsed ( ) and
self . gleaned == False )
if 50 - 50: II111iiii
if 42 - 42: OOooOOo * I1Ii111
    def refresh_multicast ( self ) :
        """Decide whether to refresh a multicast entry right now.

        Fires only in a 3-second window after each TTL multiple since
        uptime, and is rate-limited to one Map-Request per 2 seconds.
        """
        if 53 - 53: II111iiii % OOooOOo / I1ii11iIi11i * OoOoOO00 % I1ii11iIi11i * iII111i
        if 91 - 91: iII111i . OoooooooOO
        if 90 - 90: i11iIiiIii - I1IiiI
        if 39 - 39: iII111i % OoooooooOO % Ii1I % I1IiiI
        if 63 - 63: OoO0O00 - I1Ii111 - II111iiii
        i1i111Iiiiiii = int ( ( time . time ( ) - self . uptime ) % self . map_cache_ttl )
        OoOooO00 = ( i1i111Iiiiiii in [ 0 , 1 , 2 ] )
        if ( OoOooO00 == False ) : return ( False )
        if 66 - 66: i1IIi + I1IiiI
        if 45 - 45: I1Ii111 . iII111i + OoO0O00 - O0
        if 71 - 71: Oo0Ooo + OOooOOo
        if 94 - 94: OOooOOo
        # Suppress if we already sent a multicast Map-Request in the last
        # 2 seconds.
        Ooooooo = ( ( time . time ( ) - self . last_multicast_map_request ) <= 2 )
        if ( Ooooooo ) : return ( False )
        if 66 - 66: iII111i + i11iIiiIii - o0oOOo0O0Ooo * OoooooooOO * IiII
        self . last_multicast_map_request = lisp_get_timestamp ( )
        return ( True )
if 59 - 59: I1ii11iIi11i + i1IIi / I11i . iII111i - II111iiii
if 66 - 66: Ii1I + OoOoOO00 - I11i / o0oOOo0O0Ooo + iIii1I11I1II1
def has_ttl_elapsed ( self ) :
if ( self . map_cache_ttl == None ) : return ( False )
i1i111Iiiiiii = time . time ( ) - self . last_refresh_time
if ( i1i111Iiiiiii >= self . map_cache_ttl ) : return ( True )
if 66 - 66: OOooOOo - I1Ii111 - OoOoOO00 - i1IIi * Ii1I
if 23 - 23: IiII - OoOoOO00 . OoO0O00
if 81 - 81: I1Ii111 / I1ii11iIi11i
if 69 - 69: I1IiiI
if 79 - 79: ooOoO0o
o0o0oo000Oo = self . map_cache_ttl - ( old_div ( self . map_cache_ttl , 10 ) )
if ( i1i111Iiiiiii >= o0o0oo000Oo ) : return ( True )
return ( False )
if 95 - 95: ooOoO0o . I11i + iIii1I11I1II1 . iII111i * i11iIiiIii
if 34 - 34: IiII / I1Ii111 + O0 / OoO0O00
def is_active ( self ) :
if ( self . stats . last_increment == None ) : return ( False )
i1i111Iiiiiii = time . time ( ) - self . stats . last_increment
return ( i1i111Iiiiiii <= 60 )
if 96 - 96: I1Ii111 % o0oOOo0O0Ooo + OoO0O00 - ooOoO0o
if 38 - 38: OOooOOo . O0
def match_eid_tuple ( self , db ) :
if ( self . eid . is_exact_match ( db . eid ) == False ) : return ( False )
if ( self . group . is_exact_match ( db . group ) == False ) : return ( False )
return ( True )
if 42 - 42: OoooooooOO / I1IiiI / o0oOOo0O0Ooo . I1ii11iIi11i . OoO0O00 % II111iiii
if 55 - 55: o0oOOo0O0Ooo
def sort_rloc_set ( self ) :
self . rloc_set . sort ( key = operator . attrgetter ( 'rloc.address' ) )
if 87 - 87: ooOoO0o - IiII % Ii1I
if 76 - 76: I11i - iIii1I11I1II1 - i1IIi + i1IIi
def delete_rlocs_from_rloc_probe_list ( self ) :
for I1Ii1i111I in self . best_rloc_set :
I1Ii1i111I . delete_from_rloc_probe_list ( self . eid , self . group )
if 60 - 60: I11i + OOooOOo - o0oOOo0O0Ooo
if 64 - 64: II111iiii / iII111i * OoOoOO00 / OOooOOo / Ii1I
if 19 - 19: OoOoOO00 % I1Ii111
    def build_best_rloc_set ( self ) :
        """Rebuild self.best_rloc_set: all rlocs at (or better than) the
        best priority found among rlocs in up state.

        Rlocs that drop out of the best set are removed from the global
        RLOC-probe list; members of the new set are (re)added to it.
        """
        IIIiiIiiiI1 = self . best_rloc_set
        self . best_rloc_set = [ ]
        if ( self . rloc_set == None ) : return
        if 77 - 77: I1ii11iIi11i + OoOoOO00 - i11iIiiIii % II111iiii % I11i
        if 92 - 92: O0 * OoooooooOO + I1ii11iIi11i / IiII
        if 97 - 97: o0oOOo0O0Ooo . Ii1I + I1Ii111
        if 72 - 72: i11iIiiIii . iII111i . Ii1I * I1ii11iIi11i
        # Find the numerically lowest (best) priority among up rlocs;
        # 256 is worse than any valid 0-255 priority.
        II1iI111 = 256
        for I1Ii1i111I in self . rloc_set :
            if ( I1Ii1i111I . up_state ( ) ) : II1iI111 = min ( I1Ii1i111I . priority , II1iI111 )
        if 69 - 69: I1ii11iIi11i % I1Ii111 / OoooooooOO % oO0o
        if 4 - 4: OoOoOO00 * i11iIiiIii - OoOoOO00 * o0oOOo0O0Ooo % I1ii11iIi11i
        if 19 - 19: OOooOOo
        if 73 - 73: ooOoO0o / O0 / I1Ii111 . OoooooooOO
        if 88 - 88: OoooooooOO - oO0o
        if 80 - 80: ooOoO0o
        if 38 - 38: IiII + OoO0O00 * I11i * iIii1I11I1II1 * oO0o
        if 74 - 74: I1IiiI
        if 39 - 39: iII111i * IiII / iII111i * IiII % I1ii11iIi11i
        if 27 - 27: iIii1I11I1II1 . ooOoO0o
        # Keep every rloc at that priority or better; an unreachable rloc
        # gets a probe timestamp so it is probed again soon.
        for I1Ii1i111I in self . rloc_set :
            if ( I1Ii1i111I . priority <= II1iI111 ) :
                if ( I1Ii1i111I . unreach_state ( ) and I1Ii1i111I . last_rloc_probe == None ) :
                    I1Ii1i111I . last_rloc_probe = lisp_get_timestamp ( )
                if 74 - 74: i1IIi % OoOoOO00
                self . best_rloc_set . append ( I1Ii1i111I )
        if 98 - 98: IiII * OOooOOo / O0 - I1Ii111 . I1Ii111 + OOooOOo
        if 61 - 61: iII111i * Ii1I % Ii1I + I1IiiI
        if 23 - 23: oO0o + I1Ii111 / OoooooooOO / O0 + IiII
        if 80 - 80: i11iIiiIii - OoooooooOO + II111iiii / i1IIi - oO0o
        if 100 - 100: Ii1I
        if 73 - 73: IiII - O0
        if 54 - 54: OOooOOo
        if 28 - 28: i1IIi - Oo0Ooo * OoO0O00 + OoooooooOO - Ii1I * i11iIiiIii
        # Old best-set members at or below the new best priority stop
        # being probed; current members are then (re)added.
        for I1Ii1i111I in IIIiiIiiiI1 :
            if ( I1Ii1i111I . priority < II1iI111 ) : continue
            I1Ii1i111I . delete_from_rloc_probe_list ( self . eid , self . group )
        if 71 - 71: iII111i - OOooOOo / iIii1I11I1II1 % i11iIiiIii
        for I1Ii1i111I in self . best_rloc_set :
            if ( I1Ii1i111I . rloc . is_null ( ) ) : continue
            I1Ii1i111I . add_to_rloc_probe_list ( self . eid , self . group )
if 39 - 39: o0oOOo0O0Ooo
if 32 - 32: iIii1I11I1II1 . II111iiii / IiII % O0 / iII111i
if 97 - 97: iIii1I11I1II1
    def select_rloc ( self , lisp_packet , ipc_socket ) :
        """Choose the rloc to encapsulate lisp_packet to.

        Returns a 6-element list
        [rloc-address, encap-port, nonce, action, rle, rloc-entry];
        at most one of rloc-address / rle / action is meaningful.
        """
        # Hash the inner header (and optionally ports when
        # lisp_load_split_pings) so flows spread over the best-rloc set.
        Oo00oo = lisp_packet . packet
        i11Iiiii11 = lisp_packet . inner_version
        i1 = len ( self . best_rloc_set )
        if ( i1 == 0 ) :
            # No usable rlocs - count the packet against the mapping and
            # return the mapping's action (e.g. native-forward).
            self . stats . increment ( len ( Oo00oo ) )
            return ( [ None , None , None , self . action , None , None ] )
        if 76 - 76: I1ii11iIi11i
        if 64 - 64: I11i
        Ii1Iiiii = 4 if lisp_load_split_pings else 0
        oOOo0O0Oo = lisp_packet . hash_ports ( )
        if ( i11Iiiii11 == 4 ) :
            for iIi1iIIIiIiI in range ( 8 + Ii1Iiiii ) :
                oOOo0O0Oo = oOOo0O0Oo ^ struct . unpack ( "B" , Oo00oo [ iIi1iIIIiIiI + 12 : iIi1iIIIiIiI + 13 ] ) [ 0 ]
            if 94 - 94: O0 + O0 % I1ii11iIi11i % i1IIi
        elif ( i11Iiiii11 == 6 ) :
            for iIi1iIIIiIiI in range ( 0 , 32 + Ii1Iiiii , 4 ) :
                oOOo0O0Oo = oOOo0O0Oo ^ struct . unpack ( "I" , Oo00oo [ iIi1iIIIiIiI + 8 : iIi1iIIIiIiI + 12 ] ) [ 0 ]
            if 15 - 15: I1IiiI
            oOOo0O0Oo = ( oOOo0O0Oo >> 16 ) + ( oOOo0O0Oo & 0xffff )
            oOOo0O0Oo = ( oOOo0O0Oo >> 8 ) + ( oOOo0O0Oo & 0xff )
        else :
            for iIi1iIIIiIiI in range ( 0 , 12 + Ii1Iiiii , 4 ) :
                oOOo0O0Oo = oOOo0O0Oo ^ struct . unpack ( "I" , Oo00oo [ iIi1iIIIiIiI : iIi1iIIIiIiI + 4 ] ) [ 0 ]
            if 48 - 48: Ii1I * IiII % O0 - II111iiii
        if 66 - 66: iIii1I11I1II1 / OOooOOo
        if 65 - 65: IiII . oO0o + O0 - i11iIiiIii + iIii1I11I1II1
        if ( lisp_data_plane_logging ) :
            OoO0ooOOo0o0 = [ ]
            for O00o00o00OO0 in self . best_rloc_set :
                if ( O00o00o00OO0 . rloc . is_null ( ) ) : continue
                OoO0ooOOo0o0 . append ( [ O00o00o00OO0 . rloc . print_address_no_iid ( ) , O00o00o00OO0 . print_state ( ) ] )
            if 94 - 94: I1Ii111 % iIii1I11I1II1 - II111iiii . ooOoO0o + i11iIiiIii - i11iIiiIii
            dprint ( "Packet hash {}, index {}, best-rloc-list: {}" . format ( hex ( oOOo0O0Oo ) , oOOo0O0Oo % i1 , red ( str ( OoO0ooOOo0o0 ) , False ) ) )
        if 55 - 55: OoooooooOO % iIii1I11I1II1 % I1ii11iIi11i % i1IIi
        if 46 - 46: I11i - ooOoO0o . I1IiiI
        if 36 - 36: I11i + OoO0O00 * O0 * OoOoOO00 * iII111i
        if 90 - 90: i11iIiiIii / i1IIi
        if 35 - 35: Ii1I . I11i / oO0o / OoOoOO00
        if 5 - 5: I1ii11iIi11i . o0oOOo0O0Ooo * iII111i * I1ii11iIi11i % I1Ii111
        # Index into the best-rloc set by hash.
        I1Ii1i111I = self . best_rloc_set [ oOOo0O0Oo % i1 ]
        if 83 - 83: iIii1I11I1II1 * o0oOOo0O0Ooo % i11iIiiIii + OoO0O00 . O0
        if 87 - 87: II111iiii - iIii1I11I1II1 % I11i % I1IiiI . o0oOOo0O0Ooo
        if 52 - 52: i11iIiiIii . oO0o / OoooooooOO - OoO0O00
        if 7 - 7: I1IiiI * I1IiiI % OOooOOo % iIii1I11I1II1 * OoO0O00 . o0oOOo0O0Ooo
        if 32 - 32: ooOoO0o / i1IIi
        # Run the echo-nonce state machine for the chosen rloc.
        oO0 = lisp_get_echo_nonce ( I1Ii1i111I . rloc , None )
        if ( oO0 ) :
            oO0 . change_state ( I1Ii1i111I )
            if ( I1Ii1i111I . no_echoed_nonce_state ( ) ) :
                oO0 . request_nonce_sent = None
        if 55 - 55: oO0o . OoOoOO00 + OoooooooOO - ooOoO0o . OoooooooOO
        if 77 - 77: I1IiiI
        if 16 - 16: I1IiiI + ooOoO0o - O0 / o0oOOo0O0Ooo
        if 36 - 36: Oo0Ooo - OoOoOO00 - II111iiii
        if 25 - 25: i11iIiiIii + II111iiii * OOooOOo % OOooOOo
        if 87 - 87: I11i % Ii1I % Oo0Ooo . II111iiii / oO0o
        # If the hashed rloc is down, scan round-robin for any up rloc;
        # when none is up, rebuild the best set and drop the packet.
        if ( I1Ii1i111I . up_state ( ) == False ) :
            IiI1I11iIiIIII = oOOo0O0Oo % i1
            OOOooo0OooOoO = ( IiI1I11iIiIIII + 1 ) % i1
            while ( OOOooo0OooOoO != IiI1I11iIiIIII ) :
                I1Ii1i111I = self . best_rloc_set [ OOOooo0OooOoO ]
                if ( I1Ii1i111I . up_state ( ) ) : break
                OOOooo0OooOoO = ( OOOooo0OooOoO + 1 ) % i1
            if 88 - 88: ooOoO0o
            if ( OOOooo0OooOoO == IiI1I11iIiIIII ) :
                self . build_best_rloc_set ( )
                return ( [ None , None , None , None , None , None ] )
            if 91 - 91: OoO0O00 % IiII / I1IiiI - i11iIiiIii - IiII * ooOoO0o
        if 54 - 54: O0 % o0oOOo0O0Ooo + o0oOOo0O0Ooo % i11iIiiIii * I11i
        if 34 - 34: I1IiiI % iIii1I11I1II1 . I1ii11iIi11i * Oo0Ooo * iIii1I11I1II1 / O0
        if 98 - 98: iII111i % IiII + OoO0O00
        if 23 - 23: OOooOOo
        if 83 - 83: I1ii11iIi11i / O0 * II111iiii + IiII + Oo0Ooo
        I1Ii1i111I . stats . increment ( len ( Oo00oo ) )
        if 99 - 99: II111iiii + O0
        if 94 - 94: ooOoO0o * ooOoO0o + o0oOOo0O0Ooo . iII111i % iIii1I11I1II1 + Ii1I
        if 88 - 88: Oo0Ooo . iII111i
        if 89 - 89: OOooOOo + I1Ii111 % i11iIiiIii + Oo0Ooo / Oo0Ooo + OoO0O00
        # Late-bind a configured RLE by name; an RLE takes precedence
        # over a unicast rloc.
        if ( I1Ii1i111I . rle_name and I1Ii1i111I . rle == None ) :
            if ( I1Ii1i111I . rle_name in lisp_rle_list ) :
                I1Ii1i111I . rle = lisp_rle_list [ I1Ii1i111I . rle_name ]
            if 9 - 9: OoOoOO00 % i1IIi + IiII
        if 19 - 19: I1Ii111 - II111iiii / I1Ii111 + I1IiiI - OoooooooOO + o0oOOo0O0Ooo
        if ( I1Ii1i111I . rle ) : return ( [ None , None , None , None , I1Ii1i111I . rle , None ] )
        if 100 - 100: OoO0O00 / OoOoOO00 / OOooOOo / OoO0O00
        if 95 - 95: ooOoO0o
        if 95 - 95: Ii1I + i1IIi . I1IiiI % I1Ii111 / Ii1I * O0
        if 68 - 68: I1Ii111 - IiII - oO0o - Oo0Ooo - o0oOOo0O0Ooo
        # An explicit-locator-path (ELP) overrides the rloc address.
        if ( I1Ii1i111I . elp and I1Ii1i111I . elp . use_elp_node ) :
            return ( [ I1Ii1i111I . elp . use_elp_node . address , None , None , None , None ,
            None ] )
        if 32 - 32: OoOoOO00 % i11iIiiIii
        if 53 - 53: I1Ii111 * Ii1I / IiII . i1IIi * II111iiii / o0oOOo0O0Ooo
        if 44 - 44: I1Ii111 + ooOoO0o
        if 15 - 15: I11i + OoO0O00 + OoOoOO00
        if 100 - 100: I1Ii111
        # A null rloc address means the mapping's action applies instead.
        oo000OO = None if ( I1Ii1i111I . rloc . is_null ( ) ) else I1Ii1i111I . rloc
        I1I = I1Ii1i111I . translated_port
        oo0oOooo0O = self . action if ( oo000OO == None ) else None
        if 72 - 72: OoOoOO00 * Oo0Ooo + iII111i
        if 99 - 99: II111iiii . OoooooooOO * iIii1I11I1II1
        if 72 - 72: OoooooooOO . I1ii11iIi11i * I1Ii111 / OoooooooOO % OOooOOo
        if 60 - 60: OoO0O00
        if 54 - 54: I1IiiI + O0 - I1Ii111 - oO0o + O0 - I1ii11iIi11i
        # Attach a request/echo nonce when the echo-nonce machinery wants
        # one for this rloc.
        oOooo0oOOOO = None
        if ( oO0 and oO0 . request_nonce_timeout ( ) == False ) :
            oOooo0oOOOO = oO0 . get_request_or_echo_nonce ( ipc_socket , oo000OO )
        if 21 - 21: ooOoO0o . i1IIi / Oo0Ooo . OoO0O00
        if 49 - 49: oO0o % i11iIiiIii * Ii1I
        if 9 - 9: Oo0Ooo - OoO0O00 + ooOoO0o / o0oOOo0O0Ooo
        if 61 - 61: O0 - i11iIiiIii * o0oOOo0O0Ooo
        if 92 - 92: Oo0Ooo + OOooOOo - i11iIiiIii
        return ( [ oo000OO , I1I , oOooo0oOOOO , oo0oOooo0O , None , I1Ii1i111I ] )
if 26 - 26: O0 % Oo0Ooo + ooOoO0o - Ii1I . Oo0Ooo
if 33 - 33: I1Ii111 / iII111i . I1Ii111 % II111iiii
def do_rloc_sets_match ( self , rloc_address_set ) :
if ( len ( self . rloc_set ) != len ( rloc_address_set ) ) : return ( False )
if 52 - 52: I1ii11iIi11i
if 1 - 1: II111iiii + I1ii11iIi11i * OoOoOO00 % ooOoO0o - iII111i % OoooooooOO
if 77 - 77: iII111i + o0oOOo0O0Ooo
if 60 - 60: I1ii11iIi11i
if 23 - 23: iII111i % I1IiiI % I1Ii111 * oO0o * I1IiiI
for ii11Ii in self . rloc_set :
for I1Ii1i111I in rloc_address_set :
if ( I1Ii1i111I . is_exact_match ( ii11Ii . rloc ) == False ) : continue
I1Ii1i111I = None
break
if 74 - 74: O0 / I11i . Oo0Ooo / I11i % OoO0O00 % o0oOOo0O0Ooo
if ( I1Ii1i111I == rloc_address_set [ - 1 ] ) : return ( False )
if 83 - 83: OoO0O00 - i11iIiiIii + iIii1I11I1II1
return ( True )
if 52 - 52: OoooooooOO
if 44 - 44: O0 / OoooooooOO + ooOoO0o * I1ii11iIi11i
def get_rloc ( self , rloc ) :
for ii11Ii in self . rloc_set :
O00o00o00OO0 = ii11Ii . rloc
if ( rloc . is_exact_match ( O00o00o00OO0 ) ) : return ( ii11Ii )
if 36 - 36: I1ii11iIi11i / OoO0O00 - oO0o % O0
return ( None )
if 12 - 12: i1IIi * ooOoO0o / oO0o + I1IiiI / OoooooooOO
if 86 - 86: Oo0Ooo / OoO0O00
def get_rloc_by_interface ( self , interface ) :
for ii11Ii in self . rloc_set :
if ( ii11Ii . interface == interface ) : return ( ii11Ii )
if 78 - 78: I1IiiI * I1IiiI
return ( None )
if 13 - 13: oO0o
if 43 - 43: oO0o / Ii1I % OOooOOo
    def add_db ( self ) :
        """Add this database-mapping to lisp_db_for_lookups.

        (S,G) entries are stored two-level: a group entry is created (or
        found) first, then this source mapping is nested under it.
        """
        if ( self . group . is_null ( ) ) :
            lisp_db_for_lookups . add_cache ( self . eid , self )
        else :
            oOooII111iIiI1 = lisp_db_for_lookups . lookup_cache ( self . group , True )
            if ( oOooII111iIiI1 == None ) :
                oOooII111iIiI1 = lisp_mapping ( self . group , self . group , [ ] )
                lisp_db_for_lookups . add_cache ( self . group , oOooII111iIiI1 )
            if 45 - 45: II111iiii
            oOooII111iIiI1 . add_source_entry ( self )
if 41 - 41: Ii1I / OOooOOo * Oo0Ooo . O0 - i11iIiiIii
if 77 - 77: o0oOOo0O0Ooo + I1IiiI + I1Ii111 / I1ii11iIi11i * i1IIi
if 37 - 37: O0 + iIii1I11I1II1 % IiII * oO0o
    def add_cache ( self , do_ipc = True ) :
        """Add this mapping to the map-cache; (S,G) entries nest under a
        group entry. Optionally announces the change over IPC.

        do_ipc - when True, write the new entry to the IPC map-cache.
        """
        if ( self . group . is_null ( ) ) :
            lisp_map_cache . add_cache ( self . eid , self )
            if ( lisp_program_hardware ) : lisp_program_vxlan_hardware ( self )
        else :
            o0ooo0oOO0o = lisp_map_cache . lookup_cache ( self . group , True )
            if ( o0ooo0oOO0o == None ) :
                o0ooo0oOO0o = lisp_mapping ( self . group , self . group , [ ] )
                o0ooo0oOO0o . eid . copy_address ( self . group )
                o0ooo0oOO0o . group . copy_address ( self . group )
                lisp_map_cache . add_cache ( self . group , o0ooo0oOO0o )
            if 43 - 43: OOooOOo . O0
            # A null source EID becomes a default route under the group.
            if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( o0ooo0oOO0o . group )
            o0ooo0oOO0o . add_source_entry ( self )
        if 76 - 76: OOooOOo * OoooooooOO / IiII . OoO0O00 + II111iiii
        if ( do_ipc ) : lisp_write_ipc_map_cache ( True , self )
if 23 - 23: OoO0O00 - OoooooooOO * I11i . iIii1I11I1II1 / o0oOOo0O0Ooo + oO0o
if 74 - 74: II111iiii / I1IiiI * O0 * OoO0O00 . I11i
    def delete_cache ( self ) :
        """Remove this mapping from the map-cache plus probe/IPC/hardware
        state; for (S,G), remove the source entry and drop the group
        entry when its source cache becomes empty."""
        self . delete_rlocs_from_rloc_probe_list ( )
        lisp_write_ipc_map_cache ( False , self )
        if 74 - 74: O0 . i1IIi / I1ii11iIi11i + o0oOOo0O0Ooo
        if ( self . group . is_null ( ) ) :
            lisp_map_cache . delete_cache ( self . eid )
            if ( lisp_program_hardware ) :
                I1I11I1IIi = self . eid . print_prefix_no_iid ( )
                os . system ( "ip route delete {}" . format ( I1I11I1IIi ) )
            if 3 - 3: i1IIi + OoOoOO00 - OoOoOO00
        else :
            o0ooo0oOO0o = lisp_map_cache . lookup_cache ( self . group , True )
            if ( o0ooo0oOO0o == None ) : return
            if 85 - 85: o0oOOo0O0Ooo / o0oOOo0O0Ooo + Oo0Ooo * II111iiii + Ii1I * Ii1I
            II1Iii = o0ooo0oOO0o . lookup_source_cache ( self . eid , True )
            if ( II1Iii == None ) : return
            if 27 - 27: Ii1I . OoOoOO00 % oO0o % o0oOOo0O0Ooo / i11iIiiIii - iIii1I11I1II1
            o0ooo0oOO0o . source_cache . delete_cache ( self . eid )
            if ( o0ooo0oOO0o . source_cache . cache_size ( ) == 0 ) :
                lisp_map_cache . delete_cache ( self . group )
if 94 - 94: I11i / IiII - OoOoOO00 % OoO0O00 % i11iIiiIii . Ii1I
if 26 - 26: i1IIi - Ii1I * I1IiiI
if 74 - 74: I1ii11iIi11i * oO0o * i1IIi % oO0o % I11i . i1IIi
def add_source_entry ( self , source_mc ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_mc . eid , source_mc )
if 90 - 90: I1Ii111 + O0
if 100 - 100: II111iiii - I1Ii111 % OoO0O00
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
if 67 - 67: oO0o . I1IiiI % iIii1I11I1II1 + o0oOOo0O0Ooo / I1ii11iIi11i * II111iiii
if 1 - 1: OoooooooOO / I1ii11iIi11i - O0
def dynamic_eid_configured ( self ) :
return ( self . dynamic_eids != None )
if 72 - 72: Oo0Ooo * iII111i - I11i
if 81 - 81: I1Ii111
def star_secondary_iid ( self , prefix ) :
if ( self . secondary_iid == None ) : return ( prefix )
oooo = "," + str ( self . secondary_iid )
return ( prefix . replace ( oooo , oooo + "*" ) )
if 85 - 85: O0 % OoOoOO00 . I1ii11iIi11i
if 46 - 46: OOooOOo * iIii1I11I1II1
def increment_decap_stats ( self , packet ) :
I1I = packet . udp_dport
if ( I1I == LISP_DATA_PORT ) :
I1Ii1i111I = self . get_rloc ( packet . outer_dest )
else :
if 33 - 33: OoO0O00 * II111iiii / i1IIi
if 93 - 93: I1Ii111 % I11i
if 64 - 64: I1IiiI % OoOoOO00 / Oo0Ooo
if 40 - 40: Ii1I + iIii1I11I1II1 / oO0o . II111iiii % O0 - IiII
for I1Ii1i111I in self . rloc_set :
if ( I1Ii1i111I . translated_port != 0 ) : break
if 49 - 49: IiII - OOooOOo * OOooOOo . O0
if 60 - 60: OoOoOO00 % iIii1I11I1II1 + IiII % o0oOOo0O0Ooo
if ( I1Ii1i111I != None ) : I1Ii1i111I . stats . increment ( len ( packet . packet ) )
self . stats . increment ( len ( packet . packet ) )
if 64 - 64: OoOoOO00 * I1ii11iIi11i . OoooooooOO . i1IIi
if 61 - 61: OoO0O00
def rtrs_in_rloc_set ( self ) :
for I1Ii1i111I in self . rloc_set :
if ( I1Ii1i111I . is_rtr ( ) ) : return ( True )
if 100 - 100: OoOoOO00
return ( False )
if 97 - 97: OoooooooOO
if 91 - 91: o0oOOo0O0Ooo / O0 % OoO0O00
def add_recent_source ( self , source ) :
self . recent_sources [ source . print_address ( ) ] = lisp_get_timestamp ( )
if 35 - 35: iII111i % OoO0O00 * O0
if 37 - 37: OOooOOo
if 100 - 100: Oo0Ooo * I1IiiI . ooOoO0o
class lisp_dynamic_eid ( object ) :
    """A dynamically discovered EID heard on one of this xTR's
    dynamic-EID access interfaces."""

    def __init__ ( self ) :
        self . dynamic_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        self . uptime = lisp_get_timestamp ( )
        self . interface = None
        self . last_packet = None
        self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT   # seconds of inactivity

    def get_timeout ( self , interface ) :
        """Load the per-interface dynamic-EID timeout for this entry,
        falling back to the default when the interface is unknown or has
        no timeout configured."""
        try :
            intf = lisp_myinterfaces [ interface ]
            self . timeout = intf . dynamic_eid_timeout
        except ( KeyError , AttributeError ) :
            # Narrowed from a bare except: only "interface not found" or
            # "no timeout configured" fall back to the default.
            self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
if 90 - 90: II111iiii - Oo0Ooo - IiII / I1Ii111
if 51 - 51: II111iiii * iII111i
if 30 - 30: I1Ii111 - OoOoOO00 / OOooOOo * I1IiiI + Ii1I
if 41 - 41: ooOoO0o . i1IIi * iIii1I11I1II1 - I1IiiI
class lisp_group_mapping ( object ) :
    """A configured multicast group-mapping: a group prefix, the sources
    allowed for it, the map-server name used for registration, and an
    optional replication-list-entry (RLE) address."""

    def __init__ ( self , group_name , ms_name , group_prefix , sources ,
        rle_addr ) :
        self . group_name = group_name
        self . use_ms_name = ms_name
        self . group_prefix = group_prefix
        self . sources = sources
        self . rle_address = rle_addr

    def add_group ( self ) :
        """Register this mapping in the global group-mapping table,
        keyed by group name."""
        lisp_group_mapping_list [ self . group_name ] = self
if 11 - 11: OoOoOO00 . OoO0O00 % I11i * iII111i % I1Ii111 . O0
if 17 - 17: OOooOOo / i11iIiiIii - i11iIiiIii . II111iiii . ooOoO0o
if 38 - 38: OOooOOo . OoooooooOO . II111iiii + OoO0O00 / oO0o . OoooooooOO
if 100 - 100: OoO0O00
if 36 - 36: oO0o + Ii1I - O0
if 19 - 19: O0 + I1Ii111 . I1Ii111 * IiII * ooOoO0o + i1IIi
if 51 - 51: ooOoO0o % OoOoOO00 % i1IIi / O0
if 11 - 11: OOooOOo . I1ii11iIi11i * OOooOOo * OoO0O00
if 11 - 11: I11i
if 85 - 85: OoOoOO00 - Ii1I / Oo0Ooo % I1ii11iIi11i
def lisp_is_group_more_specific ( group_str , group_mapping ) :
    """Return the mapping's group-prefix mask length when group_str falls
    inside group_mapping's group prefix; otherwise return -1."""
    iid = group_mapping . group_prefix . instance_id
    mask_len = group_mapping . group_prefix . mask_len

    group = lisp_address ( LISP_AFI_IPV4 , group_str , 32 , iid )
    if group . is_more_specific ( group_mapping . group_prefix ) :
        return ( mask_len )
    return ( - 1 )
if 12 - 12: i1IIi + o0oOOo0O0Ooo / oO0o . O0
if 37 - 37: IiII
if 99 - 99: i11iIiiIii % i11iIiiIii . I11i * I1ii11iIi11i . OoO0O00 / I1IiiI
if 44 - 44: iII111i - OoO0O00 / i11iIiiIii
if 55 - 55: O0 * OoO0O00 * i1IIi
if 9 - 9: IiII
if 64 - 64: ooOoO0o + OoooooooOO
def lisp_lookup_group ( group ) :
    """Return the configured group-mapping with the longest prefix match
    for group, or None when no mapping covers it."""
    best = None
    for gm in list ( lisp_group_mapping_list . values ( ) ) :
        mask_len = lisp_is_group_more_specific ( group , gm )
        if ( mask_len == - 1 ) : continue
        if ( best == None or mask_len > best . group_prefix . mask_len ) :
            best = gm
    return ( best )
if 82 - 82: I11i . ooOoO0o - ooOoO0o
if 11 - 11: I1ii11iIi11i / o0oOOo0O0Ooo % I1ii11iIi11i / OoooooooOO
#
# lisp_site_flags - descriptions for the single-letter site registration
# flags shown in "show site" output. Each description's "{}" is filled in
# with "" or "not " depending on whether the corresponding bit was set in
# the Map-Register.
#
lisp_site_flags = {
    "P" : "ETR is {}Requesting Map-Server to Proxy Map-Reply" ,
    "S" : "ETR is {}LISP-SEC capable" ,
    "I" : "xTR-ID and site-ID are {}included in Map-Register" ,
    "T" : "Use Map-Register TTL field to timeout registration is {}set" ,
    "R" : "Merging registrations are {}requested" ,
    "M" : "ETR is {}a LISP Mobile-Node" ,
    "N" : "ETR is {}requesting Map-Notify messages from Map-Server"
}
if 35 - 35: i1IIi % I11i * I1Ii111 + IiII
class lisp_site(object):
    """Configuration and counters for one map-server "lisp site" clause:
    authentication material, allowed EID-prefixes/RLOCs, and Map-Notify
    statistics."""

    def __init__(self):
        # Identity and administrative state.
        self.site_name = ""
        self.description = ""
        self.shutdown = False

        # Authentication/encryption configuration.
        self.auth_sha1_or_sha2 = False
        self.auth_key = {}
        self.encryption_key = None

        # Registration policy.
        self.allowed_prefixes = {}
        self.allowed_prefixes_sorted = []
        self.allowed_rlocs = {}

        # Map-Notify counters.
        self.map_notifies_sent = 0
        self.map_notify_acks_received = 0
class lisp_site_eid(object):
    """One registered EID-prefix (or multicast (S,G) entry) within a
    map-server site.  Holds merged registration state built from the
    per-xTR "individual registrations" hanging off this entry."""

    def __init__(self, site):
        self.site = site
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.first_registered = 0
        self.last_registered = 0
        self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
        self.registered = False
        self.registered_rlocs = []
        self.auth_sha1_or_sha2 = False
        self.individual_registrations = {}
        self.map_registers_received = 0
        self.proxy_reply_requested = False
        self.force_proxy_reply = False
        self.force_nat_proxy_reply = False
        self.force_ttl = None
        self.pitr_proxy_reply_drop = False
        self.proxy_reply_action = ""
        self.lisp_sec_present = False
        self.map_notify_requested = False
        self.mobile_node_requested = False
        self.echo_nonce_capable = False
        self.use_register_ttl_requested = False
        self.merge_register_requested = False
        self.xtr_id_present = False
        self.xtr_id = 0
        self.site_id = 0
        self.accept_more_specifics = False
        self.parent_for_more_specifics = None
        self.dynamic = False
        self.more_specific_registrations = []
        self.source_cache = None
        self.inconsistent_registration = False
        self.policy = None
        self.require_signature = False
        self.encrypt_json = False

    def print_eid_tuple(self):
        """Return the printable "[iid]eid" or "(S,G)" string for this
        entry."""
        return(lisp_print_eid_tuple(self.eid, self.group))

    def print_flags(self, html):
        """Render the Map-Register flags as "P-S-I-T-R-M-N"; upper-case
        means the flag was set, lower-case clear.  With html=True each
        letter gets a hover-text span built from lisp_site_flags."""
        if (html == False):
            flags = "{}-{}-{}-{}-{}-{}-{}".format(
                "P" if self.proxy_reply_requested else "p",
                "S" if self.lisp_sec_present else "s",
                "I" if self.xtr_id_present else "i",
                "T" if self.use_register_ttl_requested else "t",
                "R" if self.merge_register_requested else "r",
                "M" if self.mobile_node_requested else "m",
                "N" if self.map_notify_requested else "n")
        else:
            letters = self.print_flags(False).split("-")
            flags = ""
            for letter in letters:
                hover = lisp_site_flags[letter.upper()]
                hover = hover.format("" if letter.isupper() else "not ")
                flags += lisp_span(letter, hover)
                if (letter.lower() != "n"): flags += "-"
        return(flags)

    def copy_state_to_parent(self, child):
        """Fold one individual (per-xTR) registration's state up into
        this merged parent entry."""
        self.xtr_id = child.xtr_id
        self.site_id = child.site_id
        self.first_registered = child.first_registered
        self.last_registered = child.last_registered
        self.last_registerer = child.last_registerer
        self.register_ttl = child.register_ttl

        # Transition to registered: stamp the first-registered time now.
        if (self.registered == False):
            self.first_registered = lisp_get_timestamp()

        self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
        self.registered = child.registered
        self.proxy_reply_requested = child.proxy_reply_requested
        self.lisp_sec_present = child.lisp_sec_present
        self.xtr_id_present = child.xtr_id_present
        self.use_register_ttl_requested = child.use_register_ttl_requested
        self.merge_register_requested = child.merge_register_requested
        self.mobile_node_requested = child.mobile_node_requested
        self.map_notify_requested = child.map_notify_requested

    def build_sort_key(self):
        """Build a lexicographic sort key of the form
        "<addr[0:12]>-<mask>-<addr[12:]>[-<group-key>]" using
        lisp_cache.build_key()."""
        cache = lisp_cache()
        mask_len, key = cache.build_key(self.eid)

        group_key = ""
        if (self.group.is_null() == False):
            g_mask_len, group_key = cache.build_key(self.group)
            group_key = "-" + group_key[0:12] + "-" + str(g_mask_len) + \
                "-" + group_key[12::]

        key = key[0:12] + "-" + str(mask_len) + "-" + key[12::] + group_key
        del(cache)
        return(key)

    def merge_in_site_eid(self, child):
        """Re-merge the RLOC set (unicast) or RLE set (multicast) from
        the individual registrations.  Copies 'child' state up when
        supplied.  Returns True only when a multicast RLE changed."""
        rle_changed = False
        if (self.group.is_null()):
            self.merge_rlocs_in_site_eid()
        else:
            rle_changed = self.merge_rles_in_site_eid()

        if (child != None):
            self.copy_state_to_parent(child)
            self.map_registers_received += 1
        return(rle_changed)

    def copy_rloc_records(self):
        """Return deep copies of this entry's registered RLOC-records."""
        return([copy.deepcopy(entry) for entry in self.registered_rlocs])

    def merge_rlocs_in_site_eid(self):
        """Rebuild self.registered_rlocs as the union of all registered
        individual registrations that share this entry's site-id,
        removing duplicate RLOC addresses (null RLOCs, the NAT case, are
        always kept)."""
        self.registered_rlocs = []
        for site_eid in list(self.individual_registrations.values()):
            if (self.site_id != site_eid.site_id): continue
            if (site_eid.registered == False): continue
            self.registered_rlocs += site_eid.copy_rloc_records()

        #
        # Deduplicate by RLOC address.
        #
        unique = []
        for entry in self.registered_rlocs:
            if (entry.rloc.is_null() or len(unique) == 0):
                unique.append(entry)
                continue

            for existing in unique:
                if (existing.rloc.is_null()): continue
                if (entry.rloc.is_exact_match(existing.rloc)): break

            # NOTE(review): when the scan stops on the *last* element —
            # whether it matched or not — the entry is appended; this
            # mirrors the original logic exactly.
            if (existing == unique[-1]): unique.append(entry)
        self.registered_rlocs = unique

        #
        # An empty merged RLOC set means nothing is registered any more.
        #
        if (len(self.registered_rlocs) == 0): self.registered = False
        return

    def merge_rles_in_site_eid(self):
        """Merge the RLEs from all individual (S,G) registrations into a
        single RLE stored on the first RLOC-record.  Returns True when
        the set of RLE node addresses changed."""

        #
        # Snapshot the RLE nodes currently advertised (first RLOC-record
        # carrying an RLE).
        #
        old_nodes = {}
        for entry in self.registered_rlocs:
            if (entry.rle == None): continue
            for node in entry.rle.rle_nodes:
                addr_str = node.address.print_address_no_iid()
                old_nodes[addr_str] = node.address
            break

        #
        # Merge RLOC-records, then keep only the first record plus any
        # records that carry no RLE.
        #
        self.merge_rlocs_in_site_eid()

        keep = []
        for entry in self.registered_rlocs:
            if (self.registered_rlocs.index(entry) == 0):
                keep.append(entry)
                continue
            if (entry.rle == None): keep.append(entry)
        self.registered_rlocs = keep

        #
        # Build the merged RLE from each individual registration's first
        # RLOC-record.  As in the original, a duplicate address stops
        # processing the rest of that registration's RLE nodes.
        #
        merged_rle = lisp_rle("")
        new_nodes = {}
        rloc_name = None
        for site_eid in list(self.individual_registrations.values()):
            if (site_eid.registered == False): continue
            rle = site_eid.registered_rlocs[0].rle
            if (rle == None): continue

            rloc_name = site_eid.registered_rlocs[0].rloc_name
            for node in rle.rle_nodes:
                addr_str = node.address.print_address_no_iid()
                if (addr_str in new_nodes): break

                new_node = lisp_rle_node()
                new_node.address.copy_address(node.address)
                new_node.level = node.level
                new_node.rloc_name = rloc_name
                merged_rle.rle_nodes.append(new_node)
                new_nodes[addr_str] = node.address

        #
        # Attach the merged RLE (None when empty) to the first record.
        #
        if (len(merged_rle.rle_nodes) == 0): merged_rle = None
        if (len(self.registered_rlocs) != 0):
            self.registered_rlocs[0].rle = merged_rle
            if (rloc_name): self.registered_rlocs[0].rloc_name = None

        #
        # Report whether the RLE node set changed.
        #
        if (list(old_nodes.keys()) == list(new_nodes.keys())): return(False)

        lprint("{} {} from {} to {}".format(
            green(self.print_eid_tuple(), False), bold("RLE change", False),
            list(old_nodes.keys()), list(new_nodes.keys())))
        return(True)

    def add_cache(self):
        """Insert this entry into lisp_sites_by_eid.  An (S,G) entry is
        filed under a (*,G) parent entry's source-cache, creating the
        parent when needed."""
        if (self.group.is_null()):
            lisp_sites_by_eid.add_cache(self.eid, self)
        else:
            parent = lisp_sites_by_eid.lookup_cache(self.group, True)
            if (parent == None):
                parent = lisp_site_eid(self.site)
                parent.eid.copy_address(self.group)
                parent.group.copy_address(self.group)
                lisp_sites_by_eid.add_cache(self.group, parent)

                # A freshly created (*,G) inherits the AMS parent.
                parent.parent_for_more_specifics = \
                    self.parent_for_more_specifics

            if (self.eid.is_null()):
                self.eid.make_default_route(parent.group)
            parent.add_source_entry(self)

    def delete_cache(self):
        """Remove this entry from lisp_sites_by_eid, deleting a (*,G)
        parent whose source-cache becomes empty."""
        if (self.group.is_null()):
            lisp_sites_by_eid.delete_cache(self.eid)
        else:
            parent = lisp_sites_by_eid.lookup_cache(self.group, True)
            if (parent == None): return

            source_entry = parent.lookup_source_cache(self.eid, True)
            if (source_entry == None): return

            if (parent.source_cache == None): return

            parent.source_cache.delete_cache(self.eid)
            if (parent.source_cache.cache_size() == 0):
                lisp_sites_by_eid.delete_cache(self.group)

    def add_source_entry(self, source_se):
        """File (S,G) source entry 'source_se' under this (*,G) entry."""
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_se.eid, source_se)

    def lookup_source_cache(self, source, exact):
        """Look up 'source' in this entry's source-cache; None when
        there is no cache or no match."""
        if (self.source_cache == None): return(None)
        return(self.source_cache.lookup_cache(source, exact))

    def is_star_g(self):
        """True when this is a (*,G) entry: group set and EID equal to
        the group."""
        if (self.group.is_null()): return(False)
        return(self.eid.is_exact_match(self.group))

    def eid_record_matches(self, eid_record):
        """True when 'eid_record' is an exact match for this entry's
        (EID, group) tuple."""
        if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
        if (eid_record.group.is_null()): return(True)
        return(eid_record.group.is_exact_match(self.group))

    def inherit_from_ams_parent(self):
        """Copy policy and proxy-reply configuration down from the
        accept-more-specifics parent entry, when one exists."""
        parent = self.parent_for_more_specifics
        if (parent == None): return

        self.force_proxy_reply = parent.force_proxy_reply
        self.force_nat_proxy_reply = parent.force_nat_proxy_reply
        self.force_ttl = parent.force_ttl
        self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
        self.proxy_reply_action = parent.proxy_reply_action
        self.echo_nonce_capable = parent.echo_nonce_capable
        self.policy = parent.policy
        self.require_signature = parent.require_signature
        self.encrypt_json = parent.encrypt_json

    def rtrs_in_rloc_set(self):
        """True when any registered RLOC-record is an RTR."""
        for entry in self.registered_rlocs:
            if (entry.is_rtr()): return(True)
        return(False)

    def is_rtr_in_rloc_set(self, rtr_rloc):
        """True when 'rtr_rloc' is in the registered set and flagged as
        an RTR."""
        for entry in self.registered_rlocs:
            if (entry.rloc.is_exact_match(rtr_rloc) == False): continue
            if (entry.is_rtr()): return(True)
        return(False)

    def is_rloc_in_rloc_set(self, rloc):
        """True when 'rloc' appears either as a registered RLOC address
        or inside any RLOC-record's RLE."""
        for entry in self.registered_rlocs:
            if (entry.rle):
                for node in entry.rle.rle_nodes:
                    if (node.address.is_exact_match(rloc)): return(True)

            if (entry.rloc.is_exact_match(rloc)): return(True)
        return(False)

    def do_rloc_sets_match(self, prev_rloc_set):
        """True when 'prev_rloc_set' has the same size as the registered
        set and every one of its addresses is present there."""
        if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)

        for entry in prev_rloc_set:
            if (self.is_rloc_in_rloc_set(entry.rloc) == False):
                return(False)
        return(True)
class lisp_mr(object):
    """One Map-Resolver, either statically configured by address or
    discovered/refreshed through a DNS name."""

    def __init__(self, addr_str, dns_name, mr_name):
        self.mr_name = mr_name if (mr_name != None) else "all"
        self.dns_name = dns_name
        self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0

        # Static address: store and register; otherwise resolve via DNS.
        if (addr_str):
            self.map_resolver.store_address(addr_str)
            self.insert_mr()
        else:
            self.resolve_dns_name()

        # Usage statistics.
        self.last_used = 0
        self.last_reply = 0
        self.last_nonce = 0
        self.map_requests_sent = 0
        self.neg_map_replies_received = 0
        self.total_rtt = 0

    def resolve_dns_name(self):
        """Re-resolve self.dns_name (rate-limited to every 30 seconds)
        and keep lisp_map_resolvers_list in sync.  For a LISP-Decent DNS
        suffix, the index-0 map-resolver creates one lisp_mr per extra
        A-record and prunes entries whose addresses disappeared."""
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return

        try:
            hostent = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            addresses = hostent[2]
        except:
            return

        #
        # Our A-record slot no longer exists - retire this map-resolver.
        #
        if (len(addresses) <= self.a_record_index):
            self.delete_mr()
            return

        #
        # Re-key when the address at our slot changed.
        #
        address = addresses[self.a_record_index]
        if (address != self.map_resolver.print_address_no_iid()):
            self.delete_mr()
            self.map_resolver.store_address(address)
            self.insert_mr()

        #
        # LISP-Decent bootstrapping: only the first A-record's entry
        # fans out the additional A-records.
        #
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return

        for address in addresses[1::]:
            addr = lisp_address(LISP_AFI_NONE, address, 0, 0)
            mr = lisp_get_map_resolver(addr, None)
            if (mr != None and
                mr.a_record_index == addresses.index(address)):
                continue

            mr = lisp_mr(address, None, None)
            mr.a_record_index = addresses.index(address)
            mr.dns_name = self.dns_name
            mr.last_dns_resolve = lisp_get_timestamp()

        #
        # Prune map-resolvers for this DNS name whose addresses are no
        # longer returned.
        #
        stale = []
        for mr in list(lisp_map_resolvers_list.values()):
            if (self.dns_name != mr.dns_name): continue
            addr = mr.map_resolver.print_address_no_iid()
            if (addr in addresses): continue
            stale.append(mr)

        for mr in stale: mr.delete_mr()

    def insert_mr(self):
        """Key this entry as <mr-name><address> in the global list."""
        key = self.mr_name + self.map_resolver.print_address()
        lisp_map_resolvers_list[key] = self

    def delete_mr(self):
        """Remove this entry from the global list, if present."""
        key = self.mr_name + self.map_resolver.print_address()
        if (key not in lisp_map_resolvers_list): return
        lisp_map_resolvers_list.pop(key)
class lisp_ddt_root(object):
    """A configured DDT root node used by the map-resolver when walking
    the DDT hierarchy."""

    def __init__(self):
        self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.priority = 0
        self.weight = 0
class lisp_referral(object):
    """One DDT referral-cache entry: an (EID, group) tuple with a set of
    referral nodes, an action type and a TTL."""

    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_set = {}
        self.referral_type = LISP_DDT_ACTION_NULL
        self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_ttl = 0
        self.uptime = lisp_get_timestamp()
        self.expires = 0
        self.source_cache = None

    def print_referral(self, eid_indent, referral_indent):
        """lprint() this referral entry followed by each node in its
        referral set."""
        uptime = lisp_print_elapsed(self.uptime)
        expires = lisp_print_future(self.expires)
        lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:".format(
            eid_indent, green(self.eid.print_prefix(), False), uptime,
            expires, len(self.referral_set)))

        for ref_node in list(self.referral_set.values()):
            ref_node.print_ref_node(referral_indent)

    def print_referral_type(self):
        """Return a short action string describing this referral."""
        if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
        if (self.referral_type == LISP_DDT_ACTION_NULL):
            return("null-referral")

        if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
            return("no-site-action")

        if (self.referral_type > LISP_DDT_ACTION_MAX):
            return("invalid-action")

        return(lisp_map_referral_action_string[self.referral_type])

    def print_eid_tuple(self):
        """Return the printable (EID, group) tuple for this entry."""
        return(lisp_print_eid_tuple(self.eid, self.group))

    def print_ttl(self):
        """Render the referral TTL as "<n> secs", or "<n> mins" when it
        is a whole number of minutes (>= 60)."""
        ttl = self.referral_ttl
        if (ttl < 60): return(str(ttl) + " secs")

        if ((ttl % 60) == 0):
            ttl = str(old_div(ttl, 60)) + " mins"
        else:
            ttl = str(ttl) + " secs"
        return(ttl)

    def is_referral_negative(self):
        """True for the negative actions: not-registered, delegation
        hole, or not-authoritative."""
        return (self.referral_type in (LISP_DDT_ACTION_MS_NOT_REG,
            LISP_DDT_ACTION_DELEGATION_HOLE, LISP_DDT_ACTION_NOT_AUTH))

    def add_cache(self):
        """Insert into lisp_referral_cache; an (S,G) entry hangs off a
        (*,G) parent entry's source-cache, creating it when needed."""
        if (self.group.is_null()):
            lisp_referral_cache.add_cache(self.eid, self)
        else:
            parent = lisp_referral_cache.lookup_cache(self.group, True)
            if (parent == None):
                parent = lisp_referral()
                parent.eid.copy_address(self.group)
                parent.group.copy_address(self.group)
                lisp_referral_cache.add_cache(self.group, parent)

            if (self.eid.is_null()):
                self.eid.make_default_route(parent.group)
            parent.add_source_entry(self)

    def delete_cache(self):
        """Remove from lisp_referral_cache, deleting a (*,G) parent
        whose source-cache becomes empty."""
        if (self.group.is_null()):
            lisp_referral_cache.delete_cache(self.eid)
        else:
            parent = lisp_referral_cache.lookup_cache(self.group, True)
            if (parent == None): return

            source_ref = parent.lookup_source_cache(self.eid, True)
            if (source_ref == None): return

            parent.source_cache.delete_cache(self.eid)
            if (parent.source_cache.cache_size() == 0):
                lisp_referral_cache.delete_cache(self.group)

    def add_source_entry(self, source_ref):
        """File (S,G) source referral 'source_ref' under this entry."""
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ref.eid, source_ref)

    def lookup_source_cache(self, source, exact):
        """Look up 'source' in this entry's source-cache, or None."""
        if (self.source_cache == None): return(None)
        return(self.source_cache.lookup_cache(source, exact))
class lisp_referral_node(object):
    """One delegated node inside a referral's referral-set, with
    up/down status and request counters."""

    def __init__(self):
        self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.priority = 0
        self.weight = 0
        self.updown = True
        self.map_requests_sent = 0
        self.no_responses = 0
        self.uptime = lisp_get_timestamp()

    def print_ref_node(self, indent):
        """lprint() a one-line summary of this referral node."""
        uptime = lisp_print_elapsed(self.uptime)
        lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format(
            indent, red(self.referral_address.print_address(), False), uptime,
            "up" if self.updown else "down", self.priority, self.weight))
class lisp_ms(object):
    """One Map-Server this xTR registers to, either statically
    configured by address or discovered/refreshed through a DNS name."""

    def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
        mr, rr, wmn, site_id, ekey_id, ekey):
        self.ms_name = ms_name if (ms_name != None) else "all"
        self.dns_name = dns_name
        self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0

        # All map-servers share one xtr-id; the first one mints it.
        if (lisp_map_servers_list == {}):
            self.xtr_id = lisp_get_control_nonce()
        else:
            self.xtr_id = list(lisp_map_servers_list.values())[0].xtr_id

        # Authentication and registration behavior.
        self.alg_id = alg_id
        self.key_id = key_id
        self.password = pw
        self.proxy_reply = pr
        self.merge_registrations = mr
        self.refresh_registrations = rr
        self.want_map_notify = wmn
        self.site_id = site_id

        # Counters.
        self.map_registers_sent = 0
        self.map_registers_multicast_sent = 0
        self.map_notifies_received = 0
        self.map_notify_acks_sent = 0

        # Optional encryption key.
        self.ekey_id = ekey_id
        self.ekey = ekey

        # Static address: store and register; otherwise resolve via DNS.
        if (addr_str):
            self.map_server.store_address(addr_str)
            self.insert_ms()
        else:
            self.resolve_dns_name()

    def resolve_dns_name(self):
        """Re-resolve self.dns_name (rate-limited to every 30 seconds)
        and keep lisp_map_servers_list in sync.  For a LISP-Decent DNS
        suffix, the index-0 map-server clones itself for each extra
        A-record and prunes entries whose addresses disappeared."""
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return

        try:
            hostent = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            addresses = hostent[2]
        except:
            return

        #
        # Our A-record slot no longer exists - retire this map-server.
        #
        if (len(addresses) <= self.a_record_index):
            self.delete_ms()
            return

        #
        # Re-key when the address at our slot changed.
        #
        address = addresses[self.a_record_index]
        if (address != self.map_server.print_address_no_iid()):
            self.delete_ms()
            self.map_server.store_address(address)
            self.insert_ms()

        #
        # LISP-Decent bootstrapping: only the first A-record's entry
        # fans out the additional A-records.
        #
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return

        for address in addresses[1::]:
            addr = lisp_address(LISP_AFI_NONE, address, 0, 0)
            ms = lisp_get_map_server(addr)
            if (ms != None and
                ms.a_record_index == addresses.index(address)):
                continue

            ms = copy.deepcopy(self)
            ms.map_server.store_address(address)
            ms.a_record_index = addresses.index(address)
            ms.last_dns_resolve = lisp_get_timestamp()
            ms.insert_ms()

        #
        # Prune map-servers for this DNS name whose addresses are no
        # longer returned.
        #
        stale = []
        for ms in list(lisp_map_servers_list.values()):
            if (self.dns_name != ms.dns_name): continue
            addr = ms.map_server.print_address_no_iid()
            if (addr in addresses): continue
            stale.append(ms)

        for ms in stale: ms.delete_ms()

    def insert_ms(self):
        """Key this entry as <ms-name><address> in the global list."""
        key = self.ms_name + self.map_server.print_address()
        lisp_map_servers_list[key] = self

    def delete_ms(self):
        """Remove this entry from the global list, if present."""
        key = self.ms_name + self.map_server.print_address()
        if (key not in lisp_map_servers_list): return
        lisp_map_servers_list.pop(key)
class lisp_interface(object):
    """Per-device state for a "lisp interface" clause: instance-id,
    raw/bridge sockets, and dynamic-EID / multi-tenant configuration."""

    def __init__(self, device):
        self.interface_name = ""
        self.device = device
        self.instance_id = None
        self.bridge_socket = None
        self.raw_socket = None
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dynamic_eid_device = None
        self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
        self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)

    def add_interface(self):
        """Register this interface in the global device table."""
        lisp_myinterfaces[self.device] = self

    def get_instance_id(self):
        """Return the configured instance-id (None when unset)."""
        return(self.instance_id)

    def get_socket(self):
        """Return the raw IPv4 send socket (None when not opened)."""
        return(self.raw_socket)

    def get_bridge_socket(self):
        """Return the PF_PACKET bridge socket (None when not opened)."""
        return(self.bridge_socket)

    def does_dynamic_eid_match(self, eid):
        """True when 'eid' falls within the configured dynamic-EID
        prefix; False when no dynamic-EID is configured."""
        if (self.dynamic_eid.is_null()): return(False)
        return(eid.is_more_specific(self.dynamic_eid))

    def set_socket(self, device):
        """Open a raw IPv4 socket bound to 'device' and store it in
        self.raw_socket; stores None when binding fails (e.g. no
        privileges or no such device)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_RAW,
            socket.IPPROTO_RAW)
        sock.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE,
                device)
        except:
            sock.close()
            sock = None
        self.raw_socket = sock

    def set_bridge_socket(self, device):
        """Open a PF_PACKET socket bound to 'device' and store it in
        self.bridge_socket; leaves it unset when binding fails."""
        sock = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
        try:
            # Bug fix: the original did "sock = sock.bind(...)" --
            # socket.bind() returns None, so bridge_socket was always
            # set to None and the bound socket object was lost.
            sock.bind((device, 0))
            self.bridge_socket = sock
        except:
            return
class lisp_datetime(object):
    """Wrapper around a 'YYYY-MM-DD-HH:MM:SS' timestamp string.

    Stores both the original string (datetime_name) and a numeric form
    (datetime, the integer YYYYMMDDHHMMSS) so timestamps compare with
    plain integer comparison.
    """

    def __init__(self, datetime_str):
        self.datetime_name = datetime_str   # original string form
        self.datetime = None                # integer YYYYMMDDHHMMSS
        self.parse_datetime()

    def valid_datetime(self):
        """Return True if datetime_name looks like YYYY-MM-DD-HH:MM:SS."""
        ds = self.datetime_name
        if ds.find(":") == -1: return False
        if ds.find("-") == -1: return False
        year, month, day, time_str = ds[0:4], ds[5:7], ds[8:10], ds[11::]

        if (year + month + day).isdigit() == False: return False

        # Bug fix: the original tested 'x < "lo" and x > "hi"', which can
        # never be true, so out-of-range fields were accepted. Use "or".
        # Zero-padded string comparison is equivalent to numeric here.
        if month < "01" or month > "12": return False
        if day < "01" or day > "31": return False

        # Robustness: avoid a ValueError on a malformed time field.
        parts = time_str.split(":")
        if len(parts) != 3: return False
        hours, minutes, seconds = parts

        if (hours + minutes + seconds).isdigit() == False: return False
        if hours < "00" or hours > "23": return False
        if minutes < "00" or minutes > "59": return False
        if seconds < "00" or seconds > "59": return False
        return True

    def parse_datetime(self):
        """Store the numeric YYYYMMDDHHMMSS form in self.datetime."""
        digits = self.datetime_name
        digits = digits.replace("-", "")
        digits = digits.replace(":", "")
        self.datetime = int(digits)

    def now(self):
        """Return a lisp_datetime for the current local wall-clock time."""
        ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
        return lisp_datetime(ts)

    def print_datetime(self):
        return self.datetime_name

    def future(self):
        """True if this timestamp is later than now."""
        return self.datetime > self.now().datetime

    def past(self):
        """True if this timestamp is now or earlier."""
        return self.future() == False

    def now_in_range(self, upper):
        """True when now lies between this timestamp and 'upper'."""
        return self.past() and upper.future()

    def this_year(self):
        return str(self.datetime)[0:4] == str(self.now().datetime)[0:4]

    def this_month(self):
        return str(self.datetime)[0:6] == str(self.now().datetime)[0:6]

    def today(self):
        return str(self.datetime)[0:8] == str(self.now().datetime)[0:8]
class lisp_policy_match(object):
    """One match clause of a LISP policy; a None field is a wildcard."""

    def __init__(self):
        for attr in ("source_eid", "dest_eid", "source_rloc", "dest_rloc",
                     "rloc_record_name", "geo_name", "elp_name", "rle_name",
                     "json_name", "datetime_lower", "datetime_upper"):
            setattr(self, attr, None)
class lisp_policy(object):
    """A named LISP policy: match clauses plus set-actions that are
    applied to Map-Replies matching any clause."""

    def __init__(self, policy_name):
        self.policy_name = policy_name
        self.match_clauses = []
        self.set_action = None
        self.set_record_ttl = None
        self.set_source_eid = None
        self.set_dest_eid = None
        self.set_rloc_address = None
        self.set_rloc_record_name = None
        self.set_geo_name = None
        self.set_elp_name = None
        self.set_rle_name = None
        self.set_json_name = None

    def match_policy_map_request(self, mr, srloc):
        """Return True when any match clause accepts the Map-Request's
        source-EID, target-EID, source RLOC, and the current time."""
        for clause in self.match_clauses:
            pattern = clause.source_eid
            value = mr.source_eid
            if (pattern and value and
                value.is_more_specific(pattern) == False): continue

            pattern = clause.dest_eid
            value = mr.target_eid
            if (pattern and value and
                value.is_more_specific(pattern) == False): continue

            pattern = clause.source_rloc
            value = srloc
            if (pattern and value and
                value.is_more_specific(pattern) == False): continue

            lower = clause.datetime_lower
            upper = clause.datetime_upper
            if (lower and upper and
                lower.now_in_range(upper) == False): continue
            return True
        return False

    def set_policy_map_reply(self):
        """Build a lisp_rloc carrying this policy's set-actions, or None
        when no RLOC-related set-action is configured."""
        # NOTE(review): set_json_name is absent from this emptiness test,
        # so a policy with ONLY set-json-name returns None and the json
        # set-action below is never applied -- confirm this is intended.
        nothing_set = (self.set_rloc_address == None and
            self.set_rloc_record_name == None and self.set_geo_name == None and
            self.set_elp_name == None and self.set_rle_name == None)
        if nothing_set: return None

        rloc = lisp_rloc()
        if self.set_rloc_address:
            rloc.rloc.copy_address(self.set_rloc_address)
            addr = rloc.rloc.print_address_no_iid()
            lprint("Policy set-rloc-address to {}".format(addr))

        if self.set_rloc_record_name:
            rloc.rloc_name = self.set_rloc_record_name
            name = blue(rloc.rloc_name, False)
            lprint("Policy set-rloc-record-name to {}".format(name))

        if self.set_geo_name:
            rloc.geo_name = self.set_geo_name
            name = rloc.geo_name
            note = "" if (name in lisp_geo_list) else "(not configured)"
            lprint("Policy set-geo-name '{}' {}".format(name, note))

        if self.set_elp_name:
            rloc.elp_name = self.set_elp_name
            name = rloc.elp_name
            note = "" if (name in lisp_elp_list) else "(not configured)"
            lprint("Policy set-elp-name '{}' {}".format(name, note))

        if self.set_rle_name:
            rloc.rle_name = self.set_rle_name
            name = rloc.rle_name
            note = "" if (name in lisp_rle_list) else "(not configured)"
            lprint("Policy set-rle-name '{}' {}".format(name, note))

        if self.set_json_name:
            rloc.json_name = self.set_json_name
            name = rloc.json_name
            note = "" if (name in lisp_json_list) else "(not configured)"
            lprint("Policy set-json-name '{}' {}".format(name, note))
        return rloc

    def save_policy(self):
        """Register this policy in the global table by name."""
        lisp_policies[self.policy_name] = self
class lisp_pubsub(object):
    """Subscription state for one ITR that sent a Map-Request with the
    subscribe-bit set; cached per EID-prefix and per xtr-id."""

    def __init__(self, itr, port, nonce, ttl, xtr_id):
        self.itr = itr
        self.port = port
        self.nonce = nonce
        self.uptime = lisp_get_timestamp()
        self.ttl = ttl
        self.xtr_id = xtr_id
        self.map_notify_count = 0
        self.eid_prefix = None

    def add(self, eid_prefix):
        """Store this subscription under 'eid_prefix', replacing any
        earlier entry for the same xtr-id."""
        self.eid_prefix = eid_prefix
        ttl = self.ttl
        prefix_str = eid_prefix.print_prefix()
        if prefix_str not in lisp_pubsub_cache:
            lisp_pubsub_cache[prefix_str] = {}
        entries = lisp_pubsub_cache[prefix_str]

        action = "Add"
        if self.xtr_id in entries:
            action = "Replace"
            del(entries[self.xtr_id])
        entries[self.xtr_id] = self

        prefix_str = green(prefix_str, False)
        itr_str = red(self.itr.print_address_no_iid(), False)
        xtr_id_str = "0x" + lisp_hex_string(self.xtr_id)
        lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(action,
            prefix_str, itr_str, xtr_id_str, ttl))

    def delete(self, eid_prefix):
        """Remove this xtr-id's subscription for 'eid_prefix', if cached."""
        prefix_str = eid_prefix.print_prefix()
        itr_str = red(self.itr.print_address_no_iid(), False)
        xtr_id_str = "0x" + lisp_hex_string(self.xtr_id)
        if prefix_str in lisp_pubsub_cache:
            entries = lisp_pubsub_cache[prefix_str]
            if self.xtr_id in entries:
                entries.pop(self.xtr_id)
                lprint("Remove pubsub state {} for {}, xtr-id: {}".format(
                    prefix_str, itr_str, xtr_id_str))
class lisp_trace(object):
    """Encode/decode state for a LISP-Trace packet (type 9), plus helpers
    for returning traces through RTR NAT translation state."""

    def __init__(self):
        self.nonce = lisp_get_control_nonce()
        self.packet_json = []     # list of per-hop JSON trace entries
        self.local_rloc = None    # dotted-quad string from decode()
        self.local_port = None    # string port from decode()
        self.lisp_socket = None

    def print_trace(self):
        lprint("LISP-Trace JSON: '{}'".format(self.packet_json))

    def encode(self):
        """Build a trace packet: type-9 first long, zero second long,
        the 64-bit nonce, then the JSON payload."""
        first_long = socket.htonl(0x90000000)
        packet = struct.pack("II", first_long, 0)
        packet += struct.pack("Q", self.nonce)
        packet += json.dumps(self.packet_json)
        return packet

    def decode(self, packet):
        """Parse a trace packet into this object.

        Returns True on success; False for a truncated packet, a wrong
        type nibble, or unparsable JSON.
        """
        fmt = "I"
        size = struct.calcsize(fmt)
        if len(packet) < size: return False
        first_long = struct.unpack(fmt, packet[:size])[0]
        packet = packet[size::]
        first_long = socket.ntohl(first_long)
        # High byte must be 0x90 (LISP type 9).
        if (first_long & 0xff000000) != 0x90000000: return False

        if len(packet) < size: return False
        addr = struct.unpack(fmt, packet[:size])[0]
        packet = packet[size::]

        # Second long carries the local RLOC as a packed IPv4 address;
        # the low 16 bits of the first long carry the local port.
        addr = socket.ntohl(addr)
        b0 = addr >> 24
        b1 = (addr >> 16) & 0xff
        b2 = (addr >> 8) & 0xff
        b3 = addr & 0xff
        self.local_rloc = "{}.{}.{}.{}".format(b0, b1, b2, b3)
        self.local_port = str(first_long & 0xffff)

        fmt = "Q"
        size = struct.calcsize(fmt)
        if len(packet) < size: return False
        self.nonce = struct.unpack(fmt, packet[:size])[0]
        packet = packet[size::]
        if len(packet) == 0: return True

        try:
            self.packet_json = json.loads(packet)
        except:
            return False
        return True

    def myeid(self, eid):
        return lisp_is_myeid(eid)

    def return_to_sender(self, lisp_socket, rts_rloc, packet):
        """Send the trace back to its originator, preferring any cached
        NAT-translated address for 'rts_rloc'."""
        addr, port = self.rtr_cache_nat_trace_find(rts_rloc)
        if addr == None:
            addr, port = rts_rloc.split(":")
            port = int(port)
            lprint("Send LISP-Trace to address {}:{}".format(addr, port))
        else:
            lprint("Send LISP-Trace to translated address {}:{}".format(addr,
                port))

        if lisp_socket == None:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.bind(("0.0.0.0", LISP_TRACE_PORT))
            sock.sendto(packet, (addr, port))
            sock.close()
        else:
            lisp_socket.sendto(packet, (addr, port))

    def packet_length(self):
        """UDP header + trace header (two longs + nonce) + JSON length."""
        udp_header = 8
        trace_header = 4 + 4 + 8
        return udp_header + trace_header + len(json.dumps(self.packet_json))

    def rtr_cache_nat_trace(self, translated_rloc, translated_port):
        """Cache the NAT translation for this trace's local RLOC:port."""
        key = self.local_rloc + ":" + self.local_port
        value = (translated_rloc, translated_port)
        lisp_rtr_nat_trace_cache[key] = value
        lprint("Cache NAT Trace addresses {} -> {}".format(key, value))

    def rtr_cache_nat_trace_find(self, local_rloc_and_port):
        """Return the cached (addr, port) translation, or (None, None)."""
        try:
            return lisp_rtr_nat_trace_cache[local_rloc_and_port]
        except:
            return (None, None)
def lisp_get_map_server(address):
    """Return the configured map-server whose address exactly matches
    'address', or None when no map-server matches."""
    for ms in list(lisp_map_servers_list.values()):
        if ms.map_server.is_exact_match(address):
            return ms
    return None
def lisp_get_any_map_server():
    """Return an arbitrary configured map-server, or None if none exist."""
    servers = list(lisp_map_servers_list.values())
    return servers[0] if servers else None
def lisp_get_map_resolver(address, eid):
    """Select a map-resolver.

    When 'address' is given, return the configured map-resolver whose
    key contains that address string (last match wins), or None.
    Otherwise derive an mr-name from 'eid' ("" matches any resolver,
    None means "all") and return the least-recently-used candidate with
    that mr-name.
    """
    if address != None:
        addr_str = address.print_address()
        found = None
        for key in lisp_map_resolvers_list:
            if key.find(addr_str) == -1: continue
            found = lisp_map_resolvers_list[key]
        return found

    # Map the EID to an mr-name via the database-mapping cache.
    if eid == "":
        mr_name = ""
    elif eid == None:
        mr_name = "all"
    else:
        db = lisp_db_for_lookups.lookup_cache(eid, False)
        mr_name = "all" if db == None else db.use_mr_name

    # Least-recently-used resolver among those with a matching mr-name.
    lru = None
    for mr in list(lisp_map_resolvers_list.values()):
        if mr_name == "": return mr
        if mr.mr_name != mr_name: continue
        if lru == None or mr.last_used < lru.last_used: lru = mr
    return lru
def lisp_get_decent_map_resolver(eid):
    """Return the least-recently-used map-resolver whose DNS name matches
    the LISP-Decent hash index computed for 'eid'."""
    index = lisp_get_decent_index(eid)
    dns_name = str(index) + "." + lisp_decent_dns_suffix

    lprint("Use LISP-Decent map-resolver {} for EID {}".format(
        bold(dns_name, False), eid.print_prefix()))

    lru = None
    for mr in list(lisp_map_resolvers_list.values()):
        if dns_name != mr.dns_name: continue
        if lru == None or mr.last_used < lru.last_used: lru = mr
    return lru
def lisp_ipv4_input(packet):
    """Validate and prepare an IPv4 inner header for encapsulation.

    Returns [True, packet] when the packet is IGMP (protocol 2) and
    should be consumed locally; [False, packet] with the TTL decremented
    and the header checksum rewritten when it should be encapsulated;
    [False, None] when the packet must be discarded.
    """
    # IGMP packets (protocol field, byte 9, equals 2) are passed up.
    if ord(packet[9:10]) == 2: return [True, packet]

    # Verify the header checksum unless the sender left it zero.
    checksum = struct.unpack("H", packet[10:12])[0]
    if checksum == 0:
        dprint("Packet arrived with checksum of 0!")
    else:
        packet = lisp_ip_checksum(packet)
        checksum = struct.unpack("H", packet[10:12])[0]
        if checksum != 0:
            dprint("IPv4 header checksum failed for inner header")
            packet = lisp_format_packet(packet[0:20])
            dprint("Packet header: {}".format(packet))
            return [False, None]

    # Discard on exhausted (0) or expiring (1) TTL.
    ttl = struct.unpack("B", packet[8:9])[0]
    if ttl == 0:
        dprint("IPv4 packet arrived with ttl 0, packet discarded")
        return [False, None]
    elif ttl == 1:
        dprint("IPv4 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return [False, None]

    # Decrement TTL, zero the checksum field, and recompute it.
    ttl -= 1
    packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
    packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
    packet = lisp_ip_checksum(packet)
    return [False, packet]
def lisp_ipv6_input(packet):
    """Validate an IPv6 inner packet before encapsulation.

    'packet' carries the raw bytes in .packet and the parsed destination
    in .inner_dest. Returns the raw bytes with the hop-limit decremented,
    or None when the packet must be discarded.
    """
    dest = packet.inner_dest
    packet = packet.packet

    # Hop-limit lives at byte offset 7 of the IPv6 header.
    hop_limit = struct.unpack("B", packet[7:8])[0]
    if hop_limit == 0:
        dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
        return None
    elif hop_limit == 1:
        dprint("IPv6 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return None

    # Never encapsulate link-local traffic.
    if dest.is_ipv6_link_local():
        dprint("Do not encapsulate IPv6 link-local packets")
        return None

    hop_limit -= 1
    return packet[0:7] + struct.pack("B", hop_limit) + packet[8::]
def lisp_mac_input(packet):
    """MAC frames are encapsulated as-is; no TTL or checksum work needed."""
    return packet
def lisp_rate_limit_map_request(dest):
    """Return True when a Map-Request for 'dest' should be suppressed.

    Always returns False during the no-rate-limit startup window; after
    that, suppresses when the last Map-Request was sent less than
    LISP_MAP_REQUEST_RATE_LIMIT seconds ago.
    """
    now = lisp_get_timestamp()

    # Startup grace period: never rate-limit.
    elapsed = now - lisp_no_map_request_rate_limit
    if elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME:
        remaining = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
        dprint("No Rate-Limit Mode for another {} secs".format(remaining))
        return False

    # Nothing sent yet, so nothing to rate-limit against.
    if lisp_last_map_request_sent == None: return False

    elapsed = now - lisp_last_map_request_sent
    limited = elapsed < LISP_MAP_REQUEST_RATE_LIMIT
    if limited:
        dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format(
            green(dest.print_address(), False), round(elapsed, 3)))
    return limited
def lisp_send_map_request ( lisp_sockets , lisp_ephem_port , seid , deid , rloc ,
    pubsub = False ) :
    """Build and send a Map-Request for 'deid'.

    When 'rloc' is supplied the request is an RLOC-probe addressed to
    that RLOC (possibly encapsulated through NAT state); otherwise it is
    sent inside an ECM to a map-resolver (LISP-Decent or configured).
    'seid' is the source-EID (may be None), 'pubsub' sets the subscribe
    bit. Updates the module-level lisp_last_map_request_sent timestamp
    used for rate-limiting. Returns None.
    """
    global lisp_last_map_request_sent
    if 36 - 36: Ii1I
    if 76 - 76: i11iIiiIii + i1IIi
    if 56 - 56: OoOoOO00 + II111iiii / i11iIiiIii * OoOoOO00 * OoooooooOO
    if 15 - 15: OoOoOO00 / OoooooooOO + OOooOOo
    if 76 - 76: Ii1I * iII111i . OoooooooOO
    if 92 - 92: iIii1I11I1II1 - Oo0Ooo - I1IiiI - OOooOOo * I1Ii111
    # Probe destination address/port; only set for an RLOC-probe.
    i1ii1Iii1 = OooO00O0oooo0 = None
    if ( rloc ) :
        i1ii1Iii1 = rloc . rloc
        OooO00O0oooo0 = rloc . translated_port if lisp_i_am_rtr else LISP_DATA_PORT
    if 85 - 85: IiII + oO0o * I1ii11iIi11i . OoOoOO00 - I1ii11iIi11i * ooOoO0o
    if 32 - 32: o0oOOo0O0Ooo + OoOoOO00 . OOooOOo / OoOoOO00 % iIii1I11I1II1
    if 47 - 47: ooOoO0o . i11iIiiIii / OoO0O00
    if 48 - 48: O0
    if 89 - 89: i11iIiiIii % OoO0O00 . OoOoOO00 + Oo0Ooo + OoOoOO00
    # Local RLOCs (IPv4, IPv6, device) advertised as ITR-RLOCs.
    O00O0 , oO0OoOo0000O0O00 , ooO000OO = lisp_myrlocs
    if ( O00O0 == None ) :
        lprint ( "Suppress sending Map-Request, IPv4 RLOC not found" )
        return
    if 26 - 26: iII111i + Oo0Ooo
    if ( oO0OoOo0000O0O00 == None and i1ii1Iii1 != None and i1ii1Iii1 . is_ipv6 ( ) ) :
        lprint ( "Suppress sending Map-Request, IPv6 RLOC not found" )
        return
    if 95 - 95: iII111i . oO0o % iIii1I11I1II1 - I1IiiI
    if 38 - 38: ooOoO0o % iIii1I11I1II1 - OOooOOo
    # Build the Map-Request header.
    OOo0OoO0O0o0 = lisp_map_request ( )
    OOo0OoO0O0o0 . record_count = 1
    OOo0OoO0O0o0 . nonce = lisp_get_control_nonce ( )
    OOo0OoO0O0o0 . rloc_probe = ( i1ii1Iii1 != None )
    OOo0OoO0O0o0 . subscribe_bit = pubsub
    OOo0OoO0O0o0 . xtr_id_present = pubsub
    if 13 - 13: OOooOOo . i11iIiiIii
    if 71 - 71: oO0o + I1ii11iIi11i * I1ii11iIi11i
    if 79 - 79: oO0o
    if 47 - 47: OoooooooOO - i1IIi * OOooOOo
    if 11 - 11: I11i / OOooOOo . o0oOOo0O0Ooo - O0 * OoooooooOO % iII111i
    if 7 - 7: OoOoOO00 . IiII + OoooooooOO - I1Ii111 / oO0o
    if 32 - 32: iIii1I11I1II1 + I11i + OOooOOo - OoooooooOO + i11iIiiIii * o0oOOo0O0Ooo
    # Remember the probe nonce so the probe-reply can be matched.
    if ( rloc ) : rloc . last_rloc_probe_nonce = OOo0OoO0O0o0 . nonce
    if 8 - 8: iII111i
    # (S,G) lookups: source goes in target_eid, group in target_group.
    IiiiIi = deid . is_multicast_address ( )
    if ( IiiiIi ) :
        OOo0OoO0O0o0 . target_eid = seid
        OOo0OoO0O0o0 . target_group = deid
    else :
        OOo0OoO0O0o0 . target_eid = deid
    if 10 - 10: OoOoOO00 % I11i
    if 49 - 49: oO0o % ooOoO0o + II111iiii
    if 21 - 21: i1IIi + OoO0O00 . I1IiiI - Oo0Ooo
    if 99 - 99: OoOoOO00
    if 46 - 46: I1ii11iIi11i / II111iiii / OoooooooOO / Ii1I
    if 37 - 37: I1ii11iIi11i - Ii1I / oO0o . I1IiiI % I1Ii111
    if 8 - 8: oO0o
    if 46 - 46: I1Ii111 + IiII + II111iiii . o0oOOo0O0Ooo + i11iIiiIii
    if 97 - 97: o0oOOo0O0Ooo % OoOoOO00 * O0 / iIii1I11I1II1 * OoO0O00 / i11iIiiIii
    # Sign non-probe Map-Requests when a signature-EID is configured.
    if ( OOo0OoO0O0o0 . rloc_probe == False ) :
        oOooII111iIiI1 = lisp_get_signature_eid ( )
        if ( oOooII111iIiI1 ) :
            OOo0OoO0O0o0 . signature_eid . copy_address ( oOooII111iIiI1 . eid )
            OOo0OoO0O0o0 . privkey_filename = "./lisp-sig.pem"
    if 1 - 1: OoooooooOO . Ii1I
    if 68 - 68: Ii1I
    if 98 - 98: iII111i
    if 33 - 33: OoO0O00 - ooOoO0o % O0 % iIii1I11I1II1 * iII111i - iII111i
    if 27 - 27: i11iIiiIii + I1ii11iIi11i + i1IIi
    if 67 - 67: o0oOOo0O0Ooo
    # No source-EID for multicast lookups or when none supplied.
    if ( seid == None or IiiiIi ) :
        OOo0OoO0O0o0 . source_eid . afi = LISP_AFI_NONE
    else :
        OOo0OoO0O0o0 . source_eid = seid
    if 58 - 58: IiII % o0oOOo0O0Ooo + i1IIi
    if 33 - 33: II111iiii
    if 61 - 61: I1Ii111
    if 56 - 56: I1ii11iIi11i - OoooooooOO
    if 52 - 52: Oo0Ooo - I11i - IiII - OoOoOO00
    if 21 - 21: oO0o % o0oOOo0O0Ooo + I1Ii111 . OOooOOo / OOooOOo
    if 41 - 41: Oo0Ooo . ooOoO0o * oO0o
    if 31 - 31: Oo0Ooo * IiII / IiII
    if 3 - 3: I1Ii111
    if 65 - 65: iIii1I11I1II1 % Oo0Ooo % I11i / OoooooooOO
    if 82 - 82: o0oOOo0O0Ooo
    if 33 - 33: OoOoOO00 / i11iIiiIii - I1IiiI - OoooooooOO + i1IIi * I1Ii111
    # Behind a NAT, probes to public RLOCs must advertise the
    # translated RLOC as the ITR-RLOC.
    if ( i1ii1Iii1 != None and lisp_nat_traversal and lisp_i_am_rtr == False ) :
        if ( i1ii1Iii1 . is_private_address ( ) == False ) :
            O00O0 = lisp_get_any_translated_rloc ( )
        if 92 - 92: iII111i + OoO0O00
        if ( O00O0 == None ) :
            lprint ( "Suppress sending Map-Request, translated RLOC not found" )
            return
    if 70 - 70: iIii1I11I1II1
    if 100 - 100: OOooOOo . oO0o % ooOoO0o * ooOoO0o . I1Ii111 - oO0o
    if 33 - 33: Oo0Ooo . i1IIi - OoooooooOO
    if 14 - 14: I1Ii111 + Oo0Ooo
    if 35 - 35: i11iIiiIii * Ii1I
    if 100 - 100: O0 . iII111i / iIii1I11I1II1
    if 47 - 47: ooOoO0o + OoOoOO00
    if 67 - 67: IiII - I1ii11iIi11i * i1IIi - ooOoO0o
    # Fill in the ITR-RLOCs list (IPv4 and/or IPv6 as appropriate).
    if ( i1ii1Iii1 == None or i1ii1Iii1 . is_ipv4 ( ) ) :
        if ( lisp_nat_traversal and i1ii1Iii1 == None ) :
            oOooOoo00o = lisp_get_any_translated_rloc ( )
            if ( oOooOoo00o != None ) : O00O0 = oOooOoo00o
        if 16 - 16: I11i * ooOoO0o * I1ii11iIi11i % OoO0O00 * iIii1I11I1II1
        OOo0OoO0O0o0 . itr_rlocs . append ( O00O0 )
    if 39 - 39: oO0o - O0
    if ( i1ii1Iii1 == None or i1ii1Iii1 . is_ipv6 ( ) ) :
        # Link-local IPv6 addresses are not usable as ITR-RLOCs.
        if ( oO0OoOo0000O0O00 == None or oO0OoOo0000O0O00 . is_ipv6_link_local ( ) ) :
            oO0OoOo0000O0O00 = None
        else :
            OOo0OoO0O0o0 . itr_rloc_count = 1 if ( i1ii1Iii1 == None ) else 0
            OOo0OoO0O0o0 . itr_rlocs . append ( oO0OoOo0000O0O00 )
    if 15 - 15: OoooooooOO . OoOoOO00 / iII111i - IiII % iII111i . ooOoO0o
    if 78 - 78: OoOoOO00 / i1IIi
    if 87 - 87: I1ii11iIi11i . O0 / I1ii11iIi11i
    if 35 - 35: IiII % Oo0Ooo * Ii1I . IiII
    if 16 - 16: I1ii11iIi11i % I1IiiI + Ii1I * I11i + i1IIi
    if 14 - 14: iII111i / ooOoO0o % IiII - I1IiiI . Oo0Ooo
    if 30 - 30: O0 . OOooOOo
    if 23 - 23: i1IIi + OoooooooOO * OOooOOo . Oo0Ooo
    if 83 - 83: OoooooooOO
    # Choose the inner source address for the ECM / default source-EID.
    if ( i1ii1Iii1 != None and OOo0OoO0O0o0 . itr_rlocs != [ ] ) :
        iii11I1I1ii = OOo0OoO0O0o0 . itr_rlocs [ 0 ]
    else :
        if ( deid . is_ipv4 ( ) ) :
            iii11I1I1ii = O00O0
        elif ( deid . is_ipv6 ( ) ) :
            iii11I1I1ii = oO0OoOo0000O0O00
        else :
            iii11I1I1ii = O00O0
    if 53 - 53: o0oOOo0O0Ooo - Oo0Ooo / IiII + O0
    if 88 - 88: Oo0Ooo % I1Ii111 * O0 - i1IIi * OoO0O00
    if 74 - 74: Oo0Ooo % iIii1I11I1II1 + OOooOOo
    if 50 - 50: OoO0O00 . OoooooooOO
    if 31 - 31: OoO0O00
    if 55 - 55: OoOoOO00 + I1Ii111 * o0oOOo0O0Ooo - I1ii11iIi11i + OoOoOO00
    Oo00oo = OOo0OoO0O0o0 . encode ( i1ii1Iii1 , OooO00O0oooo0 )
    OOo0OoO0O0o0 . print_map_request ( )
    if 6 - 6: II111iiii % iIii1I11I1II1 * I1Ii111
    if 2 - 2: IiII - I1Ii111 . iIii1I11I1II1 - Ii1I * I11i
    if 58 - 58: i1IIi % iIii1I11I1II1 % i11iIiiIii - o0oOOo0O0Ooo + ooOoO0o
    if 23 - 23: Oo0Ooo % Oo0Ooo / IiII
    if 63 - 63: I11i % Oo0Ooo * I1Ii111 - Oo0Ooo % i11iIiiIii . II111iiii
    if 44 - 44: I11i . I1Ii111 . I1ii11iIi11i . oO0o
    # RLOC-probe case: send directly (NAT-encapsulated when needed).
    if ( i1ii1Iii1 != None ) :
        if ( rloc . is_rloc_translated ( ) ) :
            O0O0O = lisp_get_nat_info ( i1ii1Iii1 , rloc . rloc_name )
            if 1 - 1: I11i % II111iiii / OoO0O00 + OoO0O00
            if 46 - 46: Oo0Ooo * Ii1I / IiII % O0 * iII111i
            if 74 - 74: OoooooooOO + Ii1I
            if 100 - 100: I1IiiI
            # No NAT state yet: fabricate gleaned state for the probe.
            if ( O0O0O == None ) :
                O00o00o00OO0 = rloc . rloc . print_address_no_iid ( )
                Oo = "gleaned-{}" . format ( O00o00o00OO0 )
                iIIiiIi = rloc . translated_port
                O0O0O = lisp_nat_info ( O00o00o00OO0 , Oo , iIIiiIi )
            if 59 - 59: I1IiiI - OoOoOO00 * ooOoO0o / O0
            lisp_encapsulate_rloc_probe ( lisp_sockets , i1ii1Iii1 , O0O0O ,
                Oo00oo )
            return
        if 54 - 54: Oo0Ooo % iIii1I11I1II1 * Oo0Ooo
        if 80 - 80: I1ii11iIi11i - I1ii11iIi11i
        if ( i1ii1Iii1 . is_ipv4 ( ) and i1ii1Iii1 . is_multicast_address ( ) ) :
            I1i1iiIi = i1ii1Iii1
        else :
            O0O0 = i1ii1Iii1 . print_address_no_iid ( )
            I1i1iiIi = lisp_convert_4to6 ( O0O0 )
        if 26 - 26: I1ii11iIi11i - I1IiiI * I1Ii111 % iIii1I11I1II1
        if 77 - 77: o0oOOo0O0Ooo + I1Ii111 . OOooOOo . i1IIi . I1IiiI
        lisp_send ( lisp_sockets , I1i1iiIi , LISP_CTRL_PORT , Oo00oo )
        return
    if 100 - 100: ooOoO0o . i11iIiiIii + Ii1I - OOooOOo - i11iIiiIii - OoooooooOO
    if 42 - 42: OoOoOO00 . I1IiiI / OoOoOO00 / I1ii11iIi11i . OoO0O00
    if 67 - 67: Ii1I - O0 . OoooooooOO . I1Ii111 . o0oOOo0O0Ooo
    if 73 - 73: I11i - oO0o . I1Ii111 + oO0o
    if 48 - 48: IiII . IiII * o0oOOo0O0Ooo * II111iiii % ooOoO0o
    if 40 - 40: I1ii11iIi11i
    # Non-probe case: pick a map-resolver and send inside an ECM.
    oO0000OoO = None if lisp_i_am_rtr else seid
    if ( lisp_decent_pull_xtr_configured ( ) ) :
        OOoOo0O0O0oO = lisp_get_decent_map_resolver ( deid )
    else :
        OOoOo0O0O0oO = lisp_get_map_resolver ( None , oO0000OoO )
    if 26 - 26: O0 - Oo0Ooo * I1IiiI . OOooOOo . IiII
    if ( OOoOo0O0O0oO == None ) :
        # NOTE(review): seid may be None here, which would make
        # print_address() raise -- confirm callers guarantee otherwise.
        lprint ( "Cannot find Map-Resolver for source-EID {}" . format ( green ( seid . print_address ( ) , False ) ) )
        if 87 - 87: I1Ii111 / O0 / iIii1I11I1II1 % OoOoOO00 + iII111i . iIii1I11I1II1
        return
    if 36 - 36: O0 . OoO0O00 + Oo0Ooo + Oo0Ooo % I1Ii111 + ooOoO0o
    OOoOo0O0O0oO . last_used = lisp_get_timestamp ( )
    OOoOo0O0O0oO . map_requests_sent += 1
    if ( OOoOo0O0O0oO . last_nonce == 0 ) : OOoOo0O0O0oO . last_nonce = OOo0OoO0O0o0 . nonce
    if 89 - 89: iII111i
    if 29 - 29: I1ii11iIi11i . ooOoO0o * II111iiii / iII111i . OoooooooOO - OoOoOO00
    if 99 - 99: IiII % O0 - I1Ii111 * OoO0O00
    if 77 - 77: OoooooooOO - I11i / I1IiiI % OoOoOO00 - OOooOOo
    # ECM source defaults to the chosen local RLOC.
    if ( seid == None ) : seid = iii11I1I1ii
    lisp_send_ecm ( lisp_sockets , Oo00oo , seid , lisp_ephem_port , deid ,
        OOoOo0O0O0oO . map_resolver )
    if 37 - 37: ooOoO0o
    if 22 - 22: I1ii11iIi11i + II111iiii / OoooooooOO % o0oOOo0O0Ooo * OoOoOO00 . Oo0Ooo
    if 26 - 26: OoO0O00 % oO0o * Ii1I % OoooooooOO - oO0o
    if 46 - 46: I1IiiI + OoO0O00 - O0 * O0
    # Record send time for Map-Request rate-limiting.
    lisp_last_map_request_sent = lisp_get_timestamp ( )
    if 75 - 75: OOooOOo + iIii1I11I1II1 * OOooOOo
    if 82 - 82: iII111i - I1Ii111 - OoOoOO00
    if 96 - 96: Oo0Ooo . Oo0Ooo % o0oOOo0O0Ooo - I1IiiI * iIii1I11I1II1
    if 29 - 29: i1IIi / Ii1I / oO0o * iII111i
    # Refresh the resolver's DNS name in case its address changed.
    OOoOo0O0O0oO . resolve_dns_name ( )
    return
if 44 - 44: O0
if 95 - 95: OOooOOo + OOooOOo - OoOoOO00
if 83 - 83: II111iiii * ooOoO0o - O0 - i11iIiiIii
if 62 - 62: I1IiiI + II111iiii * iIii1I11I1II1 % iII111i + IiII / ooOoO0o
if 14 - 14: iIii1I11I1II1 * I1ii11iIi11i + OOooOOo + O0
if 79 - 79: II111iiii - iII111i
if 89 - 89: O0 - OoO0O00
if 8 - 8: I1ii11iIi11i / oO0o - OoooooooOO + ooOoO0o + o0oOOo0O0Ooo % i11iIiiIii
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
    """
    Build and send a LISP Info-Request to 'dest' on 'port' (control or data
    port).  When 'device_name' is supplied, the hostname in the message is
    suffixed with it and a temporary host route may be installed so the
    request egresses that specific interface; any such route is removed
    before returning.
    """

    #
    # Build the Info-Request and tag the hostname with the egress device.
    #
    info = lisp_info()
    info.nonce = lisp_get_control_nonce()
    if (device_name): info.hostname += "-" + device_name

    addr_str = dest.print_address_no_iid()

    #
    # If sending out a specific interface, make sure a host route steers
    # the packet there.  Remember whether we installed one so it can be
    # removed below.
    #
    installed_host_route = False
    if (device_name):
        nh = lisp_get_host_route_next_hop(addr_str)

        #
        # NOTE(review): for the control port, spin until the existing host
        # route disappears — presumably removed by the data-port sender
        # path in another thread; confirm there is no stall risk here.
        #
        if (port == LISP_CTRL_PORT and nh != None):
            while (True):
                time.sleep(.01)
                nh = lisp_get_host_route_next_hop(addr_str)
                if (nh == None): break
            #endwhile
        #endif

        default_next_hops = lisp_get_default_route_next_hops()
        for device, gateway in default_next_hops:
            if (device != device_name): continue

            #
            # Replace an existing (different) host route with one via this
            # device's default-route next-hop.
            #
            if (nh != gateway):
                if (nh != None):
                    lisp_install_host_route(addr_str, nh, False)
                #endif
                lisp_install_host_route(addr_str, gateway, True)
                installed_host_route = True
            #endif
            break
        #endfor
    #endif

    packet = info.encode()
    info.print_info()

    #
    # Log what we are about to send.
    #
    which = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
    which = bold(which, False)
    port_str = bold("{}".format(port), False)
    dest_str = red(addr_str, False)
    node = "RTR " if port == LISP_DATA_PORT else "MS "
    lprint("Send Info-Request to {}{}, port {} {}".format(node, dest_str,
        port_str, which))

    #
    # Control-port requests go as-is; data-port requests are prepended with
    # a LISP data header carrying instance-id 0xffffff.
    #
    if (port == LISP_CTRL_PORT):
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    else:
        header = lisp_data_header()
        header.instance_id(0xffffff)
        header = header.encode()
        if (header):
            packet = header + packet
            lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
        #endif
    #endif

    #
    # Undo the temporary host route and restore the previous next-hop.
    #
    if (installed_host_route):
        lisp_install_host_route(addr_str, None, False)
        if (nh != None): lisp_install_host_route(addr_str, nh, True)
    #endif
    return
if 49 - 49: OoooooooOO
if 48 - 48: i1IIi . IiII - O0 + OoooooooOO
if 6 - 6: I1Ii111 * OOooOOo + o0oOOo0O0Ooo . I1ii11iIi11i * I1Ii111
if 6 - 6: oO0o / II111iiii
if 23 - 23: IiII - OoooooooOO / oO0o
if 69 - 69: O0 - OoooooooOO
if 31 - 31: o0oOOo0O0Ooo . i1IIi - i1IIi % i1IIi - iIii1I11I1II1
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
    """
    Handle a received Info-Request: decode it, turn it into an Info-Reply
    echoing the sender's global RLOC/port (and hostname as a private
    distinguished-name RLOC), attach the RTR list when provided, send the
    reply back, and cache the sender as an info-source.
    """

    #
    # Parse the Info-Request; bail on a malformed packet.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return
    info.print_info()

    #
    # Convert to an Info-Reply carrying the translated address and port we
    # saw the request come from.
    #
    info.info_reply = True
    info.global_etr_rloc.store_address(addr_str)
    info.etr_port = sport

    #
    # Echo the sender's hostname as a distinguished-name private RLOC.
    #
    if (info.hostname != None):
        info.private_etr_rloc.afi = LISP_AFI_NAME
        info.private_etr_rloc.store_address(info.hostname)
    #endif

    if (rtr_list != None): info.rtr_list = rtr_list
    packet = info.encode()
    info.print_info()

    #
    # Send the reply to the source of the Info-Request.
    #
    lprint("Send Info-Reply to {}".format(red(addr_str, False)))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, sport, packet)

    #
    # Remember this info-source so later control traffic can be NAT'd back.
    #
    info_source = lisp_info_source(info.hostname, addr_str, sport)
    info_source.cache_address_for_info_source()
    return
if 17 - 17: Ii1I / OoOoOO00 % I1ii11iIi11i - IiII
if 76 - 76: Ii1I / o0oOOo0O0Ooo % IiII % Oo0Ooo
if 68 - 68: o0oOOo0O0Ooo / O0 + i11iIiiIii % II111iiii
if 10 - 10: iII111i - Oo0Ooo
if 10 - 10: IiII + I1Ii111 / OoooooooOO % I1Ii111 * i11iIiiIii - oO0o
if 73 - 73: IiII - II111iiii - OOooOOo % II111iiii + iIii1I11I1II1
if 81 - 81: i11iIiiIii - O0 + I1IiiI
if 39 - 39: IiII * OOooOOo . OoooooooOO + Oo0Ooo + iIii1I11I1II1
def lisp_get_signature_eid():
    """
    Return the first database-mapping entry whose signature_eid flag is
    set, or None when no database-mapping is a signature-EID.
    """
    for db in lisp_db_list:
        if (db.signature_eid): return(db)
    #endfor
    return(None)
if 29 - 29: I1Ii111 / OoOoOO00 % I1ii11iIi11i * IiII / II111iiii
if 10 - 10: O0 / I11i
if 29 - 29: i11iIiiIii % I11i
if 49 - 49: I11i
if 69 - 69: o0oOOo0O0Ooo . O0 * I11i
if 92 - 92: OoO0O00 . O0 / Ii1I % Oo0Ooo . Ii1I
if 40 - 40: o0oOOo0O0Ooo - Ii1I . iII111i - O0
if 53 - 53: Oo0Ooo - I1IiiI * O0 . II111iiii
def lisp_get_any_translated_port():
    """
    Return the translated (NAT) port of the first RLOC in any
    database-mapping that has a non-null translated RLOC, or None when no
    RLOC is translated.
    """
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_port)
        #endfor
    #endfor
    return(None)
if 42 - 42: i1IIi * I1ii11iIi11i * OoOoOO00
if 43 - 43: I1ii11iIi11i % I1ii11iIi11i % i1IIi
if 56 - 56: I1IiiI - OoO0O00 - iII111i . o0oOOo0O0Ooo . I1Ii111
if 70 - 70: iIii1I11I1II1 - I11i
if 2 - 2: oO0o / II111iiii * OoO0O00
if 71 - 71: i1IIi + I11i * OoO0O00 . OOooOOo + oO0o
if 40 - 40: OOooOOo
if 14 - 14: OoooooooOO - OoooooooOO % i11iIiiIii % ooOoO0o / ooOoO0o
if 33 - 33: iII111i / i1IIi . II111iiii % I1ii11iIi11i
def lisp_get_any_translated_rloc():
    """
    Return the translated (NAT) RLOC of the first RLOC in any
    database-mapping that has a non-null translated RLOC, or None when no
    RLOC is translated.
    """
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_rloc)
        #endfor
    #endfor
    return(None)
if 39 - 39: I1ii11iIi11i * I1IiiI * II111iiii . Oo0Ooo % I1IiiI
if 100 - 100: iIii1I11I1II1 - OoooooooOO * OoooooooOO - iII111i / ooOoO0o
if 98 - 98: OoO0O00 + oO0o - II111iiii
if 84 - 84: Oo0Ooo . OoOoOO00 - iII111i
if 5 - 5: OoooooooOO . O0 / OOooOOo + I11i - Ii1I
if 77 - 77: iIii1I11I1II1 * Oo0Ooo . IiII / oO0o + O0
if 76 - 76: iII111i + o0oOOo0O0Ooo - OoooooooOO * oO0o % OoooooooOO - O0
def lisp_get_all_translated_rlocs():
    """
    Return a list of address strings (printed without instance-id) for
    every translated RLOC found across all database-mappings.
    """
    address_list = []
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.is_rloc_translated() == False): continue
            addr_str = rloc_entry.translated_rloc.print_address_no_iid()
            address_list.append(addr_str)
        #endfor
    #endfor
    return(address_list)
if 36 - 36: oO0o % iII111i % oO0o
if 56 - 56: ooOoO0o - O0 + iII111i % I11i / i1IIi
if 78 - 78: i1IIi . iIii1I11I1II1
if 70 - 70: O0 + II111iiii % IiII / I1Ii111 - IiII
if 58 - 58: II111iiii * oO0o - i1IIi . I11i
if 23 - 23: OoO0O00 - I1IiiI * i11iIiiIii
if 62 - 62: OoO0O00 . i11iIiiIii / i1IIi
if 3 - 3: OoO0O00 + O0 % Oo0Ooo * Oo0Ooo % i11iIiiIii
def lisp_update_default_routes(map_resolver, iid, rtr_list):
    """
    (Re)install default-route map-cache entries (unicast and multicast,
    per supported AFI) for instance-id 'iid' whose RLOC-set is the given
    dictionary of RTRs.  Existing default entries are removed first unless
    their RLOC-set already matches.
    """
    # Env knob: when the RTR is behind a NAT, skip private RTR addresses.
    ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Filter out None slots and (optionally) private RTR addresses.
    #
    filtered = {}
    for key in rtr_list:
        if (key == None): continue
        addr = rtr_list[key]
        if (ignore_private and addr.is_private_address()): continue
        filtered[key] = addr
    #endfor
    rtr_list = filtered

    prefix_list = []
    for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
        # MAC default routes only apply when the L2-overlay is enabled.
        if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break

        #
        # Unicast default route.  Drop an existing entry unless its
        # RLOC-set already matches (checkpoint entries are always redone).
        #
        prefix = lisp_address(afi, "", 0, iid)
        prefix.make_default_route(prefix)
        mc = lisp_map_cache.lookup_cache(prefix, True)
        if (mc):
            if (mc.checkpoint_entry):
                lprint("Updating checkpoint entry for {}".format(
                    green(mc.print_eid_tuple(), False)))
            elif (mc.do_rloc_sets_match(list(rtr_list.values()))):
                continue
            #endif
            mc.delete_cache()
        #endif

        prefix_list.append([prefix, ""])

        #
        # Multicast default route: remove any (S,G) entry for this source.
        #
        group = lisp_address(afi, "", 0, iid)
        group.make_default_multicast_route(group)
        gmc = lisp_map_cache.lookup_cache(group, True)
        if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
        if (gmc): gmc.delete_cache()

        prefix_list.append([prefix, group])
    #endfor

    if (len(prefix_list) == 0): return

    #
    # Build the RTR RLOC-set with low priority (254) and no multicast
    # priority (255).
    #
    rloc_set = []
    for rtr_key in rtr_list:
        rtr_addr = rtr_list[rtr_key]
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(rtr_addr)
        rloc_entry.priority = 254
        rloc_entry.mpriority = 255
        rloc_entry.rloc_name = "RTR"
        rloc_set.append(rloc_entry)
    #endfor

    #
    # Add one map-cache entry per prefix.  Deep-copy the RLOC-set each
    # iteration so entries do not share mutable RLOC state.
    #
    for prefix in prefix_list:
        mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
        mc.mapping_source = map_resolver
        mc.map_cache_ttl = LISP_MR_TTL * 60
        mc.add_cache()
        lprint("Add {} to map-cache with RTR RLOC-set: {}".format(
            green(mc.print_eid_tuple(), False), list(rtr_list.keys())))

        rloc_set = copy.deepcopy(rloc_set)
    #endfor
    return
if 67 - 67: iII111i
if 80 - 80: Ii1I . iII111i * I1IiiI * Ii1I
if 82 - 82: OoO0O00 % OoOoOO00 * i11iIiiIii . OoO0O00 . I1ii11iIi11i + Ii1I
if 60 - 60: i1IIi / iII111i
if 10 - 10: I1Ii111 / OoOoOO00 * Ii1I % o0oOOo0O0Ooo . OoOoOO00 / I1ii11iIi11i
if 2 - 2: iIii1I11I1II1
if 85 - 85: O0 - ooOoO0o
if 35 - 35: o0oOOo0O0Ooo - I1IiiI
if 47 - 47: i11iIiiIii * iII111i . OoOoOO00 * I1Ii111 % i11iIiiIii + Ii1I
if 65 - 65: Ii1I % i11iIiiIii
def lisp_process_info_reply(source, packet, store):
    """
    Handle a received Info-Reply.  Learns new RTRs into lisp_rtr_list
    (updating ITR default routes when any were added) and, when 'store' is
    True, records the global RLOC/port translation on each matching
    database-mapping RLOC.

    Returns [global-etr-rloc, etr-port, new-rtr-flag]; [None, None, False]
    on a decode failure.
    """

    #
    # Parse the Info-Reply; bail on a malformed packet.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return([None, None, False])

    info.print_info()

    #
    # Merge the advertised RTR list into our global one, noting whether
    # anything new showed up.
    #
    new_rtr = False
    for rtr in info.rtr_list:
        rtr_str = rtr.print_address_no_iid()
        if (rtr_str in lisp_rtr_list):
            if (lisp_register_all_rtrs == False): continue
            if (lisp_rtr_list[rtr_str] != None): continue
        #endif
        new_rtr = True
        lisp_rtr_list[rtr_str] = rtr
    #endfor

    #
    # An ITR that learned new RTRs refreshes its default-route map-cache
    # entries, per configured instance-id (or the default one).
    #
    if (lisp_i_am_itr and new_rtr):
        if (lisp_iid_to_interface == {}):
            lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
        else:
            for iid in list(lisp_iid_to_interface.keys()):
                lisp_update_default_routes(source, int(iid), lisp_rtr_list)
            #endfor
        #endif
    #endif

    #
    # Caller only wanted the translation info, not to store it.
    #
    if (store == False):
        return([info.global_etr_rloc, info.etr_port, new_rtr])
    #endif

    #
    # Store the NAT translation on each database-mapping RLOC the reply
    # refers to (matched by local address or by distinguished-name).
    #
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            rloc = rloc_entry.rloc
            interface = rloc_entry.interface
            if (interface == None):
                if (rloc.is_null()): continue
                if (rloc.is_local() == False): continue
                if (info.private_etr_rloc.is_null() == False and
                    rloc.is_exact_match(info.private_etr_rloc) == False):
                    continue
                #endif
            elif (info.private_etr_rloc.is_dist_name()):
                rloc_name = info.private_etr_rloc.address
                if (rloc_name != rloc_entry.rloc_name): continue
            #endif

            eid_str = green(db.eid.print_prefix(), False)
            rloc_str = red(rloc.print_address_no_iid(), False)

            #
            # Global RLOC equal to the local RLOC with no translated port
            # means there is no NAT in the path.
            #
            same = info.global_etr_rloc.is_exact_match(rloc)
            if (rloc_entry.translated_port == 0 and same):
                lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
                    interface, eid_str))
                continue
            #endif

            #
            # Skip when the stored translation is already current.
            #
            global_rloc = info.global_etr_rloc
            translated = rloc_entry.translated_rloc
            if (translated.is_exact_match(global_rloc) and
                info.etr_port == rloc_entry.translated_port): continue

            lprint("Store translation {}:{} for {} ({}), EID-prefix {}".format(
                red(info.global_etr_rloc.print_address_no_iid(), False),
                info.etr_port, rloc_str, interface, eid_str))

            rloc_entry.store_translated_rloc(info.global_etr_rloc,
                info.etr_port)
        #endfor
    #endfor
    return([info.global_etr_rloc, info.etr_port, new_rtr])
if 3 - 3: OoooooooOO % oO0o + OoOoOO00 % I1IiiI
if 50 - 50: OoO0O00 - Oo0Ooo
if 13 - 13: OoOoOO00
if 72 - 72: II111iiii * iII111i . II111iiii + iII111i * IiII
if 90 - 90: oO0o * I1Ii111 / O0
if 15 - 15: o0oOOo0O0Ooo * O0 . OOooOOo / Oo0Ooo
if 28 - 28: OoooooooOO + OoooooooOO
if 27 - 27: I11i . oO0o / OoooooooOO - OoO0O00 . I11i
def lisp_test_mr(lisp_sockets, port):
    """
    Periodically probe the configured Map-Resolvers by sending Map-Requests
    for fixed IPv4 and IPv6 test EIDs, then reschedule itself.

    NOTE(review): deliberately disabled — the unconditional return below
    short-circuits the whole body; everything after it is dead code kept
    for parity with the original.
    """
    return

    lprint("Test Map-Resolvers")

    eid4 = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)

    #
    # Probe two IPv4 test EIDs.
    #
    eid4.store_address("10.0.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid4, None)
    eid4.store_address("192.168.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid4, None)

    #
    # Probe two IPv6 test EIDs.
    #
    eid6.store_address("0100::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)
    eid6.store_address("8000::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)

    #
    # Reschedule this test.
    #
    retry_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
        [lisp_sockets, port])
    retry_timer.start()
    return
if 14 - 14: i11iIiiIii + I1Ii111 . OoOoOO00 - oO0o * OoO0O00
if 23 - 23: iIii1I11I1II1
if 32 - 32: iII111i * iIii1I11I1II1 + I1Ii111 + IiII + O0 * OoO0O00
if 100 - 100: II111iiii
if 34 - 34: I11i % OOooOOo - iII111i % II111iiii
if 14 - 14: I11i * o0oOOo0O0Ooo % II111iiii
if 36 - 36: ooOoO0o - iIii1I11I1II1 / IiII + OoOoOO00
if 42 - 42: ooOoO0o + I1IiiI * iII111i / OoOoOO00 . i1IIi - OoooooooOO
if 8 - 8: iIii1I11I1II1 - Oo0Ooo + iII111i
if 40 - 40: o0oOOo0O0Ooo * I1IiiI
if 75 - 75: O0 * OOooOOo / ooOoO0o + I11i
if 56 - 56: I1IiiI % OoooooooOO % Oo0Ooo
if 19 - 19: i11iIiiIii - iIii1I11I1II1 . i1IIi . I1Ii111 / I1IiiI * I1Ii111
def lisp_update_local_rloc(rloc):
    """
    Refresh 'rloc' from its bound interface's current address.  When the
    interface address changed (e.g. DHCP renumbering), copy the new address
    into the RLOC and record it in lisp_myrlocs[0].  No-op when the RLOC
    has no interface or the interface has no address.
    """
    if (rloc.interface == None): return

    current = lisp_get_interface_address(rloc.interface)
    if (current == None): return

    old_str = rloc.rloc.print_address_no_iid()
    new_str = current.print_address_no_iid()

    if (old_str == new_str): return

    lprint("Local interface address changed on {} from {} to {}".format(
        rloc.interface, old_str, new_str))

    # Side effect: lisp_myrlocs[0] tracks the most recent local RLOC.
    rloc.rloc.copy_address(current)
    lisp_myrlocs[0] = current
    return
if 46 - 46: I11i . iII111i - I1IiiI / iIii1I11I1II1
if 80 - 80: Ii1I % oO0o / I1Ii111
if 12 - 12: II111iiii / ooOoO0o . Oo0Ooo / I1Ii111
if 59 - 59: I1IiiI % O0
if 93 - 93: OoooooooOO % IiII % Ii1I - i11iIiiIii
if 74 - 74: ooOoO0o + OoOoOO00
if 94 - 94: i11iIiiIii
if 88 - 88: ooOoO0o + ooOoO0o / I1Ii111
def lisp_update_encap_port(mc):
    """
    For each RLOC of map-cache entry 'mc', look up the cached NAT state
    and, when the NAT'd port changed, store the new translated RLOC/port.
    """
    for rloc in mc.rloc_set:
        nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
        if (nat_info == None): continue
        if (rloc.translated_port == nat_info.port): continue

        lprint(("Encap-port changed from {} to {} for RLOC {}, " +
            "EID-prefix {}").format(rloc.translated_port, nat_info.port,
            red(rloc.rloc.print_address_no_iid(), False),
            green(mc.print_eid_tuple(), False)))

        rloc.store_translated_rloc(rloc.rloc, nat_info.port)
    #endfor
    return
if 74 - 74: iIii1I11I1II1 - Oo0Ooo % Ii1I * i1IIi / I1Ii111 . OoOoOO00
if 57 - 57: ooOoO0o . I1Ii111 * OoOoOO00 . IiII
if 63 - 63: Ii1I
if 94 - 94: OOooOOo + i1IIi - Ii1I / iII111i - I11i % I1Ii111
if 90 - 90: i1IIi * OoooooooOO / OOooOOo + O0
if 32 - 32: i11iIiiIii . Oo0Ooo - iIii1I11I1II1
if 97 - 97: II111iiii * OoOoOO00 / o0oOOo0O0Ooo % OOooOOo
if 82 - 82: i1IIi
if 91 - 91: OoOoOO00 . II111iiii + oO0o
if 92 - 92: Oo0Ooo + II111iiii + OOooOOo % I1IiiI / I1ii11iIi11i
if 25 - 25: I1ii11iIi11i - o0oOOo0O0Ooo / OoooooooOO . i11iIiiIii
if 62 - 62: i1IIi + OoOoOO00 % OOooOOo
def lisp_timeout_map_cache_entry(mc, delete_list):
    """
    Decide whether map-cache entry 'mc' has timed out; timed-out entries
    are appended to 'delete_list'.  Always returns [True, delete_list] so
    the enclosing cache walk continues.
    """

    #
    # No TTL means the entry never times out; just keep its encap port
    # current.
    #
    if (mc.map_cache_ttl == None):
        lisp_update_encap_port(mc)
        return([True, delete_list])
    #endif

    now = lisp_get_timestamp()
    last_refresh = mc.last_refresh_time

    #
    # During the first 5 minutes after a co-located map-server starts,
    # force startup-mode native-forward entries to expire immediately.
    #
    if (lisp_is_running("lisp-ms") and lisp_uptime + (5 * 60) >= now):
        if (mc.action == LISP_NATIVE_FORWARD_ACTION):
            last_refresh = 0
            lprint("Remove startup-mode native-forward map-cache entry")
        #endif
    #endif

    #
    # Still within TTL: keep the entry (and refresh encap port for
    # forwarding entries).
    #
    if (last_refresh + mc.map_cache_ttl > now):
        if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
        return([True, delete_list])
    #endif

    #
    # Never time out the 0.0.0.0/0 entry when doing NAT traversal.
    #
    if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
        return([True, delete_list])
    #endif

    #
    # Entry timed out — log it and queue it for deletion.
    #
    uptime_str = lisp_print_elapsed(mc.uptime)
    refresh_str = lisp_print_elapsed(mc.last_refresh_time)
    eid_str = mc.print_eid_tuple()
    lprint(("Map-cache entry {} {}, had uptime {}, last-refresh-time {}").format(green(eid_str, False), bold("timed out", False), uptime_str, refresh_str))

    delete_list.append(mc)
    return([True, delete_list])
if 29 - 29: O0 % Ii1I * ooOoO0o % O0
if 83 - 83: oO0o
if 95 - 95: Oo0Ooo * O0 % i1IIi / iII111i + oO0o
if 85 - 85: iIii1I11I1II1 / I11i
if 65 - 65: I11i / i1IIi * OoOoOO00 * Ii1I * OoO0O00
if 74 - 74: I1ii11iIi11i . I1ii11iIi11i % IiII + OOooOOo . OoO0O00 * I11i
if 20 - 20: OOooOOo % i1IIi * Ii1I / i11iIiiIii
if 89 - 89: ooOoO0o
def lisp_timeout_map_cache_walk(mc, parms):
    """
    Cache-walk callback used by lisp_timeout_map_cache().  'parms' is
    [delete_list, checkpoint_list]: expired entries accumulate in the
    first, surviving unicast entries are checkpointed into the second.
    (S,G) entries recurse into their source cache.
    """
    delete_list = parms[0]
    checkpoint_list = parms[1]

    #
    # Unicast entry: time it out, and checkpoint it only when it was NOT
    # just appended to the delete list.
    #
    if (mc.group.is_null()):
        keep_going, delete_list = lisp_timeout_map_cache_entry(mc,
            delete_list)
        if (delete_list == [] or mc != delete_list[-1]):
            checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
        #endif
        return([keep_going, parms])
    #endif

    if (mc.source_cache == None): return([True, parms])

    #
    # (S,G) entry: apply the same timeout logic to each source entry.
    #
    parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
    return([True, parms])
if 67 - 67: Ii1I / i11iIiiIii % OoOoOO00 % O0 / OoOoOO00
if 54 - 54: I11i . OoOoOO00 / II111iiii . i1IIi + OOooOOo % II111iiii
if 82 - 82: i11iIiiIii . OoooooooOO % OoOoOO00 * O0 - I1Ii111
if 78 - 78: OoOoOO00 % Ii1I % OOooOOo % Oo0Ooo % I11i . Ii1I
if 73 - 73: OoooooooOO / i1IIi . iIii1I11I1II1
if 89 - 89: I1Ii111
if 29 - 29: I11i * ooOoO0o - OoooooooOO
def lisp_timeout_map_cache(lisp_map_cache):
    """
    Walk the entire map-cache, delete entries that timed out, and write a
    checkpoint file of the entries that survived.
    """
    parms = [[], []]
    parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)

    #
    # Delete outside the walk so the cache is not modified mid-iteration.
    #
    for mc in parms[0]: mc.delete_cache()

    #
    # Persist the surviving entries.
    #
    lisp_checkpoint(parms[1])
    return
if 97 - 97: OOooOOo . Oo0Ooo . oO0o * i1IIi
if 7 - 7: Oo0Ooo
if 38 - 38: Oo0Ooo - I1ii11iIi11i
if 19 - 19: Ii1I * OoO0O00 / OoO0O00 . II111iiii % iIii1I11I1II1
if 61 - 61: I1ii11iIi11i * oO0o % iII111i + IiII + i11iIiiIii * I11i
if 3 - 3: Ii1I
if 71 - 71: iIii1I11I1II1 . OOooOOo / I11i / i1IIi
if 69 - 69: i1IIi / iII111i + Ii1I + I11i + IiII
if 86 - 86: Oo0Ooo
if 97 - 97: I1IiiI
if 91 - 91: ooOoO0o / oO0o * OOooOOo . II111iiii - I11i - I11i
if 5 - 5: O0 + OoooooooOO + i11iIiiIii * Oo0Ooo * OoOoOO00 . oO0o
if 6 - 6: OoO0O00 % Oo0Ooo % I1IiiI % o0oOOo0O0Ooo % O0 % Oo0Ooo
if 94 - 94: I11i . i1IIi / II111iiii + OOooOOo
if 64 - 64: I1IiiI % ooOoO0o
if 72 - 72: O0 * II111iiii % OoO0O00 - I1IiiI * OOooOOo
def lisp_store_nat_info(hostname, rloc, port):
    """
    Record NAT state (address, port) for 'hostname', keeping the most
    recent entry at the head of lisp_nat_state_info[hostname].

    Returns True when the head entry changed (caller should act on the new
    translation), False when this was just a refresh of the current one.
    """
    addr_str = rloc.print_address_no_iid()
    # First "{}" stays a placeholder, filled in by the lprint calls below.
    msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
        blue(hostname, False), red(addr_str, False), port)

    new_entry = lisp_nat_info(addr_str, hostname, port)

    #
    # First state for this hostname.
    #
    if (hostname not in lisp_nat_state_info):
        lisp_nat_state_info[hostname] = [new_entry]
        lprint(msg.format("Store initial"))
        return(True)
    #endif

    #
    # Head entry already matches — just refresh its uptime.
    #
    head = lisp_nat_state_info[hostname][0]
    if (head.address == addr_str and head.port == port):
        head.uptime = lisp_get_timestamp()
        lprint(msg.format("Refresh existing"))
        return(False)
    #endif

    #
    # Look for a matching older entry; if found it moves to the head,
    # otherwise a brand-new entry is prepended.
    #
    existing = None
    for entry in lisp_nat_state_info[hostname]:
        if (entry.address == addr_str and entry.port == port):
            existing = entry
            break
        #endif
    #endfor

    if (existing == None):
        lprint(msg.format("Store new"))
    else:
        lisp_nat_state_info[hostname].remove(existing)
        lprint(msg.format("Use previous"))
    #endif

    lisp_nat_state_info[hostname] = [new_entry] + lisp_nat_state_info[hostname]
    return(True)
if 60 - 60: I1ii11iIi11i - I1IiiI * I1Ii111 * I1Ii111 / OoooooooOO
if 17 - 17: i1IIi - ooOoO0o
if 86 - 86: I1ii11iIi11i . o0oOOo0O0Ooo
if 30 - 30: o0oOOo0O0Ooo / i11iIiiIii
if 33 - 33: OOooOOo % OoooooooOO
if 98 - 98: Ii1I
if 38 - 38: ooOoO0o - iII111i * OOooOOo % I1ii11iIi11i + Oo0Ooo
if 95 - 95: iIii1I11I1II1 / O0 % O0
def lisp_get_nat_info(rloc, hostname):
    """
    Return the cached NAT-state entry whose address matches 'rloc' for the
    given 'hostname', or None when no state is stored for that pair.
    """
    if (hostname not in lisp_nat_state_info): return(None)

    addr_str = rloc.print_address_no_iid()
    for entry in lisp_nat_state_info[hostname]:
        if (entry.address == addr_str): return(entry)
    #endfor
    return(None)
if 18 - 18: OoO0O00 * ooOoO0o
if 32 - 32: oO0o . OoooooooOO - o0oOOo0O0Ooo + II111iiii
if 4 - 4: OOooOOo * I1IiiI - I11i - I11i
if 67 - 67: I1IiiI
if 32 - 32: oO0o * i11iIiiIii - I11i % Oo0Ooo * I1ii11iIi11i
if 79 - 79: II111iiii / Oo0Ooo / I1ii11iIi11i
if 30 - 30: I11i . o0oOOo0O0Ooo / II111iiii
if 59 - 59: i11iIiiIii
if 5 - 5: i11iIiiIii + o0oOOo0O0Ooo . OoO0O00 % OoOoOO00 + I11i
if 59 - 59: I1ii11iIi11i
if 47 - 47: I1IiiI + Oo0Ooo
if 78 - 78: i1IIi / I1ii11iIi11i % ooOoO0o * OoO0O00
if 10 - 10: i1IIi % ooOoO0o / iII111i
if 98 - 98: IiII / o0oOOo0O0Ooo - i1IIi - OOooOOo
if 65 - 65: Ii1I + OoOoOO00 * Oo0Ooo . O0 . IiII
if 33 - 33: i11iIiiIii . i1IIi . I1Ii111 - OoOoOO00 + OOooOOo
if 34 - 34: I1ii11iIi11i . i1IIi * O0 / OoooooooOO
if 22 - 22: OOooOOo % o0oOOo0O0Ooo - i11iIiiIii
if 58 - 58: IiII . Ii1I + II111iiii
if 31 - 31: i11iIiiIii + i11iIiiIii + I11i * Oo0Ooo . I11i
def lisp_build_info_requests(lisp_sockets, dest, port):
    """
    Build and send Info-Request messages for NAT traversal.

    When 'dest' is None, target every configured Map-Resolver, falling
    back to the Map-Servers when no resolvers exist; otherwise target
    only 'dest'. One Info-Request is sent per interface-bound local RLOC
    per destination. No-op unless NAT traversal is enabled.
    """
    if (lisp_nat_traversal == False): return

    #
    # Decide the destination set. Note dest_list deliberately aliases
    # mr_list when resolvers are configured, matching the original flow.
    #
    mr_list = []
    if (dest == None):
        for mr in list(lisp_map_resolvers_list.values()):
            mr_list.append(mr.map_resolver)
        #endfor
        dest_list = mr_list
        if (dest_list == []):
            for ms in list(lisp_map_servers_list.values()):
                dest_list.append(ms.map_server)
            #endfor
        #endif
        if (dest_list == []): return
    else:
        dest_list = [dest]
    #endif

    #
    # Gather local RLOCs that are bound to an interface; dedupe by the
    # printed address.
    #
    rloc_to_interface = {}
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            lisp_update_local_rloc(rloc_entry)
            if (rloc_entry.rloc.is_null()): continue
            if (rloc_entry.interface == None): continue

            address = rloc_entry.rloc.print_address_no_iid()
            if (address in rloc_to_interface): continue
            rloc_to_interface[address] = rloc_entry.interface
        #endfor
    #endfor

    if (rloc_to_interface == {}):
        lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + "found in any database-mappings")
        return
    #endif

    #
    # Send one Info-Request per private address per destination. The
    # device name is only passed when more than one address was found.
    #
    for address in rloc_to_interface:
        interface = rloc_to_interface[address]
        colored = red(address, False)
        lprint("Build Info-Request for private address {} ({})".format(colored,
            interface))
        device = interface if len(rloc_to_interface) > 1 else None
        for destination in dest_list:
            lisp_send_info_request(lisp_sockets, destination, port, device)
        #endfor
    #endfor

    #
    # Refresh Map-Resolver DNS names when resolvers were targeted.
    #
    if (mr_list != []):
        for mr in list(lisp_map_resolvers_list.values()):
            mr.resolve_dns_name()
        #endfor
    #endif
    return
def lisp_valid_address_format(kw, value):
    """
    Validate the syntax of an address value from configuration.

    Only values for the "address" keyword are checked; any other keyword
    returns True. Accepted formats, tried in order:
      - distinguished-name: a single-quoted string
      - IPv4: dotted-quad with each octet in 0..255
      - geo-coordinates: >= 8 dash-separated fields containing N/S/W/E
      - MAC: exactly 3 dash-separated hex groups
      - IPv6: colon-separated hex groups (at most one "::")
      - E.164: "+" followed by digits only
    Returns True when the value matches one of the formats, else False.
    """
    if (kw != "address"): return(True)

    #
    # Robustness fix: an empty value is not an address. The original
    # code raised IndexError on value[0] for "".
    #
    if (value == ""): return(False)

    #
    # Distinguished-name, a quoted string.
    #
    if (value[0] == "'" and value[-1] == "'"): return(True)

    #
    # IPv4 dotted-quad.
    #
    if (value.find(".") != -1):
        fields = value.split(".")
        if (len(fields) != 4): return(False)

        for octet in fields:
            if (octet.isdigit() == False): return(False)
            if (int(octet) > 255): return(False)
        #endfor
        return(True)
    #endif

    #
    # Geo-coordinates: dash-separated with a compass direction letter.
    # Requires at least 8 fields (degrees/minutes/seconds for both axes).
    #
    if (value.find("-") != -1):
        fields = value.split("-")
        for direction in ["N", "S", "W", "E"]:
            if (direction in fields):
                if (len(fields) < 8): return(False)
                return(True)
            #endif
        #endfor
    #endif

    #
    # MAC address: three hex groups (e.g. "aabb-ccdd-eeff").
    #
    if (value.find("-") != -1):
        fields = value.split("-")
        if (len(fields) != 3): return(False)

        for hex_group in fields:
            # Narrowed from bare except: int(str, 16) raises ValueError.
            try: int(hex_group, 16)
            except ValueError: return(False)
        #endfor
        return(True)
    #endif

    #
    # IPv6 address: colon-separated hex groups, at most one "::" run.
    #
    if (value.find(":") != -1):
        fields = value.split(":")
        if (len(fields) < 2): return(False)

        found_null = False
        count = 0
        for hex_group in fields:
            count += 1
            if (hex_group == ""):
                if (found_null):
                    if (len(fields) == count): break
                    if (count > 2): return(False)
                #endif
                found_null = True
                continue
            #endif
            # Narrowed from bare except: int(str, 16) raises ValueError.
            try: int(hex_group, 16)
            except ValueError: return(False)
        #endfor
        return(True)
    #endif

    #
    # E.164 telephone number: "+" then digits only.
    #
    if (value[0] == "+"):
        for digit in value[1::]:
            if (digit.isdigit() == False): return(False)
        #endfor
        return(True)
    #endif
    return(False)
def lisp_process_api(process, lisp_socket, data_structure):
    """
    Dispatch an API request of the form "<api-name>%<json-parms>" and
    send the JSON-encoded result back over IPC to "lisp-core".
    """
    api_name, parms = data_structure.split("%")

    lprint("Process API request '{}', parameters: '{}'".format(api_name,
        parms))

    data = []
    if (api_name == "map-cache"):
        if (parms == ""):
            data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
        else:
            data = lisp_process_api_map_cache_entry(json.loads(parms))
        #endif
    elif (api_name == "site-cache"):
        if (parms == ""):
            data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
                data)
        else:
            data = lisp_process_api_site_cache_entry(json.loads(parms))
        #endif
    elif (api_name == "site-cache-summary"):
        data = lisp_process_api_site_cache_summary(lisp_sites_by_eid)
    elif (api_name == "map-server"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(True, parms)
    elif (api_name == "map-resolver"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(False, parms)
    elif (api_name == "database-mapping"):
        data = lisp_process_api_database_mapping()
    #endif

    #
    # Serialize and ship the result back to the requesting process.
    #
    data = json.dumps(data)
    ipc = lisp_api_ipc(process, data)
    lisp_ipc(ipc, lisp_socket, "lisp-core")
    return
def lisp_process_api_map_cache(mc, data):
    """
    walk_cache() callback that collects a map-cache entry into 'data'.

    Unicast entries ((S,G)-less) are gathered directly; for group
    entries the nested source-cache is walked instead.
    """
    #
    # Plain EID entry — gather it directly.
    #
    if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))

    #
    # Group entry with no sources cached yet — nothing to gather.
    #
    if (mc.source_cache == None): return([True, data])

    #
    # (S,G) entry — walk the per-group source cache.
    #
    data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
    return([True, data])
def lisp_gather_map_cache_data(mc, data):
    """
    Serialize one map-cache entry 'mc' into a dict and append it to
    'data'. Returns [True, data] for walk_cache() continuation.
    """
    entry = {}
    entry["instance-id"] = str(mc.eid.instance_id)
    entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
    if (mc.group.is_null() == False):
        entry["group-prefix"] = mc.group.print_prefix_no_iid()
    #endif
    entry["uptime"] = lisp_print_elapsed(mc.uptime)
    # NOTE(review): "expires" mirrors uptime here — an expiry timestamp
    # is not computed; confirm this is intended.
    entry["expires"] = lisp_print_elapsed(mc.uptime)
    entry["action"] = lisp_map_reply_action_string[mc.action]
    entry["ttl"] = "--" if mc.map_cache_ttl == None else str(mc.map_cache_ttl / 60)

    #
    # Serialize the RLOC-set; multicast RLOCs carry a nested set of the
    # RLOCs being probed.
    #
    rloc_set = []
    for rloc in mc.rloc_set:
        rloc_entry = lisp_fill_rloc_in_json(rloc)

        if (rloc.rloc.is_multicast_address()):
            rloc_entry["multicast-rloc-set"] = []
            for member in list(rloc.multicast_rloc_probe_list.values()):
                member_entry = lisp_fill_rloc_in_json(member)
                rloc_entry["multicast-rloc-set"].append(member_entry)
            #endfor
        #endif

        rloc_set.append(rloc_entry)
    #endfor
    entry["rloc-set"] = rloc_set

    data.append(entry)
    return([True, data])
def lisp_fill_rloc_in_json(rloc):
    """
    Serialize a single RLOC record into a dict for JSON output:
    address/port, encap crypto state, operational state, priorities,
    and RLOC-probe telemetry.
    """
    entry = {}

    address = None
    if (rloc.rloc_exists()):
        entry["address"] = rloc.rloc.print_address_no_iid()
        address = entry["address"]
    #endif

    # NOTE(review): assumes translated_port != 0 implies rloc_exists();
    # otherwise 'address' is None here and += would raise — confirm.
    if (rloc.translated_port != 0):
        entry["encap-port"] = str(rloc.translated_port)
        address += ":" + entry["encap-port"]
    #endif

    #
    # Report negotiated encap crypto when a shared key exists for this
    # address:port.
    #
    if (address and address in lisp_crypto_keys_by_rloc_encap):
        key = lisp_crypto_keys_by_rloc_encap[address][1]
        if (key != None and key.shared_key != None):
            entry["encap-crypto"] = "crypto-" + key.cipher_suite_string
        #endif
    #endif

    entry["state"] = rloc.print_state()
    if (rloc.geo): entry["geo"] = rloc.geo.print_geo()
    if (rloc.elp): entry["elp"] = rloc.elp.print_elp(False)
    if (rloc.rle): entry["rle"] = rloc.rle.print_rle(False, False)
    if (rloc.json): entry["json"] = rloc.json.print_json(False)
    if (rloc.rloc_name): entry["rloc-name"] = rloc.rloc_name

    stats = rloc.stats.get_stats(False, False)
    if (stats): entry["stats"] = stats

    entry["uptime"] = lisp_print_elapsed(rloc.uptime)
    entry["upriority"] = str(rloc.priority)
    entry["uweight"] = str(rloc.weight)
    entry["mpriority"] = str(rloc.mpriority)
    entry["mweight"] = str(rloc.mweight)

    #
    # RLOC-probe telemetry, only when a reply has been seen.
    #
    last_reply = rloc.last_rloc_probe_reply
    if (last_reply):
        entry["last-rloc-probe-reply"] = lisp_print_elapsed(last_reply)
        entry["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
    #endif
    entry["rloc-hop-count"] = rloc.rloc_probe_hops
    entry["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops

    entry["rloc-probe-latency"] = rloc.rloc_probe_latency
    entry["recent-rloc-probe-latencies"] = rloc.recent_rloc_probe_latencies

    entry["recent-rloc-probe-rtts"] = [str(r) for r in rloc.recent_rloc_probe_rtts]
    return(entry)
def lisp_process_api_map_cache_entry(parms):
    """
    Look up one map-cache entry described by API parms ("instance-id",
    "eid-prefix", optional "group-prefix") and return its serialized
    form as a list (empty when not found).
    """
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Build source EID from parameters; it doubles as the destination
    # for unicast lookups.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])
    dest = eid
    source = eid

    #
    # A supplied group-prefix makes this an (S,G) lookup.
    #
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])
        dest = group
    #endif

    data = []
    mc = lisp_map_cache_lookup(source, dest)
    if (mc): status, data = lisp_process_api_map_cache(mc, data)
    return(data)
def lisp_process_api_site_cache_summary(site_cache):
    """
    Build a per-site summary of registrations: for each accept-more-
    specifics entry, count its more-specific registrations and how many
    are currently registered.
    """
    site_template = {"site": "", "registrations": []}
    reg_template = {"eid-prefix": "", "count": 0, "registered-count": 0}

    by_site = {}
    for mask_len in site_cache.cache_sorted:
        for se in list(site_cache.cache[mask_len].entries.values()):
            if (se.accept_more_specifics == False): continue
            if (se.site.site_name not in by_site):
                by_site[se.site.site_name] = []
            #endif
            reg = copy.deepcopy(reg_template)
            reg["eid-prefix"] = se.eid.print_prefix()
            reg["count"] = len(se.more_specific_registrations)
            for child in se.more_specific_registrations:
                if (child.registered): reg["registered-count"] += 1
            #endfor
            by_site[se.site.site_name].append(reg)
        #endfor
    #endfor

    data = []
    for site_name in by_site:
        site = copy.deepcopy(site_template)
        site["site"] = site_name
        site["registrations"] = by_site[site_name]
        data.append(site)
    #endfor
    return(data)
def lisp_process_api_site_cache(se, data):
    """
    walk_cache() callback that collects a site-cache entry into 'data'.

    Unicast entries are gathered directly; for group entries the nested
    source-cache is walked instead.
    """
    #
    # Plain EID entry — gather it directly.
    #
    if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))

    #
    # Group entry with no sources cached yet — nothing to gather.
    #
    if (se.source_cache == None): return([True, data])

    #
    # (S,G) entry — walk the per-group source cache.
    #
    data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
    return([True, data])
def lisp_process_api_ms_or_mr(ms_or_mr, data):
    """
    Return info for the first configured Map-Server (ms_or_mr True) or
    Map-Resolver (False) matching the request — either by "dns-name" or
    by exact "address" match. Returns a one-element list, or [] when
    nothing matches.
    """
    address = lisp_address(LISP_AFI_NONE, "", 0, 0)
    dns_name = data["dns-name"] if ("dns-name" in data) else None
    if ("address" in data):
        address.store_address(data["address"])
    #endif

    value = {}
    if (ms_or_mr):
        for ms in list(lisp_map_servers_list.values()):
            if (dns_name):
                if (dns_name != ms.dns_name): continue
            else:
                if (address.is_exact_match(ms.map_server) == False): continue
            #endif

            value["dns-name"] = ms.dns_name
            value["address"] = ms.map_server.print_address_no_iid()
            value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
            return([value])
        #endfor
    else:
        for mr in list(lisp_map_resolvers_list.values()):
            if (dns_name):
                if (dns_name != mr.dns_name): continue
            else:
                if (address.is_exact_match(mr.map_resolver) == False): continue
            #endif

            value["dns-name"] = mr.dns_name
            value["address"] = mr.map_resolver.print_address_no_iid()
            value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
            return([value])
        #endfor
    #endif
    return([])
def lisp_process_api_database_mapping():
    """
    Serialize every configured database-mapping (EID-prefix, optional
    group-prefix, and its RLOC records) into a list of dicts.
    """
    data = []

    for db in lisp_db_list:
        entry = {}
        entry["eid-prefix"] = db.eid.print_prefix()
        if (db.group.is_null() == False):
            entry["group-prefix"] = db.group.print_prefix()
        #endif

        rlocs = []
        for rloc_entry in db.rloc_set:
            rloc = {}
            if (rloc_entry.rloc.is_null() == False):
                rloc["rloc"] = rloc_entry.rloc.print_address_no_iid()
            #endif
            if (rloc_entry.rloc_name != None): rloc["rloc-name"] = rloc_entry.rloc_name
            if (rloc_entry.interface != None): rloc["interface"] = rloc_entry.interface
            translated = rloc_entry.translated_rloc
            if (translated.is_null() == False):
                rloc["translated-rloc"] = translated.print_address_no_iid()
            #endif
            if (rloc != {}): rlocs.append(rloc)
        #endfor

        entry["rlocs"] = rlocs
        data.append(entry)
    #endfor
    return(data)
def lisp_gather_site_cache_data(se, data):
    """
    Serialize one site-cache entry 'se' (registration state plus its
    registered RLOC-set) into a dict and append it to 'data'. Returns
    [True, data] for walk_cache() continuation.
    """
    entry = {}
    entry["site-name"] = se.site.site_name
    entry["instance-id"] = str(se.eid.instance_id)
    entry["eid-prefix"] = se.eid.print_prefix_no_iid()
    if (se.group.is_null() == False):
        entry["group-prefix"] = se.group.print_prefix_no_iid()
    #endif
    entry["registered"] = "yes" if se.registered else "no"
    entry["first-registered"] = lisp_print_elapsed(se.first_registered)
    entry["last-registered"] = lisp_print_elapsed(se.last_registered)

    registerer = se.last_registerer
    registerer = "none" if registerer.is_null() else registerer.print_address()
    entry["last-registerer"] = registerer
    entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
    entry["dynamic"] = "yes" if (se.dynamic) else "no"
    entry["site-id"] = str(se.site_id)
    if (se.xtr_id_present):
        entry["xtr-id"] = "0x" + lisp_hex_string(se.xtr_id)
    #endif

    #
    # Serialize the registered RLOC-set.
    #
    rloc_set = []
    for rloc_entry in se.registered_rlocs:
        rloc = {}
        rloc["address"] = rloc_entry.rloc.print_address_no_iid() if rloc_entry.rloc_exists() else "none"

        if (rloc_entry.geo): rloc["geo"] = rloc_entry.geo.print_geo()
        if (rloc_entry.elp): rloc["elp"] = rloc_entry.elp.print_elp(False)
        if (rloc_entry.rle): rloc["rle"] = rloc_entry.rle.print_rle(False, True)
        if (rloc_entry.json): rloc["json"] = rloc_entry.json.print_json(False)
        if (rloc_entry.rloc_name): rloc["rloc-name"] = rloc_entry.rloc_name
        rloc["uptime"] = lisp_print_elapsed(rloc_entry.uptime)
        rloc["upriority"] = str(rloc_entry.priority)
        rloc["uweight"] = str(rloc_entry.weight)
        rloc["mpriority"] = str(rloc_entry.mpriority)
        rloc["mweight"] = str(rloc_entry.mweight)

        rloc_set.append(rloc)
    #endfor
    entry["registered-rlocs"] = rloc_set

    data.append(entry)
    return([True, data])
def lisp_process_api_site_cache_entry(parms):
    """
    Look up one site-cache entry described by API parms ("instance-id",
    "eid-prefix", optional "group-prefix") and return its serialized
    form as a list (empty when not found).
    """
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Build EID (and optional group) from the request parameters.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])

    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])
    #endif

    data = []
    se = lisp_site_eid_lookup(eid, group, False)
    if (se): lisp_gather_site_cache_data(se, data)
    return(data)
def lisp_get_interface_instance_id(device, source_eid):
    """
    Return the instance-id to use for packets arriving on 'device'.

    Falls back to lisp_default_iid when the interface is unknown or has
    no instance-id configured. When a source EID is supplied, the
    longest-matching multi-tenant EID-prefix on this device overrides
    the interface's own instance-id.
    """
    interface = None
    if (device in lisp_myinterfaces):
        interface = lisp_myinterfaces[device]
    #endif

    #
    # Unknown interface or no configured instance-id — use the default.
    #
    if (interface == None or interface.instance_id == None):
        return(lisp_default_iid)
    #endif

    iid = interface.get_instance_id()
    if (source_eid == None): return(iid)

    #
    # Find the most-specific multi-tenant prefix on this device that
    # covers the source EID. The EID's instance-id is temporarily set
    # to each candidate's IID for the longest-match test and restored
    # afterwards.
    #
    saved_iid = source_eid.instance_id
    best = None
    for interface in lisp_multi_tenant_interfaces:
        if (interface.device != device): continue
        tenant_prefix = interface.multi_tenant_eid
        source_eid.instance_id = tenant_prefix.instance_id
        if (source_eid.is_more_specific(tenant_prefix) == False): continue
        if (best == None or best.multi_tenant_eid.mask_len < tenant_prefix.mask_len):
            best = interface
        #endif
    #endfor
    source_eid.instance_id = saved_iid

    if (best == None): return(iid)
    return(best.get_instance_id())
def lisp_allow_dynamic_eid(device, eid):
    """
    If 'eid' matches a configured dynamic-EID prefix on 'device',
    return the device name to use for it (the interface's configured
    dynamic-eid device when set, otherwise 'device'); else None.
    """
    if (device not in lisp_myinterfaces): return(None)

    interface = lisp_myinterfaces[device]
    return_device = device if interface.dynamic_eid_device == None else interface.dynamic_eid_device

    if (interface.does_dynamic_eid_match(eid)): return(return_device)
    return(None)
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
    """
    (Re)arm the periodic RLOC-probe timer.

    Cancels any outstanding timer and schedules
    lisp_process_rloc_probe_timer(lisp_sockets) to fire after
    'interval' seconds, storing the new timer in the module global
    lisp_rloc_probe_timer.
    """
    global lisp_rloc_probe_timer

    # Only one probe timer may be outstanding at a time.
    if lisp_rloc_probe_timer != None:
        lisp_rloc_probe_timer.cancel()

    timer = threading.Timer(interval, lisp_process_rloc_probe_timer,
        [lisp_sockets])
    lisp_rloc_probe_timer = timer
    timer.start()
    return
if 16 - 16: i1IIi
if 86 - 86: OoOoOO00 - iII111i - Oo0Ooo
if 33 - 33: Ii1I - OoO0O00
if 15 - 15: O0 . iIii1I11I1II1 - I1Ii111 + O0 + ooOoO0o / I1IiiI
if 8 - 8: iII111i % O0 - OoOoOO00
if 49 - 49: oO0o - OOooOOo / Ii1I / I1Ii111 . o0oOOo0O0Ooo . iII111i
if 58 - 58: IiII + Ii1I
def lisp_show_rloc_probe_list():
    """
    Log the contents of the global lisp_rloc_probe_list for debugging.

    For each RLOC key, prints every (rloc, eid, group) tuple registered
    against it: the rloc object's id, the EID and group prefixes, and
    the RLOC's translated port.
    """
    lprint(bold("----- RLOC-probe-list -----", False))

    for probe_key in lisp_rloc_probe_list:
        for rloc, eid, group in lisp_rloc_probe_list[probe_key]:
            if rloc is lisp_rloc_probe_list[probe_key][0][0]:
                pass
        lprint("RLOC {}:".format(probe_key))
        for rloc, eid, group in lisp_rloc_probe_list[probe_key]:
            lprint(" [{}, {}, {}, {}]".format(hex(id(rloc)), eid.print_prefix(),
                group.print_prefix(), rloc.translated_port))

    lprint(bold("---------------------------", False))
    return
if 50 - 50: IiII - Ii1I % iIii1I11I1II1
if 60 - 60: o0oOOo0O0Ooo - Oo0Ooo
if 92 - 92: OoOoOO00 + IiII . OoO0O00 % iII111i / II111iiii / I11i
if 62 - 62: I1ii11iIi11i
if 100 - 100: iII111i / ooOoO0o / IiII % II111iiii
if 6 - 6: OoooooooOO - I1IiiI + OoooooooOO
if 89 - 89: oO0o % Oo0Ooo . O0 . ooOoO0o
if 46 - 46: IiII * I11i - OoO0O00 - Ii1I
if 93 - 93: iIii1I11I1II1 / o0oOOo0O0Ooo - I11i - OOooOOo % ooOoO0o
def lisp_mark_rlocs_for_other_eids(eid_list):
    """
    Mark the RLOCs of every entry after the first in 'eid_list' as
    unreachable (the caller has already handled the first entry),
    log an "unreachable" message for every affected EID, and push the
    updated map-cache entries to the other process via IPC.

    Each element of eid_list is a (rloc, eid, group) tuple.
    """
    rloc, eid, group = eid_list[0]
    eid_names = [lisp_print_eid_tuple(eid, group)]

    for rloc, eid, group in eid_list[1::]:
        rloc.state = LISP_RLOC_UNREACH_STATE
        rloc.last_state_change = lisp_get_timestamp()
        eid_names.append(lisp_print_eid_tuple(eid, group))

    # After the loop 'rloc' is the last entry's RLOC (or the first
    # entry's when the list has one element) — its address is used in
    # all log lines, matching the original behavior.
    unreach = bold("unreachable", False)
    rloc_color = red(rloc.rloc.print_address_no_iid(), False)

    for name in eid_names:
        eid = green(name, False)
        lprint("RLOC {} went {} for EID {}".format(rloc_color, unreach, eid))

    # Tell the other lisp process about the state change for each EID's
    # map-cache entry.
    for rloc, eid, group in eid_list:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if mc:
            lisp_write_ipc_map_cache(True, mc)
    return
if 85 - 85: O0 * i1IIi
if 29 - 29: i11iIiiIii
if 34 - 34: OoOoOO00
if 17 - 17: oO0o * OoOoOO00 % OoO0O00 % I1IiiI * I11i
if 78 - 78: OoooooooOO . I1Ii111 + Ii1I - II111iiii - IiII / iIii1I11I1II1
if 92 - 92: Ii1I
if 34 - 34: OOooOOo * OoooooooOO / I1ii11iIi11i
if 41 - 41: i1IIi
if 75 - 75: o0oOOo0O0Ooo . I1Ii111 - I1Ii111 % Ii1I * OoooooooOO
if 99 - 99: OOooOOo + o0oOOo0O0Ooo - OOooOOo . i1IIi
def lisp_process_rloc_probe_timer ( lisp_sockets ) :
# Periodic RLOC-probe driver. Re-arms the probe timer, then walks the
# global lisp_rloc_probe_list and sends a Map-Request RLOC-probe to each
# RLOC that is not down, suppressed, duplicated, or already being probed.
lisp_set_exception ( )
if 86 - 86: Ii1I % oO0o - i11iIiiIii - O0 + IiII + iII111i
# Re-arm the timer first so probing repeats every LISP_RLOC_PROBE_INTERVAL.
lisp_start_rloc_probe_timer ( LISP_RLOC_PROBE_INTERVAL , lisp_sockets )
if ( lisp_rloc_probing == False ) : return
if 100 - 100: OoO0O00 . Oo0Ooo
if 29 - 29: OoO0O00
if 34 - 34: O0 - o0oOOo0O0Ooo % OOooOOo . OoO0O00 % IiII
if 63 - 63: O0 % iIii1I11I1II1 . o0oOOo0O0Ooo . I1IiiI * Ii1I % i1IIi
# Optionally dump the probe list for debugging.
if ( lisp_print_rloc_probe_list ) : lisp_show_rloc_probe_list ( )
if 47 - 47: II111iiii * I1ii11iIi11i
if 70 - 70: I1ii11iIi11i - o0oOOo0O0Ooo
if 71 - 71: I1ii11iIi11i * i1IIi
if 67 - 67: I1ii11iIi11i % OoOoOO00 . iII111i / Ii1I . I1IiiI
# Snapshot of the default-route next-hops, used below to declare RLOCs
# with a stale rloc_next_hop unreachable.
I1ii1I = lisp_get_default_route_next_hops ( )
if 54 - 54: iIii1I11I1II1 + iII111i % OoOoOO00 % OOooOOo
lprint ( "---------- Start RLOC Probing for {} entries ----------" . format ( len ( lisp_rloc_probe_list ) ) )
if 67 - 67: iII111i . II111iiii - I1IiiI / iII111i . Ii1I
if 42 - 42: I1IiiI % I1Ii111 % iII111i + iII111i
if 71 - 71: Oo0Ooo / OoOoOO00 - I1ii11iIi11i
if 32 - 32: iII111i
if 99 - 99: o0oOOo0O0Ooo . oO0o
# O0oo0oOo counts probes sent (used to pace sends); ooIiIII11IIIi1 is the
# bolded log tag.
O0oo0oOo = 0
ooIiIII11IIIi1 = bold ( "RLOC-probe" , False )
for iIiI1iIiII1 in list ( lisp_rloc_probe_list . values ( ) ) :
if 56 - 56: I11i % OoOoOO00 - OoO0O00
if 31 - 31: iII111i % i11iIiiIii - Ii1I / OOooOOo - I1Ii111
if 60 - 60: o0oOOo0O0Ooo + Oo0Ooo . O0
if 51 - 51: i11iIiiIii / iIii1I11I1II1 . I1IiiI - Ii1I * I1Ii111 . iII111i
if 72 - 72: Ii1I . I11i / i1IIi % i1IIi + I1ii11iIi11i
# OOOO0oO00 remembers the last probed RLOC so duplicates (same
# translated port and rloc-name) for other EIDs can be suppressed.
OOOO0oO00 = None
for Oo0ooo0OO0O , i1I1I1IIIi11 , o0o0Oo0o0oOo in iIiI1iIiII1 :
O0O0 = Oo0ooo0OO0O . rloc . print_address_no_iid ( )
if 6 - 6: IiII + I11i / Ii1I / Oo0Ooo - oO0o
if 31 - 31: i11iIiiIii % oO0o + ooOoO0o - i1IIi
if 87 - 87: IiII + oO0o
if 87 - 87: ooOoO0o
# Skip probing for gleaned EIDs when gleaning policy says not to
# RLOC-probe them.
OOOi11i , i1i11iIIii111 , II11iiiII1Ii = lisp_allow_gleaning ( i1I1I1IIIi11 , None , Oo0ooo0OO0O )
if ( OOOi11i and i1i11iIIii111 == False ) :
oO0ooOOO = green ( i1I1I1IIIi11 . print_address ( ) , False )
O0O0 += ":{}" . format ( Oo0ooo0OO0O . translated_port )
lprint ( "Suppress probe to RLOC {} for gleaned EID {}" . format ( red ( O0O0 , False ) , oO0ooOOO ) )
if 74 - 74: I1ii11iIi11i * I1Ii111 % OoOoOO00 + oO0o
continue
if 33 - 33: I1Ii111
if 96 - 96: i1IIi
if 52 - 52: OoO0O00 * Ii1I + OOooOOo + ooOoO0o * OoooooooOO
if 34 - 34: I1Ii111 . I1Ii111 * ooOoO0o % OoOoOO00
if 71 - 71: I1Ii111 - I1Ii111
if 13 - 13: iII111i + I1ii11iIi11i - oO0o / IiII * i1IIi * Oo0Ooo
if 65 - 65: Ii1I - OOooOOo % O0 * I1ii11iIi11i . II111iiii
# Administratively down RLOCs are never probed.
if ( Oo0ooo0OO0O . down_state ( ) ) : continue
if 59 - 59: O0 . O0 / i11iIiiIii * Oo0Ooo . I11i . Ii1I
if 89 - 89: O0 + OoO0O00
if 3 - 3: Oo0Ooo * OoooooooOO * oO0o % OoOoOO00 * OoOoOO00 . ooOoO0o
if 16 - 16: ooOoO0o / o0oOOo0O0Ooo - O0 * I1IiiI
if 13 - 13: iII111i . iII111i % O0 % o0oOOo0O0Ooo
if 99 - 99: OoO0O00 - OoOoOO00 + OoO0O00
if 67 - 67: I1Ii111
if 31 - 31: OoO0O00 * Oo0Ooo % O0 * II111iiii + ooOoO0o * I1IiiI
if 77 - 77: ooOoO0o
if 98 - 98: I1Ii111 + I1ii11iIi11i % OoO0O00 * Ii1I + iII111i
if 6 - 6: iII111i / iII111i . i11iIiiIii
# If this RLOC matches the previously probed one (same translated
# port and rloc-name), copy probe state and suppress the duplicate.
if ( OOOO0oO00 ) :
Oo0ooo0OO0O . last_rloc_probe_nonce = OOOO0oO00 . last_rloc_probe_nonce
if 12 - 12: I11i - OoO0O00
if ( OOOO0oO00 . translated_port == Oo0ooo0OO0O . translated_port and OOOO0oO00 . rloc_name == Oo0ooo0OO0O . rloc_name ) :
if 68 - 68: IiII - OoOoOO00
oO0ooOOO = green ( lisp_print_eid_tuple ( i1I1I1IIIi11 , o0o0Oo0o0oOo ) , False )
lprint ( "Suppress probe to duplicate RLOC {} for {}" . format ( red ( O0O0 , False ) , oO0ooOOO ) )
if 22 - 22: i1IIi . IiII
if 8 - 8: IiII % o0oOOo0O0Ooo . i11iIiiIii
if 69 - 69: I1Ii111 / Ii1I - ooOoO0o
if 38 - 38: II111iiii % OoooooooOO / OoooooooOO . Ii1I . Ii1I
if 13 - 13: oO0o - i1IIi / i1IIi + OoooooooOO
if 57 - 57: OoooooooOO / O0 + I1ii11iIi11i % I11i * oO0o / Ii1I
Oo0ooo0OO0O . last_rloc_probe = OOOO0oO00 . last_rloc_probe
continue
if 49 - 49: I1IiiI * ooOoO0o * OOooOOo + OoO0O00 + ooOoO0o
if 42 - 42: i1IIi . OoO0O00 % iII111i
if 57 - 57: I1ii11iIi11i / I1IiiI
# Walk the RLOC chain (next_rloc links) starting at this RLOC.
# IIi1I111I holds a saved host-route next-hop; I1Ii1i111I is the
# RLOC currently being considered in the chain.
IIi1I111I = None
I1Ii1i111I = None
while ( True ) :
I1Ii1i111I = Oo0ooo0OO0O if I1Ii1i111I == None else I1Ii1i111I . next_rloc
if ( I1Ii1i111I == None ) : break
if 69 - 69: iII111i - iII111i . OoO0O00 / oO0o - OoO0O00 + I1Ii111
if 98 - 98: iII111i . oO0o - O0 % I1IiiI . I1ii11iIi11i / i1IIi
if 72 - 72: I1IiiI / Oo0Ooo % IiII - O0 / O0 * O0
if 83 - 83: O0 / I1Ii111 - OoooooooOO
if 42 - 42: Ii1I / i1IIi - IiII / I1Ii111
# When the RLOC is reached through a specific next-hop that is
# no longer one of the default-route next-hops, declare it
# unreachable and move on.
if ( I1Ii1i111I . rloc_next_hop != None ) :
if ( I1Ii1i111I . rloc_next_hop not in I1ii1I ) :
if ( I1Ii1i111I . up_state ( ) ) :
IiI11I111 , Ii1i1Ii1Ii1i = I1Ii1i111I . rloc_next_hop
I1Ii1i111I . state = LISP_RLOC_UNREACH_STATE
I1Ii1i111I . last_state_change = lisp_get_timestamp ( )
lisp_update_rtr_updown ( I1Ii1i111I . rloc , False )
if 39 - 39: OoooooooOO
IiiI1 = bold ( "unreachable" , False )
lprint ( "Next-hop {}({}) for RLOC {} is {}" . format ( Ii1i1Ii1Ii1i , IiI11I111 ,
red ( O0O0 , False ) , IiiI1 ) )
continue
if 4 - 4: iIii1I11I1II1 - Oo0Ooo / OOooOOo % OoooooooOO . Oo0Ooo - Oo0Ooo
if 41 - 41: II111iiii . o0oOOo0O0Ooo
if 92 - 92: Ii1I - O0 - i11iIiiIii + IiII % I1Ii111 + II111iiii
if 71 - 71: ooOoO0o * I1Ii111 + i11iIiiIii + i1IIi . I1IiiI
if 15 - 15: OoO0O00
if 37 - 37: OoO0O00 . OoooooooOO - OOooOOo
# If a probe is already outstanding (sent less than one probe
# interval ago) to an unreachable RLOC, keep waiting.
IiIiIi = I1Ii1i111I . last_rloc_probe
IIiiiI = 0 if IiIiIi == None else time . time ( ) - IiIiIi
if ( I1Ii1i111I . unreach_state ( ) and IIiiiI < LISP_RLOC_PROBE_INTERVAL ) :
lprint ( "Waiting for probe-reply from RLOC {}" . format ( red ( O0O0 , False ) ) )
if 96 - 96: I1IiiI / iIii1I11I1II1 / O0 / I1Ii111
continue
if 58 - 58: OoOoOO00 * i1IIi
if 20 - 20: IiII
if 81 - 81: I1Ii111 . i1IIi / o0oOOo0O0Ooo
if 30 - 30: i11iIiiIii . I1IiiI
if 5 - 5: Ii1I / O0 + iIii1I11I1II1
if 22 - 22: ooOoO0o . ooOoO0o * OOooOOo % OoOoOO00
# Nonce-echo handling: a timed-out request-nonce means the RLOC
# failed the echo and is marked unreachable.
oO0 = lisp_get_echo_nonce ( None , O0O0 )
if ( oO0 and oO0 . request_nonce_timeout ( ) ) :
I1Ii1i111I . state = LISP_RLOC_NO_ECHOED_NONCE_STATE
I1Ii1i111I . last_state_change = lisp_get_timestamp ( )
IiiI1 = bold ( "unreachable" , False )
lprint ( "RLOC {} went {}, nonce-echo failed" . format ( red ( O0O0 , False ) , IiiI1 ) )
if 51 - 51: OoOoOO00 . oO0o - OoOoOO00
lisp_update_rtr_updown ( I1Ii1i111I . rloc , False )
continue
if 79 - 79: iII111i
if 71 - 71: i1IIi / OoO0O00 / OOooOOo + I1Ii111
if 80 - 80: Oo0Ooo . iIii1I11I1II1 . OoooooooOO % iII111i . oO0o
if 10 - 10: i11iIiiIii * OoooooooOO . i11iIiiIii
if 35 - 35: OOooOOo * OOooOOo + o0oOOo0O0Ooo / i1IIi - I11i
if 12 - 12: I1ii11iIi11i - i11iIiiIii + I1IiiI . Oo0Ooo
# A recent nonce-echo proves liveness, so no probe is needed.
if ( oO0 and oO0 . recently_echoed ( ) ) :
lprint ( ( "Suppress RLOC-probe to {}, nonce-echo " + "received" ) . format ( red ( O0O0 , False ) ) )
if 26 - 26: oO0o + I1Ii111 + IiII * o0oOOo0O0Ooo . oO0o
continue
if 95 - 95: OoOoOO00 . I1Ii111 / Ii1I . I1Ii111 % OoO0O00
if 16 - 16: Ii1I / I1IiiI / I1IiiI - OoooooooOO
if 13 - 13: OOooOOo / OoooooooOO
if 7 - 7: II111iiii - ooOoO0o
if 72 - 72: Ii1I
if 27 - 27: ooOoO0o / IiII + OoO0O00 + Ii1I % I1Ii111
# If no reply has arrived within LISP_RLOC_PROBE_REPLY_WAIT of
# the last probe, transition the RLOC (and its sibling EIDs'
# RLOCs) to unreachable, then probe it again below.
if ( I1Ii1i111I . last_rloc_probe != None ) :
IiIiIi = I1Ii1i111I . last_rloc_probe_reply
if ( IiIiIi == None ) : IiIiIi = 0
IIiiiI = time . time ( ) - IiIiIi
if ( I1Ii1i111I . up_state ( ) and IIiiiI >= LISP_RLOC_PROBE_REPLY_WAIT ) :
if 86 - 86: O0 % i11iIiiIii - Ii1I * oO0o % OOooOOo * i1IIi
I1Ii1i111I . state = LISP_RLOC_UNREACH_STATE
I1Ii1i111I . last_state_change = lisp_get_timestamp ( )
lisp_update_rtr_updown ( I1Ii1i111I . rloc , False )
IiiI1 = bold ( "unreachable" , False )
lprint ( "RLOC {} went {}, probe it" . format ( red ( O0O0 , False ) , IiiI1 ) )
if 87 - 87: II111iiii
if 53 - 53: OoOoOO00 * i11iIiiIii / I1Ii111
lisp_mark_rlocs_for_other_eids ( iIiI1iIiII1 )
if 100 - 100: ooOoO0o + I1IiiI * oO0o + ooOoO0o
if 24 - 24: i11iIiiIii + ooOoO0o
if 80 - 80: IiII % I11i % oO0o
# Record probe time and build the log line pieces.
I1Ii1i111I . last_rloc_probe = lisp_get_timestamp ( )
if 97 - 97: i1IIi * i11iIiiIii / Ii1I - I1IiiI % IiII
oo0Oo00oo0OoO0O0 = "" if I1Ii1i111I . unreach_state ( ) == False else " unreachable"
if 38 - 38: IiII . OoO0O00 * IiII % ooOoO0o * Ii1I / ooOoO0o
if 56 - 56: O0 / OoooooooOO / OoOoOO00
if 19 - 19: o0oOOo0O0Ooo / i11iIiiIii . i1IIi / Oo0Ooo / I1Ii111
if 83 - 83: iII111i % o0oOOo0O0Ooo * OoOoOO00
if 49 - 49: II111iiii / OoO0O00
if 69 - 69: Ii1I * II111iiii
if 24 - 24: I1Ii111 * I1ii11iIi11i . OOooOOo . I1IiiI - I1ii11iIi11i
# For next-hop RLOCs, temporarily install a host route via the
# configured next-hop so the probe egresses the right interface.
OOOOOO0O00O00 = ""
Ii1i1Ii1Ii1i = None
if ( I1Ii1i111I . rloc_next_hop != None ) :
IiI11I111 , Ii1i1Ii1Ii1i = I1Ii1i111I . rloc_next_hop
lisp_install_host_route ( O0O0 , Ii1i1Ii1Ii1i , True )
OOOOOO0O00O00 = ", send on nh {}({})" . format ( Ii1i1Ii1Ii1i , IiI11I111 )
if 94 - 94: iII111i
if 69 - 69: OoO0O00 . Ii1I / Oo0Ooo - iIii1I11I1II1 / OoooooooOO
if 86 - 86: II111iiii % Oo0Ooo % I1IiiI / IiII * Oo0Ooo
if 67 - 67: i11iIiiIii % OoOoOO00 - oO0o
if 28 - 28: I1Ii111 . I1ii11iIi11i % Ii1I . i1IIi + I11i
ii1111Ii = I1Ii1i111I . print_rloc_probe_rtt ( )
O0oOoO0oooO = O0O0
if ( I1Ii1i111I . translated_port != 0 ) :
O0oOoO0oooO += ":{}" . format ( I1Ii1i111I . translated_port )
if 23 - 23: IiII + oO0o + o0oOOo0O0Ooo . I1ii11iIi11i / i11iIiiIii + iIii1I11I1II1
O0oOoO0oooO = red ( O0oOoO0oooO , False )
if ( I1Ii1i111I . rloc_name != None ) :
O0oOoO0oooO += " (" + blue ( I1Ii1i111I . rloc_name , False ) + ")"
if 74 - 74: I11i % OOooOOo
lprint ( "Send {}{} {}, last rtt: {}{}" . format ( ooIiIII11IIIi1 , oo0Oo00oo0OoO0O0 ,
O0oOoO0oooO , ii1111Ii , OOOOOO0O00O00 ) )
if 57 - 57: O0 + I1IiiI + i11iIiiIii
if 90 - 90: I1ii11iIi11i . OoO0O00 * iIii1I11I1II1 - Oo0Ooo
if 28 - 28: I1IiiI . ooOoO0o - ooOoO0o * OOooOOo . IiII
if 16 - 16: iIii1I11I1II1 % i11iIiiIii / Ii1I % iIii1I11I1II1 / iII111i
if 27 - 27: II111iiii * OoooooooOO / Oo0Ooo % O0
if 41 - 41: oO0o / iIii1I11I1II1 % iII111i - I1Ii111 % I11i * i11iIiiIii
if 21 - 21: O0
if 14 - 14: IiII / I1ii11iIi11i + Ii1I
# Save and remove any existing host route for the RLOC address so
# the probe takes the intended path; it is restored further below.
if ( I1Ii1i111I . rloc_next_hop != None ) :
IIi1I111I = lisp_get_host_route_next_hop ( O0O0 )
if ( IIi1I111I ) : lisp_install_host_route ( O0O0 , IIi1I111I , False )
if 48 - 48: I1Ii111 * oO0o / o0oOOo0O0Ooo * OoOoOO00 * ooOoO0o
if 38 - 38: I1IiiI * Ii1I + Oo0Ooo - OoooooooOO
if 63 - 63: I1ii11iIi11i
if 99 - 99: I1Ii111 % oO0o - II111iiii . ooOoO0o
if 26 - 26: I1ii11iIi11i * iII111i . OoooooooOO - Oo0Ooo - IiII
if 6 - 6: OOooOOo - I1IiiI . IiII
# A null RLOC address inherits the parent entry's RLOC address.
if ( I1Ii1i111I . rloc . is_null ( ) ) :
I1Ii1i111I . rloc . copy_address ( Oo0ooo0OO0O . rloc )
if 40 - 40: II111iiii
if 13 - 13: OoOoOO00
if 23 - 23: Oo0Ooo / II111iiii % OOooOOo % iII111i - Oo0Ooo / OoO0O00
if 7 - 7: Ii1I / I11i / II111iiii % I11i * I11i + iIii1I11I1II1
if 6 - 6: iIii1I11I1II1 * oO0o - iIii1I11I1II1 . O0 . O0
# Send the RLOC-probe Map-Request; for (S,G) entries the source
# EID and group are passed separately.
OoO0OOOooo = None if ( o0o0Oo0o0oOo . is_null ( ) ) else i1I1I1IIIi11
O00oOoOoO0ooO = i1I1I1IIIi11 if ( o0o0Oo0o0oOo . is_null ( ) ) else o0o0Oo0o0oOo
lisp_send_map_request ( lisp_sockets , 0 , OoO0OOOooo , O00oOoOoO0ooO , I1Ii1i111I )
OOOO0oO00 = Oo0ooo0OO0O
if 33 - 33: O0
if 14 - 14: i11iIiiIii . I1Ii111 % I1ii11iIi11i . I1ii11iIi11i % IiII
if 93 - 93: iIii1I11I1II1 / IiII
if 91 - 91: i11iIiiIii % ooOoO0o - iII111i * I1Ii111 . i11iIiiIii
# Remove the temporary next-hop host route ...
if ( Ii1i1Ii1Ii1i ) : lisp_install_host_route ( O0O0 , Ii1i1Ii1Ii1i , False )
if 1 - 1: IiII + iIii1I11I1II1 * I1ii11iIi11i - IiII - i1IIi
if 75 - 75: II111iiii * o0oOOo0O0Ooo / I1ii11iIi11i
if 46 - 46: OOooOOo
if 67 - 67: OoO0O00 . I11i % OOooOOo + Oo0Ooo
if 40 - 40: OoO0O00 / I11i % iIii1I11I1II1 - ooOoO0o
# ... and restore the host route that was saved earlier.
if ( IIi1I111I ) : lisp_install_host_route ( O0O0 , IIi1I111I , True )
if 51 - 51: Oo0Ooo % iIii1I11I1II1 % oO0o + o0oOOo0O0Ooo
if 32 - 32: I1Ii111 * I1IiiI + Ii1I
if 30 - 30: OoooooooOO / I1IiiI . iIii1I11I1II1 / ooOoO0o
if 20 - 20: OoooooooOO * OOooOOo
# Pace probe transmission: pause 20ms after every 10 probes.
O0oo0oOo += 1
if ( ( O0oo0oOo % 10 ) == 0 ) : time . sleep ( 0.020 )
if 77 - 77: Ii1I - OoooooooOO . OoOoOO00
if 93 - 93: OoooooooOO / I1Ii111
if 91 - 91: I1Ii111
lprint ( "---------- End RLOC Probing ----------" )
return
if 18 - 18: ooOoO0o * I11i
if 53 - 53: I11i . i11iIiiIii - iIii1I11I1II1 / I1Ii111
if 86 - 86: i1IIi % OoO0O00 - OoooooooOO
if 63 - 63: o0oOOo0O0Ooo . iIii1I11I1II1 % IiII * i11iIiiIii
if 70 - 70: iIii1I11I1II1
if 12 - 12: OoOoOO00 / o0oOOo0O0Ooo - I1ii11iIi11i + oO0o + O0
if 9 - 9: I1ii11iIi11i * OoooooooOO . O0 . ooOoO0o * i11iIiiIii / i1IIi
if 38 - 38: OoOoOO00 . OoooooooOO % I1ii11iIi11i . oO0o % oO0o
def lisp_update_rtr_updown(rtr, updown):
    """
    Tell the lisp-etr process (via the lisp-itr IPC channel) that an
    RTR has changed reachability state.

    'rtr' is the RTR's address object; 'updown' is truthy for up,
    falsy for down. Only runs when this process is an ITR, when
    register-all-rtrs is off, and when the RTR is in lisp_rtr_list.
    """
    global lisp_ipc_socket

    # Only an ITR reports RTR state this way.
    if lisp_i_am_itr == False:
        return

    # When every RTR is registered unconditionally there is nothing
    # to report.
    if lisp_register_all_rtrs:
        return

    rtr_str = rtr.print_address_no_iid()

    # Ignore RTRs we are not tracking.
    if rtr_str not in lisp_rtr_list:
        return

    updown = "up" if updown else "down"
    lprint("Send ETR IPC message, RTR {} has done {}".format(
        red(rtr_str, False), bold(updown, False)))

    # Build and send the IPC message.
    ipc = "rtr%{}%{}".format(rtr_str, updown)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
    return
if 8 - 8: i11iIiiIii - iIii1I11I1II1
if 73 - 73: OoOoOO00
if 25 - 25: iII111i / oO0o
if 61 - 61: OoooooooOO . Ii1I . I11i + oO0o
if 73 - 73: II111iiii % i11iIiiIii * I1ii11iIi11i + O0
if 61 - 61: I1IiiI / OOooOOo
if 67 - 67: OoOoOO00
def lisp_process_rloc_probe_reply ( rloc_entry , source , port , map_reply , ttl ,
mrloc , rloc_name ) :
# Process an incoming RLOC-probe Map-Reply. Finds the matching entries
# in the global lisp_rloc_probe_list (trying the reply's RLOC address,
# then RLOC:port, then the packet source address, then source:port) and
# calls process_rloc_probe_reply() on each. 'mrloc' (when not None) is
# a multicast RLOC whose per-member probe state is tracked separately.
I1Ii1i111I = rloc_entry . rloc
oOooo0oOOOO = map_reply . nonce
I11IIIiiii1ii = map_reply . hop_count
ooIiIII11IIIi1 = bold ( "RLOC-probe reply" , False )
OoOo0oo = I1Ii1i111I . print_address_no_iid ( )
o00ooOo0OOO = source . print_address_no_iid ( )
I1iIiI = lisp_rloc_probe_list
# Optional JSON payload carried in the replying RLOC-record.
I11ii1I11ii = rloc_entry . json . json_string if rloc_entry . json else None
Oo0OO0000oooo = lisp_get_timestamp ( )
if 86 - 86: II111iiii - OOooOOo + o0oOOo0O0Ooo
if 26 - 26: oO0o / I1ii11iIi11i - oO0o
if 9 - 9: ooOoO0o * iIii1I11I1II1 * OoooooooOO
if 13 - 13: iII111i . i11iIiiIii * o0oOOo0O0Ooo . iII111i
if 96 - 96: Ii1I
if 90 - 90: II111iiii
# Multicast RLOC case: keep a per-member copy of the probe state in
# mrloc.multicast_rloc_probe_list, keyed by the member RLOC address,
# and update both the member copy and the parent mrloc.
if ( mrloc != None ) :
Oo00i1i11iI1II = mrloc . rloc . print_address_no_iid ( )
if ( OoOo0oo not in mrloc . multicast_rloc_probe_list ) :
oo0o0oOo = lisp_rloc ( )
oo0o0oOo = copy . deepcopy ( mrloc )
oo0o0oOo . rloc . copy_address ( I1Ii1i111I )
oo0o0oOo . multicast_rloc_probe_list = { }
mrloc . multicast_rloc_probe_list [ OoOo0oo ] = oo0o0oOo
if 17 - 17: oO0o
oo0o0oOo = mrloc . multicast_rloc_probe_list [ OoOo0oo ]
oo0o0oOo . rloc_name = rloc_name
oo0o0oOo . last_rloc_probe_nonce = mrloc . last_rloc_probe_nonce
oo0o0oOo . last_rloc_probe = mrloc . last_rloc_probe
O00o00o00OO0 , i1I1I1IIIi11 , o0o0Oo0o0oOo = lisp_rloc_probe_list [ Oo00i1i11iI1II ] [ 0 ]
oo0o0oOo . process_rloc_probe_reply ( Oo0OO0000oooo , oOooo0oOOOO , i1I1I1IIIi11 , o0o0Oo0o0oOo , I11IIIiiii1ii , ttl , I11ii1I11ii )
mrloc . process_rloc_probe_reply ( Oo0OO0000oooo , oOooo0oOOOO , i1I1I1IIIi11 , o0o0Oo0o0oOo , I11IIIiiii1ii , ttl , I11ii1I11ii )
return
if 27 - 27: o0oOOo0O0Ooo
if 91 - 91: ooOoO0o
if 47 - 47: II111iiii + I11i + ooOoO0o % Oo0Ooo / iII111i
if 9 - 9: O0 + IiII
if 69 - 69: I1IiiI
if 11 - 11: I11i % I1Ii111 + O0 . Ii1I . I1ii11iIi11i % I1Ii111
if 28 - 28: IiII . o0oOOo0O0Ooo + iII111i - OoOoOO00 / OOooOOo
# Probe-list key search: RLOC, then RLOC:port, then source address,
# then source:port. If none match, the reply is unsolicited.
IiI = OoOo0oo
if ( IiI not in I1iIiI ) :
IiI += ":" + str ( port )
if ( IiI not in I1iIiI ) :
IiI = o00ooOo0OOO
if ( IiI not in I1iIiI ) :
IiI += ":" + str ( port )
lprint ( " Received unsolicited {} from {}/{}, port {}" . format ( ooIiIII11IIIi1 , red ( OoOo0oo , False ) , red ( o00ooOo0OOO ,
# IiII % OoOoOO00 * II111iiii + Ii1I - OOooOOo / IiII
False ) , port ) )
return
if 92 - 92: I1IiiI . OoooooooOO % OOooOOo + ooOoO0o + iIii1I11I1II1 % I11i
if 41 - 41: IiII . iIii1I11I1II1
if 74 - 74: I1ii11iIi11i . OoO0O00
if 23 - 23: IiII + oO0o
if 48 - 48: iII111i * OoO0O00 * OoOoOO00 * I11i
if 74 - 74: ooOoO0o
if 93 - 93: Oo0Ooo % ooOoO0o
if 38 - 38: II111iiii . I1Ii111 . iIii1I11I1II1 / o0oOOo0O0Ooo
# Apply the reply to every (rloc, eid, group) registered under the
# matched key. An RTR only accepts replies on the RLOC's translated
# port (NAT traversal), skipping mismatched ports.
for I1Ii1i111I , i1I1I1IIIi11 , o0o0Oo0o0oOo in lisp_rloc_probe_list [ IiI ] :
if ( lisp_i_am_rtr ) :
if ( I1Ii1i111I . translated_port != 0 and I1Ii1i111I . translated_port != port ) :
continue
if 6 - 6: ooOoO0o - i1IIi * I1IiiI
if 24 - 24: iIii1I11I1II1 / I1Ii111
I1Ii1i111I . process_rloc_probe_reply ( Oo0OO0000oooo , oOooo0oOOOO , i1I1I1IIIi11 , o0o0Oo0o0oOo , I11IIIiiii1ii , ttl , I11ii1I11ii )
if 16 - 16: OoOoOO00 * I1Ii111 - I1IiiI / I1Ii111
return
if 64 - 64: I1ii11iIi11i . i1IIi % II111iiii % Oo0Ooo + oO0o - I1IiiI
if 24 - 24: IiII . II111iiii . II111iiii . OoOoOO00 . i11iIiiIii
if 11 - 11: Ii1I
if 82 - 82: I11i - i1IIi . Oo0Ooo * I1Ii111
if 44 - 44: iII111i
if 56 - 56: II111iiii / Oo0Ooo % IiII * II111iiii - iIii1I11I1II1 + ooOoO0o
if 33 - 33: o0oOOo0O0Ooo . I11i / I1IiiI
if 29 - 29: o0oOOo0O0Ooo - ooOoO0o
def lisp_db_list_length():
    """
    Return the effective number of database-mapping entries.

    A database entry configured for dynamic-EIDs counts once per
    discovered dynamic EID; otherwise it counts as one. Each entry
    additionally contributes the length of its EID's iid_list.
    """
    total = 0
    for db in lisp_db_list:
        if db.dynamic_eid_configured():
            total += len(db.dynamic_eids)
        else:
            total += 1
        total += len(db.eid.iid_list)

    return total
if 82 - 82: OOooOOo . iIii1I11I1II1 + I1Ii111
if 14 - 14: IiII . i11iIiiIii
if 17 - 17: ooOoO0o % ooOoO0o * oO0o
if 8 - 8: ooOoO0o + OoO0O00 . II111iiii / iIii1I11I1II1 - OOooOOo
if 87 - 87: iIii1I11I1II1 . IiII % I1IiiI . OoO0O00 - I1Ii111
if 53 - 53: I1Ii111 % i11iIiiIii
if 99 - 99: I1IiiI - i1IIi * i11iIiiIii + OoO0O00
if 80 - 80: o0oOOo0O0Ooo . I11i % iIii1I11I1II1 + OoOoOO00
def lisp_is_myeid(eid):
    """
    Return True when 'eid' falls within (is more specific than) any
    configured database-mapping EID-prefix, False otherwise.
    """
    return any(eid.is_more_specific(db.eid) for db in lisp_db_list)
if 71 - 71: I1IiiI + iIii1I11I1II1 + O0 * iII111i % IiII
if 42 - 42: OOooOOo - I1ii11iIi11i
if 93 - 93: I1Ii111 + OOooOOo % ooOoO0o / I1Ii111 % OOooOOo . IiII
if 37 - 37: iII111i * oO0o / oO0o / Ii1I % I11i
if 12 - 12: i11iIiiIii
if 62 - 62: oO0o + OOooOOo + oO0o + I1IiiI
if 10 - 10: IiII - Oo0Ooo % ooOoO0o
if 38 - 38: oO0o * o0oOOo0O0Ooo . I11i % II111iiii / I11i % Ii1I
if 19 - 19: II111iiii / i11iIiiIii * II111iiii + OoOoOO00 - OoOoOO00
def lisp_format_macs(sa, da):
    """
    Format a source/destination MAC pair for display.

    Each argument is a 12-hex-digit string; it is rendered with a dash
    every four characters, and the pair is joined with " -> ",
    e.g. ("001122334455", "aabbccddeeff") -> "0011-2233-4455 -> aabb-ccdd-eeff".
    """
    def _dashed(mac):
        # Insert dashes between the three 4-character groups.
        return "-".join([mac[0:4], mac[4:8], mac[8:12]])

    return "{} -> {}".format(_dashed(sa), _dashed(da))
if 7 - 7: OoOoOO00 - OoO0O00 % OoOoOO00 . I1ii11iIi11i % Oo0Ooo * iII111i
if 90 - 90: IiII - OOooOOo + iIii1I11I1II1
if 88 - 88: ooOoO0o . o0oOOo0O0Ooo . OOooOOo - I11i
if 76 - 76: IiII % I1IiiI . iII111i
if 5 - 5: ooOoO0o . oO0o - OoOoOO00 - OoooooooOO
if 2 - 2: OOooOOo
if 37 - 37: IiII - iIii1I11I1II1 * i11iIiiIii . ooOoO0o
def lisp_get_echo_nonce(rloc, rloc_str):
    """
    Look up the nonce-echo state for an RLOC.

    When 'rloc' is supplied, its printed address (without instance-id)
    overrides 'rloc_str' as the lookup key. Returns the entry from the
    global lisp_nonce_echo_list, or None when nonce echoing is disabled
    or no entry exists.
    """
    if lisp_nonce_echoing == False:
        return None

    if rloc:
        rloc_str = rloc.print_address_no_iid()

    return lisp_nonce_echo_list.get(rloc_str, None)
if 78 - 78: OoO0O00 - i1IIi % I1Ii111
if 87 - 87: I11i
if 37 - 37: iII111i . I1Ii111 - iII111i - I11i - iIii1I11I1II1 - II111iiii
if 80 - 80: I1Ii111 % O0 - IiII / II111iiii + i1IIi
if 4 - 4: OOooOOo + II111iiii
if 1 - 1: OoooooooOO * I1Ii111 - I11i / IiII
if 43 - 43: i11iIiiIii * I1IiiI
if 48 - 48: Oo0Ooo - OOooOOo / iII111i % I1ii11iIi11i . OoOoOO00
def lisp_decode_dist_name(packet):
    """
    Parse a null-terminated distinguished-name from the front of a
    packet buffer.

    packet: bytes beginning with the name.

    Returns a tuple (remaining-bytes-after-terminator, decoded-name)
    on success, or the list [None, None] when the name is malformed:
    no null terminator in the buffer, or a name of 256 or more bytes
    (the historical byte-at-a-time parser gave up after consuming 255
    bytes). May raise UnicodeDecodeError for non-UTF-8 name bytes,
    matching the previous behavior.
    """
    # Single C-level scan instead of the original O(n^2)
    # byte-at-a-time slicing loop; semantics are unchanged.
    index = packet.find(b"\x00")

    # No terminator found, or the name is 256+ bytes long: malformed.
    if index == -1 or index > 255:
        return ([None, None])

    dist_name = packet[0:index].decode()
    return (packet[index + 1::], dist_name)
if 70 - 70: i11iIiiIii - OoO0O00 / i11iIiiIii
if 46 - 46: II111iiii + O0 * OoooooooOO
if 39 - 39: OoooooooOO % II111iiii . o0oOOo0O0Ooo
if 29 - 29: I11i . o0oOOo0O0Ooo . i1IIi . o0oOOo0O0Ooo
if 77 - 77: iIii1I11I1II1 + iIii1I11I1II1
if 52 - 52: I1ii11iIi11i - IiII % I1IiiI % i1IIi
if 98 - 98: I1Ii111 + II111iiii % OoO0O00 % iII111i
if 54 - 54: II111iiii . ooOoO0o . iII111i - I1IiiI
def lisp_write_flow_log(flow_log):
    """
    Append buffered flow records to ./logs/lisp-flow.log.

    flow_log: list of 4-tuples where element [3] is a packet object
    providing print_flow(ts, a, b) and elements [0:3] are its
    arguments. The list is deleted after writing so the (possibly
    large) buffer can be reclaimed.
    """
    count = 0
    # "with" guarantees the file is closed even if print_flow() or
    # write() raises (the original open()/close() pair leaked the
    # descriptor on an exception).
    with open("./logs/lisp-flow.log", "a") as flow_file:
        for entry in flow_log:
            packet = entry[3]
            flow_str = packet.print_flow(entry[0], entry[1], entry[2])
            flow_file.write(flow_str)
            count += 1

    # Drop the reference so the buffer can be garbage collected now.
    del(flow_log)

    count = bold(str(count), False)
    lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
    return
if 80 - 80: I1ii11iIi11i . OoooooooOO / ooOoO0o
if 19 - 19: oO0o
if 97 - 97: IiII
if 36 - 36: II111iiii
if 83 - 83: I11i . ooOoO0o
if 57 - 57: IiII
if 34 - 34: I1ii11iIi11i + i11iIiiIii - I1ii11iIi11i / OoOoOO00 + i1IIi . i11iIiiIii
def lisp_policy_command ( kv_pair ) :
# Process a "lisp policy" configuration command. 'kv_pair' maps each
# command keyword to a list of values, one per "match" sub-clause
# (positionally aligned with the "datetime-range" list). Builds a
# lisp_policy with one lisp_policy_match per clause, fills in the
# match criteria and the policy-wide "set-*" actions, then saves it.
iIIiiIi = lisp_policy ( "" )
II1I1I1IiiI = None
if 73 - 73: OoOoOO00 + OOooOOo * II111iiii . OOooOOo % I1Ii111 % oO0o
# One match clause per entry of the "datetime-range" value list.
oO00O0oO0O = [ ]
for iIi1iIIIiIiI in range ( len ( kv_pair [ "datetime-range" ] ) ) :
oO00O0oO0O . append ( lisp_policy_match ( ) )
if 12 - 12: i1IIi - I1IiiI - OOooOOo - i11iIiiIii % oO0o
if 89 - 89: Ii1I - OOooOOo / ooOoO0o - IiII + iIii1I11I1II1 + OoO0O00
for i11Ii1 in list ( kv_pair . keys ( ) ) :
oOO0 = kv_pair [ i11Ii1 ]
if 48 - 48: II111iiii . OOooOOo . ooOoO0o - iII111i
if 90 - 90: OOooOOo
if 43 - 43: IiII + ooOoO0o
if 4 - 4: i1IIi
# instance-id applies to both the source- and dest-EID of the
# clause, creating wildcard addresses when not yet present.
if ( i11Ii1 == "instance-id" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
if ( O00Ii1II1II . source_eid == None ) :
O00Ii1II1II . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 61 - 61: Ii1I + OoO0O00 - II111iiii
if ( O00Ii1II1II . dest_eid == None ) :
O00Ii1II1II . dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 47 - 47: I1IiiI * O0 + I1ii11iIi11i - OOooOOo
O00Ii1II1II . source_eid . instance_id = int ( oOooii111 )
O00Ii1II1II . dest_eid . instance_id = int ( oOooii111 )
if 24 - 24: i1IIi / i1IIi + I11i * II111iiii / IiII
if 8 - 8: I11i . I11i + I11i % OoooooooOO / ooOoO0o
# source-eid stores the prefix but preserves any instance-id set
# by an earlier "instance-id" value.
if ( i11Ii1 == "source-eid" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
if ( O00Ii1II1II . source_eid == None ) :
O00Ii1II1II . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 25 - 25: I1IiiI / OoO0O00
oooo = O00Ii1II1II . source_eid . instance_id
O00Ii1II1II . source_eid . store_prefix ( oOooii111 )
O00Ii1II1II . source_eid . instance_id = oooo
if 92 - 92: oO0o % I1IiiI / OoO0O00 - I11i
if 36 - 36: i1IIi * iIii1I11I1II1 + I1ii11iIi11i + iII111i - II111iiii
# destination-eid mirrors source-eid handling.
if ( i11Ii1 == "destination-eid" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
if ( O00Ii1II1II . dest_eid == None ) :
O00Ii1II1II . dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 48 - 48: oO0o + OoOoOO00 - OoO0O00 . II111iiii * i11iIiiIii . OoooooooOO
oooo = O00Ii1II1II . dest_eid . instance_id
O00Ii1II1II . dest_eid . store_prefix ( oOooii111 )
O00Ii1II1II . dest_eid . instance_id = oooo
if 37 - 37: OoooooooOO + O0 . I11i % OoOoOO00
if 57 - 57: I1Ii111 . OOooOOo + I1Ii111 . iIii1I11I1II1 / oO0o / O0
if ( i11Ii1 == "source-rloc" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
O00Ii1II1II . source_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
O00Ii1II1II . source_rloc . store_prefix ( oOooii111 )
if 88 - 88: I1Ii111
if 16 - 16: Oo0Ooo . ooOoO0o / OoO0O00 / o0oOOo0O0Ooo . OoooooooOO * OoO0O00
if ( i11Ii1 == "destination-rloc" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
O00Ii1II1II . dest_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
O00Ii1II1II . dest_rloc . store_prefix ( oOooii111 )
if 50 - 50: II111iiii + I11i . OoooooooOO . I1Ii111 - OOooOOo
if 83 - 83: oO0o
if ( i11Ii1 == "rloc-record-name" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
O00Ii1II1II . rloc_record_name = oOooii111
if 100 - 100: I1Ii111 + o0oOOo0O0Ooo * oO0o / oO0o . oO0o + iII111i
if 71 - 71: II111iiii + iII111i + O0 % Oo0Ooo / I1IiiI
if ( i11Ii1 == "geo-name" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
O00Ii1II1II . geo_name = oOooii111
if 52 - 52: Oo0Ooo . I1Ii111 * i1IIi / Oo0Ooo / OoO0O00
if 29 - 29: iII111i
if ( i11Ii1 == "elp-name" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
O00Ii1II1II . elp_name = oOooii111
if 91 - 91: Oo0Ooo - IiII
if 47 - 47: iII111i / OOooOOo + iII111i
if ( i11Ii1 == "rle-name" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
O00Ii1II1II . rle_name = oOooii111
if 69 - 69: I1IiiI . I1ii11iIi11i
if 18 - 18: I11i * I1IiiI
if ( i11Ii1 == "json-name" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
O00Ii1II1II . json_name = oOooii111
if 42 - 42: i1IIi . I1Ii111 - ooOoO0o + I11i / oO0o
if 60 - 60: i1IIi + OoooooooOO % i11iIiiIii / IiII % Oo0Ooo + I1IiiI
# datetime-range values are a concatenated pair of 19-character
# datetimes (lower then upper); both must parse as valid.
if ( i11Ii1 == "datetime-range" ) :
for iIi1iIIIiIiI in range ( len ( oO00O0oO0O ) ) :
oOooii111 = oOO0 [ iIi1iIIIiIiI ]
O00Ii1II1II = oO00O0oO0O [ iIi1iIIIiIiI ]
if ( oOooii111 == "" ) : continue
oOO0O0ooOOOo = lisp_datetime ( oOooii111 [ 0 : 19 ] )
I11iiI1i11I = lisp_datetime ( oOooii111 [ 19 : : ] )
if ( oOO0O0ooOOOo . valid_datetime ( ) and I11iiI1i11I . valid_datetime ( ) ) :
O00Ii1II1II . datetime_lower = oOO0O0ooOOOo
O00Ii1II1II . datetime_upper = I11iiI1i11I
if 87 - 87: Ii1I % OoooooooOO % I1Ii111 * i11iIiiIii * OoOoOO00
if 78 - 78: I11i
if 62 - 62: iIii1I11I1II1 . o0oOOo0O0Ooo . ooOoO0o % oO0o % O0 % oO0o
if 51 - 51: Oo0Ooo / IiII - Oo0Ooo
if 71 - 71: I11i * I1ii11iIi11i * OOooOOo * o0oOOo0O0Ooo
if 53 - 53: I1IiiI % I1IiiI
if 80 - 80: OoO0O00 - i11iIiiIii / iII111i * I1ii11iIi11i / I1IiiI - I1Ii111
# Policy-wide "set-*" action values follow.
if ( i11Ii1 == "set-action" ) :
iIIiiIi . set_action = oOO0
if 85 - 85: IiII
if ( i11Ii1 == "set-record-ttl" ) :
iIIiiIi . set_record_ttl = int ( oOO0 )
if 72 - 72: iII111i * OoOoOO00
# set-instance-id applies to both set-source-eid and set-dest-eid,
# remembered in II1I1I1IiiI so later set-*-eid keys can reapply it.
if ( i11Ii1 == "set-instance-id" ) :
if ( iIIiiIi . set_source_eid == None ) :
iIIiiIi . set_source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 65 - 65: iIii1I11I1II1 / iIii1I11I1II1 % O0 / II111iiii . OOooOOo . O0
if ( iIIiiIi . set_dest_eid == None ) :
iIIiiIi . set_dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 65 - 65: I11i
II1I1I1IiiI = int ( oOO0 )
iIIiiIi . set_source_eid . instance_id = II1I1I1IiiI
iIIiiIi . set_dest_eid . instance_id = II1I1I1IiiI
if 35 - 35: o0oOOo0O0Ooo - i11iIiiIii
if ( i11Ii1 == "set-source-eid" ) :
if ( iIIiiIi . set_source_eid == None ) :
iIIiiIi . set_source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 78 - 78: ooOoO0o - II111iiii - i1IIi
iIIiiIi . set_source_eid . store_prefix ( oOO0 )
if ( II1I1I1IiiI != None ) : iIIiiIi . set_source_eid . instance_id = II1I1I1IiiI
if 18 - 18: OoooooooOO % OoOoOO00 - IiII / oO0o . OOooOOo . I1IiiI
if ( i11Ii1 == "set-destination-eid" ) :
if ( iIIiiIi . set_dest_eid == None ) :
iIIiiIi . set_dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 77 - 77: I1ii11iIi11i . OoO0O00 / OoOoOO00 / O0
iIIiiIi . set_dest_eid . store_prefix ( oOO0 )
if ( II1I1I1IiiI != None ) : iIIiiIi . set_dest_eid . instance_id = II1I1I1IiiI
if 67 - 67: ooOoO0o % I11i % oO0o
if ( i11Ii1 == "set-rloc-address" ) :
iIIiiIi . set_rloc_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
iIIiiIi . set_rloc_address . store_address ( oOO0 )
if 74 - 74: II111iiii
if ( i11Ii1 == "set-rloc-record-name" ) :
iIIiiIi . set_rloc_record_name = oOO0
if 44 - 44: Oo0Ooo + OoO0O00 + OoOoOO00 - I1IiiI
if ( i11Ii1 == "set-elp-name" ) :
iIIiiIi . set_elp_name = oOO0
if 68 - 68: i11iIiiIii / OOooOOo . i1IIi . i11iIiiIii . I11i
if ( i11Ii1 == "set-geo-name" ) :
iIIiiIi . set_geo_name = oOO0
if 56 - 56: iIii1I11I1II1 - II111iiii * i1IIi / Ii1I
if ( i11Ii1 == "set-rle-name" ) :
iIIiiIi . set_rle_name = oOO0
if 65 - 65: OOooOOo / I1IiiI . OoooooooOO + I1IiiI + OoooooooOO + i11iIiiIii
if ( i11Ii1 == "set-json-name" ) :
iIIiiIi . set_json_name = oOO0
if 20 - 20: I1IiiI + iII111i + O0 * O0
if ( i11Ii1 == "policy-name" ) :
iIIiiIi . policy_name = oOO0
if 18 - 18: I11i - I11i . OoOoOO00 . ooOoO0o
if 31 - 31: ooOoO0o
if 87 - 87: OoooooooOO + OOooOOo - I1ii11iIi11i / I1IiiI + ooOoO0o - Oo0Ooo
if 19 - 19: ooOoO0o + I1ii11iIi11i - ooOoO0o
if 17 - 17: I11i * i1IIi + iIii1I11I1II1 % I1IiiI
if 44 - 44: IiII + I1IiiI . Ii1I % Oo0Ooo
# Attach the match clauses and persist the policy.
iIIiiIi . match_clauses = oO00O0oO0O
iIIiiIi . save_policy ( )
return
if 97 - 97: O0
if 95 - 95: OoO0O00 % iII111i / I1IiiI * OoooooooOO
# Command table for the "lisp policy" CLI clause: maps the command name to
# its handler (lisp_policy_command) and the schema of accepted keywords.
# Each keyword's list encodes whether a value is required (True/False) and,
# for numeric or enumerated keywords, the allowed range or value set.
lisp_policy_commands = {
"lisp policy" : [ lisp_policy_command , {
"policy-name" : [ True ] ,
"match" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"source-eid" : [ True ] ,
"destination-eid" : [ True ] ,
"source-rloc" : [ True ] ,
"destination-rloc" : [ True ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"datetime-range" : [ True ] ,
"set-action" : [ False , "process" , "drop" ] ,
"set-record-ttl" : [ True , 0 , 0x7fffffff ] ,
"set-instance-id" : [ True , 0 , 0xffffffff ] ,
"set-source-eid" : [ True ] ,
"set-destination-eid" : [ True ] ,
"set-rloc-address" : [ True ] ,
"set-rloc-record-name" : [ True ] ,
"set-elp-name" : [ True ] ,
"set-geo-name" : [ True ] ,
"set-rle-name" : [ True ] ,
"set-json-name" : [ True ] } ]
}
if 31 - 31: iIii1I11I1II1
if 62 - 62: o0oOOo0O0Ooo - iII111i / II111iiii . o0oOOo0O0Ooo
if 20 - 20: iIii1I11I1II1 % OOooOOo
if 91 - 91: ooOoO0o
if 96 - 96: I1IiiI . OOooOOo
if 94 - 94: OoooooooOO + II111iiii % ooOoO0o - II111iiii / O0
if 34 - 34: IiII % oO0o
def lisp_send_to_arista(command, interface):
    """
    Push one configuration command to Arista EOS hardware via the FastCli
    shell tool. When *interface* is supplied the command is issued in that
    interface's config context; pass None for global configuration.
    """
    interface = "" if (interface == None) else "interface " + interface

    log_line = command
    if (interface != ""): log_line = interface + ": " + log_line
    lprint("Send CLI command '{}' to hardware".format(log_line))

    # Multi-line CLI script fed to FastCli; the literal's whitespace is
    # preserved exactly as the hardware has always received it.
    script = '''
        enable
        configure
        {}
        {}
 '''.format(interface, command)

    os.system("FastCli -c '{}'".format(script))
    return
if 45 - 45: i1IIi * I1ii11iIi11i % oO0o / O0
if 6 - 6: II111iiii / i11iIiiIii - O0 * i1IIi % OOooOOo
if 80 - 80: I11i * Oo0Ooo + i1IIi + I1IiiI
if 73 - 73: i1IIi - I1Ii111 * o0oOOo0O0Ooo - iIii1I11I1II1 + ooOoO0o
if 1 - 1: Oo0Ooo / Ii1I
if 43 - 43: I1ii11iIi11i - Oo0Ooo . oO0o
if 2 - 2: OoOoOO00 . I1IiiI
def lisp_arista_is_alive(prefix):
    """
    Ask Arista EOS whether *prefix* is programmed and active in the Trident
    forwarding tables. Returns True when the route row's last column is "Y".
    """
    cli = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
    output = getoutput("FastCli -c '{}'".format(cli))

    # Second output line is the route row; the last whitespace-separated
    # column (with any CR stripped) carries the active flag.
    row = output.split("\n")[1]
    flag = row.split(" ")[-1].replace("\r", "")

    return (flag == "Y")
if 69 - 69: I11i - I1IiiI . oO0o - OoooooooOO
if 33 - 33: o0oOOo0O0Ooo - o0oOOo0O0Ooo
if 55 - 55: OoooooooOO / IiII + i1IIi
if 54 - 54: ooOoO0o * Ii1I / Ii1I
if 15 - 15: oO0o * I1Ii111
if 11 - 11: Ii1I + o0oOOo0O0Ooo * OoooooooOO % iIii1I11I1II1
if 87 - 87: OoO0O00 + o0oOOo0O0Ooo
if 46 - 46: oO0o + OoOoOO00
if 17 - 17: Ii1I . Oo0Ooo - oO0o % OOooOOo
if 59 - 59: O0
if 75 - 75: o0oOOo0O0Ooo / OoooooooOO . I1ii11iIi11i * oO0o * I11i / OoooooooOO
if 17 - 17: Ii1I % I1ii11iIi11i + I11i
if 80 - 80: i1IIi . OoooooooOO % OoooooooOO . oO0o / OOooOOo
if 85 - 85: OOooOOo
if 80 - 80: ooOoO0o % O0 % I1ii11iIi11i + Oo0Ooo
if 82 - 82: oO0o / iIii1I11I1II1 % ooOoO0o . Ii1I / i1IIi - I1Ii111
if 15 - 15: I11i - OOooOOo . II111iiii . iIii1I11I1II1
if 93 - 93: I11i + o0oOOo0O0Ooo / OOooOOo + Ii1I % Oo0Ooo % I1ii11iIi11i
if 72 - 72: IiII / II111iiii
if 25 - 25: i1IIi + OoOoOO00 + oO0o + OoooooooOO
if 21 - 21: I1ii11iIi11i
if 60 - 60: i1IIi / OoO0O00 . Ii1I
if 16 - 16: i11iIiiIii + OoOoOO00 % Oo0Ooo + I1ii11iIi11i * Ii1I / I1Ii111
if 26 - 26: iII111i
if 31 - 31: iII111i
if 45 - 45: OoO0O00
if 55 - 55: iIii1I11I1II1 % iIii1I11I1II1 + I11i - ooOoO0o + I1IiiI * O0
if 47 - 47: ooOoO0o + iIii1I11I1II1 * OOooOOo . I1IiiI . o0oOOo0O0Ooo
if 49 - 49: Oo0Ooo . OoOoOO00 * OOooOOo
if 86 - 86: IiII * OOooOOo + Ii1I
if 62 - 62: I11i
if 86 - 86: Oo0Ooo % II111iiii + I1Ii111 / I1ii11iIi11i
if 15 - 15: I1IiiI / I1Ii111 % iII111i
if 57 - 57: I1Ii111 . iIii1I11I1II1 / Oo0Ooo / IiII / iII111i * OoOoOO00
if 35 - 35: i1IIi + I1Ii111 - ooOoO0o . I1ii11iIi11i + Oo0Ooo
if 43 - 43: oO0o . OoO0O00 * i1IIi
if 1 - 1: ooOoO0o / i1IIi
if 42 - 42: I1ii11iIi11i * ooOoO0o + OoOoOO00 % I1ii11iIi11i . IiII
if 75 - 75: OoO0O00 * i1IIi - OOooOOo % II111iiii % OoO0O00 - OoOoOO00
if 75 - 75: I11i * IiII * ooOoO0o
if 31 - 31: Ii1I
if 72 - 72: OOooOOo * Ii1I % OoO0O00
if 72 - 72: OoOoOO00 + o0oOOo0O0Ooo - i1IIi - OoO0O00 % OoOoOO00
if 42 - 42: oO0o / i1IIi . IiII
def lisp_program_vxlan_hardware(mc):
    """
    Program a map-cache entry's EID-prefix into Arista VXLAN hardware:
    allocate an unused address on vlan4094, bind it to a MAC derived from
    the best RLOC, install a static MAC entry pointing at the VTEP, and add
    a kernel route for the EID-prefix via the allocated next-hop.
    Silently returns when not on a lispers.net Arista install or when any
    prerequisite (RLOC, interfaces, vlan4094 address) is missing.
    """
    #
    # Only meaningful on an Arista box with the lispers.net install path.
    #
    if (os.path.exists("/persist/local/lispers.net") == False): return

    #
    # Need at least one usable RLOC to point the hardware at.
    #
    if (len(mc.best_rloc_set) == 0): return

    eid_prefix = mc.eid.print_prefix_no_iid()
    vtep_rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()

    #
    # Skip work when the kernel already routes the prefix via vlan4094.
    #
    route = getoutput("ip route get {} | egrep vlan4094".format(eid_prefix))
    if (route != ""):
        lprint("Route {} already in hardware: '{}'".format(green(eid_prefix,
            False), route))
        return

    #
    # Both the vxlan and vlan4094 interfaces must exist.
    #
    interfaces = getoutput("ifconfig | egrep 'vxlan|vlan4094'")
    if (interfaces.find("vxlan") == -1):
        lprint("No VXLAN interface found, cannot program hardware")
        return
    if (interfaces.find("vlan4094") == -1):
        lprint("No vlan4094 interface found, cannot program hardware")
        return

    my_addr = getoutput("ip addr | egrep vlan4094 | egrep inet")
    if (my_addr == ""):
        lprint("No IP address found on vlan4094, cannot program hardware")
        return

    my_addr = my_addr.split("inet ")[1]
    my_addr = my_addr.split("/")[0]

    #
    # Collect vlan4094 ARP entries considered in use. NOTE(review): only
    # entries marked "(incomplete)" are collected — preserved from the
    # original; confirm the intended polarity against the deployment.
    #
    in_use = []
    for arp_line in getoutput("arp -i vlan4094").split("\n"):
        if (arp_line.find("vlan4094") == -1): continue
        if (arp_line.find("(incomplete)") == -1): continue
        in_use.append(arp_line.split(" ")[0])

    #
    # Pick the first host address in our /24 that is neither in use nor
    # our own vlan4094 address.
    #
    next_hop = None
    octets = my_addr.split(".")
    for host in range(1, 255):
        octets[3] = str(host)
        candidate = ".".join(octets)
        if (candidate in in_use): continue
        if (candidate == my_addr): continue
        next_hop = candidate
        break

    if (next_hop == None):
        lprint("Address allocation failed for vlan4094, cannot program hardware")
        return

    #
    # Derive a MAC address from the low 3 bytes of the VTEP RLOC, in both
    # Linux (aa:bb:cc) and Arista (aaaa.bbbb.cccc) notations.
    #
    rloc_bytes = vtep_rloc.split(".")
    hex1 = lisp_hex_string(rloc_bytes[1]).zfill(2)
    hex2 = lisp_hex_string(rloc_bytes[2]).zfill(2)
    hex3 = lisp_hex_string(rloc_bytes[3]).zfill(2)
    linux_mac = "00:00:00:{}:{}:{}".format(hex1, hex2, hex3)
    arista_mac = "0000.00{}.{}{}".format(hex1, hex2, hex3)

    #
    # Static ARP entry binding the allocated next-hop to the derived MAC.
    #
    arp_command = "arp -i vlan4094 -s {} {}".format(next_hop, linux_mac)
    os.system(arp_command)

    #
    # Static MAC table entry steering that MAC to the VXLAN VTEP.
    #
    mac_command = ("mac address-table static {} vlan 4094 " + "interface vxlan 1 vtep {}").format(arista_mac, vtep_rloc)
    lisp_send_to_arista(mac_command, None)

    #
    # Kernel route for the EID-prefix via the allocated vlan4094 next-hop.
    #
    route_command = "ip route add {} via {}".format(eid_prefix, next_hop)
    os.system(route_command)

    lprint("Hardware programmed with commands:")
    route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
    lprint("  " + route_command)
    lprint("  " + arp_command)
    mac_command = mac_command.replace(vtep_rloc, red(vtep_rloc, False))
    lprint("  " + mac_command)
    return
if 39 - 39: I1ii11iIi11i - I1Ii111
if 36 - 36: I1Ii111 - OoO0O00 . I1ii11iIi11i * I1ii11iIi11i
if 9 - 9: OOooOOo - oO0o - iIii1I11I1II1 * i11iIiiIii / I11i
if 2 - 2: i1IIi % iII111i * ooOoO0o / OoOoOO00 + Oo0Ooo
if 59 - 59: i11iIiiIii / I1IiiI * iII111i
if 16 - 16: i11iIiiIii * II111iiii - ooOoO0o
if 80 - 80: iIii1I11I1II1 + iIii1I11I1II1 + I1Ii111 - IiII * iII111i - Ii1I
def lisp_clear_hardware_walk(mc, parms):
    """
    Map-cache walk callback: delete the kernel route installed for entry
    *mc*'s EID-prefix. Returns [True, None] so the cache walk continues.
    """
    prefix = mc.eid.print_prefix_no_iid()
    os.system("ip route delete {}".format(prefix))
    return ([True, None])
if 89 - 89: O0 * ooOoO0o
if 36 - 36: I1ii11iIi11i * II111iiii * iII111i + I1IiiI + OoO0O00 + oO0o
if 28 - 28: Ii1I - i11iIiiIii . oO0o / II111iiii
if 82 - 82: iII111i * iII111i . IiII * II111iiii
if 17 - 17: OoooooooOO % I1Ii111 * I1Ii111 / II111iiii . OoOoOO00 * iII111i
if 80 - 80: IiII % i11iIiiIii
if 6 - 6: II111iiii + i11iIiiIii - Oo0Ooo % OOooOOo + Oo0Ooo
if 46 - 46: iII111i
def lisp_clear_map_cache():
    """
    Operator-initiated flush of the entire map-cache and every structure
    keyed off its entries (RLOC-probe list, per-RLOC crypto keys, RTR list,
    gleaned groups), then signal the external data-plane to restart.
    """
    global lisp_map_cache, lisp_rloc_probe_list
    global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
    global lisp_rtr_list, lisp_gleaned_groups
    global lisp_no_map_request_rate_limit

    who = bold("User cleared", False)
    entry_count = lisp_map_cache.cache_count
    lprint("{} map-cache with {} entries".format(who, entry_count))

    # Remove any kernel/hardware routes installed for the cached prefixes
    # before discarding the cache itself.
    if (lisp_program_hardware):
        lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)

    lisp_map_cache = lisp_cache()

    # Reset the Map-Request rate-limit timestamp to "now".
    lisp_no_map_request_rate_limit = lisp_get_timestamp()

    # All of these held references into the old cache — start fresh.
    lisp_rloc_probe_list = {}
    lisp_crypto_keys_by_rloc_encap = {}
    lisp_crypto_keys_by_rloc_decap = {}
    lisp_rtr_list = {}
    lisp_gleaned_groups = {}

    # Tell the external data-plane (if any) to clear its state too.
    lisp_process_data_plane_restart(True)
    return
if 7 - 7: i1IIi - o0oOOo0O0Ooo - I1IiiI
if 62 - 62: OoOoOO00 * oO0o - I1IiiI / Ii1I
if 48 - 48: o0oOOo0O0Ooo % o0oOOo0O0Ooo - OoOoOO00
if 13 - 13: OoO0O00 - Ii1I . ooOoO0o / O0 * OoOoOO00
if 57 - 57: O0 + OoooooooOO % o0oOOo0O0Ooo / I1Ii111 / OOooOOo - OoOoOO00
if 48 - 48: o0oOOo0O0Ooo - II111iiii + OoOoOO00
if 54 - 54: II111iiii - OoO0O00 - o0oOOo0O0Ooo - O0 % I1Ii111
if 9 - 9: i1IIi % iII111i / Ii1I
if 83 - 83: oO0o
if 1 - 1: oO0o * iIii1I11I1II1 % iIii1I11I1II1 % iIii1I11I1II1 / oO0o + IiII
if 29 - 29: OoooooooOO
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
    """
    Data-encapsulate an RLOC-probe control message so it traverses NAT:
    prepend an IPv4 + UDP (control-port) header, wrap it in a lisp_packet,
    and send it LISP-data-encapsulated to *rloc* out the data socket
    (lisp_sockets[3]). *nat_info* distinguishes request (translated port,
    hostname logged) from reply.
    """
    if (len(lisp_sockets) != 4): return

    source_rloc = lisp_myrlocs[0]

    #
    # Build the inner IPv4 header (20 bytes) + UDP header (8 bytes) in
    # front of the probe payload. Protocol 17 (UDP), TTL 64, UDP dest
    # port is the LISP control port.
    #
    inner_length = len(packet) + 28
    ip_header = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(inner_length), 0, 64,
        17, 0, socket.htonl(source_rloc.address), socket.htonl(rloc.address))
    ip_header = lisp_ip_checksum(ip_header)

    udp_header = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
        socket.htons(inner_length - 20), 0)

    packet = lisp_packet(ip_header + udp_header + packet)

    #
    # Fill in inner/outer addressing for the data encapsulation. The inner
    # instance-id 0xffffff marks this as a control message in the data path.
    #
    packet.inner_dest.copy_address(rloc)
    packet.inner_dest.instance_id = 0xffffff
    packet.inner_source.copy_address(source_rloc)
    packet.inner_ttl = 64
    packet.outer_dest.copy_address(rloc)
    packet.outer_source.copy_address(source_rloc)
    packet.outer_version = packet.outer_dest.afi_to_version()
    packet.outer_ttl = 64
    packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT

    probe_dest = red(rloc.print_address_no_iid(), False)
    if (nat_info):
        hostname_suffix = " {}".format(blue(nat_info.hostname, False))
        probe_kind = bold("RLOC-probe request", False)
    else:
        hostname_suffix = ""
        probe_kind = bold("RLOC-probe reply", False)

    lprint(("Data encapsulate {} to {}{} port {} for " + "NAT-traversal").format(probe_kind, probe_dest, hostname_suffix, packet.encap_port))

    #
    # Encode the LISP data header; bail if encoding fails.
    #
    if (packet.encode(None) == None): return
    packet.print_packet("Send", True)

    data_socket = lisp_sockets[3]
    packet.send_packet(data_socket, packet.outer_dest)
    del (packet)
    return
if 14 - 14: iIii1I11I1II1
if 29 - 29: i1IIi
if 12 - 12: OOooOOo
if 84 - 84: i11iIiiIii * o0oOOo0O0Ooo
if 24 - 24: Ii1I . OOooOOo
if 34 - 34: I11i % Oo0Ooo . II111iiii - OoO0O00 - I1Ii111 + Oo0Ooo
if 71 - 71: O0 + OOooOOo % OoooooooOO
if 51 - 51: I1ii11iIi11i * o0oOOo0O0Ooo * I11i
def lisp_get_default_route_next_hops():
    """
    Return the system's default-route next-hops as a list of
    [device, gateway] pairs. On macOS a single pair is parsed from
    "route -n get default"; on Linux every metric-less "default via" line
    from "ip route" contributes one pair.
    """
    if (lisp_is_macos()):
        output = getoutput("route -n get default").split("\n")
        gateway = device = None
        for line in output:
            if (line.find("gateway: ") != -1): gateway = line.split(": ")[1]
            if (line.find("interface: ") != -1): device = line.split(": ")[1]
        return ([[device, gateway]])

    #
    # Linux. Routes carrying an explicit metric are skipped, matching the
    # long-standing behavior of this function.
    #
    routes = getoutput("ip route | egrep 'default via'").split("\n")

    next_hops = []
    for route in routes:
        if (route.find(" metric ") != -1): continue

        fields = route.split(" ")
        try:
            gw_index = fields.index("via") + 1
            if (gw_index >= len(fields)): continue
            dev_index = fields.index("dev") + 1
            if (dev_index >= len(fields)): continue
        except:
            continue

        next_hops.append([fields[dev_index], fields[gw_index]])
    return (next_hops)
if 15 - 15: iIii1I11I1II1 . I1Ii111 * OoooooooOO * O0 % OOooOOo
if 53 - 53: Ii1I
if 63 - 63: I11i % OoOoOO00
if 46 - 46: iIii1I11I1II1 . II111iiii / OoooooooOO - ooOoO0o * iII111i
if 52 - 52: I11i + iII111i
if 9 - 9: OoOoOO00 % II111iiii . I11i * Oo0Ooo
if 53 - 53: II111iiii / i1IIi + OoooooooOO * O0
def lisp_get_host_route_next_hop(rloc):
    """
    Return the next-hop address of the kernel host-route for *rloc*, or
    None when no such route (or no "via" clause) exists.
    """
    output = getoutput("ip route | egrep '{} via'".format(rloc))
    fields = output.split(" ")

    try:
        nh_index = fields.index("via") + 1
    except:
        return (None)

    if (nh_index >= len(fields)): return (None)
    return (fields[nh_index])
if 91 - 91: OOooOOo % oO0o . OoOoOO00 . I1IiiI - OoOoOO00
if 18 - 18: O0 - I1IiiI + i1IIi % i11iIiiIii
if 97 - 97: iII111i * OoooooooOO + I1Ii111 + ooOoO0o - ooOoO0o
if 63 - 63: o0oOOo0O0Ooo * OOooOOo + iIii1I11I1II1 + Oo0Ooo
if 25 - 25: oO0o + IiII % o0oOOo0O0Ooo
if 24 - 24: OoOoOO00
if 87 - 87: I1ii11iIi11i / ooOoO0o * i1IIi
def lisp_install_host_route(dest, nh, install):
    """
    Add (install=True) or delete (install=False) a /32 kernel host-route
    for *dest*, optionally via next-hop *nh* (None means no "via" clause).
    """
    action = "add" if install else "delete"
    nh_text = "none" if nh == None else nh

    lprint("{} host-route {}, nh {}".format(action.title(), dest, nh_text))

    if (nh == None):
        command = "ip route {} {}/32".format(action, dest)
    else:
        command = "ip route {} {}/32 via {}".format(action, dest, nh)

    os.system(command)
    return
if 40 - 40: I1ii11iIi11i / O0
if 87 - 87: ooOoO0o
if 100 - 100: iII111i + II111iiii * Oo0Ooo * OOooOOo
if 6 - 6: IiII % OOooOOo
if 3 - 3: OoOoOO00 / OoOoOO00 - II111iiii
if 41 - 41: oO0o
if 12 - 12: I1IiiI + I1Ii111
if 66 - 66: I1Ii111 + OOooOOo + I1Ii111 . OoooooooOO * oO0o / OoO0O00
def lisp_checkpoint(checkpoint_list):
    """
    Write the supplied map-cache checkpoint records, one per line, to
    lisp_checkpoint_filename. No-op unless checkpointing is enabled via
    lisp_checkpoint_map_cache.
    """
    if (lisp_checkpoint_map_cache == False): return

    #
    # Use a context manager so the descriptor is closed even if a write
    # raises (the original open()/close() pair leaked it on exceptions).
    #
    with open(lisp_checkpoint_filename, "w") as checkpoint_file:
        checkpoint_file.writelines(entry + "\n" for entry in checkpoint_list)

    lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
        len(checkpoint_list), lisp_checkpoint_filename))
    return
if 21 - 21: o0oOOo0O0Ooo * iII111i * o0oOOo0O0Ooo * o0oOOo0O0Ooo . Oo0Ooo
if 98 - 98: I1ii11iIi11i
if 58 - 58: IiII / i11iIiiIii % I11i
if 74 - 74: OoooooooOO - I1ii11iIi11i + OOooOOo % IiII . o0oOOo0O0Ooo
if 21 - 21: Ii1I
if 72 - 72: I1Ii111 . OoooooooOO / I1Ii111 - Ii1I / I1ii11iIi11i * I1ii11iIi11i
if 72 - 72: IiII . Ii1I + OoooooooOO * OoOoOO00 + Oo0Ooo . iII111i
if 92 - 92: O0 * Ii1I - I1ii11iIi11i - IiII . OoO0O00 + I1IiiI
def lisp_load_checkpoint():
    """
    Rebuild the map-cache from the checkpoint file written by
    lisp_checkpoint(). Each line is "<eid-prefix> rloc <records>" where
    <records> is a comma-separated "addr priority weight" list, or
    "native-forward"/empty for RLOC-less entries. Restored entries are
    flagged as checkpoint entries and given the NMR TTL.
    """
    if (lisp_checkpoint_map_cache == False): return
    if (os.path.exists(lisp_checkpoint_filename) == False): return

    checkpoint_file = open(lisp_checkpoint_filename, "r")

    loaded = 0
    for line in checkpoint_file:
        loaded += 1
        halves = line.split(" rloc ")
        rloc_strings = [] if (halves[1] in ["native-forward\n", "\n"]) else halves[1].split(", ")

        # Rebuild each "addr priority weight" record as a lisp_rloc.
        rloc_set = []
        for record in rloc_strings:
            rloc_entry = lisp_rloc(False)
            fields = record.split(" ")
            rloc_entry.rloc.store_address(fields[0])
            rloc_entry.priority = int(fields[1])
            rloc_entry.weight = int(fields[2])
            rloc_set.append(rloc_entry)

        mapping = lisp_mapping("", "", rloc_set)
        if (mapping != None):
            mapping.eid.store_prefix(halves[0])
            mapping.checkpoint_entry = True
            mapping.map_cache_ttl = LISP_NMR_TTL * 60
            if (rloc_set == []): mapping.action = LISP_NATIVE_FORWARD_ACTION
            mapping.add_cache()
            continue

        # Entry could not be turned into a mapping — don't count it.
        loaded -= 1

    checkpoint_file.close()
    lprint("{} {} map-cache entries from file '{}'".format(
        bold("Loaded", False), loaded, lisp_checkpoint_filename))
    return
if 79 - 79: ooOoO0o / oO0o - oO0o / OoooooooOO
if 91 - 91: iIii1I11I1II1 - O0 * o0oOOo0O0Ooo * o0oOOo0O0Ooo . II111iiii
if 69 - 69: II111iiii - Oo0Ooo + i1IIi . II111iiii + o0oOOo0O0Ooo
if 20 - 20: OoooooooOO - OoO0O00 * ooOoO0o * OoOoOO00 / OOooOOo
if 64 - 64: O0 + iII111i / I11i * OoOoOO00 + o0oOOo0O0Ooo + I1Ii111
if 16 - 16: I11i
if 9 - 9: Ii1I / IiII * I11i - i11iIiiIii * I1ii11iIi11i / iII111i
if 61 - 61: O0 % iII111i
if 41 - 41: I1Ii111 * OoooooooOO
if 76 - 76: OoooooooOO * II111iiii . II111iiii / o0oOOo0O0Ooo - iII111i
if 49 - 49: O0 . I1ii11iIi11i . OoOoOO00 . I1Ii111 % O0 . iIii1I11I1II1
if 19 - 19: iIii1I11I1II1
if 97 - 97: Ii1I . I11i / ooOoO0o + Oo0Ooo
if 100 - 100: iII111i / I1Ii111 % OoOoOO00 . O0 / OoOoOO00
def lisp_write_checkpoint_entry(checkpoint_list, mc):
    """
    Append one textual checkpoint record for map-cache entry *mc* to
    checkpoint_list: "<eid-prefix> rloc <addr> <prio> <weight>, ..." with
    null RLOCs skipped, or "... rloc native-forward" when the entry has no
    RLOC-set and a native-forward action.
    """
    if (lisp_checkpoint_map_cache == False): return

    record = "{} rloc ".format(mc.eid.print_prefix())

    for rloc_entry in mc.rloc_set:
        if (rloc_entry.rloc.is_null()): continue
        record += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
            rloc_entry.priority, rloc_entry.weight)

    if (mc.rloc_set != []):
        # Strip the trailing ", " separator left by the loop above.
        record = record[0:-2]
    elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
        record += "native-forward"

    checkpoint_list.append(record)
    return
if 83 - 83: OoO0O00 / iII111i
if 59 - 59: I1Ii111 % OOooOOo . I1IiiI + I1ii11iIi11i % oO0o
if 96 - 96: OoO0O00
if 53 - 53: oO0o + OoO0O00
if 58 - 58: iIii1I11I1II1 + OoOoOO00
if 65 - 65: iII111i % Oo0Ooo * iIii1I11I1II1 + I1IiiI + II111iiii
if 72 - 72: OoOoOO00 . OoooooooOO - OOooOOo
def lisp_check_dp_socket():
    """
    Return True when the data-plane's named socket exists on the
    filesystem; log and return False otherwise.
    """
    socket_name = lisp_ipc_dp_socket_name
    if (os.path.exists(socket_name)): return (True)

    missing = bold("does not exist", False)
    lprint("Socket '{}' {}".format(socket_name, missing))
    return (False)
if 33 - 33: I11i - o0oOOo0O0Ooo - Oo0Ooo - IiII
if 16 - 16: I1Ii111 - OOooOOo % I1Ii111
if 71 - 71: I1IiiI / Ii1I
if 6 - 6: OoOoOO00 + i1IIi . I1Ii111 - I1Ii111 - o0oOOo0O0Ooo . O0
if 39 - 39: i11iIiiIii / IiII
if 10 - 10: OOooOOo . o0oOOo0O0Ooo / i11iIiiIii + Oo0Ooo * Ii1I
if 38 - 38: Ii1I - I1ii11iIi11i + iII111i / i1IIi
def lisp_write_to_dp_socket(entry):
    """
    JSON-encode *entry* and send it as one datagram over lisp_ipc_dp_socket
    to the data-plane's named socket. Failures are logged, never raised.
    """
    # Pre-initialize so the failure path below can always log something:
    # the original referenced the serialized string inside 'except' even
    # when json.dumps() itself was what raised, turning any encode error
    # into an uncaught NameError.
    record = None
    try:
        record = json.dumps(entry)
        op = bold("Write IPC", False)
        lprint("{} record to named socket: '{}'".format(op, record))
        lisp_ipc_dp_socket.sendto(record, lisp_ipc_dp_socket_name)
    except Exception:
        lprint("Failed to write IPC record to named socket: '{}'".format(record))

    return
if 71 - 71: oO0o
if 75 - 75: Oo0Ooo * oO0o + iIii1I11I1II1 / Oo0Ooo
if 51 - 51: Ii1I * Ii1I + iII111i * oO0o / OOooOOo - ooOoO0o
if 16 - 16: I1Ii111 + O0 - O0 * iIii1I11I1II1 / iII111i
if 4 - 4: iII111i
if 75 - 75: I1IiiI * IiII % OoO0O00 - ooOoO0o * iII111i
if 32 - 32: iII111i
if 59 - 59: OoOoOO00 - I1Ii111
if 34 - 34: ooOoO0o . OoooooooOO / ooOoO0o + OoooooooOO
def lisp_write_ipc_keys(rloc):
    """
    After keying changes on *rloc*, re-send the IPC map-cache record for
    every cache entry that references it. The probe-list key is
    "addr" or "addr:port" when the RLOC has a translated port.
    """
    probe_key = rloc.rloc.print_address_no_iid()
    port = rloc.translated_port
    if (port != 0): probe_key += ":" + str(port)
    if (probe_key not in lisp_rloc_probe_list): return

    for _, eid, _group in lisp_rloc_probe_list[probe_key]:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc == None): continue
        lisp_write_ipc_map_cache(True, mc)

    return
if 20 - 20: Ii1I + I11i
if 98 - 98: OOooOOo
if 58 - 58: i11iIiiIii / OoOoOO00
if 18 - 18: ooOoO0o + O0 - OOooOOo + iIii1I11I1II1 . OOooOOo * iIii1I11I1II1
if 83 - 83: OoO0O00 - Oo0Ooo * I1IiiI % Oo0Ooo % oO0o
if 64 - 64: OoOoOO00 + oO0o / OoooooooOO . i11iIiiIii / II111iiii
if 55 - 55: ooOoO0o . i11iIiiIii . o0oOOo0O0Ooo
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
    """
    Build (and, unless dont_send, transmit) the data-plane IPC record for
    map-cache entry *mc*. Multicast entries carry an "rles" array from the
    first RLOC's replication list; unicast entries carry an "rlocs" array
    of up, IPv4/IPv6 RLOCs. Returns the record dict. ITRs/RTRs only — a
    pure ETR never writes map-cache state to the data-plane.
    """
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    entry = {"type": "map-cache", "opcode": "add" if add_or_delete else "delete"}

    # A non-null group address marks a multicast (S,G) entry.
    multicast = (mc.group.is_null() == False)
    if (multicast):
        entry["eid-prefix"] = mc.group.print_prefix_no_iid()
        entry["rles"] = []
    else:
        entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
        entry["rlocs"] = []

    entry["instance-id"] = str(mc.eid.instance_id)

    if (multicast):
        if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
            for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
                address = rle_node.address.print_address_no_iid()
                port = str(4341) if rle_node.translated_port == 0 else str(rle_node.translated_port)

                record = {"rle": address, "port": port}
                ekey, ikey = rle_node.get_encap_keys()
                record = lisp_build_json_keys(record, ekey, ikey, "encrypt-key")
                entry["rles"].append(record)
    else:
        for rloc_entry in mc.rloc_set:
            if (rloc_entry.rloc.is_ipv4() == False and rloc_entry.rloc.is_ipv6() == False):
                continue

            if (rloc_entry.up_state() == False): continue

            port = str(4341) if rloc_entry.translated_port == 0 else str(rloc_entry.translated_port)

            record = {"rloc": rloc_entry.rloc.print_address_no_iid(), "priority":
                str(rloc_entry.priority), "weight": str(rloc_entry.weight), "port":
                port}
            ekey, ikey = rloc_entry.get_encap_keys()
            record = lisp_build_json_keys(record, ekey, ikey, "encrypt-key")
            entry["rlocs"].append(record)

    if (dont_send == False): lisp_write_to_dp_socket(entry)
    return (entry)
if 85 - 85: OOooOOo / O0 - iIii1I11I1II1 . I11i . ooOoO0o - I1IiiI
if 97 - 97: iIii1I11I1II1 * Oo0Ooo
if 76 - 76: OoO0O00 / i11iIiiIii % ooOoO0o % I11i * O0
if 84 - 84: II111iiii - iII111i / IiII . O0 % i1IIi / I1ii11iIi11i
if 2 - 2: OoooooooOO . OoO0O00 . II111iiii / Ii1I - OOooOOo % Oo0Ooo
if 47 - 47: OOooOOo * oO0o
if 41 - 41: OoooooooOO * I1IiiI
def lisp_write_ipc_decap_key(rloc_addr, keys):
    """
    Send the key-id-1 decryption/ICV keys for *rloc_addr* ("addr" or
    "addr:port") to the data-plane over the IPC socket. No-op when running
    as an ITR, when no IPC socket is open, or when there is no usable key.
    """
    if (lisp_i_am_itr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    #
    # Keys are stored per key-id with slot 1 holding the active key. Guard
    # on len(keys) < 2: the original only tested for an empty list, so a
    # one-element list raised IndexError on keys[1] below.
    #
    if (keys == None or len(keys) < 2 or keys[1] == None): return

    ekey = keys[1].encrypt_key
    ikey = keys[1].icv_key

    #
    # Split out an optional ":port" suffix into its own record field.
    #
    addr_and_port = rloc_addr.split(":")
    if (len(addr_and_port) == 1):
        entry = {"type": "decap-keys", "rloc": addr_and_port[0]}
    else:
        entry = {"type": "decap-keys", "rloc": addr_and_port[0], "port": addr_and_port[1]}

    entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
    lisp_write_to_dp_socket(entry)
    return
if 8 - 8: i11iIiiIii
if 95 - 95: ooOoO0o + i1IIi / OOooOOo . i11iIiiIii
if 31 - 31: iII111i - iII111i - oO0o
if 62 - 62: Oo0Ooo % Oo0Ooo / OoooooooOO * o0oOOo0O0Ooo . Ii1I
if 1 - 1: I1ii11iIi11i / II111iiii / II111iiii + o0oOOo0O0Ooo + OoooooooOO
if 34 - 34: i11iIiiIii + iIii1I11I1II1 - i11iIiiIii * o0oOOo0O0Ooo - iII111i
if 87 - 87: OOooOOo * OoO0O00
if 61 - 61: iII111i - II111iiii . I1Ii111 % II111iiii / I11i
def lisp_build_json_keys(entry, ekey, ikey, key_type):
    """
    Attach a one-element "keys" array to IPC record *entry*, holding
    key-id 1 with the encryption/decryption key under *key_type* and the
    ICV key under "icv-key". Returns *entry* unchanged when ekey is None.
    """
    if (ekey == None): return (entry)

    entry["keys"] = [{"key-id": "1", key_type: ekey, "icv-key": ikey}]
    return (entry)
if 94 - 94: o0oOOo0O0Ooo % Ii1I * Ii1I % Oo0Ooo / I1ii11iIi11i
if 40 - 40: Oo0Ooo . II111iiii / II111iiii - i1IIi
if 91 - 91: Ii1I
if 45 - 45: I1ii11iIi11i + Oo0Ooo
if 72 - 72: I1ii11iIi11i
if 5 - 5: i1IIi
if 31 - 31: iII111i - OoooooooOO + oO0o / OoooooooOO + I1ii11iIi11i
def lisp_write_ipc_database_mappings(ephem_port):
    """
    ETR only: push the IPv4/IPv6 database-mapping EID-prefixes to the
    data-plane over IPC, followed by an "etr-nat-port" record carrying the
    ephemeral NAT port the data-plane should use.
    """
    if (lisp_i_am_etr == False): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    #
    # Only address-family EIDs are meaningful to the data-plane.
    #
    entry = {"type": "database-mappings", "database-mappings": []}
    for db in lisp_db_list:
        if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
        entry["database-mappings"].append({"instance-id": str(db.eid.instance_id),
            "eid-prefix": db.eid.print_prefix_no_iid()})

    lisp_write_to_dp_socket(entry)

    #
    # Follow up with the ephemeral NAT port record.
    #
    entry = {"type": "etr-nat-port", "port": ephem_port}
    lisp_write_to_dp_socket(entry)
    return
if 80 - 80: OOooOOo - II111iiii - OoO0O00
if 62 - 62: Ii1I . i11iIiiIii % OOooOOo
if 44 - 44: i1IIi * I1ii11iIi11i % Ii1I . Ii1I * I11i + II111iiii
if 15 - 15: i1IIi - I11i - I1Ii111 / OoO0O00 + Oo0Ooo + I1IiiI
if 81 - 81: IiII
if 54 - 54: I1IiiI % OoO0O00 % OoOoOO00
if 12 - 12: II111iiii . O0 * i11iIiiIii . I11i
def lisp_write_ipc_interfaces():
    """Push the interface-to-instance-ID mapping table to an external
    data-plane over the IPC socket. Only runs in a non-ETR process with
    a healthy data-plane socket."""
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket is None): return
    if (lisp_check_dp_socket() == False): return

    ipc = {"type": "interfaces", "interfaces": []}
    for interface in list(lisp_myinterfaces.values()):
        # Only interfaces configured with an instance-ID are relevant.
        if (interface.instance_id is None): continue
        record = {"interface": interface.device,
                  "instance-id": str(interface.instance_id)}
        ipc["interfaces"].append(record)

    lisp_write_to_dp_socket(ipc)
    return
def lisp_parse_auth_key(value):
    """Parse an authentication-key configuration string.

    'value' is either a bare password ("secret") or one or more key-id
    tagged passwords ("[1]secret1[2]secret2"). Returns a dict mapping
    integer key-id to password; a bare password maps from key-id 0.
    Returns None when a key-id is not a valid integer.
    """
    tokens = value.split("[")
    keys = {}

    # No "[" present: the whole string is a single key with key-id 0.
    if (len(tokens) == 1):
        keys[0] = value
        return (keys)

    for token in tokens:
        if (token == ""): continue
        index = token.find("]")
        # Narrowed from a bare except: int() on a str slice can only
        # raise ValueError; return None explicitly on a malformed key-id.
        try:
            key_id = int(token[0:index])
        except ValueError:
            return (None)

        keys[key_id] = token[index + 1::]
    return (keys)
def lisp_reassemble(packet):
    """Reassemble an IPv4-fragmented LISP encapsulated packet.

    'packet' is a raw IPv4 packet starting at the IP header. Returns the
    packet unchanged when it is not a fragment, None while fragments are
    still outstanding (or the flow is not LISP), and the reassembled
    packet once all fragments have arrived.
    """
    frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])

    # Not a fragment (or only the DF bit set) -- nothing to do.
    if (frag_field == 0 or frag_field == 0x4000): return (packet)

    ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
    total_length = socket.ntohs(struct.unpack("H", packet[2:4])[0])

    # The last fragment has MF clear and a non-zero offset. Queue entries
    # are [offset-in-bytes, payload-length, packet, is-last-fragment].
    is_last = (frag_field & 0x2000 == 0 and (frag_field & 0x1fff) != 0)
    entry = [(frag_field & 0x1fff) * 8, total_length - 20, packet, is_last]

    # First fragment: peek at the UDP ports to decide whether this flow
    # is LISP at all. If not, remember that by queueing a None packet.
    if (frag_field == 0x2000):
        sport, dport = struct.unpack("HH", packet[20:24])
        sport = socket.ntohs(sport)
        dport = socket.ntohs(dport)
        if (dport not in [4341, 8472, 4789] and sport != 4341):
            lisp_reassembly_queue[ident] = []
            entry[2] = None

    if (ident not in lisp_reassembly_queue):
        lisp_reassembly_queue[ident] = []

    queue = lisp_reassembly_queue[ident]

    # First fragment already told us this flow is not LISP -- drop.
    if (len(queue) == 1 and queue[0][2] == None):
        dprint("Drop non-LISP encapsulated fragment 0x{}".format(
            lisp_hex_string(ident).zfill(4)))
        return (None)

    queue.append(entry)
    queue = sorted(queue)

    # Format source/destination RLOC addresses for logging.
    addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    source = addr.print_address_no_iid()
    addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
    dest = addr.print_address_no_iid()
    rlocs = red("{} -> {}".format(source, dest), False)

    dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format(
        bold("Received", False),
        " non-LISP encapsulated" if entry[2] == None else "", rlocs,
        lisp_hex_string(ident).zfill(4),
        lisp_hex_string(frag_field).zfill(4)))

    # We need both the zero-offset fragment and the final fragment before
    # reassembly can even be attempted.
    if (queue[0][0] != 0 or queue[-1][3] == False): return (None)

    # Verify the fragments are contiguous -- no holes.
    previous = queue[0]
    for frag in queue[1::]:
        offset = frag[0]
        prev_offset, prev_length = previous[0], previous[1]
        if (prev_offset + prev_length != offset): return (None)
        previous = frag

    lisp_reassembly_queue.pop(ident)

    # Concatenate payloads, stripping the 20-byte IP header from every
    # fragment after the first.
    packet = queue[0][2]
    for frag in queue[1::]: packet += frag[2][20::]

    dprint("{} fragments arrived for packet 0x{}, length {}".format(
        bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))

    # Rewrite the IP header: new total length, clear the fragment fields,
    # zero the checksum, then recompute it.
    length = socket.htons(len(packet))
    header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
        struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
        packet[12:20]
    header = lisp_ip_checksum(header)
    return (header + packet[20::])
def lisp_get_crypto_decap_lookup_key(addr, port):
    """Return the key that indexes lisp_crypto_keys_by_rloc_decap for
    'addr'/'port'. Tries "<addr>:<port>" first, then "<addr>" alone, and
    finally searches for any "<addr>:<other-port>" entry, re-homing its
    crypto state onto the address-only key. Returns None on no match."""
    key = addr.print_address_no_iid() + ":" + str(port)
    if (key in lisp_crypto_keys_by_rloc_decap): return (key)

    key = addr.print_address_no_iid()
    if (key in lisp_crypto_keys_by_rloc_decap): return (key)

    # An xTR behind a NAT may have changed its ephemeral port. Find the
    # stale "<addr>:<port>" entry and copy its keys to "<addr>".
    for entry_key in lisp_crypto_keys_by_rloc_decap:
        pieces = entry_key.split(":")
        if (len(pieces) == 1): continue
        # IPv6 addresses contain ":" themselves -- strip only the port.
        base = pieces[0] if len(pieces) == 2 else ":".join(pieces[0:-1])
        if (base == key):
            crypto_keys = lisp_crypto_keys_by_rloc_decap[entry_key]
            lisp_crypto_keys_by_rloc_decap[key] = crypto_keys
            return (key)

    return (None)
def lisp_build_crypto_decap_lookup_key(addr, port):
    """Decide whether crypto decap state for 'addr'/'port' is indexed by
    address alone or by "<addr>:<port>". An RTR keys NAT'd xTRs (found
    in lisp_nat_state_info) by address+port, since several may share one
    translated address, and everything else by address; a non-RTR always
    uses address+port."""
    addr = addr.print_address_no_iid()
    addr_and_port = addr + ":" + str(port)

    if (lisp_i_am_rtr):
        # A known RLOC-probing xTR -- key by address only.
        if (addr in lisp_rloc_probe_list): return (addr)

        # xTRs behind a NAT are identified by translated address + port.
        for nat_info_list in list(lisp_nat_state_info.values()):
            for nat_info in nat_info_list:
                if (addr == nat_info.address): return (addr_and_port)

        return (addr)
    return (addr_and_port)
def lisp_is_rloc_probe_request(lisp_type):
    """Return True when the single byte 'lisp_type' is the first byte of
    a Map-Request with the RLOC-probe bit set (0x12)."""
    first_byte = struct.unpack("B", lisp_type)[0]
    return (first_byte == 0x12)
def lisp_is_rloc_probe_reply(lisp_type):
    """Return True when the single byte 'lisp_type' is the first byte of
    a Map-Reply with the RLOC-probe bit set (0x28)."""
    first_byte = struct.unpack("B", lisp_type)[0]
    return (first_byte == 0x28)
def lisp_is_rloc_probe(packet, rr):
    """Inspect a pcap-delivered IPv4 packet and decide whether it is an
    RLOC-probe Map-Request/Map-Reply on the LISP control port.

    'rr' selects the test: 0 = probe request, 1 = probe reply, -1 =
    either. Returns [lisp-payload, source-string, source-port, ttl] for
    a probe we should process, [packet, None, None, None] when it is
    not a probe, and [None, None, None, None] for a probe we sourced
    ourselves (pcap also sees our own transmits).
    """
    # Must be UDP (IP protocol 17).
    is_udp = (struct.unpack("B", packet[9:10])[0] == 17)
    if (is_udp == False): return ([packet, None, None, None])

    sport = struct.unpack("H", packet[20:22])[0]
    dport = struct.unpack("H", packet[22:24])[0]
    is_lisp_port = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
    if (is_lisp_port == False): return ([packet, None, None, None])

    # packet[28:29] is the first byte of the LISP control message.
    if (rr == 0):
        is_probe = lisp_is_rloc_probe_request(packet[28:29])
        if (is_probe == False): return ([packet, None, None, None])
    elif (rr == 1):
        is_probe = lisp_is_rloc_probe_reply(packet[28:29])
        if (is_probe == False): return ([packet, None, None, None])
    elif (rr == -1):
        is_probe = lisp_is_rloc_probe_request(packet[28:29])
        if (is_probe == False):
            is_probe = lisp_is_rloc_probe_reply(packet[28:29])
            if (is_probe == False): return ([packet, None, None, None])

    # Ignore probes sourced by one of our own local addresses.
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    if (source.is_local()): return ([None, None, None, None])

    # Strip the IP/UDP headers and return the LISP payload plus metadata.
    source = source.print_address_no_iid()
    port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    ttl = struct.unpack("B", packet[8:9])[0] - 1
    packet = packet[28::]

    recv_str = bold("Receive(pcap)", False)
    from_str = bold("from " + source, False)
    packet_str = lisp_format_packet(packet)
    lprint("{} {} bytes {} {}, packet: {}".format(recv_str, len(packet),
        from_str, port, packet_str))

    return ([packet, source, port, ttl])
def lisp_ipc_write_xtr_parameters(cp, dp):
    """Tell an external data-plane whether control-plane ('cp') and
    data-plane ('dp') logging are enabled, and whether we run as an RTR."""
    if (lisp_ipc_dp_socket is None): return

    ipc = {"type": "xtr-parameters", "control-plane-logging": cp,
           "data-plane-logging": dp, "rtr": lisp_i_am_rtr}

    lisp_write_to_dp_socket(ipc)
    return
def lisp_external_data_plane():
    """Return True when packet forwarding is handled by an external
    data-plane, i.e. "ipc-data-plane = yes" appears in ./lisp.config or
    the LISP_RUN_LISP_XTR environment variable is set."""
    grep = 'egrep "ipc-data-plane = yes" ./lisp.config'
    if (getoutput(grep) != ""): return (True)

    if (os.getenv("LISP_RUN_LISP_XTR") is not None): return (True)
    return (False)
def lisp_process_data_plane_restart(do_clear=False):
    """An external data-plane restarted: touch lisp.config so the config
    is re-read, then replay the entire map-cache to it over the IPC
    socket. With 'do_clear' True an empty cache is sent instead."""
    os.system("touch ./lisp.config")

    ipc = {"type": "entire-map-cache", "entries": []}

    if (do_clear == False):
        entries = ipc["entries"]
        lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)

    lisp_write_to_dp_socket(ipc)
    return
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
    """Process a "statistics" IPC message from an external data-plane.

    For each reported map-cache entry, fold the per-RLOC packet/byte
    counters into our stats; when a unicast entry's TTL has elapsed,
    refresh it by sending a new Map-Request.
    """
    if ("entries" not in msg):
        lprint("No 'entries' in stats IPC message")
        return

    if (type(msg["entries"]) != list):
        lprint("'entries' in stats IPC message must be an array")
        return

    for entry in msg["entries"]:
        if ("eid-prefix" not in entry):
            lprint("No 'eid-prefix' in stats IPC message")
            continue
        eid_str = entry["eid-prefix"]

        if ("instance-id" not in entry):
            lprint("No 'instance-id' in stats IPC message")
            continue
        iid = int(entry["instance-id"])

        # Find the map-cache entry the data-plane is reporting about.
        eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
        eid.store_prefix(eid_str)
        mc = lisp_map_cache_lookup(None, eid)
        if (mc == None):
            lprint("Map-cache entry for {} not found for stats update".format(eid_str))
            continue

        if ("rlocs" not in entry):
            lprint("No 'rlocs' in stats IPC message for {}".format(eid_str))
            continue
        if (type(entry["rlocs"]) != list):
            lprint("'rlocs' in stats IPC message must be an array")
            continue

        ipc_rlocs = entry["rlocs"]

        # Fold the reported counters into each matching RLOC's stats.
        for ipc_rloc in ipc_rlocs:
            if ("rloc" not in ipc_rloc): continue

            rloc_str = ipc_rloc["rloc"]
            if (rloc_str == "no-address"): continue

            rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
            rloc.store_address(rloc_str)

            rloc_entry = mc.get_rloc(rloc)
            if (rloc_entry == None): continue

            # Missing counters default to 0 so the IPC sender may omit them.
            pc = 0 if ("packet-count" not in ipc_rloc) else ipc_rloc["packet-count"]

            bc = 0 if ("byte-count" not in ipc_rloc) else ipc_rloc["byte-count"]

            last = 0 if ("seconds-last-packet" not in ipc_rloc) else ipc_rloc["seconds-last-packet"]

            rloc_entry.stats.packet_count += pc
            rloc_entry.stats.byte_count += bc
            rloc_entry.stats.last_increment = lisp_get_timestamp() - last

            lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
                last, eid_str, rloc_str))

        # Unicast entry with an elapsed TTL: refresh via Map-Request.
        if (mc.group.is_null() and mc.has_ttl_elapsed()):
            eid_str = green(mc.print_eid_tuple(), False)
            lprint("Refresh map-cache entry {}".format(eid_str))
            lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)

    return
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
    """Process "decap-statistics" counters from an external data-plane.

    An ITR process forwards the message to the lisp-etr process (which
    owns decap state); an ETR folds the per-category packet/byte counts
    into lisp_decap_stats.
    """
    # The ITR does not decapsulate; relay the stats to the ETR via IPC.
    if (lisp_i_am_itr):
        lprint("Send decap-stats IPC message to lisp-etr process")
        ipc = "stats%{}".format(json.dumps(msg))
        ipc = lisp_command_ipc(ipc, "lisp-itr")
        lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
        return

    ipc = bold("IPC", False)
    lprint("Process decap-stats {} message: '{}'".format(ipc, msg))

    # When relayed by the ITR, the ETR receives the message as a string.
    if (lisp_i_am_etr): msg = json.loads(msg)

    categories = ["good-packets", "ICV-error", "checksum-error",
        "lisp-header-error", "no-decrypt-key", "bad-inner-version",
        "outer-header-error"]

    for category in categories:
        # Missing categories contribute 0 to every counter.
        pc = 0 if (category not in msg) else msg[category]["packet-count"]
        lisp_decap_stats[category].packet_count += pc

        bc = 0 if (category not in msg) else msg[category]["byte-count"]
        lisp_decap_stats[category].byte_count += bc

        last = 0 if (category not in msg) else msg[category]["seconds-last-packet"]

        lisp_decap_stats[category].last_increment = lisp_get_timestamp() - last

    return
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
    """Receive and dispatch a punt IPC message from a data-plane.

    Handles "statistics", "decap-statistics" and "restart" messages by
    delegating to their processors. A "discovery" message carries a
    packet the data-plane had no state for: discover a dynamic source
    EID and/or send a Map-Request for an unknown destination EID.
    """
    message, source = punt_socket.recvfrom(4000)

    msg = json.loads(message)
    if (type(msg) != dict):
        lprint("Invalid punt message from {}, not in JSON format".format(source))
        return

    punt = bold("Punt", False)
    lprint("{} message from '{}': '{}'".format(punt, source, msg))

    if ("type" not in msg):
        lprint("Punt IPC message has no 'type' key")
        return

    # Dispatch the non-discovery message types.
    if (msg["type"] == "statistics"):
        lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
        return

    if (msg["type"] == "decap-statistics"):
        lisp_process_data_plane_decap_stats(msg, punt_socket)
        return

    if (msg["type"] == "restart"):
        lisp_process_data_plane_restart()
        return

    if (msg["type"] != "discovery"):
        lprint("Punt IPC message has wrong format")
        return

    if ("interface" not in msg):
        lprint("Invalid punt message from {}, required keys missing".format(source))
        return

    # Resolve the instance-ID, either carried directly or derived from
    # the receiving interface.
    device = msg["interface"]
    if (device == ""):
        iid = int(msg["instance-id"])
        if (iid == -1): return
    else:
        iid = lisp_get_interface_instance_id(device, None)

    # Parse and validate the optional source-EID.
    seid = None
    if ("source-eid" in msg):
        eid_str = msg["source-eid"]
        seid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
        if (seid.is_null()):
            lprint("Invalid source-EID format '{}'".format(eid_str))
            return

    # Parse and validate the optional dest-EID.
    deid = None
    if ("dest-eid" in msg):
        eid_str = msg["dest-eid"]
        deid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
        if (deid.is_null()):
            lprint("Invalid dest-EID format '{}'".format(eid_str))
            return

    # Dynamic-EID discovery: the source must fall in a configured
    # database-mapping that allows dynamic EIDs on this interface.
    if (seid):
        seid_str = green(seid.print_address(), False)
        db = lisp_db_for_lookups.lookup_cache(seid, False)
        if (db != None):
            if (db.dynamic_eid_configured()):
                interface = lisp_allow_dynamic_eid(device, seid)
                if (interface != None and lisp_i_am_itr):
                    lisp_itr_discover_eid(db, seid, device, interface)
                else:
                    lprint(("Disallow dynamic source-EID {} " + \
                        "on interface {}").format(seid_str, device))
        else:
            lprint("Punt from non-EID source {}".format(seid_str))

    # Unknown destination: send a Map-Request unless rate-limited or a
    # usable map-cache entry already exists.
    if (deid):
        mc = lisp_map_cache_lookup(seid, deid)
        if (mc == None or lisp_mr_or_pubsub(mc.action)):
            if (lisp_rate_limit_map_request(deid)): return

            pubsub = (mc and mc.action == LISP_SEND_PUBSUB_ACTION)
            lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
                seid, deid, None, pubsub)
        else:
            deid_str = green(deid.print_address(), False)
            lprint("Map-cache entry for {} already exists".format(deid_str))

    return
def lisp_ipc_map_cache_entry ( mc , jdata ) :
    """Serialize map-cache entry 'mc' and append it to list 'jdata'.

    Used as a walk callback; returns [True, jdata] so the caller keeps
    iterating over the map-cache.
    """
    jdata . append ( lisp_write_ipc_map_cache ( True , mc , dont_send = True ) )
    return ( [ True , jdata ] )
if 70 - 70: i11iIiiIii * oO0o . I11i - OoooooooOO / I1ii11iIi11i
if 10 - 10: IiII * OoOoOO00 . II111iiii . II111iiii * Oo0Ooo
if 23 - 23: I1ii11iIi11i + I11i
if 74 - 74: i1IIi % I1IiiI
if 44 - 44: Oo0Ooo - OoooooooOO % ooOoO0o + II111iiii
if 60 - 60: o0oOOo0O0Ooo - ooOoO0o + i11iIiiIii % I1ii11iIi11i % II111iiii
if 62 - 62: Ii1I
if 30 - 30: iII111i % O0 + II111iiii * I1IiiI
def lisp_ipc_walk_map_cache ( mc , jdata ) :
    """Walk callback that serializes the map-cache into JSON list 'jdata'.

    A unicast entry (null group) is serialized directly. A multicast
    group entry may hang a per-source cache off it; when present, each
    (S,G) entry in that source cache is serialized as well.
    Returns [True, jdata] so the outer walk continues.
    """
    if mc . group . is_null ( ) :
        return ( lisp_ipc_map_cache_entry ( mc , jdata ) )

    # Group entry with no source-specific entries - nothing more to add.
    if mc . source_cache == None :
        return ( [ True , jdata ] )

    # Serialize every (S,G) entry hanging off this group entry.
    return ( [ True , mc . source_cache . walk_cache ( lisp_ipc_map_cache_entry ,
        jdata ) ] )
if 54 - 54: OoO0O00 % OOooOOo - OoO0O00 . Oo0Ooo % i1IIi
if 95 - 95: iII111i . OoooooooOO . o0oOOo0O0Ooo / II111iiii - OoooooooOO / I1Ii111
if 11 - 11: II111iiii / iII111i . oO0o / ooOoO0o / OOooOOo + OoO0O00
if 37 - 37: iIii1I11I1II1 * O0
if 64 - 64: I1Ii111 - II111iiii + oO0o % ooOoO0o * oO0o
if 27 - 27: iIii1I11I1II1 - Ii1I . i11iIiiIii / IiII . I1Ii111 / i11iIiiIii
if 27 - 27: OoOoOO00 . I11i / OoOoOO00
def lisp_itr_discover_eid ( db , eid , input_interface , routed_interface ,
    lisp_ipc_listen_socket ) :
    """Record a dynamic-EID the ITR has discovered on 'routed_interface'.

    If the EID is already in database-mapping 'db', only its last-packet
    timestamp is refreshed. Otherwise a new dynamic-EID entry is built,
    stored in db.dynamic_eids, logged, and announced to the lisp-etr
    process over IPC so it can register the EID.
    """
    eid_str = eid . print_address ( )

    #
    # Already known - just refresh the activity timestamp.
    #
    if eid_str in db . dynamic_eids :
        db . dynamic_eids [ eid_str ] . last_packet = lisp_get_timestamp ( )
        return

    #
    # First sighting - build and store a new dynamic-EID entry.
    #
    dyn_eid = lisp_dynamic_eid ( )
    dyn_eid . dynamic_eid . copy_address ( eid )
    dyn_eid . interface = routed_interface
    dyn_eid . last_packet = lisp_get_timestamp ( )
    dyn_eid . get_timeout ( routed_interface )
    db . dynamic_eids [ eid_str ] = dyn_eid

    routed_str = ""
    if input_interface != routed_interface :
        routed_str = ", routed-interface " + routed_interface

    eid_title = green ( eid_str , False ) + bold ( " discovered" , False )
    lprint ( "Dynamic-EID {} on interface {}{}, timeout {}" . format (
        eid_title , input_interface , routed_str , dyn_eid . timeout ) )

    #
    # Tell the lisp-etr process so it can register the dynamic-EID.
    #
    ipc = "learn%{}%{}" . format ( eid_str , routed_interface )
    ipc = lisp_command_ipc ( ipc , "lisp-itr" )
    lisp_ipc ( ipc , lisp_ipc_listen_socket , "lisp-etr" )
    return
if 72 - 72: i1IIi . OoO0O00
if 95 - 95: OoOoOO00 + Ii1I
if 48 - 48: Ii1I % IiII + OoO0O00 . IiII
if 42 - 42: Ii1I
if 70 - 70: I11i
if 82 - 82: O0
if 58 - 58: II111iiii . O0 - OoO0O00 - IiII
if 4 - 4: i11iIiiIii + i11iIiiIii / O0
if 46 - 46: I11i % ooOoO0o - Ii1I
if 25 - 25: O0 / i11iIiiIii . O0
if 24 - 24: I1ii11iIi11i - i11iIiiIii / iII111i . Oo0Ooo / I1ii11iIi11i
if 92 - 92: I11i % OoooooooOO
if 14 - 14: i11iIiiIii * i11iIiiIii * OoOoOO00
def lisp_retry_decap_keys ( addr_str , packet , iv , packet_icv ) :
 # A packet from RLOC 'addr_str' failed its ICV check. When key
 # searching is enabled, try the other stored decap crypto keys whose
 # dictionary key contains this address (entries qualified with more
 # than the bare address). The first candidate whose computed ICV over
 # (packet, iv) equals 'packet_icv' is installed as the new decap key
 # for 'addr_str'.
 if ( lisp_search_decap_keys == False ) : return
 if 84 - 84: OOooOOo % I1Ii111 + I11i / I1IiiI . iII111i
 if 78 - 78: oO0o . Oo0Ooo
 if 18 - 18: IiII
 if 35 - 35: OoooooooOO / i1IIi - OoO0O00 + Oo0Ooo - o0oOOo0O0Ooo
 # Only retry for entries without a colon in the address string.
 if ( addr_str . find ( ":" ) != - 1 ) : return
 if 100 - 100: II111iiii % i11iIiiIii % oO0o + O0
 # The currently-installed (failing) key entry for this address.
 OoOOoooO0oo = lisp_crypto_keys_by_rloc_decap [ addr_str ]
 if 46 - 46: OoO0O00 / I1IiiI - Oo0Ooo . o0oOOo0O0Ooo . Oo0Ooo % I11i
 for Ooo00o000o in lisp_crypto_keys_by_rloc_decap :
  if 43 - 43: IiII - O0 + I1Ii111 % OoooooooOO % OoO0O00 / I1Ii111
  if 48 - 48: I1ii11iIi11i . i1IIi % i1IIi - iII111i * o0oOOo0O0Ooo + IiII
  if 45 - 45: II111iiii . II111iiii + I1IiiI / I1Ii111 . OoO0O00 - o0oOOo0O0Ooo
  if 20 - 20: ooOoO0o % oO0o
  # Consider only dictionary keys that embed this address.
  if ( Ooo00o000o . find ( addr_str ) == - 1 ) : continue
  if 28 - 28: i1IIi . II111iiii + O0 / O0 % OoOoOO00 + OOooOOo
  if 24 - 24: OoooooooOO
  if 11 - 11: i11iIiiIii / iIii1I11I1II1 % ooOoO0o + OOooOOo
  if 73 - 73: OoOoOO00 + OoooooooOO + iIii1I11I1II1 + II111iiii * iIii1I11I1II1 - OoOoOO00
  # Skip the entry we already tried (the bare address itself).
  if ( Ooo00o000o == addr_str ) : continue
  if 71 - 71: O0 * OOooOOo . I1IiiI . I1Ii111 * I11i
  if 45 - 45: O0 . O0 . II111iiii * ooOoO0o
  if 2 - 2: OoO0O00 . o0oOOo0O0Ooo
  if 48 - 48: Ii1I
  oo0O00OOOOO = lisp_crypto_keys_by_rloc_decap [ Ooo00o000o ]
  # Identical key material cannot succeed where the current key failed.
  if ( oo0O00OOOOO == OoOOoooO0oo ) : continue
  if 45 - 45: I1ii11iIi11i - I11i + Ii1I
  if 82 - 82: iII111i
  if 81 - 81: i1IIi % OOooOOo - OoO0O00 - Oo0Ooo
  if 19 - 19: i1IIi
  # Recompute the ICV with this candidate key and compare.
  OO0o0o0 = oo0O00OOOOO [ 1 ]
  if ( packet_icv != OO0o0o0 . do_icv ( packet , iv ) ) :
   lprint ( "Test ICV with key {} failed" . format ( red ( Ooo00o000o , False ) ) )
   continue
  if 89 - 89: IiII % i11iIiiIii + OoO0O00 . oO0o / I1IiiI . Ii1I
  if 11 - 11: ooOoO0o - I1Ii111 - I11i + OoOoOO00
  # Candidate key verified - install it as this address' decap key.
  lprint ( "Changing decap crypto key to {}" . format ( red ( Ooo00o000o , False ) ) )
  lisp_crypto_keys_by_rloc_decap [ addr_str ] = oo0O00OOOOO
 if 20 - 20: I11i + O0
 return
if 27 - 27: Oo0Ooo
if 12 - 12: I1ii11iIi11i . iII111i - iII111i - OOooOOo - iIii1I11I1II1
if 50 - 50: I1IiiI - iIii1I11I1II1 . iII111i - Ii1I / I1Ii111 + iII111i
if 46 - 46: OOooOOo + iII111i % Oo0Ooo * iII111i % OoooooooOO * IiII
if 27 - 27: I1IiiI + I1IiiI + I1ii11iIi11i - oO0o * OOooOOo
if 53 - 53: I1ii11iIi11i / OoooooooOO * iIii1I11I1II1
if 4 - 4: I1IiiI . iIii1I11I1II1 + OOooOOo / IiII . o0oOOo0O0Ooo . I11i
if 52 - 52: ooOoO0o % i11iIiiIii . IiII + OoO0O00
def lisp_decent_pull_xtr_configured ( ) :
    """Return True when the LISP-Decent pull-based mapping system is
    configured on this xTR (both a modulus and a DNS suffix are set).
    """
    have_modulus = ( lisp_decent_modulus != 0 )
    have_suffix = ( lisp_decent_dns_suffix != None )
    return ( have_modulus and have_suffix )
if 66 - 66: II111iiii . Ii1I
if 42 - 42: iIii1I11I1II1 * iII111i * I1IiiI
if 66 - 66: Oo0Ooo * i1IIi / I1ii11iIi11i / OoO0O00
if 12 - 12: OOooOOo + iIii1I11I1II1 % I1Ii111 + OOooOOo
if 19 - 19: OoO0O00 / I1IiiI - o0oOOo0O0Ooo - i1IIi + I1ii11iIi11i * OoooooooOO
if 74 - 74: I1Ii111 . I11i / Oo0Ooo
if 88 - 88: oO0o % OoO0O00 - i11iIiiIii % I1Ii111 / O0 * IiII
if 99 - 99: o0oOOo0O0Ooo . ooOoO0o / i11iIiiIii
def lisp_is_decent_dns_suffix ( dns_name ) :
    """Return True when 'dns_name', with its first label removed,
    equals the configured LISP-Decent DNS suffix.
    """
    if lisp_decent_dns_suffix == None : return ( False )

    # Everything after the first dot must match the configured suffix
    # ("" when there is no dot at all).
    suffix = dns_name . partition ( "." ) [ 2 ]
    return ( suffix == lisp_decent_dns_suffix )
if 44 - 44: IiII + OOooOOo % OoO0O00 . OoooooooOO * O0
if 72 - 72: i1IIi - iII111i * I1IiiI % O0 - I11i * O0
if 78 - 78: I1IiiI - OoO0O00 / Ii1I . i1IIi
if 30 - 30: IiII
if 21 - 21: i1IIi . iII111i - I1IiiI
if 28 - 28: IiII / Ii1I - i1IIi - OoOoOO00
if 65 - 65: o0oOOo0O0Ooo * OoO0O00 / o0oOOo0O0Ooo
if 77 - 77: OoooooooOO - Oo0Ooo - OoOoOO00 / I11i / O0 . i11iIiiIii
if 27 - 27: I1Ii111 * O0
if 9 - 9: i1IIi - Oo0Ooo - i11iIiiIii / iIii1I11I1II1 . i1IIi
if 2 - 2: I11i + II111iiii - I11i / oO0o / I11i
def lisp_get_decent_index ( eid ) :
    """Return the LISP-Decent index for EID 'eid'.

    The printable EID-prefix is hashed with HMAC-SHA256 (key
    "lisp-decent"); the leading hex digits of the digest, reduced
    modulo 'lisp_decent_modulus', yield the index.

    Environment variable LISP_DECENT_HASH_WIDTH chooses how many bytes
    of the digest to use; unset, empty, or values above 32 default to
    12 hex digits (6 bytes).
    """
    eid_str = eid . print_prefix ( )

    #
    # hmac.new() requires a bytes message in Python 3; the printable
    # EID-prefix is ASCII so encoding is safe (and a no-op on Python 2).
    #
    hash_str = hmac . new ( b"lisp-decent" , eid_str . encode ( ) ,
        hashlib . sha256 ) . hexdigest ( )

    #
    # Decide how many hex digits of the digest to use.
    #
    hash_width = os . getenv ( "LISP_DECENT_HASH_WIDTH" )
    if ( hash_width in [ "" , None ] ) :
        hash_width = 12
    else :
        hash_width = int ( hash_width )
        if ( hash_width > 32 ) :
            hash_width = 12
        else :
            hash_width *= 2

    hash_value = hash_str [ 0 : hash_width ]
    index = int ( hash_value , 16 ) % lisp_decent_modulus

    lprint ( "LISP-Decent modulus {}, hash-width {}, mod-value {}, index {}" . format ( lisp_decent_modulus , old_div ( hash_width , 2 ) , hash_value , index ) )

    return ( index )
if 67 - 67: OOooOOo * iII111i / iIii1I11I1II1 / I1ii11iIi11i
if 10 - 10: OoooooooOO % I1ii11iIi11i * i1IIi . iII111i
if 96 - 96: II111iiii % i11iIiiIii - Oo0Ooo
if 70 - 70: O0 * iIii1I11I1II1 - IiII * I11i / Ii1I + i11iIiiIii
if 26 - 26: II111iiii - I11i % I11i / ooOoO0o + Oo0Ooo
if 91 - 91: I1IiiI % Ii1I - OOooOOo - Oo0Ooo / I1IiiI / OoO0O00
if 40 - 40: OoooooooOO
def lisp_get_decent_dns_name ( eid ) :
    """Build the LISP-Decent DNS lookup name "<index>.<suffix>" for
    EID 'eid'.
    """
    return ( "{}.{}" . format ( lisp_get_decent_index ( eid ) ,
        lisp_decent_dns_suffix ) )
if 71 - 71: OOooOOo
if 88 - 88: O0
if 44 - 44: II111iiii - IiII / I1IiiI + ooOoO0o % iII111i - iII111i
if 53 - 53: OoooooooOO
if 41 - 41: i1IIi - oO0o
if 41 - 41: I11i
if 92 - 92: i11iIiiIii
if 62 - 62: i1IIi / I1IiiI - o0oOOo0O0Ooo
def lisp_get_decent_dns_name_from_str ( iid , eid_str ) :
    """Build the LISP-Decent DNS lookup name "<index>.<suffix>" for an
    EID supplied as a string plus its instance-ID.
    """
    eid = lisp_address ( LISP_AFI_NONE , eid_str , 0 , iid )
    return ( "{}.{}" . format ( lisp_get_decent_index ( eid ) ,
        lisp_decent_dns_suffix ) )
if 3 - 3: O0 * OoOoOO00 * I11i / OoOoOO00
if 77 - 77: i1IIi
if 3 - 3: iII111i * OoO0O00 - oO0o + iII111i . o0oOOo0O0Ooo + I1IiiI
if 65 - 65: O0 / OoOoOO00
if 77 - 77: OoO0O00
if 17 - 17: i1IIi
if 35 - 35: OoOoOO00
if 61 - 61: I1Ii111
if 78 - 78: I1Ii111 * Ii1I % Ii1I + I1IiiI
if 83 - 83: iIii1I11I1II1 + O0 / IiII . iIii1I11I1II1
def lisp_trace_append ( packet , reason = None , ed = "encap" , lisp_socket = None ,
 rloc_entry = None ) :
 # Append this node's hop record to the JSON body of a LISP-Trace
 # packet carried in 'packet', then rebuild the UDP length/checksum
 # and IP length/checksum so the packet can continue. 'ed' is "encap"
 # or "decap" and selects the timestamp key ("ets"/"dts"). Returns
 # True when the packet should keep being forwarded, False when the
 # JSON could not be decoded or the trace was returned to the sender
 # (destination RLOC unknown).
 if 74 - 74: Oo0Ooo
 # Offset of the JSON payload: IP + UDP headers, 28 bytes for an IPv4
 # inner header, 48 for IPv6.
 oo00 = 28 if packet . inner_version == 4 else 48
 oo0Oi1I1iI1 = packet . packet [ oo00 : : ]
 OooOo00o0 = lisp_trace ( )
 if ( OooOo00o0 . decode ( oo0Oi1I1iI1 ) == False ) :
  lprint ( "Could not decode JSON portion of a LISP-Trace packet" )
  return ( False )
 if 90 - 90: ooOoO0o
 if 53 - 53: O0 * I1Ii111 . i11iIiiIii / iIii1I11I1II1
 # Destination RLOC string; "?" while the outer destination is unknown.
 IiiIIi = "?" if packet . outer_dest . is_null ( ) else packet . outer_dest . print_address_no_iid ( )
 if 69 - 69: i1IIi + O0
 if 67 - 67: I1ii11iIi11i * iIii1I11I1II1 / O0 - I1Ii111
 if 82 - 82: I1ii11iIi11i % i11iIiiIii - OoOoOO00 / I1Ii111 * o0oOOo0O0Ooo * OoO0O00
 if 85 - 85: Oo0Ooo + Ii1I - OoooooooOO . O0
 if 10 - 10: OOooOOo / Oo0Ooo . O0 / i1IIi - OoOoOO00
 if 41 - 41: II111iiii - I1ii11iIi11i - I1Ii111
 # On encap with a non-default data port, record "addr:port".
 if ( IiiIIi != "?" and packet . encap_port != LISP_DATA_PORT ) :
  if ( ed == "encap" ) : IiiIIi += ":{}" . format ( packet . encap_port )
 if 82 - 82: I1IiiI * I1IiiI / iIii1I11I1II1
 if 14 - 14: I11i + Ii1I - OOooOOo % Ii1I / Ii1I
 if 86 - 86: I1Ii111 - i11iIiiIii + Ii1I + I11i
 if 96 - 96: Ii1I
 if 28 - 28: i1IIi . oO0o . IiII + Oo0Ooo . Oo0Ooo . i1IIi
 # Build this node's hop record: node role, source RLOC, hostname,
 # timestamp, destination RLOC, and optional RLOC-probe telemetry.
 oo0O00OOOOO = { }
 oo0O00OOOOO [ "n" ] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else "RTR" if lisp_i_am_rtr else "?"
 if 34 - 34: Oo0Ooo + IiII / i1IIi
 iiIIOOoOo0 = packet . outer_source
 if ( iiIIOOoOo0 . is_null ( ) ) : iiIIOOoOo0 = lisp_myrlocs [ 0 ]
 oo0O00OOOOO [ "sr" ] = iiIIOOoOo0 . print_address_no_iid ( )
 if 63 - 63: oO0o . OoOoOO00 / IiII
 if 9 - 9: O0 + IiII . oO0o % IiII
 if 80 - 80: o0oOOo0O0Ooo * Oo0Ooo % i11iIiiIii * iII111i + i1IIi + II111iiii
 if 73 - 73: oO0o % oO0o * OoOoOO00 * O0 % OoO0O00 * i11iIiiIii
 if 49 - 49: I11i . ooOoO0o . i11iIiiIii - II111iiii
 # On the ITR, qualify the source with the inner source port when it
 # is not the LISP-Trace port.
 if ( oo0O00OOOOO [ "n" ] == "ITR" and packet . inner_sport != LISP_TRACE_PORT ) :
  oo0O00OOOOO [ "sr" ] += ":{}" . format ( packet . inner_sport )
 if 7 - 7: I1Ii111 % o0oOOo0O0Ooo . oO0o . ooOoO0o % i1IIi / I1IiiI
 if 88 - 88: i11iIiiIii / oO0o - i1IIi / I1IiiI
 oo0O00OOOOO [ "hn" ] = lisp_hostname
 # Timestamp key is "ets" on encap, "dts" on decap.
 Ooo00o000o = ed [ 0 ] + "ts"
 oo0O00OOOOO [ Ooo00o000o ] = lisp_get_timestamp ( )
 if 57 - 57: oO0o + O0 * I11i
 if 87 - 87: o0oOOo0O0Ooo % Oo0Ooo * I1ii11iIi11i / OoooooooOO / o0oOOo0O0Ooo
 if 78 - 78: Ii1I
 if 5 - 5: i1IIi * ooOoO0o / OoOoOO00 % i11iIiiIii
 if 57 - 57: IiII
 if 89 - 89: I1ii11iIi11i - I1Ii111 + o0oOOo0O0Ooo
 # On the ETR with no outer destination, use the first RLOC of our
 # own database-mapping for the inner destination EID.
 if ( IiiIIi == "?" and oo0O00OOOOO [ "n" ] == "ETR" ) :
  oOooII111iIiI1 = lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
  if ( oOooII111iIiI1 != None and len ( oOooII111iIiI1 . rloc_set ) >= 1 ) :
   IiiIIi = oOooII111iIiI1 . rloc_set [ 0 ] . rloc . print_address_no_iid ( )
  if 62 - 62: I1ii11iIi11i + OoooooooOO * OOooOOo
 if 49 - 49: i1IIi - I11i * II111iiii
 oo0O00OOOOO [ "dr" ] = IiiIIi
 if 4 - 4: o0oOOo0O0Ooo + o0oOOo0O0Ooo
 if 57 - 57: I1IiiI * OOooOOo . i11iIiiIii * oO0o - OoOoOO00
 if 35 - 35: O0
 if 65 - 65: Oo0Ooo
 # Destination RLOC still unknown - annotate with the caller's reason.
 if ( IiiIIi == "?" and reason != None ) :
  oo0O00OOOOO [ "dr" ] += " ({})" . format ( reason )
 if 100 - 100: I1Ii111 . o0oOOo0O0Ooo * OoooooooOO . o0oOOo0O0Ooo
 if 90 - 90: i11iIiiIii . I1IiiI + ooOoO0o * OoooooooOO * OoooooooOO + oO0o
 if 77 - 77: OOooOOo * OoOoOO00
 if 75 - 75: Oo0Ooo * Oo0Ooo - IiII - OoOoOO00 / i11iIiiIii + I1Ii111
 if 57 - 57: i11iIiiIii / oO0o
 # Include recent RLOC-probe telemetry when a RLOC entry was supplied.
 if ( rloc_entry != None ) :
  oo0O00OOOOO [ "rtts" ] = rloc_entry . recent_rloc_probe_rtts
  oo0O00OOOOO [ "hops" ] = rloc_entry . recent_rloc_probe_hops
  oo0O00OOOOO [ "lats" ] = rloc_entry . recent_rloc_probe_latencies
 if 37 - 37: o0oOOo0O0Ooo + OoOoOO00 - i1IIi . Oo0Ooo
 if 3 - 3: ooOoO0o % OoooooooOO / I1Ii111 + oO0o - O0
 if 72 - 72: oO0o * OoO0O00
 if 89 - 89: OoooooooOO . OOooOOo
 if 96 - 96: o0oOOo0O0Ooo + OoOoOO00 / i11iIiiIii - o0oOOo0O0Ooo * i11iIiiIii + OOooOOo
 if 16 - 16: IiII / I1Ii111 . II111iiii * I11i
 # First hop: seed the JSON with a (source-EID, dest-EID) path record.
 OoO0OOOooo = packet . inner_source . print_address ( )
 O00oOoOoO0ooO = packet . inner_dest . print_address ( )
 if ( OooOo00o0 . packet_json == [ ] ) :
  OoO0o = { }
  OoO0o [ "se" ] = OoO0OOOooo
  OoO0o [ "de" ] = O00oOoOoO0ooO
  OoO0o [ "paths" ] = [ ]
  OooOo00o0 . packet_json . append ( OoO0o )
 if 33 - 33: I1ii11iIi11i / Oo0Ooo % i11iIiiIii
 if 37 - 37: Oo0Ooo - I1Ii111 - IiII / oO0o % I1IiiI / I1Ii111
 if 80 - 80: iII111i - oO0o % i1IIi * iIii1I11I1II1 . oO0o
 if 86 - 86: Ii1I
 if 36 - 36: i11iIiiIii % i11iIiiIii
 if 91 - 91: Oo0Ooo + I1Ii111 % iII111i
 # Append this hop record to the path entry for this destination EID.
 for OoO0o in OooOo00o0 . packet_json :
  if ( OoO0o [ "de" ] != O00oOoOoO0ooO ) : continue
  OoO0o [ "paths" ] . append ( oo0O00OOOOO )
  break
 if 7 - 7: I1Ii111 + II111iiii
 if 63 - 63: OoO0O00 - o0oOOo0O0Ooo / iII111i % II111iiii * IiII
 if 71 - 71: IiII
 if 34 - 34: II111iiii
 if 7 - 7: IiII / I1ii11iIi11i
 if 88 - 88: iIii1I11I1II1 / o0oOOo0O0Ooo
 if 68 - 68: OoooooooOO % Ii1I + ooOoO0o / oO0o
 if 60 - 60: i11iIiiIii / O0 / I1IiiI
 # If we are the destination ETR of a one-way trace, start the return
 # leg: add a reversed (se, de) record and turn the packet around.
 OooO = False
 if ( len ( OooOo00o0 . packet_json ) == 1 and oo0O00OOOOO [ "n" ] == "ETR" and
 OooOo00o0 . myeid ( packet . inner_dest ) ) :
  OoO0o = { }
  OoO0o [ "se" ] = O00oOoOoO0ooO
  OoO0o [ "de" ] = OoO0OOOooo
  OoO0o [ "paths" ] = [ ]
  OooOo00o0 . packet_json . append ( OoO0o )
  OooO = True
 if 27 - 27: ooOoO0o + i11iIiiIii * o0oOOo0O0Ooo
 if 10 - 10: OOooOOo * OoooooooOO
 if 1 - 1: iII111i . I1ii11iIi11i - ooOoO0o + OoO0O00 . OoooooooOO
 if 71 - 71: I1ii11iIi11i
 if 59 - 59: I1Ii111 + OoO0O00 + II111iiii
 if 12 - 12: I1Ii111 % I1ii11iIi11i - I1Ii111 + I11i
 # Log the trace and re-encode the (now larger) JSON payload.
 OooOo00o0 . print_trace ( )
 oo0Oi1I1iI1 = OooOo00o0 . encode ( )
 if 62 - 62: I1Ii111 % I11i % IiII - ooOoO0o . oO0o - OoooooooOO
 if 14 - 14: OOooOOo + Oo0Ooo % i1IIi + iIii1I11I1II1
 if 64 - 64: OoOoOO00 / Ii1I * Oo0Ooo - I1ii11iIi11i
 if 9 - 9: i11iIiiIii % Oo0Ooo + IiII + Ii1I . ooOoO0o / i1IIi
 if 40 - 40: I1Ii111 + I1IiiI - Ii1I
 if 27 - 27: i1IIi
 if 66 - 66: iII111i - ooOoO0o / i11iIiiIii + I1ii11iIi11i - Ii1I
 if 9 - 9: O0
 # Destination RLOC still unknown - send the trace straight back to
 # the RLOC recorded by the first hop and stop forwarding.
 oOi1II1 = OooOo00o0 . packet_json [ 0 ] [ "paths" ] [ 0 ] [ "sr" ]
 if ( IiiIIi == "?" ) :
  lprint ( "LISP-Trace return to sender RLOC {}" . format ( oOi1II1 ) )
  OooOo00o0 . return_to_sender ( lisp_socket , oOi1II1 , oo0Oi1I1iI1 )
  return ( False )
 if 59 - 59: IiII . i1IIi + O0 . O0 + OoO0O00 / i1IIi
 if 27 - 27: I1IiiI / OoO0O00 * O0 / IiII % I1ii11iIi11i . OoO0O00
 if 64 - 64: OoO0O00
 if 12 - 12: oO0o + Ii1I
 if 7 - 7: iII111i / Oo0Ooo - OoO0O00 + I1Ii111 * II111iiii * ooOoO0o
 if 80 - 80: oO0o - i1IIi / I11i . II111iiii % O0 % I11i
 # New UDP payload length (UDP header + JSON).
 Ooo000O00 = OooOo00o0 . packet_length ( )
 if 70 - 70: iIii1I11I1II1 * i1IIi * OOooOOo - Oo0Ooo % i1IIi
 if 60 - 60: o0oOOo0O0Ooo . OOooOOo % II111iiii - I1ii11iIi11i
 if 4 - 4: OOooOOo % ooOoO0o
 if 39 - 39: Ii1I
 if 67 - 67: iIii1I11I1II1 - OOooOOo
 if 47 - 47: OOooOOo - OOooOOo * I1Ii111
 # Rewrite the UDP length field (4 bytes before the payload) and zero
 # the UDP checksum.
 iI1Iiiii = packet . packet [ 0 : oo00 ]
 iIIiiIi = struct . pack ( "HH" , socket . htons ( Ooo000O00 ) , 0 )
 iI1Iiiii = iI1Iiiii [ 0 : oo00 - 4 ] + iIIiiIi
 # IPv6 requires a non-zero UDP checksum; recompute it on the ETR
 # when the return-path record has been added.
 if ( packet . inner_version == 6 and oo0O00OOOOO [ "n" ] == "ETR" and
 len ( OooOo00o0 . packet_json ) == 2 ) :
  O0I1II1 = iI1Iiiii [ oo00 - 8 : : ] + oo0Oi1I1iI1
  O0I1II1 = lisp_udp_checksum ( OoO0OOOooo , O00oOoOoO0ooO , O0I1II1 )
  iI1Iiiii = iI1Iiiii [ 0 : oo00 - 8 ] + O0I1II1 [ 0 : 8 ]
 if 51 - 51: II111iiii . iIii1I11I1II1 . o0oOOo0O0Ooo
 if 47 - 47: Ii1I * I1Ii111 / II111iiii
 if 73 - 73: ooOoO0o
 if 53 - 53: IiII . Oo0Ooo
 if 54 - 54: i11iIiiIii % ooOoO0o % I1Ii111 + o0oOOo0O0Ooo
 if 2 - 2: IiII
 if 25 - 25: OoOoOO00 . OoO0O00 * o0oOOo0O0Ooo . OoooooooOO - Oo0Ooo + I1IiiI
 if 82 - 82: OoO0O00 - Ii1I * I11i * o0oOOo0O0Ooo
 if 17 - 17: OoooooooOO + I1Ii111
 # Turning the packet around: swap IP source/destination addresses
 # and UDP ports in the header, then swap inner_source/inner_dest.
 if ( OooO ) :
  if ( packet . inner_version == 4 ) :
   iI1Iiiii = iI1Iiiii [ 0 : 12 ] + iI1Iiiii [ 16 : 20 ] + iI1Iiiii [ 12 : 16 ] + iI1Iiiii [ 22 : 24 ] + iI1Iiiii [ 20 : 22 ] + iI1Iiiii [ 24 : : ]
   if 91 - 91: iIii1I11I1II1 % i11iIiiIii - o0oOOo0O0Ooo
  else :
   iI1Iiiii = iI1Iiiii [ 0 : 8 ] + iI1Iiiii [ 24 : 40 ] + iI1Iiiii [ 8 : 24 ] + iI1Iiiii [ 42 : 44 ] + iI1Iiiii [ 40 : 42 ] + iI1Iiiii [ 44 : : ]
  if 98 - 98: o0oOOo0O0Ooo % II111iiii * IiII - i11iIiiIii * oO0o
  if 15 - 15: O0 - II111iiii - Oo0Ooo . I1ii11iIi11i % OoO0O00
  IiI11I111 = packet . inner_dest
  packet . inner_dest = packet . inner_source
  packet . inner_source = IiI11I111
 if 63 - 63: o0oOOo0O0Ooo / OoOoOO00 % I1ii11iIi11i % I11i
 if 58 - 58: O0 + iII111i
 if 66 - 66: i1IIi . O0 . i1IIi - iIii1I11I1II1 - ooOoO0o % I1ii11iIi11i
 if 96 - 96: i1IIi + oO0o - OoOoOO00 - OoOoOO00
 if 13 - 13: I11i
 if 52 - 52: iII111i . OoOoOO00 * iIii1I11I1II1 . iII111i * IiII
 if 52 - 52: iII111i + iII111i
 # Rewrite the IP length field (total length for IPv4 at offset 2,
 # payload length for IPv6 at offset 4).
 oo00 = 2 if packet . inner_version == 4 else 4
 I1IIi1IIIi1I = 20 + Ooo000O00 if packet . inner_version == 4 else Ooo000O00
 iiiiiI1I = struct . pack ( "H" , socket . htons ( I1IIi1IIIi1I ) )
 iI1Iiiii = iI1Iiiii [ 0 : oo00 ] + iiiiiI1I + iI1Iiiii [ oo00 + 2 : : ]
 if 6 - 6: i11iIiiIii * ooOoO0o - Oo0Ooo / I1ii11iIi11i
 if 38 - 38: I1IiiI / OoO0O00
 if 85 - 85: Ii1I % OoOoOO00 / I1ii11iIi11i . OoOoOO00
 if 4 - 4: I1Ii111 - Oo0Ooo
 # IPv4: zero the header checksum field and recompute it.
 if ( packet . inner_version == 4 ) :
  IIIiIi11 = struct . pack ( "H" , 0 )
  iI1Iiiii = iI1Iiiii [ 0 : 10 ] + IIIiIi11 + iI1Iiiii [ 12 : : ]
  iiiiiI1I = lisp_ip_checksum ( iI1Iiiii [ 0 : 20 ] )
  iI1Iiiii = iiiiiI1I + iI1Iiiii [ 20 : : ]
 if 94 - 94: iIii1I11I1II1
 if 55 - 55: Ii1I . o0oOOo0O0Ooo * i11iIiiIii
 if 89 - 89: O0 % iIii1I11I1II1 . I1ii11iIi11i + OOooOOo / IiII
 if 84 - 84: i11iIiiIii . Oo0Ooo + OoOoOO00
 if 75 - 75: o0oOOo0O0Ooo
 # Reassemble headers + updated JSON and keep forwarding.
 packet . packet = iI1Iiiii + oo0Oi1I1iI1
 return ( True )
if 54 - 54: o0oOOo0O0Ooo
if 95 - 95: Ii1I % I11i - OoooooooOO
if 11 - 11: OoO0O00 - oO0o
if 50 - 50: II111iiii * IiII
if 26 - 26: OoO0O00 . II111iiii
if 19 - 19: iII111i / i11iIiiIii
if 31 - 31: I1Ii111 / I1Ii111 % IiII
if 68 - 68: O0 / OOooOOo % OoOoOO00
if 68 - 68: OoooooooOO - IiII + I1IiiI * IiII / I11i - OoO0O00
if 69 - 69: oO0o / II111iiii
def lisp_allow_gleaning ( eid , group , rloc ) :
    """Test (eid, group, rloc) against the configured glean-mappings.

    Each clause present in a glean-mapping entry (instance-id range,
    eid-prefix, group-prefix, rloc-prefix) must match for the entry to
    apply; the first matching entry wins. Returns a 3-tuple:
    (gleaning-allowed, rloc-probe-enabled, igmp-query-enabled).
    """
    if lisp_glean_mappings == [ ] : return ( False , False , False )

    for entry in lisp_glean_mappings :
        if "instance-id" in entry :
            iid = eid . instance_id
            low , high = entry [ "instance-id" ]
            if iid < low or iid > high : continue

        if "eid-prefix" in entry :
            # Copy so the configured prefix keeps its own instance-id.
            prefix = copy . deepcopy ( entry [ "eid-prefix" ] )
            prefix . instance_id = eid . instance_id
            if eid . is_more_specific ( prefix ) == False : continue

        if "group-prefix" in entry :
            if group == None : continue
            gprefix = copy . deepcopy ( entry [ "group-prefix" ] )
            gprefix . instance_id = group . instance_id
            if group . is_more_specific ( gprefix ) == False : continue

        if "rloc-prefix" in entry :
            if ( rloc != None and
                rloc . is_more_specific ( entry [ "rloc-prefix" ] ) == False ) : continue

        return ( True , entry [ "rloc-probe" ] , entry [ "igmp-query" ] )

    return ( False , False , False )
if 78 - 78: I11i
if 30 - 30: iIii1I11I1II1
if 74 - 74: I1IiiI - Oo0Ooo - i1IIi . iIii1I11I1II1 - I11i
if 57 - 57: I1IiiI - i11iIiiIii - I1ii11iIi11i
if 49 - 49: i1IIi . O0 % Ii1I * i1IIi
if 39 - 39: I1ii11iIi11i
if 74 - 74: II111iiii % oO0o * Oo0Ooo / iIii1I11I1II1
def lisp_build_gleaned_multicast ( seid , geid , rloc , port , igmp ) :
 # Create or update a gleaned (*, G) map-cache entry for group 'geid'
 # with an RLE node for source EID 'seid' at translated (rloc, port).
 # When 'igmp' is True, remember the group membership in
 # lisp_gleaned_groups keyed by the source EID.
 IIiI11I1I1i1i = geid . print_address ( )
 oo0OoOo = seid . print_address_no_iid ( )
 I111 = green ( "{}" . format ( oo0OoOo ) , False )
 oO0ooOOO = green ( "(*, {})" . format ( IIiI11I1I1i1i ) , False )
 O00o00o00OO0 = red ( rloc . print_address_no_iid ( ) + ":" + str ( port ) , False )
 if 92 - 92: Ii1I
 if 5 - 5: I11i . I11i / i1IIi - o0oOOo0O0Ooo % Oo0Ooo
 if 85 - 85: OOooOOo
 if 32 - 32: iII111i
 # Create the (*, G) map-cache entry when it does not exist yet.
 o0ooo0oOO0o = lisp_map_cache_lookup ( seid , geid )
 if ( o0ooo0oOO0o == None ) :
  o0ooo0oOO0o = lisp_mapping ( "" , "" , [ ] )
  o0ooo0oOO0o . group . copy_address ( geid )
  o0ooo0oOO0o . eid . copy_address ( geid )
  o0ooo0oOO0o . eid . address = 0
  o0ooo0oOO0o . eid . mask_len = 0
  o0ooo0oOO0o . mapping_source . copy_address ( rloc )
  o0ooo0oOO0o . map_cache_ttl = LISP_IGMP_TTL
  o0ooo0oOO0o . gleaned = True
  o0ooo0oOO0o . add_cache ( )
  lprint ( "Add gleaned EID {} to map-cache" . format ( oO0ooOOO ) )
 if 27 - 27: iIii1I11I1II1 - iII111i
 if 68 - 68: oO0o + OoooooooOO - i1IIi * OoOoOO00 % Oo0Ooo
 if 19 - 19: IiII * Oo0Ooo + I1IiiI * I1Ii111 % iIii1I11I1II1
 if 15 - 15: II111iiii % OoO0O00 % Oo0Ooo + I1Ii111
 if 54 - 54: I1Ii111 + OOooOOo
 if 6 - 6: Ii1I
 # Locate the existing RLOC, its RLE, and this source's RLE node.
 ii11Ii = i1i1ii = iI11i1ii11i11 = None
 if ( o0ooo0oOO0o . rloc_set != [ ] ) :
  ii11Ii = o0ooo0oOO0o . rloc_set [ 0 ]
  if ( ii11Ii . rle ) :
   i1i1ii = ii11Ii . rle
   for iIiiI111Iii1 in i1i1ii . rle_nodes :
    if ( iIiiI111Iii1 . rloc_name != oo0OoOo ) : continue
    iI11i1ii11i11 = iIiiI111Iii1
    break
   if 31 - 31: II111iiii + O0 * Oo0Ooo
  if 41 - 41: OoooooooOO % I1Ii111
 if 42 - 42: iIii1I11I1II1 * OoooooooOO * IiII . OoooooooOO + o0oOOo0O0Ooo * OoooooooOO
 if 56 - 56: Ii1I + IiII * I1IiiI % OoOoOO00
 if 99 - 99: I1ii11iIi11i * OoO0O00 * oO0o - ooOoO0o
 if 28 - 28: OoO0O00 % OoO0O00 / Ii1I / Oo0Ooo
 if 93 - 93: O0 + oO0o . OOooOOo * IiII
 # No RLOC entry yet - create one with low priority for gleaning.
 if ( ii11Ii == None ) :
  ii11Ii = lisp_rloc ( )
  o0ooo0oOO0o . rloc_set = [ ii11Ii ]
  ii11Ii . priority = 253
  ii11Ii . mpriority = 255
  o0ooo0oOO0o . build_best_rloc_set ( )
 if 12 - 12: iIii1I11I1II1
 if ( i1i1ii == None ) :
  i1i1ii = lisp_rle ( geid . print_address ( ) )
  ii11Ii . rle = i1i1ii
 if 64 - 64: OoOoOO00 + iII111i % I1Ii111 - OOooOOo + O0
 # Add a new RLE node for this source, or log a change when the
 # translated RLOC or port differs from what is stored.
 if ( iI11i1ii11i11 == None ) :
  iI11i1ii11i11 = lisp_rle_node ( )
  iI11i1ii11i11 . rloc_name = oo0OoOo
  i1i1ii . rle_nodes . append ( iI11i1ii11i11 )
  i1i1ii . build_forwarding_list ( )
  lprint ( "Add RLE {} from {} for gleaned EID {}" . format ( O00o00o00OO0 , I111 , oO0ooOOO ) )
 elif ( rloc . is_exact_match ( iI11i1ii11i11 . address ) == False or
 port != iI11i1ii11i11 . translated_port ) :
  lprint ( "Changed RLE {} from {} for gleaned EID {}" . format ( O00o00o00OO0 , I111 , oO0ooOOO ) )
 if 83 - 83: I1Ii111 + I1Ii111
 if 43 - 43: oO0o * i1IIi * Ii1I . iIii1I11I1II1 % o0oOOo0O0Ooo
 if 97 - 97: I1IiiI . i1IIi * OoOoOO00 / OOooOOo
 if 50 - 50: II111iiii . OoO0O00
 if 60 - 60: I11i . iIii1I11I1II1
 # Store (or refresh) the translated RLOC address and port.
 iI11i1ii11i11 . store_translated_rloc ( rloc , port )
 if 41 - 41: II111iiii / I1IiiI
 if 2 - 2: IiII / OoOoOO00 + I11i
 if 3 - 3: OoooooooOO + Oo0Ooo + OOooOOo
 if 20 - 20: Ii1I - oO0o - OoO0O00 + I1ii11iIi11i % OoO0O00 . i1IIi
 if 2 - 2: ooOoO0o * IiII . Ii1I
 # Track IGMP-joined groups per source EID with a join timestamp.
 if ( igmp ) :
  oOIII = seid . print_address ( )
  if ( oOIII not in lisp_gleaned_groups ) :
   lisp_gleaned_groups [ oOIII ] = { }
  if 69 - 69: IiII % i1IIi
  lisp_gleaned_groups [ oOIII ] [ IIiI11I1I1i1i ] = lisp_get_timestamp ( )
if 17 - 17: o0oOOo0O0Ooo . OoO0O00 * ooOoO0o * II111iiii - OoooooooOO % iII111i
if 47 - 47: I1IiiI * iIii1I11I1II1 - I11i - o0oOOo0O0Ooo
if 47 - 47: IiII + OoO0O00 % ooOoO0o - iII111i - IiII - oO0o
if 63 - 63: OoooooooOO / I1Ii111
if 90 - 90: I1Ii111 . i11iIiiIii - iIii1I11I1II1 + I1Ii111
if 67 - 67: IiII - I1ii11iIi11i + ooOoO0o . iIii1I11I1II1 . IiII
if 13 - 13: I1IiiI / i11iIiiIii % iIii1I11I1II1 - Oo0Ooo . i11iIiiIii + I1IiiI
if 77 - 77: o0oOOo0O0Ooo / II111iiii + i11iIiiIii % Ii1I . iIii1I11I1II1
def lisp_remove_gleaned_multicast ( seid , geid ) :
 # Remove source EID 'seid' from the RLE of the gleaned (*, G)
 # map-cache entry for group 'geid'. Also drops the group from
 # lisp_gleaned_groups and deletes the map-cache entry entirely when
 # its RLE list becomes empty.
 if 66 - 66: iII111i / oO0o - OoO0O00 . Oo0Ooo
 if 31 - 31: IiII % O0
 if 46 - 46: iIii1I11I1II1 - OoooooooOO . oO0o % iIii1I11I1II1 / i1IIi + Ii1I
 if 5 - 5: I1ii11iIi11i % II111iiii
 o0ooo0oOO0o = lisp_map_cache_lookup ( seid , geid )
 if ( o0ooo0oOO0o == None ) : return
 if 17 - 17: i11iIiiIii - II111iiii / O0 % OoO0O00 . Oo0Ooo + IiII
 ooo0o0O = o0ooo0oOO0o . rloc_set [ 0 ] . rle
 if ( ooo0o0O == None ) : return
 if 60 - 60: I11i % I1IiiI
 # Find this source's RLE node by its address-name.
 OO000o = seid . print_address_no_iid ( )
 ooo0o00o0Oooo = False
 for iI11i1ii11i11 in ooo0o0O . rle_nodes :
  if ( iI11i1ii11i11 . rloc_name == OO000o ) :
   ooo0o00o0Oooo = True
   break
  if 99 - 99: oO0o . OOooOOo % iII111i * Ii1I
 if 98 - 98: Oo0Ooo * O0 + i1IIi
 if ( ooo0o00o0Oooo == False ) : return
 if 41 - 41: i1IIi % OoO0O00 * iIii1I11I1II1
 if 2 - 2: I1ii11iIi11i * iII111i . iIii1I11I1II1 * Oo0Ooo
 if 34 - 34: i11iIiiIii % O0 . I1IiiI / ooOoO0o + OoO0O00
 if 28 - 28: Ii1I / iIii1I11I1II1
 # Remove the node (loop variable still bound) and rebuild forwarding.
 ooo0o0O . rle_nodes . remove ( iI11i1ii11i11 )
 ooo0o0O . build_forwarding_list ( )
 if 41 - 41: iIii1I11I1II1
 IIiI11I1I1i1i = geid . print_address ( )
 oOIII = seid . print_address ( )
 I111 = green ( "{}" . format ( oOIII ) , False )
 oO0ooOOO = green ( "(*, {})" . format ( IIiI11I1I1i1i ) , False )
 lprint ( "Gleaned EID {} RLE removed for {}" . format ( oO0ooOOO , I111 ) )
 if 57 - 57: I1Ii111 * o0oOOo0O0Ooo - o0oOOo0O0Ooo * I11i
 if 89 - 89: Ii1I % O0
 if 81 - 81: OoooooooOO / II111iiii - ooOoO0o
 if 14 - 14: O0
 # Forget this group membership for the source EID.
 if ( oOIII in lisp_gleaned_groups ) :
  if ( IIiI11I1I1i1i in lisp_gleaned_groups [ oOIII ] ) :
   lisp_gleaned_groups [ oOIII ] . pop ( IIiI11I1I1i1i )
  if 59 - 59: I11i % II111iiii . iIii1I11I1II1 * oO0o % Ii1I
 if 79 - 79: OoooooooOO . II111iiii
 if 55 - 55: II111iiii
 if 2 - 2: I1ii11iIi11i * i1IIi + OOooOOo / OoO0O00 % OoOoOO00 / O0
 if 47 - 47: OoooooooOO - i11iIiiIii - IiII * O0 * iII111i * Ii1I
 if 36 - 36: I1Ii111
 # Last RLE node gone - the whole gleaned entry can be deleted.
 if ( ooo0o0O . rle_nodes == [ ] ) :
  o0ooo0oOO0o . delete_cache ( )
  lprint ( "Gleaned EID {} remove, no more RLEs" . format ( oO0ooOOO ) )
if 85 - 85: Oo0Ooo % OOooOOo
if 10 - 10: O0 + Oo0Ooo + Ii1I % IiII
if 89 - 89: oO0o / iII111i + OOooOOo
if 27 - 27: Ii1I / o0oOOo0O0Ooo % I11i
if 96 - 96: i11iIiiIii % O0
if 11 - 11: II111iiii . i11iIiiIii % ooOoO0o * Ii1I * OoOoOO00 * OoooooooOO
if 80 - 80: OoO0O00
if 55 - 55: iIii1I11I1II1 % OoO0O00 / II111iiii - OoO0O00
def lisp_change_gleaned_multicast ( seid , rloc , port ) :
    """The translated RLOC or port changed for gleaned source EID
    'seid' - rebuild the RLE for every gleaned group it has joined.
    """
    seid_str = seid . print_address ( )
    if seid_str not in lisp_gleaned_groups : return

    for group in lisp_gleaned_groups [ seid_str ] :
        lisp_geid . store_address ( group )
        lisp_build_gleaned_multicast ( seid , lisp_geid , rloc , port , False )
if 93 - 93: OOooOOo / ooOoO0o
if 89 - 89: OoooooooOO + iIii1I11I1II1 / I1ii11iIi11i % iIii1I11I1II1 / iII111i
if 74 - 74: Ii1I + I1IiiI * iII111i / i11iIiiIii - ooOoO0o * OoooooooOO
if 98 - 98: I1IiiI
if 85 - 85: OoooooooOO * i1IIi * O0 * OoooooooOO . IiII
if 22 - 22: ooOoO0o
if 44 - 44: I1ii11iIi11i + IiII + IiII * I1ii11iIi11i - OoooooooOO / I1Ii111
if 3 - 3: I1ii11iIi11i + o0oOOo0O0Ooo * I11i / Oo0Ooo
if 31 - 31: i11iIiiIii % OoO0O00 - oO0o / o0oOOo0O0Ooo % O0
if 53 - 53: iIii1I11I1II1 * I1ii11iIi11i
if 46 - 46: OOooOOo % OoOoOO00 * iII111i
if 55 - 55: I1IiiI * iIii1I11I1II1 . OoOoOO00
if 82 - 82: iIii1I11I1II1 - iII111i % I1IiiI + I1IiiI * i1IIi % O0
if 63 - 63: I1IiiI + OoOoOO00
if 55 - 55: o0oOOo0O0Ooo
if 95 - 95: OoO0O00 * ooOoO0o * oO0o % Oo0Ooo
if 36 - 36: I1IiiI - Ii1I + oO0o . iIii1I11I1II1
if 47 - 47: Ii1I
if 12 - 12: I1IiiI / IiII + OoOoOO00 . I1Ii111 / I1Ii111
if 97 - 97: OOooOOo - iII111i . I1IiiI * oO0o . OoOoOO00 * IiII
if 29 - 29: iIii1I11I1II1
if 94 - 94: Ii1I - i11iIiiIii % O0 + Ii1I / O0 % I11i
if 42 - 42: I1ii11iIi11i . iIii1I11I1II1 % I11i
if 54 - 54: OoOoOO00 / Ii1I
if 84 - 84: Oo0Ooo / OoO0O00 . o0oOOo0O0Ooo - iII111i . iII111i - II111iiii
if 99 - 99: I1Ii111 % Oo0Ooo
if 61 - 61: OoooooooOO % i11iIiiIii + OOooOOo
if 53 - 53: iII111i . iIii1I11I1II1
if 59 - 59: II111iiii . II111iiii - iII111i
if 46 - 46: oO0o / iIii1I11I1II1 + OoO0O00
if 33 - 33: Ii1I . iIii1I11I1II1 . O0 * I1ii11iIi11i . OoOoOO00 / i11iIiiIii
if 85 - 85: iII111i
if 23 - 23: O0
if 83 - 83: i11iIiiIii % OoooooooOO
if 45 - 45: OoO0O00 + Ii1I
if 90 - 90: O0 * i1IIi . i1IIi * I1ii11iIi11i + I1ii11iIi11i / i1IIi
if 52 - 52: O0 / iIii1I11I1II1 * IiII
if 50 - 50: oO0o . Ii1I . OoooooooOO * o0oOOo0O0Ooo
if 25 - 25: o0oOOo0O0Ooo % ooOoO0o
if 91 - 91: I1Ii111 * i11iIiiIii / o0oOOo0O0Ooo * oO0o - o0oOOo0O0Ooo * OOooOOo
if 2 - 2: i1IIi - OoOoOO00 / iII111i
if 70 - 70: IiII / O0 - i1IIi
if 23 - 23: OoOoOO00
if 2 - 2: II111iiii * OoOoOO00 . iIii1I11I1II1 . ooOoO0o . ooOoO0o + iII111i
if 60 - 60: I1ii11iIi11i / I1ii11iIi11i
if 44 - 44: i11iIiiIii / ooOoO0o - iIii1I11I1II1 + OoO0O00
if 62 - 62: i1IIi / I1Ii111 + ooOoO0o
if 80 - 80: iII111i + OoO0O00 % OoO0O00
if 4 - 4: OoOoOO00 * I11i * O0 . OoooooooOO + Ii1I % i1IIi
if 11 - 11: OoOoOO00 % i11iIiiIii . OoOoOO00 % Oo0Ooo * Ii1I
if 67 - 67: IiII - OoOoOO00 / I1Ii111 % oO0o % OOooOOo
if 19 - 19: OoO0O00 - iII111i
if 76 - 76: OoOoOO00 * ooOoO0o - iII111i * I1IiiI + I11i
if 4 - 4: Oo0Ooo
if 95 - 95: Oo0Ooo * i11iIiiIii - O0
if 100 - 100: iIii1I11I1II1 / I1ii11iIi11i - o0oOOo0O0Ooo / iII111i
if 73 - 73: OoooooooOO
if 68 - 68: II111iiii / i11iIiiIii % i11iIiiIii % OoooooooOO
if 81 - 81: i1IIi + O0 . IiII . I1IiiI / ooOoO0o
if 75 - 75: I1ii11iIi11i / OoOoOO00
if 59 - 59: OoO0O00 . OoooooooOO % IiII
if 35 - 35: I1ii11iIi11i + I1Ii111
if 25 - 25: iIii1I11I1II1 / I11i % OoooooooOO / Oo0Ooo
if 4 - 4: i1IIi % i1IIi % oO0o
if 51 - 51: o0oOOo0O0Ooo * i11iIiiIii
if 44 - 44: II111iiii - o0oOOo0O0Ooo + i1IIi / I1Ii111 . I11i
if 17 - 17: OOooOOo - O0 . II111iiii - OoooooooOO + I1ii11iIi11i
if 100 - 100: OoOoOO00 * OOooOOo % i11iIiiIii / OoOoOO00
if 72 - 72: I1IiiI . oO0o
if 76 - 76: Ii1I - Oo0Ooo * II111iiii
if 17 - 17: I1Ii111 * O0
if 8 - 8: i11iIiiIii / OoO0O00 / OOooOOo
if 26 - 26: I1ii11iIi11i . Ii1I - iIii1I11I1II1 . Ii1I / Ii1I % I11i
if 56 - 56: OOooOOo . I11i + O0 * oO0o - i11iIiiIii / i11iIiiIii
if 73 - 73: I1ii11iIi11i
if 59 - 59: iII111i % iIii1I11I1II1 * OoOoOO00
if 41 - 41: i1IIi * IiII - i11iIiiIii / O0 + Oo0Ooo + ooOoO0o
if 94 - 94: OoO0O00 . O0 + iIii1I11I1II1 . oO0o % oO0o
if 7 - 7: I1ii11iIi11i * oO0o / OoOoOO00
if 89 - 89: OoO0O00 / oO0o % I11i - I1ii11iIi11i . o0oOOo0O0Ooo
if 46 - 46: i11iIiiIii
if 99 - 99: i11iIiiIii / oO0o / OoOoOO00 / O0 * I1ii11iIi11i
if 72 - 72: ooOoO0o - I1Ii111 - iIii1I11I1II1 . I1IiiI
if 77 - 77: Oo0Ooo * OoO0O00
if 67 - 67: OoOoOO00 . I1Ii111 / I1IiiI * II111iiii
if 45 - 45: I1ii11iIi11i * o0oOOo0O0Ooo . iIii1I11I1II1 * Oo0Ooo
if 58 - 58: OOooOOo + O0
if 19 - 19: o0oOOo0O0Ooo
if 8 - 8: OOooOOo * OOooOOo - Ii1I * OoOoOO00 % OoO0O00 * O0
if 70 - 70: I1IiiI
if 17 - 17: I11i % OOooOOo - i11iIiiIii . OoooooooOO % OoO0O00 + OoO0O00
if 24 - 24: Ii1I . OOooOOo . IiII / Oo0Ooo . Oo0Ooo . II111iiii
# IGMP message types (IANA registry values), used for log messages.
igmp_types = {
    17 : "IGMP-query" ,
    18 : "IGMPv1-report" ,
    19 : "DVMRP" ,
    20 : "PIMv1" ,
    22 : "IGMPv2-report" ,
    23 : "IGMPv2-leave" ,
    30 : "mtrace-response" ,
    31 : "mtrace-request" ,
    34 : "IGMPv3-report"
}

# IGMPv3 group-record types (RFC 3376, section 4.2.12).
lisp_igmp_record_types = {
    1 : "include-mode" ,
    2 : "exclude-mode" ,
    3 : "change-to-include" ,
    4 : "change-to-exclude" ,
    5 : "allow-new-source" ,
    6 : "block-old-sources"
}
if 39 - 39: II111iiii % oO0o % I1IiiI - iIii1I11I1II1 / I1IiiI
def lisp_process_igmp_packet ( packet ) :
    """
    Parse a received IGMP packet (the buffer includes its IPv4 header) and
    return the membership entries it encodes.

    Returns one of:
      True    -- the packet was an IGMP membership query (nothing to record)
      []      -- unsupported/malformed packet, or only link-local groups
      [[source, group, joinleave], ...] -- parsed entries, where 'source' is
              a source address string or None for (*, G) state, 'group' is a
              group address string, and 'joinleave' is True for a join and
              False for a leave.

    Fixes vs. previous version: truncated IGMPv3 packets now return the
    entries parsed so far instead of None (which crashed the caller), the
    minimum-length check runs before any unpacking, and skipped records
    consume their source list so later records stay aligned.
    """

    #
    # Log the packet with its IPv4 source for debugging.
    #
    source = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
    source . address = socket . ntohl ( struct . unpack ( "I" , packet [ 12 : 16 ] ) [ 0 ] )
    from_str = bold ( "from {}" . format ( source . print_address_no_iid ( ) ) , False )

    recv_str = bold ( "Receive" , False )
    lprint ( "{} {}-byte {}, IGMP packet: {}" . format ( recv_str , len ( packet ) ,
        from_str , lisp_format_packet ( packet ) ) )

    #
    # Skip the variable-length IPv4 header. IHL is the low nibble of the
    # first byte, in units of 32-bit words.
    #
    header_length = ( struct . unpack ( "B" , packet [ 0 : 1 ] ) [ 0 ] & 0x0f ) * 4
    igmp = packet [ header_length : : ]

    #
    # Every supported IGMP message is at least 8 bytes; check before
    # unpacking (previously a short payload raised struct.error).
    #
    if ( len ( igmp ) < 8 ) :
        lprint ( "IGMP message too small" )
        return ( [ ] )

    igmp_type = struct . unpack ( "B" , igmp [ 0 : 1 ] ) [ 0 ]

    #
    # Bytes 4-7 hold the group address for IGMPv1/v2 messages, and the
    # reserved/number-of-records words for an IGMPv3 report.
    #
    group = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
    group . address = socket . ntohl ( struct . unpack ( "II" , igmp [ : 8 ] ) [ 1 ] )
    group_str = group . print_address_no_iid ( )

    if ( igmp_type == 17 ) :
        lprint ( "IGMP Query for group {}" . format ( group_str ) )
        return ( True )

    #
    # Only v1/v2 reports, v2 leaves, and v3 reports are processed further.
    #
    if ( igmp_type not in ( 0x12 , 0x16 , 0x17 , 0x22 ) ) :
        type_str = "{} ({})" . format ( igmp_type , igmp_types [ igmp_type ] ) if \
            ( igmp_type in igmp_types ) else igmp_type
        lprint ( "IGMP type {} not supported" . format ( type_str ) )
        return ( [ ] )

    #
    # IGMPv2 leave -- a single (*, G) leave entry.
    #
    if ( igmp_type == 0x17 ) :
        lprint ( "IGMPv2 leave (*, {})" . format ( bold ( group_str , False ) ) )
        return ( [ [ None , group_str , False ] ] )

    #
    # IGMPv1/v2 report -- a single (*, G) join entry, unless the group is
    # link-local (224.0.0.0/24), which is never registered.
    #
    if ( igmp_type in ( 0x12 , 0x16 ) ) :
        lprint ( "IGMPv{} join (*, {})" . format ( 1 if ( igmp_type == 0x12 )
            else 2 , bold ( group_str , False ) ) )
        if ( group_str . find ( "224.0.0." ) != - 1 ) :
            lprint ( "Suppress registration for link-local groups" )
            return ( [ ] )
        return ( [ [ None , group_str , True ] ] )

    #
    # IGMPv3 report. The record count was unpacked above. NOTE: the count
    # is taken from the full 32-bit word (reserved + number-of-records);
    # correct as long as the reserved field is zero -- kept from the
    # original parser.
    #
    record_count = group . address
    igmp = igmp [ 8 : : ]

    record_format = "BBHI"
    record_size = struct . calcsize ( record_format )
    source_format = "I"
    source_size = struct . calcsize ( source_format )
    source_addr = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )

    #
    # Walk each group record. Each entry appended is
    # [source-or-None, group, joinleave].
    #
    entries = [ ]
    for _ in range ( record_count ) :
        if ( len ( igmp ) < record_size ) : return ( entries )

        record_type , aux_len , source_count , address = struct . unpack (
            record_format , igmp [ : record_size ] )
        igmp = igmp [ record_size : : ]
        source_count = socket . ntohs ( source_count )

        if ( record_type not in lisp_igmp_record_types ) :
            lprint ( "Invalid record type {}" . format ( record_type ) )

            #
            # Consume this record's source list so the next record header
            # is parsed from the right offset.
            #
            igmp = igmp [ source_count * source_size : : ]
            continue

        record_type_str = lisp_igmp_record_types [ record_type ]
        group . address = socket . ntohl ( address )
        group_str = group . print_address_no_iid ( )

        lprint ( "Record type: {}, group: {}, source-count: {}" . format (
            record_type_str , group_str , source_count ) )

        #
        # include-mode/allow-new-source are joins; exclude-mode and
        # change-to-exclude with an empty source list mean "join all
        # sources" (*, G). Everything else is a leave.
        #
        joinleave = False
        if ( record_type in ( 1 , 5 ) ) : joinleave = True
        if ( record_type in ( 2 , 4 ) and source_count == 0 ) : joinleave = True
        action_str = "join" if ( joinleave ) else "leave"

        #
        # Never register link-local groups, but still consume the record's
        # source list to stay aligned.
        #
        if ( group_str . find ( "224.0.0." ) != - 1 ) :
            lprint ( "Suppress registration for link-local groups" )
            igmp = igmp [ source_count * source_size : : ]
            continue

        #
        # No sources means (*, G) state.
        #
        if ( source_count == 0 ) :
            entries . append ( [ None , group_str , joinleave ] )
            lprint ( "IGMPv3 {} (*, {})" . format ( bold ( action_str , False ) ,
                bold ( group_str , False ) ) )

        #
        # One entry per (S, G) pair. NOTE: aux_len (auxiliary data words)
        # is assumed zero, as in the original parser -- TODO confirm no
        # sender includes auxiliary data.
        #
        for _ in range ( source_count ) :
            if ( len ( igmp ) < source_size ) : return ( entries )
            address = struct . unpack ( source_format , igmp [ : source_size ] ) [ 0 ]
            source_addr . address = socket . ntohl ( address )
            source_str = source_addr . print_address_no_iid ( )
            entries . append ( [ source_str , group_str , joinleave ] )
            lprint ( "{} ({}, {})" . format ( action_str ,
                green ( source_str , False ) , bold ( group_str , False ) ) )
            igmp = igmp [ source_size : : ]

    return ( entries )
if 30 - 30: iII111i
if 25 - 25: I11i % i1IIi + OOooOOo * Ii1I . i1IIi
if 81 - 81: I11i % OoOoOO00 . Ii1I
if 82 - 82: i1IIi / II111iiii
if 40 - 40: II111iiii - I1Ii111 + Oo0Ooo / IiII
if 15 - 15: I1Ii111 + ooOoO0o / II111iiii . OoOoOO00 - I1Ii111
if 59 - 59: Ii1I * iIii1I11I1II1 - iIii1I11I1II1 % I1Ii111 - OoO0O00 / I1IiiI
if 89 - 89: I1Ii111 . OoO0O00
#
# Module-level scratch IPv4 EID used by lisp_glean_map_cache() to hold the
# multicast group gleaned from an IGMP report; its instance-id is set on
# each call before use.
#
lisp_geid = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
if 52 - 52: OoO0O00 - iIii1I11I1II1
def lisp_glean_map_cache ( seid , rloc , encap_port , igmp ) :
    """
    Add or refresh a gleaned map-cache entry mapping source EID 'seid' to
    'rloc'/'encap_port'. When 'igmp' is a packet buffer (not None), also
    parse it and update gleaned multicast state for each (*, G) entry it
    reports.
    """

    #
    # Decide whether the RLOC or encap-port changed. An existing entry just
    # gets its refresh timestamp updated; a new entry is built and cached.
    #
    rloc_changed = True
    mc = lisp_map_cache . lookup_cache ( seid , True )
    if ( mc and len ( mc . rloc_set ) != 0 ) :
        mc . last_refresh_time = lisp_get_timestamp ( )

        cached_rloc = mc . rloc_set [ 0 ]
        old_rloc = cached_rloc . rloc
        old_port = cached_rloc . translated_port
        rloc_changed = ( old_rloc . is_exact_match ( rloc ) == False or
            old_port != encap_port )

        if ( rloc_changed ) :
            eid_str = green ( seid . print_address ( ) , False )
            rloc_str = red ( rloc . print_address_no_iid ( ) + ":" + str ( encap_port ) , False )
            lprint ( "Change gleaned EID {} to RLOC {}" . format ( eid_str , rloc_str ) )
            cached_rloc . delete_from_rloc_probe_list ( mc . eid , mc . group )
            lisp_change_gleaned_multicast ( seid , rloc , encap_port )
    else :
        mc = lisp_mapping ( "" , "" , [ ] )
        mc . eid . copy_address ( seid )
        mc . mapping_source . copy_address ( rloc )
        mc . map_cache_ttl = LISP_GLEAN_TTL
        mc . gleaned = True
        eid_str = green ( seid . print_address ( ) , False )
        rloc_str = red ( rloc . print_address_no_iid ( ) + ":" + str ( encap_port ) , False )
        lprint ( "Add gleaned EID {} to map-cache with RLOC {}" . format ( eid_str , rloc_str ) )
        mc . add_cache ( )

    #
    # Store the translated RLOC/port as the entry's single RLOC when it is
    # new or changed.
    #
    if ( rloc_changed ) :
        new_rloc = lisp_rloc ( )
        new_rloc . store_translated_rloc ( rloc , encap_port )
        new_rloc . add_to_rloc_probe_list ( mc . eid , mc . group )
        new_rloc . priority = 253
        new_rloc . mpriority = 255
        mc . rloc_set = [ new_rloc ]
        mc . build_best_rloc_set ( )

    #
    # Without an appended IGMP report there is no multicast state to glean.
    #
    if ( igmp == None ) : return

    #
    # Parse the IGMP report and build/remove gleaned multicast state for
    # each (*, G) entry. A boolean result means the packet was a query.
    #
    lisp_geid . instance_id = seid . instance_id

    parsed = lisp_process_igmp_packet ( igmp )
    if ( type ( parsed ) == bool ) : return

    for source , group , joinleave in parsed :
        if ( source != None ) : continue

        lisp_geid . store_address ( group )
        allow , x , y = lisp_allow_gleaning ( seid , lisp_geid , rloc )
        if ( allow == False ) : continue

        if ( joinleave ) :
            lisp_build_gleaned_multicast ( seid , lisp_geid , rloc , encap_port ,
                True )
        else :
            lisp_remove_gleaned_multicast ( seid , lisp_geid )
if 67 - 67: IiII * IiII % oO0o - IiII * i11iIiiIii - i11iIiiIii
if 27 - 27: i1IIi
if 29 - 29: OOooOOo % I11i * Oo0Ooo
if 92 - 92: OoOoOO00 / OoooooooOO % OoooooooOO + o0oOOo0O0Ooo
if 91 - 91: OoOoOO00 - iII111i / iII111i - OoO0O00
if 97 - 97: Oo0Ooo / IiII % OOooOOo % Ii1I
if 59 - 59: I1IiiI / Oo0Ooo / OoOoOO00
if 79 - 79: O0 / ooOoO0o + OoOoOO00
if 23 - 23: I11i
if 81 - 81: OoOoOO00 * ooOoO0o + OoOoOO00
if 7 - 7: I1ii11iIi11i - II111iiii
if 100 - 100: OoO0O00 . I1IiiI / i1IIi + OOooOOo / IiII
def lisp_is_json_telemetry ( json_string ) :
    """
    Return the parsed dictionary when 'json_string' is a telemetry
    timestamp record (a JSON object with type "telemetry" and sub-type
    "timestamps"); return None for anything else or undecodable input.
    """
    try :
        record = json . loads ( json_string )
        if ( isinstance ( record , dict ) == False ) : return ( None )
    except :
        lprint ( "Could not decode telemetry json: {}" . format ( json_string ) )
        return ( None )

    if ( record . get ( "type" ) != "telemetry" ) : return ( None )
    if ( record . get ( "sub-type" ) != "timestamps" ) : return ( None )
    return ( record )
if 76 - 76: i1IIi % OOooOOo
if 37 - 37: Oo0Ooo - oO0o / II111iiii . o0oOOo0O0Ooo % OoOoOO00 % ooOoO0o
if 44 - 44: I11i / I1IiiI + I1Ii111 - O0 - ooOoO0o
if 57 - 57: I1IiiI * OOooOOo - Ii1I
if 82 - 82: OoOoOO00
if 78 - 78: ooOoO0o - I1IiiI % I1ii11iIi11i
if 90 - 90: I1ii11iIi11i / II111iiii
if 92 - 92: i11iIiiIii
if 35 - 35: O0 + i11iIiiIii . OoO0O00
if 1 - 1: OoOoOO00 + o0oOOo0O0Ooo . Ii1I / II111iiii
if 54 - 54: ooOoO0o + iIii1I11I1II1
if 89 - 89: I1IiiI
def lisp_encode_telemetry ( json_string , ii = "?" , io = "?" , ei = "?" , eo = "?" ) :
    """
    Fill in any unset ("?") timestamp fields of a telemetry JSON string
    and return the re-encoded string. Non-telemetry strings are returned
    unchanged.

    ii/io are the ITR input/output timestamps; ei/eo are the ETR input/
    output timestamps. Fields already carrying a real value are left alone.
    """
    record = lisp_is_json_telemetry ( json_string )
    if ( record == None ) : return ( json_string )

    #
    # A missing field is treated the same as the "?" placeholder; the
    # previous direct indexing raised KeyError when a field was absent.
    #
    if ( record . get ( "itr-in" , "?" ) == "?" ) : record [ "itr-in" ] = ii
    if ( record . get ( "itr-out" , "?" ) == "?" ) : record [ "itr-out" ] = io
    if ( record . get ( "etr-in" , "?" ) == "?" ) : record [ "etr-in" ] = ei
    if ( record . get ( "etr-out" , "?" ) == "?" ) : record [ "etr-out" ] = eo

    json_string = json . dumps ( record )
    return ( json_string )
if 36 - 36: i1IIi - IiII - I1IiiI / I11i
if 41 - 41: I1IiiI . OoooooooOO * oO0o - I1ii11iIi11i % IiII
if 88 - 88: i11iIiiIii * ooOoO0o
if 19 - 19: i1IIi / I1Ii111 % II111iiii
if 4 - 4: o0oOOo0O0Ooo - OoO0O00 % i1IIi % OoooooooOO * oO0o - Oo0Ooo
if 18 - 18: oO0o % Oo0Ooo / o0oOOo0O0Ooo + OOooOOo
if 65 - 65: OOooOOo
if 23 - 23: OoOoOO00
if 26 - 26: i11iIiiIii * o0oOOo0O0Ooo . ooOoO0o + OoO0O00
if 86 - 86: OoOoOO00 % i11iIiiIii . ooOoO0o + i1IIi + O0 - OOooOOo
if 24 - 24: I11i - ooOoO0o + I1IiiI % O0 % iII111i * II111iiii
if 35 - 35: oO0o - I11i - i1IIi
def lisp_decode_telemetry ( json_string ) :
    """
    Return the telemetry timestamps in 'json_string' as a dictionary, or
    an empty dictionary when the string is not a telemetry record.
    """
    decoded = lisp_is_json_telemetry ( json_string )
    return ( { } if decoded is None else decoded )
if 83 - 83: ooOoO0o % OoooooooOO % Oo0Ooo * o0oOOo0O0Ooo * oO0o % i1IIi
if 66 - 66: Ii1I . ooOoO0o / OoooooooOO - I1IiiI - iIii1I11I1II1 + OOooOOo
if 33 - 33: Ii1I + I1IiiI - iII111i . OoooooooOO / I1ii11iIi11i
if 64 - 64: OoO0O00 + OoO0O00
if 2 - 2: ooOoO0o * IiII . ooOoO0o
if 5 - 5: o0oOOo0O0Ooo - o0oOOo0O0Ooo
if 40 - 40: OoO0O00 % I11i - OoOoOO00
if 51 - 51: iIii1I11I1II1 . OOooOOo % I1ii11iIi11i
if 46 - 46: OoOoOO00 - iIii1I11I1II1 * Oo0Ooo * OOooOOo + i1IIi / iII111i
def lisp_telemetry_configured ( ) :
    """
    Return the configured telemetry JSON string from the "telemetry" entry
    of lisp_json_list, or None when absent or not a valid telemetry record.
    """
    if ( "telemetry" not in lisp_json_list ) : return ( None )

    telemetry = lisp_json_list [ "telemetry" ] . json_string
    if ( lisp_is_json_telemetry ( telemetry ) == None ) : return ( None )
    return ( telemetry )
if 9 - 9: OOooOOo + I1Ii111 - O0
if 95 - 95: oO0o
if 45 - 45: Ii1I * oO0o / oO0o + o0oOOo0O0Ooo % OoOoOO00 % I11i
if 78 - 78: OoO0O00 + I11i
if 87 - 87: OOooOOo % I1ii11iIi11i - IiII . II111iiii . o0oOOo0O0Ooo
if 9 - 9: Ii1I / oO0o + I11i . iII111i
if 3 - 3: OoooooooOO + OoooooooOO * OOooOOo / O0
def lisp_mr_or_pubsub ( action ) :
    """Return True when 'action' is a Map-Request or pub/sub send action."""
    return ( action == LISP_SEND_MAP_REQUEST_ACTION or
        action == LISP_SEND_PUBSUB_ACTION )
if 81 - 81: i11iIiiIii - OoOoOO00
if 80 - 80: iIii1I11I1II1 % OOooOOo + oO0o + II111iiii - I1ii11iIi11i
if 44 - 44: OoooooooOO * iII111i
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
tests.py | # -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import copy
import datetime
import re
import threading
import unittest
import warnings
from decimal import Decimal, Rounded
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper, format_number
from django.db.models import Avg, StdDev, Sum, Variance
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (
TestCase, TransactionTestCase, mock, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import ignore_warnings, str_prefix
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.six.moves import range
from . import models
class DummyBackendTest(TestCase):

    def test_no_databases(self):
        """
        An empty DATABASES setting defaults to the dummy backend, which
        refuses to actually connect.
        """
        conns = ConnectionHandler({})
        engine = conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE']
        self.assertEqual(engine, 'django.db.backends.dummy')
        with self.assertRaises(ImproperlyConfigured):
            conns[DEFAULT_DB_ALIAS].ensure_connection()
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
    """Oracle-backend-specific behavior: quoting, cursor wrapper, NCLOBs, NLS."""

    def test_quote_name(self):
        # Check that '%' chars are escaped for query execution.
        name = '"SOME%NAME"'
        quoted_name = connection.ops.quote_name(name)
        # Interpolating with an empty tuple would raise if '%' were left
        # unescaped in the quoted name.
        self.assertEqual(quoted_name % (), name)

    def test_dbms_session(self):
        # If the backend is Oracle, test that we can call a standard
        # stored procedure through our cursor wrapper.
        from django.db.backends.oracle.base import convert_unicode
        with connection.cursor() as cursor:
            cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
                            [convert_unicode('_django_testing!')])

    def test_cursor_var(self):
        # If the backend is Oracle, test that we can pass cursor variables
        # as query parameters.
        from django.db.backends.oracle.base import Database
        with connection.cursor() as cursor:
            var = cursor.var(Database.STRING)
            cursor.execute("BEGIN %s := 'X'; END; ", [var])
            self.assertEqual(var.getvalue(), 'X')

    def test_long_string(self):
        # If the backend is Oracle, test that we can save a text longer
        # than 4000 chars and read it properly
        with connection.cursor() as cursor:
            cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
            long_str = ''.join(six.text_type(x) for x in range(4000))
            cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
            cursor.execute('SELECT text FROM ltext')
            row = cursor.fetchone()
            # NCLOB columns come back as LOB handles; read() yields the text.
            self.assertEqual(long_str, row[0].read())
            cursor.execute('DROP TABLE ltext')

    def test_client_encoding(self):
        # If the backend is Oracle, test that the client encoding is set
        # correctly. This was broken under Cygwin prior to r14781.
        connection.ensure_connection()
        self.assertEqual(connection.connection.encoding, "UTF-8")
        self.assertEqual(connection.connection.nencoding, "UTF-8")

    def test_order_of_nls_parameters(self):
        # an 'almost right' datetime should work with configured
        # NLS parameters as per #18465.
        with connection.cursor() as cursor:
            query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
            # Test that the query succeeds without errors - pre #18465 this
            # wasn't the case.
            cursor.execute(query)
            self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
    """SQLite-backend-specific behavior: AUTOINCREMENT, aggregates, test DB names."""

    # Show both the standard failure message and any custom message.
    longMessage = True

    def test_autoincrement(self):
        """
        Check that auto_increment fields are created with the AUTOINCREMENT
        keyword in order to be monotonically increasing. Refs #10164.
        """
        with connection.schema_editor(collect_sql=True) as editor:
            editor.create_model(models.Square)
            statements = editor.collected_sql
        match = re.search('"id" ([^,]+),', statements[0])
        self.assertIsNotNone(match)
        self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
            match.group(1), "Wrong SQL used to create an auto-increment "
            "column on SQLite")

    def test_aggregation(self):
        """
        #19360: Raise NotImplementedError when aggregating on date/time fields.
        """
        # SQLite has no native date/time arithmetic, so each aggregate over a
        # time, date, or datetime column must refuse to run.
        for aggregate in (Sum, Avg, Variance, StdDev):
            self.assertRaises(
                NotImplementedError,
                models.Item.objects.all().aggregate, aggregate('time'))
            self.assertRaises(
                NotImplementedError,
                models.Item.objects.all().aggregate, aggregate('date'))
            self.assertRaises(
                NotImplementedError,
                models.Item.objects.all().aggregate, aggregate('last_modified'))
            # Composite expressions over datetime fields are refused too.
            self.assertRaises(
                NotImplementedError,
                models.Item.objects.all().aggregate,
                **{'complex': aggregate('last_modified') + aggregate('last_modified')})

    def test_memory_db_test_name(self):
        """
        A named in-memory db should be allowed where supported.
        """
        from django.db.backends.sqlite3.base import DatabaseWrapper
        settings_dict = {
            'TEST': {
                'NAME': 'file:memorydb_test?mode=memory&cache=shared',
            }
        }
        wrapper = DatabaseWrapper(settings_dict)
        creation = wrapper.creation
        if creation.connection.features.can_share_in_memory_db:
            expected = creation.connection.settings_dict['TEST']['NAME']
            self.assertEqual(creation._get_test_db_name(), expected)
        else:
            # Shared in-memory DBs need URI filenames; older SQLite builds
            # must reject the name with a helpful message.
            msg = (
                "Using a shared memory database with `mode=memory` in the "
                "database name is not supported in your environment, "
                "use `:memory:` instead."
            )
            with self.assertRaisesMessage(ImproperlyConfigured, msg):
                creation._get_test_db_name()
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
    """PostgreSQL-backend-specific behavior: version parsing, connections, casts."""

    def assert_parses(self, version_string, version):
        # Helper: a raw `SELECT version()` string must parse to the numeric form.
        self.assertEqual(pg_version._parse_version(version_string), version)

    def test_parsing(self):
        """Test PostgreSQL version parsing from `SELECT version()` output"""
        self.assert_parses("PostgreSQL 9.3 beta4", 90300)
        self.assert_parses("PostgreSQL 9.3", 90300)
        self.assert_parses("EnterpriseDB 9.3", 90300)
        self.assert_parses("PostgreSQL 9.3.6", 90306)
        self.assert_parses("PostgreSQL 9.4beta1", 90400)
        self.assert_parses("PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 90301)

    def test_nodb_connection(self):
        """
        Test that the _nodb_connection property fallbacks to the default connection
        database when access to the 'postgres' database is not granted.
        """
        def mocked_connect(self):
            # Simulate the 'postgres' maintenance DB being unreachable: any
            # connect with NAME=None fails.
            if self.settings_dict['NAME'] is None:
                raise DatabaseError()
            return ''

        nodb_conn = connection._nodb_connection
        self.assertIsNone(nodb_conn.settings_dict['NAME'])

        # Now assume the 'postgres' db isn't available
        del connection._nodb_connection
        with warnings.catch_warnings(record=True) as w:
            with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.connect',
                            side_effect=mocked_connect, autospec=True):
                warnings.simplefilter('always', RuntimeWarning)
                nodb_conn = connection._nodb_connection
        del connection._nodb_connection
        # The fallback must reuse the default database's NAME.
        self.assertIsNotNone(nodb_conn.settings_dict['NAME'])
        self.assertEqual(nodb_conn.settings_dict['NAME'], settings.DATABASES[DEFAULT_DB_ALIAS]['NAME'])
        # Check a RuntimeWarning has been emitted
        self.assertEqual(len(w), 1)
        self.assertEqual(w[0].message.__class__, RuntimeWarning)

    def test_version_detection(self):
        """Test PostgreSQL version detection"""
        # Helper mocks
        class CursorMock(object):
            "Very simple mock of DB-API cursor"
            def execute(self, arg):
                pass

            def fetchone(self):
                return ["PostgreSQL 9.3"]

            def __enter__(self):
                return self

            def __exit__(self, type, value, traceback):
                pass

        class OlderConnectionMock(object):
            "Mock of psycopg2 (< 2.0.12) connection"
            def cursor(self):
                return CursorMock()

        # psycopg2 < 2.0.12 code path
        conn = OlderConnectionMock()
        self.assertEqual(pg_version.get_version(conn), 90300)

    def test_connect_and_rollback(self):
        """
        PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
        transaction is rolled back (#17062).
        """
        databases = copy.deepcopy(settings.DATABASES)
        new_connections = ConnectionHandler(databases)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        try:
            # Ensure the database default time zone is different than
            # the time zone in new_connection.settings_dict. We can
            # get the default time zone by reset & show.
            cursor = new_connection.cursor()
            cursor.execute("RESET TIMEZONE")
            cursor.execute("SHOW TIMEZONE")
            db_default_tz = cursor.fetchone()[0]
            new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
            new_connection.close()

            # Fetch a new connection with the new_tz as default
            # time zone, run a query and rollback.
            new_connection.settings_dict['TIME_ZONE'] = new_tz
            new_connection.set_autocommit(False)
            cursor = new_connection.cursor()
            new_connection.rollback()

            # Now let's see if the rollback rolled back the SET TIME ZONE.
            cursor.execute("SHOW TIMEZONE")
            tz = cursor.fetchone()[0]
            self.assertEqual(new_tz, tz)
        finally:
            new_connection.close()

    def test_connect_non_autocommit(self):
        """
        The connection wrapper shouldn't believe that autocommit is enabled
        after setting the time zone when AUTOCOMMIT is False (#21452).
        """
        databases = copy.deepcopy(settings.DATABASES)
        databases[DEFAULT_DB_ALIAS]['AUTOCOMMIT'] = False
        new_connections = ConnectionHandler(databases)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        try:
            # Open a database connection.
            new_connection.cursor()
            self.assertFalse(new_connection.get_autocommit())
        finally:
            new_connection.close()

    def test_connect_isolation_level(self):
        """
        Regression test for #18130 and #24318.
        """
        from psycopg2.extensions import (
            ISOLATION_LEVEL_READ_COMMITTED as read_committed,
            ISOLATION_LEVEL_SERIALIZABLE as serializable,
        )
        # Since this is a django.test.TestCase, a transaction is in progress
        # and the isolation level isn't reported as 0. This test assumes that
        # PostgreSQL is configured with the default isolation level.
        # Check the level on the psycopg2 connection, not the Django wrapper.
        self.assertEqual(connection.connection.isolation_level, read_committed)

        databases = copy.deepcopy(settings.DATABASES)
        databases[DEFAULT_DB_ALIAS]['OPTIONS']['isolation_level'] = serializable
        new_connections = ConnectionHandler(databases)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        try:
            # Start a transaction so the isolation level isn't reported as 0.
            new_connection.set_autocommit(False)
            # Check the level on the psycopg2 connection, not the Django wrapper.
            self.assertEqual(new_connection.connection.isolation_level, serializable)
        finally:
            new_connection.close()

    def _select(self, val):
        # Helper: round-trip a value through a parameterized SELECT.
        with connection.cursor() as cursor:
            cursor.execute("SELECT %s", (val,))
            return cursor.fetchone()[0]

    def test_select_ascii_array(self):
        a = ["awef"]
        b = self._select(a)
        self.assertEqual(a[0], b[0])

    def test_select_unicode_array(self):
        a = ["ᄲawef"]
        b = self._select(a)
        self.assertEqual(a[0], b[0])

    def test_lookup_cast(self):
        from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
        do = DatabaseOperations(connection=None)
        # Every text-matching lookup must cast the column to ::text so the
        # operators work on non-text column types as well.
        for lookup in ('iexact', 'contains', 'icontains', 'startswith',
                'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
            self.assertIn('::text', do.lookup_cast(lookup))

    def test_correct_extraction_psycopg2_version(self):
        from django.db.backends.postgresql_psycopg2.base import psycopg2_version
        version_path = 'django.db.backends.postgresql_psycopg2.base.Database.__version__'
        with mock.patch(version_path, '2.6.9'):
            self.assertEqual(psycopg2_version(), (2, 6, 9))
        # Non-numeric suffixes such as '.dev0' are dropped from the tuple.
        with mock.patch(version_path, '2.5.dev0'):
            self.assertEqual(psycopg2_version(), (2, 5))
class DateQuotingTest(TestCase):

    def test_django_date_trunc(self):
        """
        The custom ``django_date_trunc`` method must work against fields
        whose names clash with strings passed to it (e.g. 'year') --
        see #12818__.

        __: http://code.djangoproject.com/ticket/12818
        """
        models.SchoolClass.objects.create(
            year=2009, last_updated=datetime.datetime(2010, 2, 20))
        years = models.SchoolClass.objects.dates('last_updated', 'year')
        self.assertEqual(list(years), [datetime.date(2010, 1, 1)])

    def test_django_date_extract(self):
        """
        The custom ``django_date_extract`` method must work against fields
        whose names clash with strings passed to it (e.g. 'day') --
        see #12818__.

        __: http://code.djangoproject.com/ticket/12818
        """
        models.SchoolClass.objects.create(
            year=2009, last_updated=datetime.datetime(2010, 2, 20))
        matching = models.SchoolClass.objects.filter(last_updated__day=20)
        self.assertEqual(len(matching), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
    """Behavior of connection.ops.last_executed_query() and the DEBUG query log."""

    def test_last_executed_query(self):
        """
        last_executed_query should not raise an exception even if no previous
        query has been run.
        """
        cursor = connection.cursor()
        try:
            connection.ops.last_executed_query(cursor, '', ())
        except Exception:
            self.fail("'last_executed_query' should not raise an exception.")

    def test_debug_sql(self):
        # With DEBUG=True every executed statement lands in connection.queries.
        list(models.Reporter.objects.filter(first_name="test"))
        sql = connection.queries[-1]['sql'].lower()
        self.assertIn("select", sql)
        self.assertIn(models.Reporter._meta.db_table, sql)

    def test_query_encoding(self):
        """
        Test that last_executed_query() returns a Unicode string
        """
        # Mix raw bytes and a non-ASCII alias to exercise decoding.
        data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
        sql, params = data.query.sql_with_params()
        cursor = data.query.get_compiler('default').execute_sql(CURSOR)
        last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
        self.assertIsInstance(last_sql, six.text_type)

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This test is specific to SQLite.")
    def test_no_interpolation_on_sqlite(self):
        # Regression for #17158
        # This shouldn't raise an exception
        query = "SELECT strftime('%Y', 'now');"
        connection.cursor().execute(query)
        self.assertEqual(connection.queries[-1]['sql'],
            str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):

    def test_bad_parameter_count(self):
        """
        An executemany call with too many or not enough parameters raises
        an exception (refs #12612).
        """
        cursor = connection.cursor()
        sql = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
            connection.introspection.table_name_converter('backends_square'),
            connection.ops.quote_name('root'),
            connection.ops.quote_name('square')
        ))
        for bad_params in ([(1, 2, 3)], [(1,)]):
            self.assertRaises(Exception, cursor.executemany, sql, bad_params)
# Unfortunately, the following test would be a good one to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TransactionTestCase):
    """Long primary keys and model names can result in a sequence name
    that exceeds the database limits, which will result in truncation
    on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check it is. Refs #8901.
    """
    available_apps = ['backends']

    def test_sequence_name_length_limits_create(self):
        """Creating a model with a long name and a long pk name works. Ref #8901."""
        models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()

    def test_sequence_name_length_limits_m2m(self):
        """
        An m2m save of a model with a long name and a long m2m field name
        works (Django >= 1.2 uses object saves for this). Ref #8901.
        """
        long_obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
        person = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
        long_obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(person)

    def test_sequence_name_length_limits_flush(self):
        """
        Sequence resetting as part of a flush of a model with a long name
        and a long pk name works. Ref #8901.
        """
        # A full flush is expensive to the full test, so we dig into the
        # internals to generate the likely offending SQL and run it manually
        long_model = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
        m2m_through = long_model.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
        tables = [long_model._meta.db_table, m2m_through._meta.db_table]
        sequences = [{
            'column': long_model._meta.pk.column,
            'table': long_model._meta.db_table,
        }]
        cursor = connection.cursor()
        for statement in connection.ops.sql_flush(no_style(), tables, sequences):
            cursor.execute(statement)
class SequenceResetTest(TestCase):
    def test_generic_relation(self):
        "Sequence names are correct when resetting generic relations (Ref #13941)"
        # Create an object with a manually specified PK
        models.Post.objects.create(id=10, name='1st post', text='hello world')
        # Reset the sequences for the database
        cursor = connection.cursor()
        commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
        for sql in commands:
            cursor.execute(sql)
        # If we create a new object now, it should have a PK greater
        # than the PK we specified manually.
        obj = models.Post.objects.create(name='New post', text='goodbye world')
        self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
    """Check that the connection_created signal fires when a new database
    connection is opened, and stops firing once disconnected."""
    available_apps = []

    # Unfortunately with sqlite3 the in-memory test database cannot be closed,
    # and so it cannot be re-opened during testing.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_signal(self):
        data = {}

        def receiver(sender, connection, **kwargs):
            data["connection"] = connection

        connection_created.connect(receiver)
        # Close, then force a re-open via cursor(); the reconnect must fire
        # the signal with the live connection.
        connection.close()
        connection.cursor()
        self.assertIs(data["connection"].connection, connection.connection)
        # After disconnecting the receiver, a new cursor must NOT repopulate
        # the dict.
        connection_created.disconnect(receiver)
        data.clear()
        connection.cursor()
        self.assertEqual(data, {})
class EscapingChecks(TestCase):
    """
    All tests in this test case are also run with settings.DEBUG=True in
    EscapingChecksDebug test case, to also test CursorDebugWrapper.
    """
    bare_select_suffix = connection.features.bare_select_suffix

    def test_paramless_no_escaping(self):
        # With no params passed, a literal '%s' in SQL must survive untouched.
        cursor = connection.cursor()
        cursor.execute("SELECT '%s'" + self.bare_select_suffix)
        self.assertEqual(cursor.fetchall()[0][0], '%s')

    def test_parameter_escaping(self):
        # With params, '%%' collapses to '%' and %s binds the parameter.
        cursor = connection.cursor()
        cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
        self.assertEqual(cursor.fetchall()[0], ('%', '%d'))

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This is an sqlite-specific issue")
    def test_sqlite_parameter_escaping(self):
        # '%s' escaping support for sqlite3 #13648
        cursor = connection.cursor()
        cursor.execute("select strftime('%s', date('now'))")
        response = cursor.fetchall()[0][0]
        # response should be an non-zero integer
        self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
    # Re-runs every EscapingChecks test with DEBUG=True so the queries go
    # through CursorDebugWrapper instead of CursorWrapper.
    pass
class BackendTestCase(TransactionTestCase):
    """Assorted cross-backend behavior checks: executemany, paramstyles,
    unicode handling, cursor context managers and the queries log."""
    available_apps = ['backends']

    def create_squares_with_executemany(self, args):
        # Convenience wrapper: 'format' paramstyle, multiple rows.
        self.create_squares(args, 'format', True)

    def create_squares(self, args, paramstyle, multiple):
        """Insert rows into backends_square via raw SQL using the requested
        paramstyle ('format' -> %s, 'pyformat' -> %(name)s)."""
        cursor = connection.cursor()
        opts = models.Square._meta
        tbl = connection.introspection.table_name_converter(opts.db_table)
        f1 = connection.ops.quote_name(opts.get_field('root').column)
        f2 = connection.ops.quote_name(opts.get_field('square').column)
        if paramstyle == 'format':
            query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
        elif paramstyle == 'pyformat':
            query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
        else:
            raise ValueError("unsupported paramstyle in test")
        if multiple:
            cursor.executemany(query, args)
        else:
            cursor.execute(query, args)

    def test_cursor_executemany(self):
        # Test cursor.executemany #4896
        args = [(i, i ** 2) for i in range(-5, 6)]
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 11)
        for i in range(-5, 6):
            square = models.Square.objects.get(root=i)
            self.assertEqual(square.square, i ** 2)

    def test_cursor_executemany_with_empty_params_list(self):
        # Test executemany with params=[] does nothing #4765
        args = []
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 0)

    def test_cursor_executemany_with_iterator(self):
        # Test executemany accepts iterators #10320
        args = iter((i, i ** 2) for i in range(-3, 2))
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 5)
        args = iter((i, i ** 2) for i in range(3, 7))
        with override_settings(DEBUG=True):
            # same test for DebugCursorWrapper
            self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 9)

    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_execute_with_pyformat(self):
        # Support pyformat style passing of parameters #10070
        args = {'root': 3, 'square': 9}
        self.create_squares(args, 'pyformat', multiple=False)
        self.assertEqual(models.Square.objects.count(), 1)

    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_executemany_with_pyformat(self):
        # Support pyformat style passing of parameters #10070
        args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
        self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(models.Square.objects.count(), 11)
        for i in range(-5, 6):
            square = models.Square.objects.get(root=i)
            self.assertEqual(square.square, i ** 2)

    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_executemany_with_pyformat_iterator(self):
        args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
        self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(models.Square.objects.count(), 5)
        args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
        with override_settings(DEBUG=True):
            # same test for DebugCursorWrapper
            self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(models.Square.objects.count(), 9)

    def test_unicode_fetches(self):
        # fetchone, fetchmany, fetchall return strings as unicode objects #6254
        qn = connection.ops.quote_name
        models.Person(first_name="John", last_name="Doe").save()
        models.Person(first_name="Jane", last_name="Doe").save()
        models.Person(first_name="Mary", last_name="Agnelline").save()
        models.Person(first_name="Peter", last_name="Parker").save()
        models.Person(first_name="Clark", last_name="Kent").save()
        opts2 = models.Person._meta
        f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
        query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
                  % (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
                     qn(f3.column)))
        cursor = connection.cursor()
        cursor.execute(query2)
        # Ordered by first_name, so consume rows through each fetch variant.
        self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
        self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
        self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])

    def test_unicode_password(self):
        # Connecting with a non-ASCII password must not blow up with an
        # encoding error; a DatabaseError (auth failure) is acceptable.
        old_password = connection.settings_dict['PASSWORD']
        connection.settings_dict['PASSWORD'] = "françois"
        try:
            connection.cursor()
        except DatabaseError:
            # As password is probably wrong, a database exception is expected
            pass
        except Exception as e:
            self.fail("Unexpected error raised with unicode password: %s" % e)
        finally:
            connection.settings_dict['PASSWORD'] = old_password

    def test_database_operations_helper_class(self):
        # Ticket #13630
        self.assertTrue(hasattr(connection, 'ops'))
        self.assertTrue(hasattr(connection.ops, 'connection'))
        self.assertEqual(connection, connection.ops.connection)

    def test_database_operations_init(self):
        """
        Test that DatabaseOperations initialization doesn't query the database.
        See #17656.
        """
        with self.assertNumQueries(0):
            connection.ops.__class__(connection)

    def test_cached_db_features(self):
        self.assertIn(connection.features.supports_transactions, (True, False))
        self.assertIn(connection.features.supports_stddev, (True, False))
        self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))

    def test_duplicate_table_error(self):
        """ Test that creating an existing table returns a DatabaseError """
        cursor = connection.cursor()
        query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
        with self.assertRaises(DatabaseError):
            cursor.execute(query)

    def test_cursor_contextmanager(self):
        """
        Test that cursors can be used as a context manager
        """
        with connection.cursor() as cursor:
            self.assertIsInstance(cursor, CursorWrapper)
        # Both InterfaceError and ProgrammingError seem to be used when
        # accessing closed cursor (psycopg2 has InterfaceError, rest seem
        # to use ProgrammingError).
        with self.assertRaises(connection.features.closed_cursor_error_class):
            # cursor should be closed, so no queries should be possible.
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)

    @unittest.skipUnless(connection.vendor == 'postgresql',
                         "Psycopg2 specific cursor.closed attribute needed")
    def test_cursor_contextmanager_closing(self):
        # There isn't a generic way to test that cursors are closed, but
        # psycopg2 offers us a way to check that by closed attribute.
        # So, run only on psycopg2 for that reason.
        with connection.cursor() as cursor:
            self.assertIsInstance(cursor, CursorWrapper)
        self.assertTrue(cursor.closed)

    # Unfortunately with sqlite3 the in-memory test database cannot be closed.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_is_usable_after_database_disconnects(self):
        """
        Test that is_usable() doesn't crash when the database disconnects.
        Regression for #21553.
        """
        # Open a connection to the database.
        with connection.cursor():
            pass
        # Emulate a connection close by the database.
        connection._close()
        # Even then is_usable() should not raise an exception.
        try:
            self.assertFalse(connection.is_usable())
        finally:
            # Clean up the mess created by connection._close(). Since the
            # connection is already closed, this crashes on some backends.
            try:
                connection.close()
            except Exception:
                pass

    @override_settings(DEBUG=True)
    def test_queries(self):
        """
        Test the documented API of connection.queries.
        """
        with connection.cursor() as cursor:
            reset_queries()
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
        self.assertEqual(1, len(connection.queries))
        self.assertIsInstance(connection.queries, list)
        self.assertIsInstance(connection.queries[0], dict)
        six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
        reset_queries()
        self.assertEqual(0, len(connection.queries))

    # Unfortunately with sqlite3 the in-memory test database cannot be closed.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    @override_settings(DEBUG=True)
    def test_queries_limit(self):
        """
        Test that the backend doesn't store an unlimited number of queries.
        Regression for #12581.
        """
        old_queries_limit = BaseDatabaseWrapper.queries_limit
        BaseDatabaseWrapper.queries_limit = 3
        new_connections = ConnectionHandler(settings.DATABASES)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        # Initialize the connection and clear initialization statements.
        with new_connection.cursor():
            pass
        new_connection.queries_log.clear()
        try:
            with new_connection.cursor() as cursor:
                cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
                cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
            # Under the limit: no truncation warning expected.
            with warnings.catch_warnings(record=True) as w:
                self.assertEqual(2, len(new_connection.queries))
                self.assertEqual(0, len(w))
            with new_connection.cursor() as cursor:
                cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
                cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
            # Over the limit: log truncated to 3, one warning emitted.
            with warnings.catch_warnings(record=True) as w:
                self.assertEqual(3, len(new_connection.queries))
                self.assertEqual(1, len(w))
                self.assertEqual(str(w[0].message), "Limit for query logging "
                                 "exceeded, only the last 3 queries will be returned.")
        finally:
            BaseDatabaseWrapper.queries_limit = old_queries_limit
            new_connection.close()

    def test_timezone_none_use_tz_false(self):
        # init_connection_state() must tolerate TIME_ZONE=None with USE_TZ off.
        connection.ensure_connection()
        with self.settings(TIME_ZONE=None, USE_TZ=False):
            connection.init_connection_state()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# verify that its type is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
    """Foreign-key constraint enforcement, and the APIs for temporarily
    disabling constraint checks."""
    available_apps = ['backends']

    def setUp(self):
        # Create a Reporter.
        self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')

    def test_integrity_checks_on_creation(self):
        """
        Try to create a model instance that violates a FK constraint. If it
        fails it should fail with IntegrityError.
        """
        # reporter_id=30 points at a Reporter that doesn't exist.
        a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
        try:
            a1.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy models. Refs #17519
        a2 = models.Article(headline='This is another test', reporter=self.r,
                            pub_date=datetime.datetime(2012, 8, 3),
                            reporter_proxy_id=30)
        self.assertRaises(IntegrityError, a2.save)

    def test_integrity_checks_on_update(self):
        """
        Try to update a model instance introducing a FK constraint violation.
        If it fails it should fail with IntegrityError.
        """
        # Create an Article.
        models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
        # Retrieve it from the DB
        a1 = models.Article.objects.get(headline="Test article")
        a1.reporter_id = 30
        try:
            a1.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy models. Refs #17519
        # Create another article
        r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
        models.Article.objects.create(headline='Another article',
                                      pub_date=datetime.datetime(1988, 5, 15),
                                      reporter=self.r, reporter_proxy=r_proxy)
        # Retrieve the second article from the DB
        a2 = models.Article.objects.get(headline='Another article')
        a2.reporter_proxy_id = 30
        self.assertRaises(IntegrityError, a2.save)

    def test_disable_constraint_checks_manually(self):
        """
        When constraint checks are disabled, should be able to write bad data without IntegrityErrors.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                connection.disable_constraint_checking()
                a.save()
                connection.enable_constraint_checking()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            # Roll back so the bad row never persists past this test.
            transaction.set_rollback(True)

    def test_disable_constraint_checks_context_manager(self):
        """
        When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                with connection.constraint_checks_disabled():
                    a.save()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            transaction.set_rollback(True)

    def test_check_constraints(self):
        """
        Constraint checks should raise an IntegrityError when bad data is in the DB.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            with connection.constraint_checks_disabled():
                a.save()
                # check_constraints() must detect the dangling FK we just wrote.
                with self.assertRaises(IntegrityError):
                    connection.check_constraints()
            transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
    """Per-thread connection isolation and the allow_thread_sharing flag.
    Refs #17258."""
    available_apps = ['backends']

    def test_default_connection_thread_local(self):
        """
        Ensure that the default connection (i.e. django.db.connection) is
        different for each thread.
        Refs #17258.
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        connection.cursor()
        connections_dict[id(connection)] = connection

        def runner():
            # Passing django.db.connection between threads doesn't work while
            # connections[DEFAULT_DB_ALIAS] does.
            from django.db import connections
            connection = connections[DEFAULT_DB_ALIAS]
            # Allow thread sharing so the connection can be closed by the
            # main thread.
            connection.allow_thread_sharing = True
            connection.cursor()
            connections_dict[id(connection)] = connection

        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # Check that each created connection got different inner connection.
        self.assertEqual(
            len(set(conn.connection for conn in connections_dict.values())),
            3)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_dict.values():
            if conn is not connection:
                conn.close()

    def test_connections_thread_local(self):
        """
        Ensure that the connections are different for each thread.
        Refs #17258.
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        for conn in connections.all():
            connections_dict[id(conn)] = conn

        def runner():
            from django.db import connections
            for conn in connections.all():
                # Allow thread sharing so the connection can be closed by the
                # main thread.
                conn.allow_thread_sharing = True
                connections_dict[id(conn)] = conn

        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # 3 aliases x 3 threads (main + 2 workers) = 6 distinct wrappers.
        self.assertEqual(len(connections_dict), 6)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_dict.values():
            if conn is not connection:
                conn.close()

    def test_pass_connection_between_threads(self):
        """
        Ensure that a connection can be passed from one thread to the other.
        Refs #17258.
        """
        models.Person.objects.create(first_name="John", last_name="Doe")

        def do_thread():
            def runner(main_thread_connection):
                from django.db import connections
                connections['default'] = main_thread_connection
                try:
                    models.Person.objects.get(first_name="John", last_name="Doe")
                except Exception as e:
                    exceptions.append(e)
            t = threading.Thread(target=runner, args=[connections['default']])
            t.start()
            t.join()

        # Without touching allow_thread_sharing, which should be False by default.
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)
        # If explicitly setting allow_thread_sharing to False
        connections['default'].allow_thread_sharing = False
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)
        # If explicitly setting allow_thread_sharing to True
        connections['default'].allow_thread_sharing = True
        exceptions = []
        do_thread()
        # All good
        self.assertEqual(exceptions, [])

    def test_closing_non_shared_connections(self):
        """
        Ensure that a connection that is not explicitly shareable cannot be
        closed by another thread.
        Refs #17258.
        """
        # First, without explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()

        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # The exception was raised
        self.assertEqual(len(exceptions), 1)
        # Then, with explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            # Enable thread sharing
            connections['default'].allow_thread_sharing = True
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()

        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # No exception was raised
        self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
    """
    Zero as id for AutoField should raise exception in MySQL, because MySQL
    does not allow zero for autoincrement primary key.
    """
    @skipIfDBFeature('allows_auto_pk_0')
    def test_zero_as_autoval(self):
        # Backends that forbid pk=0 must reject it with ValueError.
        with self.assertRaises(ValueError):
            models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
    """Behavior of FK fields whose models don't enforce DB-level constraints."""

    def test_can_reference_existent(self):
        obj = models.Object.objects.create()
        ref = models.ObjectReference.objects.create(obj=obj)
        self.assertEqual(ref.obj, obj)
        ref = models.ObjectReference.objects.get(obj=obj)
        self.assertEqual(ref.obj, obj)

    def test_can_reference_non_existent(self):
        # A dangling obj_id may be written, but dereferencing it raises.
        self.assertFalse(models.Object.objects.filter(id=12345).exists())
        ref = models.ObjectReference.objects.create(obj_id=12345)
        ref_new = models.ObjectReference.objects.get(obj_id=12345)
        self.assertEqual(ref, ref_new)
        with self.assertRaises(models.Object.DoesNotExist):
            ref.obj

    def test_many_to_many(self):
        obj = models.Object.objects.create()
        obj.related_objects.create()
        self.assertEqual(models.Object.objects.count(), 2)
        self.assertEqual(obj.related_objects.count(), 1)
        # A dangling m2m row is stored but filtered out of the relation.
        intermediary_model = models.Object._meta.get_field("related_objects").rel.through
        intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
        self.assertEqual(obj.related_objects.count(), 1)
        self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(TestCase):
    def test_format_number(self):
        """
        Test the format_number converter utility
        """
        def equal(value, max_d, places, result):
            self.assertEqual(format_number(Decimal(value), max_d, places), result)

        # (value, max_digits, decimal_places, expected) -- checked in order.
        cases = (
            ('0', 12, 3, '0.000'),
            ('0', 12, 8, '0.00000000'),
            ('1', 12, 9, '1.000000000'),
            ('0.00000000', 12, 8, '0.00000000'),
            ('0.000000004', 12, 8, '0.00000000'),
            ('0.000000008', 12, 8, '0.00000001'),
            ('0.000000000000000000999', 10, 8, '0.00000000'),
            ('0.1234567890', 12, 10, '0.1234567890'),
            ('0.1234567890', 12, 9, '0.123456789'),
            ('0.1234567890', 12, 8, '0.12345679'),
            ('0.1234567890', 12, 5, '0.12346'),
            ('0.1234567890', 12, 3, '0.123'),
            ('0.1234567890', 12, 1, '0.1'),
            ('0.1234567890', 12, 0, '0'),
            ('0.1234567890', None, 0, '0'),
            ('1234567890.1234567890', None, 0, '1234567890'),
            ('1234567890.1234567890', None, 2, '1234567890.12'),
            ('0.1234', 5, None, '0.1234'),
            ('123.12', 5, None, '123.12'),
        )
        for value, max_d, places, expected in cases:
            equal(value, max_d, places, expected)
        # Values that cannot be represented in max_digits must signal Rounded.
        with self.assertRaises(Rounded):
            equal('0.1234567890', 5, None, '0.12346')
        with self.assertRaises(Rounded):
            equal('1234567890.1234', 5, None, '1234600000')
@ignore_warnings(category=UserWarning,
                 message="Overriding setting DATABASES can lead to unexpected behavior")
class DBTestSettingsRenamedTests(TestCase):
    """Migration of legacy TEST_* database settings to the TEST dict:
    mismatches must raise ImproperlyConfigured, matches must pass."""

    mismatch_msg = ("Connection 'test-deprecation' has mismatched TEST "
                    "and TEST_* database settings.")

    def setUp(self):
        super(DBTestSettingsRenamedTests, self).setUp()
        self.handler = ConnectionHandler()
        self.db_settings = {'default': {}}

    def test_mismatched_database_test_settings_1(self):
        # if the TEST setting is used, all TEST_* keys must appear in it.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {},
                'TEST_NAME': 'foo',
            }
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_mismatched_database_test_settings_2(self):
        # if the TEST setting is used, all TEST_* keys must match.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {'NAME': 'foo'},
                'TEST_NAME': 'bar',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_mismatched_database_test_settings_3(self):
        # Verifies the mapping of an aliased key.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {'CREATE_DB': 'foo'},
                'TEST_CREATE': 'bar',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_mismatched_database_test_settings_4(self):
        # Verifies the mapping of an aliased key when the aliased key is missing.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {},
                'TEST_CREATE': 'bar',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_mismatched_settings_old_none(self):
        # None in TEST vs '' in TEST_* still counts as a mismatch.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {'CREATE_DB': None},
                'TEST_CREATE': '',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_mismatched_settings_new_none(self):
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {},
                'TEST_CREATE': None,
            },
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_matched_test_settings(self):
        # should be able to define new settings and the old, if they match
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {'NAME': 'foo'},
                'TEST_NAME': 'foo',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            self.handler.prepare_test_settings('test-deprecation')

    def test_new_settings_only(self):
        # should be able to define new settings without the old
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {'NAME': 'foo'},
            },
        })
        with override_settings(DATABASES=self.db_settings):
            self.handler.prepare_test_settings('test-deprecation')

    @ignore_warnings(category=RemovedInDjango19Warning)
    def test_old_settings_only(self):
        # should be able to define old settings without the new
        self.db_settings.update({
            'test-deprecation': {
                'TEST_NAME': 'foo',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            self.handler.prepare_test_settings('test-deprecation')

    def test_empty_settings(self):
        with override_settings(DATABASES=self.db_settings):
            self.handler.prepare_test_settings('default')
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite specific test.')
@skipUnlessDBFeature('can_share_in_memory_db')
class TestSqliteThreadSharing(TransactionTestCase):
    """A shareable in-memory SQLite database must be visible from a second
    thread: both inserts land in the same database."""
    available_apps = ['backends']

    def test_database_sharing_in_threads(self):
        def create_object():
            models.Object.objects.create()

        create_object()
        thread = threading.Thread(target=create_object)
        thread.start()
        thread.join()
        self.assertEqual(models.Object.objects.count(), 2)
|
processor.py | import asyncio
from .utils import *
from multiprocessing import Process, Pipe, Lock
SOURCE_ID = 0
SENTINEL = -1
class Processor:
    '''A camera processor'''

    def __init__(self, camera, streams, stride, has_consumer=False, name=None):
        # streams: output streams this processor writes to.
        # stride: frame-processing stride (passed in by the caller; the
        #   visible code only stores it -- presumably the camera uses it to
        #   decide how often to invoke this processor; verify against camera).
        # has_consumer: when True, spawn a worker *process* connected by a
        #   duplex Pipe for offloaded work (see _queue_work/_consume_work).
        self.streams = streams
        if name is None:
            self.name = self.__class__.__name__
        else:
            self.name = name
        self.stride = stride
        camera.add_frame_processor(self)
        self.camera = camera
        #set-up offloaded process and pipes for data
        self.has_consumer = has_consumer
        if has_consumer:
            self._work_conn, p = Pipe(duplex=True)
            self._lock = Lock()
            self.consumer = Process(target=self._consume_work, args=(p,self._lock))
            print('starting consumer thread....')
            self.consumer.start()

    @property
    def objects(self):
        # Subclasses override to report detected objects; base has none.
        return []

    def close(self):
        """Detach from the camera and, if present, shut down the consumer
        process by sending the SENTINEL value and joining it."""
        print('Closing ' + self.__class__.__name__)
        self.camera.remove_frame_processor(self)
        if self.has_consumer:
            self._work_conn.send(SENTINEL)
            self.consumer.join()

    def _queue_work(self,data):
        # Fire-and-forget: schedule the async round-trip on the running loop.
        asyncio.ensure_future(self._await_work(data))

    async def _await_work(self, data):
        # apparently you cannot await Connection objects???
        # also, there is some kind of buggy interaction when polling directly
        # use a lock instead
        # NOTE(review): the handshake below relies on the consumer releasing
        # self._lock only after a result is ready; recv() then blocks until
        # send() on the other side completes. The lock starts unlocked, so the
        # first non-blocking acquire here can succeed before any result exists
        # and recv() provides the actual synchronization -- confirm this
        # ordering is intentional before restructuring.
        self._work_conn.send(data)
        while not self._lock.acquire(False):
            await asyncio.sleep(0) # do other things
        result = self._work_conn.recv()
        self._receive_result(result)
        self._lock.release()

    def _receive_result(self, result):
        '''override this to receive and process data which was processed via _process_work'''
        pass

    @classmethod
    def _consume_work(cls, return_conn, lock):
        '''This is the other thread main loop, which reads in data, handles the exit and calls _process_work'''
        while True:
            data = return_conn.recv()
            if data == SENTINEL:
                break
            result = cls._process_work(data)
            # Release before send: lets the parent pass its acquire-poll; its
            # recv() then waits for the send below. Re-acquire afterwards so
            # the parent's release/acquire cycle stays paired.
            lock.release()
            return_conn.send(result)
            lock.acquire()

    @classmethod
    def _process_work(cls, data):
        '''Override this method to process data passed to queue_work in a different thread'''
        pass
|
TProcessPoolServer.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
from multiprocessing import Process, Value, Condition
from .TServer import TServer
from thrift.transport.TTransport import TTransportException
logger = logging.getLogger(__name__)
class TProcessPoolServer(TServer):
    """Server with a fixed size pool of worker subprocesses to service requests

    Note that if you need shared state between the handlers - it's up to you!
    Written by Dvir Volk, doat.com
    """
    def __init__(self, *args):
        TServer.__init__(self, *args)
        self.numWorkers = 10
        self.workers = []
        # Shared boolean flag polled by the worker processes; flipping it to
        # False makes their accept loops exit.
        self.isRunning = Value('b', False)
        # Condition used to park serve() until stop() notifies it.
        self.stopCondition = Condition()
        self.postForkCallback = None

    def setPostForkCallback(self, callback):
        """Register a callable invoked in each worker right after forking.

        Raises TypeError if `callback` is not callable.
        """
        if not callable(callback):
            raise TypeError("This is not a callback!")
        self.postForkCallback = callback

    def setNumWorkers(self, num):
        """Set the number of worker threads that should be created"""
        self.numWorkers = num

    def workerProcess(self):
        """Loop getting clients from the shared queue and process them"""
        if self.postForkCallback:
            self.postForkCallback()
        while self.isRunning.value:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                self.serveClient(client)
            except (KeyboardInterrupt, SystemExit):
                return 0
            except Exception as x:
                logger.exception(x)

    def serveClient(self, client):
        """Process input/output from a client for as long as possible"""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransportException:
            # Normal end of conversation: the client disconnected.
            pass
        except Exception as x:
            logger.exception(x)
        finally:
            # Close transports even if KeyboardInterrupt/SystemExit propagates
            # (the original leaked them on those paths).
            itrans.close()
            otrans.close()

    def serve(self):
        """Start workers and put into queue"""
        # this is a shared state that can tell the workers to exit when False
        self.isRunning.value = True
        # first bind and listen to the port
        self.serverTransport.listen()
        # fork the children
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except Exception as x:
                logger.exception(x)
        # wait until the condition is set by stop()
        while True:
            # Bug fix: the original acquired stopCondition without ever
            # releasing it, and on the generic-exception path looped back and
            # re-acquired the (non-reentrant) lock while still holding it,
            # deadlocking both this loop and any stop() call. Using the
            # condition as a context manager releases it on every exit path.
            with self.stopCondition:
                try:
                    self.stopCondition.wait()
                    break
                except (SystemExit, KeyboardInterrupt):
                    break
                except Exception as x:
                    logger.exception(x)
        self.isRunning.value = False

    def stop(self):
        """Signal serve() to return and tell workers to stop accepting."""
        self.isRunning.value = False
        with self.stopCondition:
            self.stopCondition.notify()
|
python_ls.py | # Copyright 2017 Palantir Technologies, Inc.
import logging
import socketserver
import threading
from pyls_jsonrpc.dispatchers import MethodDispatcher
from pyls_jsonrpc.endpoint import Endpoint
from pyls_jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from . import lsp, _utils, uris
from .config import config
from .workspace import Workspace
log = logging.getLogger(__name__)

# Debounce window applied to lint() calls, keyed per document URI.
LINT_DEBOUNCE_S = 0.5  # 500 ms
# Interval at which the watcher thread re-checks the parent process.
PARENT_PROCESS_WATCH_INTERVAL = 10  # 10 s
# Upper bound on the JSON-RPC endpoint's worker thread pool.
MAX_WORKERS = 64
# Suffixes treated as Python sources by the watched-files handler.
PYTHON_FILE_EXTENSIONS = ('.py', '.pyi')
# Config files whose changes invalidate the cached settings.
# NOTE(review): name breaks UPPER_SNAKE_CASE ('CONFIG_FILEs'); renaming would
# touch every reference, so it is deliberately left as-is here.
CONFIG_FILEs = ('pycodestyle.cfg', 'setup.cfg', 'tox.ini', '.flake8')
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
    """A wrapper class that is used to construct a custom handler class."""

    # Per-connection language-server instance, created in setup().
    delegate = None

    def setup(self):
        super(_StreamHandlerWrapper, self).setup()
        # DELEGATE_CLASS is injected onto the dynamically built subclass by
        # start_tcp_lang_server() via type(); pylint cannot see it.
        # pylint: disable=no-member
        self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)

    def handle(self):
        # Runs the language-server loop; returns when the client disconnects.
        self.delegate.start()
def start_tcp_lang_server(bind_addr, port, handler_class):
    """Serve *handler_class* over TCP on ``(bind_addr, port)`` until stopped.

    Blocks in ``serve_forever()``; the server socket is always closed on exit.

    Raises:
        ValueError: if *handler_class* is not a PythonLanguageServer subclass.
    """
    if not issubclass(handler_class, PythonLanguageServer):
        # Fixed message: the check is issubclass, not isinstance.
        raise ValueError('Handler class must be a subclass of PythonLanguageServer')
    # Construct a custom wrapper class around the user's handler_class
    wrapper_class = type(
        handler_class.__name__ + 'Handler',
        (_StreamHandlerWrapper,),
        {'DELEGATE_CLASS': handler_class}
    )
    server = socketserver.TCPServer((bind_addr, port), wrapper_class)
    server.allow_reuse_address = True
    try:
        log.info('Serving %s on (%s, %s)', handler_class.__name__, bind_addr, port)
        server.serve_forever()
    finally:
        log.info('Shutting down')
        server.server_close()
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class):
    """Run *handler_class* as a language server over the given file objects.

    Args:
        rfile: readable stream carrying incoming JSON-RPC messages.
        wfile: writable stream for outgoing JSON-RPC messages.
        check_parent_process: whether the server should exit when its parent dies.
        handler_class: a PythonLanguageServer subclass to instantiate.

    Raises:
        ValueError: if *handler_class* is not a PythonLanguageServer subclass.
    """
    if not issubclass(handler_class, PythonLanguageServer):
        # Fixed message: the check is issubclass, not isinstance.
        raise ValueError('Handler class must be a subclass of PythonLanguageServer')
    log.info('Starting %s IO language server', handler_class.__name__)
    server = handler_class(rfile, wfile, check_parent_process)
    server.start()
class PythonLanguageServer(MethodDispatcher):
    """ Implementation of the Microsoft VSCode Language Server Protocol
    https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md
    """
    # pylint: disable=too-many-public-methods,redefined-builtin

    def __init__(self, rx, tx, check_parent_process=False):
        # Workspace and config are created later, in m_initialize().
        self.workspace = None
        self.config = None
        self._jsonrpc_stream_reader = JsonRpcStreamReader(rx)
        self._jsonrpc_stream_writer = JsonRpcStreamWriter(tx)
        self._check_parent_process = check_parent_process
        self._endpoint = Endpoint(self, self._jsonrpc_stream_writer.write, max_workers=MAX_WORKERS)
        self._dispatchers = []
        self._shutdown = False

    def start(self):
        """Entry point for the server."""
        self._jsonrpc_stream_reader.listen(self._endpoint.consume)

    def __getitem__(self, item):
        """Override getitem to fallback through multiple dispatchers."""
        if self._shutdown and item != 'exit':
            # exit is the only allowed method during shutdown
            log.debug("Ignoring non-exit method during shutdown: %s", item)
            raise KeyError
        try:
            return super(PythonLanguageServer, self).__getitem__(item)
        except KeyError:
            # Fallback through extra dispatchers
            for dispatcher in self._dispatchers:
                try:
                    return dispatcher[item]
                except KeyError:
                    continue
            raise KeyError()

    def m_shutdown(self, **_kwargs):
        # LSP 'shutdown': only flags state; dispatching all but 'exit' stops
        # via the __getitem__ guard above.
        self._shutdown = True
        return None

    def m_exit(self, **_kwargs):
        # LSP 'exit': tear down the endpoint and both JSON-RPC streams.
        self._endpoint.shutdown()
        self._jsonrpc_stream_reader.close()
        self._jsonrpc_stream_writer.close()

    def _hook(self, hook_name, doc_uri=None, **kwargs):
        """Calls hook_name and returns a list of results from all registered handlers"""
        doc = self.workspace.get_document(doc_uri) if doc_uri else None
        hook_handlers = self.config.plugin_manager.subset_hook_caller(hook_name, self.config.disabled_plugins)
        return hook_handlers(config=self.config, workspace=self.workspace, document=doc, **kwargs)

    def capabilities(self):
        """Build the ServerCapabilities dict advertised to the client."""
        server_capabilities = {
            'codeActionProvider': True,
            'codeLensProvider': {
                'resolveProvider': False,  # We may need to make this configurable
            },
            'completionProvider': {
                'resolveProvider': False,  # We know everything ahead of time
                'triggerCharacters': ['.']
            },
            'documentFormattingProvider': True,
            'documentHighlightProvider': True,
            'documentRangeFormattingProvider': True,
            'documentSymbolProvider': True,
            'definitionProvider': True,
            'executeCommandProvider': {
                'commands': flatten(self._hook('pyls_commands'))
            },
            'hoverProvider': True,
            'referencesProvider': True,
            'renameProvider': True,
            'signatureHelpProvider': {
                'triggerCharacters': ['(', ',']
            },
            'textDocumentSync': lsp.TextDocumentSyncKind.INCREMENTAL,
            'experimental': merge(self._hook('pyls_experimental_capabilities'))
        }
        log.info('Server capabilities: %s', server_capabilities)
        return server_capabilities

    def m_initialize(self, processId=None, rootUri=None, rootPath=None, initializationOptions=None, **_kwargs):
        """LSP 'initialize': create workspace/config, start the parent watcher,
        and return the server capabilities."""
        log.debug('Language server initialized with %s %s %s %s', processId, rootUri, rootPath, initializationOptions)
        if rootUri is None:
            rootUri = uris.from_fs_path(rootPath) if rootPath is not None else ''
        self.workspace = Workspace(rootUri, self._endpoint)
        self.config = config.Config(rootUri, initializationOptions or {},
                                    processId, _kwargs.get('capabilities', {}))
        self._dispatchers = self._hook('pyls_dispatchers')
        self._hook('pyls_initialize')
        if self._check_parent_process and processId is not None:
            def watch_parent_process(pid):
                # exit when the given pid is not alive
                if not _utils.is_process_alive(pid):
                    log.info("parent process %s is not alive", pid)
                    self.m_exit()
                log.debug("parent process %s is still alive", pid)
                # Re-arm the timer so the check repeats every interval.
                threading.Timer(PARENT_PROCESS_WATCH_INTERVAL, watch_parent_process, args=[pid]).start()

            watching_thread = threading.Thread(target=watch_parent_process, args=(processId,))
            watching_thread.daemon = True
            watching_thread.start()
        # Get our capabilities
        return {'capabilities': self.capabilities()}

    def m_initialized(self, **_kwargs):
        pass

    def code_actions(self, doc_uri, range, context):
        return flatten(self._hook('pyls_code_actions', doc_uri, range=range, context=context))

    def code_lens(self, doc_uri):
        return flatten(self._hook('pyls_code_lens', doc_uri))

    def completions(self, doc_uri, position):
        completions = self._hook('pyls_completions', doc_uri, position=position)
        return {
            'isIncomplete': False,
            'items': flatten(completions)
        }

    def definitions(self, doc_uri, position):
        return flatten(self._hook('pyls_definitions', doc_uri, position=position))

    def document_symbols(self, doc_uri):
        return flatten(self._hook('pyls_document_symbols', doc_uri))

    def execute_command(self, command, arguments):
        return self._hook('pyls_execute_command', command=command, arguments=arguments)

    def format_document(self, doc_uri):
        return self._hook('pyls_format_document', doc_uri)

    def format_range(self, doc_uri, range):
        return self._hook('pyls_format_range', doc_uri, range=range)

    def highlight(self, doc_uri, position):
        return flatten(self._hook('pyls_document_highlight', doc_uri, position=position)) or None

    def hover(self, doc_uri, position):
        return self._hook('pyls_hover', doc_uri, position=position) or {'contents': ''}

    @_utils.debounce(LINT_DEBOUNCE_S, keyed_by='doc_uri')
    def lint(self, doc_uri, is_saved):
        # Since we're debounced, the document may no longer be open
        if doc_uri in self.workspace.documents:
            self.workspace.publish_diagnostics(
                doc_uri,
                flatten(self._hook('pyls_lint', doc_uri, is_saved=is_saved))
            )

    def references(self, doc_uri, position, exclude_declaration):
        return flatten(self._hook(
            'pyls_references', doc_uri, position=position,
            exclude_declaration=exclude_declaration
        ))

    def rename(self, doc_uri, position, new_name):
        return self._hook('pyls_rename', doc_uri, position=position, new_name=new_name)

    def signature_help(self, doc_uri, position):
        return self._hook('pyls_signature_help', doc_uri, position=position)

    # --- textDocument/* handlers: thin adapters that unpack the JSON-RPC
    # params and delegate to the plain methods above. ---

    def m_text_document__did_close(self, textDocument=None, **_kwargs):
        self.workspace.rm_document(textDocument['uri'])

    def m_text_document__did_open(self, textDocument=None, **_kwargs):
        self.workspace.put_document(textDocument['uri'], textDocument['text'], version=textDocument.get('version'))
        self._hook('pyls_document_did_open', textDocument['uri'])
        self.lint(textDocument['uri'], is_saved=False)

    def m_text_document__did_change(self, contentChanges=None, textDocument=None, **_kwargs):
        for change in contentChanges:
            self.workspace.update_document(
                textDocument['uri'],
                change,
                version=textDocument.get('version')
            )
        self.lint(textDocument['uri'], is_saved=False)

    def m_text_document__did_save(self, textDocument=None, **_kwargs):
        self.lint(textDocument['uri'], is_saved=True)

    def m_text_document__code_action(self, textDocument=None, range=None, context=None, **_kwargs):
        return self.code_actions(textDocument['uri'], range, context)

    def m_text_document__code_lens(self, textDocument=None, **_kwargs):
        return self.code_lens(textDocument['uri'])

    def m_text_document__completion(self, textDocument=None, position=None, **_kwargs):
        return self.completions(textDocument['uri'], position)

    def m_text_document__definition(self, textDocument=None, position=None, **_kwargs):
        return self.definitions(textDocument['uri'], position)

    def m_text_document__document_highlight(self, textDocument=None, position=None, **_kwargs):
        return self.highlight(textDocument['uri'], position)

    def m_text_document__hover(self, textDocument=None, position=None, **_kwargs):
        return self.hover(textDocument['uri'], position)

    def m_text_document__document_symbol(self, textDocument=None, **_kwargs):
        return self.document_symbols(textDocument['uri'])

    def m_text_document__formatting(self, textDocument=None, _options=None, **_kwargs):
        # For now we're ignoring formatting options.
        return self.format_document(textDocument['uri'])

    def m_text_document__rename(self, textDocument=None, position=None, newName=None, **_kwargs):
        return self.rename(textDocument['uri'], position, newName)

    def m_text_document__range_formatting(self, textDocument=None, range=None, _options=None, **_kwargs):
        # Again, we'll ignore formatting options for now.
        return self.format_range(textDocument['uri'], range)

    def m_text_document__references(self, textDocument=None, position=None, context=None, **_kwargs):
        exclude_declaration = not context['includeDeclaration']
        return self.references(textDocument['uri'], position, exclude_declaration)

    def m_text_document__signature_help(self, textDocument=None, position=None, **_kwargs):
        return self.signature_help(textDocument['uri'], position)

    def m_workspace__did_change_configuration(self, settings=None):
        # New settings may change which plugins run, so re-lint everything.
        self.config.update((settings or {}).get('pyls', {}))
        for doc_uri in self.workspace.documents:
            self.lint(doc_uri, is_saved=False)

    def m_workspace__did_change_watched_files(self, changes=None, **_kwargs):
        """Re-lint open documents when external python/config files change."""
        changed_py_files = set()
        config_changed = False
        for d in (changes or []):
            if d['uri'].endswith(PYTHON_FILE_EXTENSIONS):
                changed_py_files.add(d['uri'])
            elif d['uri'].endswith(CONFIG_FILEs):
                config_changed = True
        if config_changed:
            self.config.settings.cache_clear()
        elif not changed_py_files:
            # Only externally changed python files and lint configs may result in changed diagnostics.
            return
        for doc_uri in self.workspace.documents:
            # Changes in doc_uri are already handled by m_text_document__did_save
            if doc_uri not in changed_py_files:
                self.lint(doc_uri, is_saved=False)

    def m_workspace__execute_command(self, command=None, arguments=None):
        return self.execute_command(command, arguments)
def flatten(list_of_lists):
    """Concatenate the sub-sequences of *list_of_lists* into a single list."""
    flat = []
    for sub in list_of_lists:
        flat.extend(sub)
    return flat
def merge(list_of_dicts):
    """Merge the dicts left-to-right into one dict; later keys win."""
    merged = {}
    for mapping in list_of_dicts:
        merged.update(mapping)
    return merged
|
abs_task.py | from abc import ABC
from abc import abstractmethod
import argparse
from distutils.version import LooseVersion
import functools
import logging
import os
from pathlib import Path
import sys
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import configargparse
import humanfriendly
import numpy as np
import torch
import torch.multiprocessing
import torch.nn
import torch.optim
from torch.utils.data import DataLoader
from typeguard import check_argument_types
from typeguard import check_return_type
import yaml
from espnet.utils.cli_utils import get_commandline_args
from espnet2.iterators.abs_iter_factory import AbsIterFactory
from espnet2.iterators.chunk_iter_factory import ChunkIterFactory
from espnet2.iterators.multiple_iter_factory import MultipleIterFactory
from espnet2.iterators.sequence_iter_factory import SequenceIterFactory
from espnet2.main_funcs.average_nbest_models import average_nbest_models
from espnet2.main_funcs.collect_stats import collect_stats
from espnet2.optimizers.sgd import SGD
from espnet2.samplers.build_batch_sampler import BATCH_TYPES
from espnet2.samplers.build_batch_sampler import build_batch_sampler
from espnet2.samplers.unsorted_batch_sampler import UnsortedBatchSampler
from espnet2.schedulers.abs_scheduler import AbsScheduler
from espnet2.schedulers.noam_lr import NoamLR
from espnet2.schedulers.warmup_lr import WarmupLR
from espnet2.torch_utils.load_pretrained_model import load_pretrained_model
from espnet2.torch_utils.pytorch_version import pytorch_cudnn_version
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.class_choices import ClassChoices
from espnet2.train.dataset import DATA_TYPES
from espnet2.train.dataset import ESPnetDataset
from espnet2.train.distributed_utils import DistributedOption
from espnet2.train.distributed_utils import free_port
from espnet2.train.distributed_utils import get_master_port
from espnet2.train.distributed_utils import get_node_rank
from espnet2.train.distributed_utils import get_num_nodes
from espnet2.train.distributed_utils import resolve_distributed_mode
from espnet2.train.iterable_dataset import IterableESPnetDataset
from espnet2.train.reporter import Reporter
from espnet2.train.trainer import Trainer
from espnet2.utils.build_dataclass import build_dataclass
from espnet2.utils.get_default_kwargs import get_default_kwargs
from espnet2.utils.model_summary import model_summary
from espnet2.utils.nested_dict_action import NestedDictAction
from espnet2.utils.types import humanfriendly_parse_size_or_none
from espnet2.utils.types import int_or_none
from espnet2.utils.types import str2bool
from espnet2.utils.types import str2triple_str
from espnet2.utils.types import str_or_int
from espnet2.utils.types import str_or_none
from espnet2.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"):
from torch.multiprocessing.spawn import ProcessContext
else:
from torch.multiprocessing.spawn import SpawnContext as ProcessContext
# Registry mapping optimizer name (lower-cased CLI choice) -> optimizer class.
optim_classes = dict(
    adam=torch.optim.Adam,
    sgd=SGD,
    adadelta=torch.optim.Adadelta,
    adagrad=torch.optim.Adagrad,
    adamax=torch.optim.Adamax,
    asgd=torch.optim.ASGD,
    lbfgs=torch.optim.LBFGS,
    rmsprop=torch.optim.RMSprop,
    rprop=torch.optim.Rprop,
)
if LooseVersion(torch.__version__) >= LooseVersion("1.2.0"):
    # AdamW is only available from torch 1.2.0 onward.
    optim_classes["adamw"] = torch.optim.AdamW
# Optional third-party optimizers; silently skipped when not installed.
try:
    import torch_optimizer

    optim_classes.update(
        # NOTE(review): 'accagd' looks like a typo for 'accsgd' (AccSGD);
        # kept as-is because it is a user-facing CLI choice.
        accagd=torch_optimizer.AccSGD,
        adabound=torch_optimizer.AdaBound,
        adamod=torch_optimizer.AdaMod,
        diffgrad=torch_optimizer.DiffGrad,
        lamb=torch_optimizer.Lamb,
        novograd=torch_optimizer.NovoGrad,
        pid=torch_optimizer.PID,
        # torch_optimizer<=0.0.1a10 doesn't support
        # qhadam=torch_optimizer.QHAdam,
        qhm=torch_optimizer.QHM,
        radam=torch_optimizer.RAdam,
        sgdw=torch_optimizer.SGDW,
        yogi=torch_optimizer.Yogi,
    )
    # Only the registered classes are needed; drop the module name.
    del torch_optimizer
except ImportError:
    pass
# Optional NVIDIA apex fused optimizers; silently skipped when not installed.
try:
    import apex

    optim_classes.update(
        fusedadam=apex.optimizers.FusedAdam,
        fusedlamb=apex.optimizers.FusedLAMB,
        fusednovograd=apex.optimizers.FusedNovoGrad,
        fusedsgd=apex.optimizers.FusedSGD,
    )
    del apex
except ImportError:
    pass
# Registry mapping scheduler name (lower-cased CLI choice) -> scheduler class.
scheduler_classes = dict(
    ReduceLROnPlateau=torch.optim.lr_scheduler.ReduceLROnPlateau,
    lambdalr=torch.optim.lr_scheduler.LambdaLR,
    steplr=torch.optim.lr_scheduler.StepLR,
    multisteplr=torch.optim.lr_scheduler.MultiStepLR,
    exponentiallr=torch.optim.lr_scheduler.ExponentialLR,
    CosineAnnealingLR=torch.optim.lr_scheduler.CosineAnnealingLR,
)
if LooseVersion(torch.__version__) >= LooseVersion("1.1.0"):
    scheduler_classes.update(
        noamlr=NoamLR, warmuplr=WarmupLR,
    )
if LooseVersion(torch.__version__) >= LooseVersion("1.3.0"):
    CosineAnnealingWarmRestarts = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts
    scheduler_classes.update(
        cycliclr=torch.optim.lr_scheduler.CyclicLR,
        onecyclelr=torch.optim.lr_scheduler.OneCycleLR,
        CosineAnnealingWarmRestarts=CosineAnnealingWarmRestarts,
    )
# To lower keys
optim_classes = {k.lower(): v for k, v in optim_classes.items()}
scheduler_classes = {k.lower(): v for k, v in scheduler_classes.items()}
class AbsTask(ABC):
# Use @staticmethod, or @classmethod,
# instead of instance method to avoid God classes
# If you need more than one optimizers, change this value in inheritance
num_optimizers: int = 1
trainer = Trainer
class_choices_list: List[ClassChoices] = []
def __init__(self):
raise RuntimeError("This class can't be instantiated.")
@classmethod
@abstractmethod
def add_task_arguments(cls, parser: argparse.ArgumentParser):
pass
@classmethod
@abstractmethod
def build_collate_fn(
cls, args: argparse.Namespace
) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:
"""Return "collate_fn", which is a callable object and given to DataLoader.
>>> from torch.utils.data import DataLoader
>>> loader = DataLoader(collate_fn=cls.build_collate_fn(args), ...)
In many cases, you can use our common collate_fn.
"""
raise NotImplementedError
@classmethod
@abstractmethod
def build_preprocess_fn(
cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
raise NotImplementedError
@classmethod
@abstractmethod
def required_data_names(cls, inference: bool = False) -> Tuple[str, ...]:
"""Define the required names by Task
This function is used by
>>> cls.check_task_requirements()
If your model is defined as following,
>>> from espnet2.train.abs_espnet_model import AbsESPnetModel
>>> class Model(AbsESPnetModel):
... def forward(self, input, output, opt=None): pass
then "required_data_names" should be as
>>> required_data_names = ('input', 'output')
"""
raise NotImplementedError
@classmethod
@abstractmethod
def optional_data_names(cls, inference: bool = False) -> Tuple[str, ...]:
"""Define the optional names by Task
This function is used by
>>> cls.check_task_requirements()
If your model is defined as following,
>>> from espnet2.train.abs_espnet_model import AbsESPnetModel
>>> class Model(AbsESPnetModel):
... def forward(self, input, output, opt=None): pass
then "optional_data_names" should be as
>>> optional_data_names = ('opt',)
"""
raise NotImplementedError
@classmethod
@abstractmethod
def build_model(cls, args: argparse.Namespace) -> AbsESPnetModel:
raise NotImplementedError
@classmethod
def get_parser(cls) -> configargparse.ArgumentParser:
assert check_argument_types()
class ArgumentDefaultsRawTextHelpFormatter(
configargparse.RawTextHelpFormatter,
configargparse.ArgumentDefaultsHelpFormatter,
):
pass
parser = configargparse.ArgumentParser(
description="base parser",
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=ArgumentDefaultsRawTextHelpFormatter,
)
# NOTE(kamo): Use '_' instead of '-' to avoid confusion.
# I think '-' looks really confusing if it's written in yaml.
# NOTE(kamo): add_arguments(..., required=True) can't be used
# to provide --print_config mode. Instead of it, do as
parser.set_defaults(required=["output_dir"])
group = parser.add_argument_group("Common configuration")
group.add_argument("--config", is_config_file=True, help="config file path")
group.add_argument(
"--print_config",
action="store_true",
help="Print the config file and exit",
)
group.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
choices=("ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
group.add_argument(
"--dry_run",
type=str2bool,
default=False,
help="Perform process without training",
)
group.add_argument(
"--iterator_type",
type=str,
choices=["sequence", "none"],
default="sequence",
help="Specify iterator type",
)
group.add_argument("--output_dir", type=str_or_none, default=None)
group.add_argument(
"--ngpu",
type=int,
default=0,
help="The number of gpus. 0 indicates CPU mode",
)
group.add_argument("--seed", type=int, default=0, help="Random seed")
group.add_argument(
"--num_workers",
type=int,
default=1,
help="The number of workers used for DataLoader",
)
group.add_argument(
"--num_att_plot",
type=int,
default=3,
help="The number images to plot the outputs from attention. "
"This option makes sense only when attention-based model",
)
group = parser.add_argument_group("distributed training related")
group.add_argument(
"--dist_backend", default="nccl", type=str, help="distributed backend",
)
group.add_argument(
"--dist_init_method",
type=str,
default="env://",
help='if init_method="env://", env values of "MASTER_PORT", "MASTER_ADDR", '
'"WORLD_SIZE", and "RANK" are referred.',
)
group.add_argument(
"--dist_world_size",
default=None,
type=int_or_none,
help="number of nodes for distributed training",
)
group.add_argument(
"--dist_rank",
type=int_or_none,
default=None,
help="node rank for distributed training",
)
group.add_argument(
# Not starting with "dist_" for compatibility to launch.py
"--local_rank",
type=int_or_none,
default=None,
help="local rank for distributed training. This option is used if "
"--multiprocessing_distributed=false",
)
group.add_argument(
"--dist_master_addr",
default=None,
type=str_or_none,
help="The master address for distributed training. "
"This value is used when dist_init_method == 'env://'",
)
group.add_argument(
"--dist_master_port",
default=None,
type=int_or_none,
help="The master port for distributed training"
"This value is used when dist_init_method == 'env://'",
)
group.add_argument(
"--dist_launcher",
default=None,
type=str_or_none,
choices=["slurm", "mpi", None],
help="The launcher type for distributed training",
)
group.add_argument(
"--multiprocessing_distributed",
default=False,
type=str2bool,
help="Use multi-processing distributed training to launch "
"N processes per node, which has N GPUs. This is the "
"fastest way to use PyTorch for either single node or "
"multi node data parallel training",
)
group = parser.add_argument_group("cudnn mode related")
group.add_argument(
"--cudnn_enabled",
type=str2bool,
default=torch.backends.cudnn.enabled,
help="Enable CUDNN",
)
group.add_argument(
"--cudnn_benchmark",
type=str2bool,
default=torch.backends.cudnn.benchmark,
help="Enable cudnn-benchmark mode",
)
group.add_argument(
"--cudnn_deterministic",
type=str2bool,
default=True,
help="Enable cudnn-deterministic mode",
)
group = parser.add_argument_group("collect stats mode related")
group.add_argument(
"--collect_stats",
type=str2bool,
default=False,
help='Perform on "collect stats" mode',
)
group.add_argument(
"--write_collected_feats",
type=str2bool,
default=False,
help='Write the output features from the model when "collect stats" mode',
)
group = parser.add_argument_group("Trainer related")
group.add_argument(
"--max_epoch",
type=int,
default=40,
help="The maximum number epoch to train",
)
group.add_argument(
"--patience",
type=int_or_none,
default=None,
help="Number of epochs to wait without improvement "
"before stopping the training",
)
group.add_argument(
"--val_scheduler_criterion",
type=str,
nargs=2,
default=("valid", "loss"),
help="The criterion used for the value given to the lr scheduler. "
'Give a pair referring the phase, "train" or "valid",'
'and the criterion name. The mode specifying "min" or "max" can '
"be changed by --scheduler_conf",
)
group.add_argument(
"--early_stopping_criterion",
type=str,
nargs=3,
default=("valid", "loss", "min"),
help="The criterion used for judging of early stopping. "
'Give a pair referring the phase, "train" or "valid",'
'the criterion name and the mode, "min" or "max", e.g. "acc,max".',
)
group.add_argument(
"--best_model_criterion",
type=str2triple_str,
nargs="+",
default=[
("train", "loss", "min"),
("valid", "loss", "min"),
("train", "acc", "max"),
("valid", "acc", "max"),
],
help="The criterion used for judging of the best model. "
'Give a pair referring the phase, "train" or "valid",'
'the criterion name, and the mode, "min" or "max", e.g. "acc,max".',
)
group.add_argument(
"--keep_nbest_models",
type=int,
default=10,
help="Remove previous snapshots excluding the n-best scored epochs",
)
group.add_argument(
"--grad_clip",
type=float,
default=5.0,
help="Gradient norm threshold to clip",
)
group.add_argument(
"--grad_noise",
type=str2bool,
default=False,
help="The flag to switch to use noise injection to "
"gradients during training",
)
group.add_argument(
"--accum_grad",
type=int,
default=1,
help="The number of gradient accumulation",
)
group.add_argument(
"--no_forward_run",
type=str2bool,
default=False,
help="Just only iterating data loading without "
"model forwarding and training",
)
group.add_argument(
"--resume",
type=str2bool,
default=False,
help="Enable resuming if checkpoint is existing",
)
group.add_argument(
"--train_dtype",
default="float32",
choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
help="Data type for training. O0,O1,.. flags require apex. "
"See https://nvidia.github.io/apex/amp.html#opt-levels",
)
group.add_argument(
"--log_interval",
type=int_or_none,
default=None,
help="Show the logs every the number iterations in each epochs at the "
"training phase. If None is given, it is decided according the number "
"of training samples automatically .",
)
group = parser.add_argument_group("Pretraining model related")
group.add_argument("--pretrain_path", type=str, default=[], nargs="*")
group.add_argument("--pretrain_key", type=str_or_none, default=[], nargs="*")
group = parser.add_argument_group("BatchSampler related")
group.add_argument(
"--num_iters_per_epoch",
type=int_or_none,
default=None,
help="Restrict the number of iterations for training per epoch",
)
group.add_argument(
"--batch_size",
type=int,
default=20,
help="The mini-batch size used for training. Used if batch_type='unsorted',"
" 'sorted', or 'folded'.",
)
group.add_argument(
"--valid_batch_size",
type=int_or_none,
default=None,
help="If not given, the value of --batch_size is used",
)
group.add_argument(
"--batch_bins",
type=int,
default=1000000,
help="The number of batch bins. Used if batch_type='length' or 'numel'",
)
group.add_argument(
"--valid_batch_bins",
type=int_or_none,
default=None,
help="If not given, the value of --batch_bins is used",
)
group.add_argument("--train_shape_file", type=str, action="append", default=[])
group.add_argument("--valid_shape_file", type=str, action="append", default=[])
group = parser.add_argument_group("Sequence iterator related")
_batch_type_help = ""
for key, value in BATCH_TYPES.items():
_batch_type_help += f'"{key}":\n{value}\n'
group.add_argument(
"--batch_type",
type=str,
default="folded",
choices=list(BATCH_TYPES),
help=_batch_type_help,
)
group.add_argument(
"--valid_batch_type",
type=str_or_none,
default=None,
choices=list(BATCH_TYPES) + [None],
help="If not given, the value of --batch_type is used",
)
group.add_argument("--fold_length", type=int, action="append", default=[])
group.add_argument(
"--sort_in_batch",
type=str,
default="descending",
choices=["descending", "ascending"],
help="Sort the samples in each mini-batches by the sample "
'lengths. To enable this, "shape_file" must have the length information.',
)
group.add_argument(
"--sort_batch",
type=str,
default="descending",
choices=["descending", "ascending"],
help="Sort mini-batches by the sample lengths",
)
group.add_argument(
"--multiple_iterator",
type=str2bool,
default=False,
help="Use multiple iterator mode",
)
group = parser.add_argument_group("Chunk iterator related")
group.add_argument(
"--chunk_length",
type=str_or_int,
default=500,
help="Specify chunk length. e.g. '300', '300,400,500', or '300-400'."
"If multiple numbers separated by command are given, "
"one of them is selected randomly for each samples. "
"If two numbers are given with '-', it indicates the range of the choices. "
"Note that if the sequence length is shorter than the all chunk_lengths, "
"the sample is discarded. ",
)
group.add_argument(
"--chunk_shift_ratio",
type=float,
default=0.5,
help="Specify the shift width of chunks. If it's less than 1, "
"allows the overlapping and if bigger than 1, there are some gaps "
"between each chunk.",
)
group.add_argument(
"--num_cache_chunks",
type=int,
default=1024,
help="Shuffle in the specified number of chunks and generate mini-batches "
"More larger this value, more randomness can be obtained.",
)
group = parser.add_argument_group("Dataset related")
_data_path_and_name_and_type_help = (
"Give three words splitted by comma. It's used for the training data. "
"e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. "
"The first value, some/path/a.scp, indicates the file path, "
"and the second, foo, is the key name used for the mini-batch data, "
"and the last, sound, decides the file type. "
"This option is repeatable, so you can input any number of features "
"for your task. Supported file types are as follows:\n\n"
)
for key, dic in DATA_TYPES.items():
_data_path_and_name_and_type_help += f'"{key}":\n{dic["help"]}\n\n'
group.add_argument(
"--train_data_path_and_name_and_type",
type=str2triple_str,
action="append",
default=[],
help=_data_path_and_name_and_type_help,
)
group.add_argument(
"--valid_data_path_and_name_and_type",
type=str2triple_str,
action="append",
default=[],
)
group.add_argument(
"--allow_variable_data_keys",
type=str2bool,
default=False,
help="Allow the arbitrary keys for mini-batch with ignoring "
"the task requirements",
)
group.add_argument(
"--max_cache_size",
type=humanfriendly.parse_size,
default=0.0,
help="The maximum cache size for data loader. e.g. 10MB, 20GB.",
)
group.add_argument(
"--valid_max_cache_size",
type=humanfriendly_parse_size_or_none,
default=None,
help="The maximum cache size for validation data loader. e.g. 10MB, 20GB. "
"If None, the 5 percent size of --max_cache_size",
)
group = parser.add_argument_group("Optimizer related")
for i in range(1, cls.num_optimizers + 1):
suf = "" if i == 1 else str(i)
group.add_argument(
f"--optim{suf}",
type=lambda x: x.lower(),
default="adadelta",
choices=list(optim_classes),
help="The optimizer type",
)
group.add_argument(
f"--optim{suf}_conf",
action=NestedDictAction,
default=dict(),
help="The keyword arguments for optimizer",
)
group.add_argument(
f"--scheduler{suf}",
type=lambda x: str_or_none(x.lower()),
default=None,
choices=list(scheduler_classes) + [None],
help="The lr scheduler type",
)
group.add_argument(
f"--scheduler{suf}_conf",
action=NestedDictAction,
default=dict(),
help="The keyword arguments for lr scheduler",
)
cls.trainer.add_arguments(parser)
cls.add_task_arguments(parser)
assert check_return_type(parser)
return parser
@classmethod
def build_optimizers(
    cls, args: argparse.Namespace, model: torch.nn.Module,
) -> List[torch.optim.Optimizer]:
    """Build the single optimizer selected by --optim / --optim_conf.

    Tasks that declare more than one optimizer must override this method.
    """
    # The default implementation can only construct one optimizer.
    if cls.num_optimizers != 1:
        raise RuntimeError(
            "build_optimizers() must be overridden if num_optimizers != 1"
        )

    chosen = optim_classes.get(args.optim)
    if chosen is None:
        raise ValueError(f"must be one of {list(optim_classes)}: {args.optim}")
    # Instantiate with the keyword arguments supplied via --optim_conf.
    return [chosen(model.parameters(), **args.optim_conf)]
@classmethod
def exclude_opts(cls) -> Tuple[str, ...]:
    """Return the option names that --print_config must not show."""
    # These are command-line bookkeeping options, not model configuration.
    return ("required", "print_config", "config", "ngpu")
@classmethod
def get_default_config(cls) -> Dict[str, Any]:
    """Return the configuration as dict.
    This method is used by print_config()
    """

    def lookup(name: str, classes: dict):
        # Resolve a class by name, failing with the list of valid choices.
        klass = classes.get(name)
        if klass is None:
            raise ValueError(f"must be one of {list(classes)}: {name}")
        return klass

    # This method is used only for --print_config
    assert check_argument_types()
    parser = cls.get_parser()
    parsed, _ = parser.parse_known_args()
    config = vars(parsed)

    # Excludes the options not to be shown
    for key in AbsTask.exclude_opts():
        config.pop(key)

    for idx in range(1, cls.num_optimizers + 1):
        suffix = "" if idx == 1 else str(idx)

        # Merge the optimizer's default kwargs with the user-given ones;
        # the user-given values win.
        optim_name = config[f"optim{suffix}"]
        merged = get_default_kwargs(lookup(optim_name, optim_classes))
        merged.update(config[f"optim{suffix}_conf"])
        config[f"optim{suffix}_conf"] = merged

        # Same treatment for the lr scheduler, which may be disabled (None).
        sched_name = config[f"scheduler{suffix}"]
        if sched_name is not None:
            merged = get_default_kwargs(lookup(sched_name, scheduler_classes))
            merged.update(config[f"scheduler{suffix}_conf"])
            config[f"scheduler{suffix}_conf"] = merged

    # Apply the same default-merging to every registered class-choice
    # (e.g. encoder/decoder/frontend selections).
    for class_choices in cls.class_choices_list:
        if getattr(parsed, class_choices.name) is not None:
            class_obj = class_choices.get_class(getattr(parsed, class_choices.name))
            merged = get_default_kwargs(class_obj)
            merged.update(config[f"{class_choices.name}_conf"])
            config[f"{class_choices.name}_conf"] = merged

    return config
@classmethod
def check_required_command_args(cls, args: argparse.Namespace):
    """Validate the parsed command-line arguments.

    Raises RuntimeError for malformed setups; prints the parser help and
    exits with status 2 when a required option was left unset.
    """
    assert check_argument_types()

    # Option names must use underscores; a dash indicates a coding error
    # in how the parser was declared.
    for key in vars(args):
        if "-" in key:
            raise RuntimeError(f'Use "_" instead of "-": parser.get_parser("{key}")')

    if len(args.pretrain_path) != len(args.pretrain_key):
        raise RuntimeError(
            "The number of --pretrain_path and --pretrain_key must be same"
        )

    # Collect the required options that were left unset (None).
    missing = [a for a in args.required if getattr(args, a) is None]
    if missing:
        required = ", ".join(f"--{a}" for a in missing)
        # Mimic argparse's own error reporting: help + message on stderr.
        parser = cls.get_parser()
        parser.print_help(file=sys.stderr)
        p = Path(sys.argv[0]).name
        print(file=sys.stderr)
        print(
            f"{p}: error: the following arguments are required: " f"{required}",
            file=sys.stderr,
        )
        sys.exit(2)
@classmethod
def check_task_requirements(
    cls,
    dataset: Union[ESPnetDataset, IterableESPnetDataset],
    allow_variable_data_keys: bool,
    inference: bool = False,
) -> None:
    """Check if the dataset satisfy the requirement of current Task"""
    assert check_argument_types()
    mes = (
        f"If you intend to use an additional input, modify "
        f'"{cls.__name__}.required_data_names()" or '
        f'"{cls.__name__}.optional_data_names()". '
        f"Otherwise you need to set --allow_variable_data_keys true "
    )

    # Every name the task requires must be present in the dataset.
    for name in cls.required_data_names(inference):
        if not dataset.has_name(name):
            raise RuntimeError(
                f'"{cls.required_data_names(inference)}" are required for'
                f' {cls.__name__}. but "{dataset.names()}" are input.\n{mes}'
            )

    if allow_variable_data_keys:
        # Extra dataset keys are explicitly permitted.
        return

    # Otherwise every dataset name must be either required or optional.
    task_keys = cls.required_data_names(inference) + cls.optional_data_names(
        inference
    )
    for name in dataset.names():
        if name not in task_keys:
            raise RuntimeError(
                f"The data-name must be one of {task_keys} "
                f'for {cls.__name__}: "{name}" is not allowed.\n{mes}'
            )
@staticmethod
def resume(
    checkpoint: Union[str, Path],
    model: torch.nn.Module,
    reporter: Reporter,
    optimizers: Sequence[torch.optim.Optimizer],
    schedulers: Sequence[Optional[AbsScheduler]],
    ngpu: int = 0,
    use_apex: bool = False,
):
    """Restore the training state from a checkpoint file.

    Loads the model, reporter, optimizer and scheduler states saved by the
    trainer, and the apex AMP state when mixed-precision training is used.

    Args:
        checkpoint: Path to the checkpoint (e.g. "exp/.../checkpoint.pth").
        model: Model whose parameters are restored in place.
        reporter: Training reporter whose history is restored in place.
        optimizers: Optimizers matching those saved in the checkpoint.
        schedulers: LR schedulers matching those saved; entries may be None.
        ngpu: If > 0, tensors are mapped onto the current CUDA device.
        use_apex: Whether the apex AMP state should be restored as well.
    """
    states = torch.load(
        checkpoint,
        map_location=f"cuda:{torch.cuda.current_device()}" if ngpu > 0 else "cpu",
    )
    model.load_state_dict(states["model"])
    reporter.load_state_dict(states["reporter"])
    for optimizer, state in zip(optimizers, states["optimizers"]):
        optimizer.load_state_dict(state)
    for scheduler, state in zip(schedulers, states["schedulers"]):
        # Schedulers are optional; None entries carry no state.
        if scheduler is not None:
            scheduler.load_state_dict(state)

    if use_apex and states["amp"] is not None:
        try:
            from apex import amp
        except ImportError:
            logging.error(
                "You need to install apex. "
                "See https://github.com/NVIDIA/apex#linux"
            )
            # BUGFIX: re-raise instead of falling through. Previously the
            # code continued to amp.load_state_dict() below and crashed
            # with a NameError that masked the real cause (apex missing).
            # This matches the identical import guard in main_worker().
            raise
        amp.load_state_dict(states["amp"])

    logging.info(f"The training was resumed using {checkpoint}")
@classmethod
def print_config(cls, file=sys.stdout) -> None:
    """Write the default configuration as YAML to *file*.

    Shows the config: e.g. python train.py asr --print_config
    """
    assert check_argument_types()
    dumped = yaml_no_alias_safe_dump(
        cls.get_default_config(), indent=4, sort_keys=False
    )
    file.write(dumped)
@classmethod
def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None):
    """Entry point of the task.

    Parses command-line arguments (unless ``args`` is given), handles
    --print_config, then either runs main_worker() in-process or spawns
    one child process per GPU in multiprocessing-distributed mode.
    """
    if cls.num_optimizers != cls.trainer.num_optimizers:
        raise RuntimeError(
            f"Task.num_optimizers != Task.trainer.num_optimizers: "
            f"{cls.num_optimizers} != {cls.trainer.num_optimizers}"
        )
    assert check_argument_types()
    print(get_commandline_args(), file=sys.stderr)

    if args is None:
        parser = cls.get_parser()
        args = parser.parse_args(cmd)
    if args.print_config:
        cls.print_config()
        sys.exit(0)
    cls.check_required_command_args(args)

    # "distributed" is decided using the other command args
    resolve_distributed_mode(args)
    if not args.distributed or not args.multiprocessing_distributed:
        # Single process (possibly launcher-managed distributed) path.
        cls.main_worker(args)
    else:
        assert args.ngpu > 1, args.ngpu
        # Multi-processing distributed mode: e.g. 2node-4process-4GPU
        # |   Host1     |    Host2    |
        # |   Process1  |   Process2  |  <= Spawn processes
        # |Child1|Child2|Child1|Child2|
        # |GPU1  |GPU2  |GPU1  |GPU2  |

        # See also the following usage of --multiprocessing-distributed:
        # https://github.com/pytorch/examples/blob/master/imagenet/main.py
        num_nodes = get_num_nodes(args.dist_world_size, args.dist_launcher)
        if num_nodes == 1:
            args.dist_master_addr = "localhost"
            args.dist_rank = 0
            # Single node distributed training with multi-GPUs
            if (
                args.dist_init_method == "env://"
                and get_master_port(args.dist_master_port) is None
            ):
                # Get the unused port
                args.dist_master_port = free_port()

        # Assume that nodes use same number of GPUs each other
        args.dist_world_size = args.ngpu * num_nodes
        node_rank = get_node_rank(args.dist_rank, args.dist_launcher)

        # The following block is copied from:
        # https://github.com/pytorch/pytorch/blob/master/torch/multiprocessing/spawn.py
        error_queues = []
        processes = []
        mp = torch.multiprocessing.get_context("spawn")
        for i in range(args.ngpu):
            # Copy args
            local_args = argparse.Namespace(**vars(args))
            # Each child owns exactly one local GPU and a unique global rank.
            local_args.local_rank = i
            local_args.dist_rank = args.ngpu * node_rank + i
            local_args.ngpu = 1

            process = mp.Process(
                target=cls.main_worker, args=(local_args,), daemon=False,
            )
            process.start()
            processes.append(process)
            error_queues.append(mp.SimpleQueue())
        # NOTE(review): the error queues are created empty and never handed
        # to the child processes, so join() can only surface non-zero exit
        # codes, not pickled exceptions — confirm this is intended.
        # Loop on join until it returns True or raises an exception.
        while not ProcessContext(processes, error_queues).join():
            pass
@classmethod
def main_worker(cls, args: argparse.Namespace):
    """Run the whole training pipeline in one worker process.

    Steps: init distributed state, configure logging, seed RNGs, build
    the model/optimizers/schedulers, dump config.yaml, load pre-trained
    weights, resume from a checkpoint, then run either collect-stats
    mode or the trainer.  The statement order is deliberate; several
    steps mutate ``args`` before it is dumped.
    """
    assert check_argument_types()

    # 0. Init distributed process
    distributed_option = build_dataclass(DistributedOption, args)
    distributed_option.init()

    # NOTE(kamo): Don't use logging before invoking logging.basicConfig()
    if not distributed_option.distributed or distributed_option.dist_rank == 0:
        if not distributed_option.distributed:
            _rank = ""
        else:
            # Tag log lines with "rank/world_size" in distributed runs.
            _rank = (
                f":{distributed_option.dist_rank}/"
                f"{distributed_option.dist_world_size}"
            )

        # NOTE(kamo):
        # logging.basicConfig() is invoked in main_worker() instead of main()
        # because it can be invoked only once in a process.
        # FIXME(kamo): Should we use logging.getLogger()?
        logging.basicConfig(
            level=args.log_level,
            format=f"[{os.uname()[1].split('.')[0]}{_rank}]"
            f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        # Suppress logging if RANK != 0
        logging.basicConfig(
            level="ERROR",
            format=f"[{os.uname()[1].split('.')[0]}"
            f":{distributed_option.dist_rank}/{distributed_option.dist_world_size}]"
            f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )

    # 1. Set random-seed
    set_all_random_seed(args.seed)
    torch.backends.cudnn.enabled = args.cudnn_enabled
    torch.backends.cudnn.benchmark = args.cudnn_benchmark
    torch.backends.cudnn.deterministic = args.cudnn_deterministic

    # 2. Build model
    model = cls.build_model(args=args)
    if not isinstance(model, AbsESPnetModel):
        raise RuntimeError(
            f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
        )
    if args.train_dtype in ("float16", "float32", "float64"):
        dtype = getattr(torch, args.train_dtype)
    else:
        # apex opt-levels ("O0".."O3") keep the model in float32 here;
        # amp.initialize() below handles the precision conversion.
        dtype = torch.float32
    model = model.to(dtype=dtype, device="cuda" if args.ngpu > 0 else "cpu")

    # 3. Build optimizer
    optimizers = cls.build_optimizers(args, model=model)

    # For apex support
    use_apex = args.train_dtype in ("O0", "O1", "O2", "O3")
    if use_apex:
        try:
            from apex import amp
        except ImportError:
            logging.error(
                "You need to install apex. "
                "See https://github.com/NVIDIA/apex#linux"
            )
            raise
        model, optimizers = amp.initialize(
            model, optimizers, opt_level=args.train_dtype
        )

    # 4. Build schedulers
    schedulers = []
    for i, optim in enumerate(optimizers, 1):
        # Options are suffixed per optimizer: --scheduler, --scheduler2, ...
        suf = "" if i == 1 else str(i)
        name = getattr(args, f"scheduler{suf}")
        conf = getattr(args, f"scheduler{suf}_conf")
        if name is not None:
            cls_ = scheduler_classes.get(name)
            if cls_ is None:
                raise ValueError(
                    f"must be one of {list(scheduler_classes)}: {name}"
                )
            scheduler = cls_(optim, **conf)
        else:
            scheduler = None

        schedulers.append(scheduler)

    logging.info(pytorch_cudnn_version())
    logging.info(model_summary(model))
    for i, (o, s) in enumerate(zip(optimizers, schedulers), 1):
        suf = "" if i == 1 else str(i)
        logging.info(f"Optimizer{suf}:\n{o}")
        logging.info(f"Scheduler{suf}: {s}")

    # 5. Dump "args" to config.yaml
    # NOTE(kamo): "args" should be saved after object-buildings are done
    #  because they are allowed to modify "args".
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    with (output_dir / "config.yaml").open("w", encoding="utf-8") as f:
        logging.info(f'Saving the configuration in {output_dir / "config.yaml"}')
        yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)

    # 6. Loads pre-trained model
    for p, k in zip(args.pretrain_path, args.pretrain_key):
        load_pretrained_model(
            model=model,
            # Directly specify the model path e.g. exp/train/loss.best.pt
            pretrain_path=p,
            # if pretrain_key is None -> model
            # elif pretrain_key is str e.g. "encoder" -> model.encoder
            pretrain_key=k,
            # NOTE(kamo): "cuda" for torch.load always indicates cuda:0
            #   in PyTorch<=1.4
            map_location=f"cuda:{torch.cuda.current_device()}"
            if args.ngpu > 0
            else "cpu",
        )

    # 7. Resume the training state from the previous epoch
    reporter = Reporter()
    if args.resume and (output_dir / "checkpoint.pth").exists():
        cls.resume(
            checkpoint=output_dir / "checkpoint.pth",
            model=model,
            optimizers=optimizers,
            schedulers=schedulers,
            reporter=reporter,
            ngpu=args.ngpu,
            use_apex=use_apex,
        )

    if args.dry_run:
        # Object construction only; nothing is trained or written.
        pass
    elif args.collect_stats:
        # Perform on collect_stats mode. This mode has two roles
        # - Derive the length and dimension of all input data
        # - Accumulate feats, square values, and the length for whitening
        if args.valid_batch_size is None:
            args.valid_batch_size = args.batch_size

        # The first shape file (if any) determines the iteration keys.
        if len(args.train_shape_file) != 0:
            train_key_file = args.train_shape_file[0]
        else:
            train_key_file = None
        if len(args.valid_shape_file) != 0:
            valid_key_file = args.valid_shape_file[0]
        else:
            valid_key_file = None

        collect_stats(
            model=model,
            train_iter=cls.build_streaming_iterator(
                data_path_and_name_and_type=args.train_data_path_and_name_and_type,
                key_file=train_key_file,
                batch_size=args.batch_size,
                dtype=args.train_dtype,
                num_workers=args.num_workers,
                allow_variable_data_keys=args.allow_variable_data_keys,
                ngpu=args.ngpu,
                preprocess_fn=cls.build_preprocess_fn(args, train=False),
                collate_fn=cls.build_collate_fn(args),
            ),
            valid_iter=cls.build_streaming_iterator(
                data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
                key_file=valid_key_file,
                batch_size=args.valid_batch_size,
                dtype=args.train_dtype,
                num_workers=args.num_workers,
                allow_variable_data_keys=args.allow_variable_data_keys,
                ngpu=args.ngpu,
                preprocess_fn=cls.build_preprocess_fn(args, train=False),
                collate_fn=cls.build_collate_fn(args),
            ),
            output_dir=output_dir,
            ngpu=args.ngpu,
            log_interval=args.log_interval,
            write_collected_feats=args.write_collected_feats,
        )
    else:
        # 8. Build iterator factories
        # Arguments common to the train/valid/plot factories below.
        common_iter_kwargs = dict(
            iterator_type=args.iterator_type,
            train_dtype=args.train_dtype,
            num_workers=args.num_workers,
            seed=args.seed,
            allow_variable_data_keys=args.allow_variable_data_keys,
            ngpu=args.ngpu,
            fold_length=args.fold_length,
            sort_in_batch=args.sort_in_batch,
            sort_batch=args.sort_batch,
            chunk_length=args.chunk_length,
            chunk_shift_ratio=args.chunk_shift_ratio,
            num_cache_chunks=args.num_cache_chunks,
        )
        train_iter_factory = cls.build_iter_factory(
            data_path_and_name_and_type=args.train_data_path_and_name_and_type,
            shape_files=args.train_shape_file,
            batch_size=args.batch_size,
            batch_bins=args.batch_bins,
            batch_type=args.batch_type,
            train=not args.collect_stats,
            multiple_iterator=args.multiple_iterator,
            preprocess_fn=cls.build_preprocess_fn(args, train=True),
            collate_fn=cls.build_collate_fn(args),
            num_iters_per_epoch=args.num_iters_per_epoch,
            max_cache_size=args.max_cache_size,
            distributed=distributed_option.distributed,
            name="train",
            **common_iter_kwargs,
        )

        # Validation settings fall back to their training counterparts.
        if args.valid_batch_type is None:
            args.valid_batch_type = args.batch_type
        if args.valid_batch_size is None:
            args.valid_batch_size = args.batch_size
        if args.valid_batch_bins is None:
            args.valid_batch_bins = args.batch_bins
        if args.valid_max_cache_size is None:
            # Cache 5% of maximum size for validation loader
            args.valid_max_cache_size = 0.05 * args.max_cache_size
        valid_iter_factory = cls.build_iter_factory(
            data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
            shape_files=args.valid_shape_file,
            batch_size=args.valid_batch_size,
            batch_bins=args.valid_batch_bins,
            batch_type=args.batch_type,
            train=False,
            multiple_iterator=False,
            preprocess_fn=cls.build_preprocess_fn(args, train=False),
            collate_fn=cls.build_collate_fn(args),
            num_iters_per_epoch=None,
            max_cache_size=args.valid_max_cache_size,
            distributed=distributed_option.distributed,
            name="valid",
            **common_iter_kwargs,
        )
        if args.num_att_plot != 0:
            plot_attention_iter_factory = cls.build_iter_factory(
                data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
                shape_files=args.valid_shape_file,
                batch_type="unsorted",
                batch_size=1,
                batch_bins=0,
                train=False,
                multiple_iterator=False,
                preprocess_fn=cls.build_preprocess_fn(args, train=False),
                collate_fn=cls.build_collate_fn(args),
                num_batches=args.num_att_plot,
                num_iters_per_epoch=None,
                # num_att_plot should be a few sample ~ 3, so cache all data.
                max_cache_size=np.inf if args.max_cache_size != 0.0 else 0.0,
                # always False because plot_attention performs on RANK0
                distributed=False,
                name="plot_att",
                **common_iter_kwargs,
            )
        else:
            plot_attention_iter_factory = None

        # 9. Start training
        # Don't give args to trainer.run() directly!!!
        # Instead of it, define "Options" object and build here.
        trainer_options = cls.trainer.build_options(args)
        cls.trainer.run(
            model=model,
            optimizers=optimizers,
            schedulers=schedulers,
            train_iter_factory=train_iter_factory,
            valid_iter_factory=valid_iter_factory,
            plot_attention_iter_factory=plot_attention_iter_factory,
            reporter=reporter,
            output_dir=output_dir,
            max_epoch=args.max_epoch,
            seed=args.seed,
            patience=args.patience,
            keep_nbest_models=args.keep_nbest_models,
            early_stopping_criterion=args.early_stopping_criterion,
            best_model_criterion=args.best_model_criterion,
            val_scheduler_criterion=args.val_scheduler_criterion,
            trainer_options=trainer_options,
            distributed_option=distributed_option,
        )

        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            # Generated n-best averaged model
            average_nbest_models(
                reporter=reporter,
                output_dir=output_dir,
                best_model_criterion=args.best_model_criterion,
                nbest=args.keep_nbest_models,
            )
@classmethod
def build_iter_factory(
    cls,
    iterator_type: str,
    batch_size: int,
    batch_bins: int,
    preprocess_fn,
    collate_fn,
    train_dtype: str,
    num_workers: int,
    seed: int,
    allow_variable_data_keys: bool,
    ngpu: int,
    data_path_and_name_and_type,
    shape_files: Union[Tuple[str, ...], List[str]],
    batch_type: str,
    train: bool,
    num_iters_per_epoch: Optional[int],
    max_cache_size: float,
    distributed: bool,
    name: str,
    fold_length: Sequence[int],
    sort_in_batch: str,
    sort_batch: str,
    chunk_length: Union[int, str],
    chunk_shift_ratio: float,
    num_cache_chunks: int,
    multiple_iterator: bool,
    num_batches: int = None,
) -> AbsIterFactory:
    """Build a factory object of mini-batch iterator.

    This object is invoked at every epochs to build the iterator for each epoch
    as following:

    >>> iter_factory = cls.build_iter_factory(...)
    >>> for epoch in range(1, max_epoch):
    ...     for keys, batch in iter_fatory.build_iter(epoch):
    ...         model(**batch)

    The mini-batches for each epochs are fully controlled by this class.
    Note that the random seed used for shuffling is decided as "seed + epoch" and
    the generated mini-batches can be reproduces when resuming.

    Note that the definition of "epoch" doesn't always indicate
    to run out of the whole training corpus.
    "--num_iters_per_epoch" option restricts the number of iterations for each epoch
    and the rest of samples for the originally epoch are left for the next epoch.
    e.g. If The number of mini-batches equals to 4, the following two are same:

    - 1 epoch without "--num_iters_per_epoch"
    - 4 epoch with "--num_iters_per_epoch" == 4

    This method only dispatches: it forwards the arguments to one of
    build_multiple_iter_factroy / build_sequence_iter_factory /
    build_chunk_iter_factory depending on ``multiple_iterator`` and
    ``iterator_type``.
    """
    assert check_argument_types()
    # Arguments shared by every concrete factory builder.
    kwargs = dict(
        data_path_and_name_and_type=data_path_and_name_and_type,
        shape_files=shape_files,
        train=train,
        preprocess_fn=preprocess_fn,
        collate_fn=collate_fn,
        num_batches=num_batches,
        num_iters_per_epoch=num_iters_per_epoch,
        max_cache_size=max_cache_size,
        distributed=distributed,
        name=name,
        batch_size=batch_size,
        train_dtype=train_dtype,
        num_workers=num_workers,
        seed=seed,
        allow_variable_data_keys=allow_variable_data_keys,
        ngpu=ngpu,
    )

    if multiple_iterator:
        # Corpus was split into sub-directories: one factory per split.
        # NOTE(review): the callee's name is misspelled ("factroy") in the
        # original API; kept as-is because callers depend on it.
        return cls.build_multiple_iter_factroy(
            **kwargs,
            # multiple_iterator=False prevents infinite recursion when the
            # per-split factories are built via this method again.
            multiple_iterator=False,
            iterator_type=iterator_type,
            batch_type=batch_type,
            batch_bins=batch_bins,
            fold_length=fold_length,
            sort_in_batch=sort_in_batch,
            sort_batch=sort_batch,
            chunk_length=chunk_length,
            chunk_shift_ratio=chunk_shift_ratio,
            num_cache_chunks=num_cache_chunks,
        )

    elif iterator_type == "sequence":
        return cls.build_sequence_iter_factory(
            **kwargs,
            batch_type=batch_type,
            batch_bins=batch_bins,
            fold_length=fold_length,
            sort_in_batch=sort_in_batch,
            sort_batch=sort_batch,
        )
    elif iterator_type == "chunk":
        return cls.build_chunk_iter_factory(
            **kwargs,
            chunk_length=chunk_length,
            chunk_shift_ratio=chunk_shift_ratio,
            num_cache_chunks=num_cache_chunks,
        )
    else:
        raise RuntimeError(f"Not supported: iterator_type={iterator_type}")
@classmethod
def build_sequence_iter_factory(
    cls,
    data_path_and_name_and_type,
    shape_files: Union[Tuple[str, ...], List[str]],
    batch_type: str,
    train: bool,
    preprocess_fn,
    batch_size: int,
    batch_bins: int,
    collate_fn,
    train_dtype: str,
    fold_length: Sequence[int],
    num_workers: int,
    sort_in_batch: str,
    sort_batch: str,
    seed: int,
    allow_variable_data_keys: bool,
    ngpu: int,
    num_batches: Optional[int],
    num_iters_per_epoch: Optional[int],
    max_cache_size: float,
    distributed: bool,
    name: str,
) -> AbsIterFactory:
    """Build a SequenceIterFactory: whole-utterance mini-batch iteration."""
    assert check_argument_types()
    # apex opt-levels ("O0".."O3") are treated as float32 for data loading.
    if train_dtype in ("float32", "O0", "O1", "O2", "O3"):
        train_dtype = "float32"

    dataset = ESPnetDataset(
        data_path_and_name_and_type,
        float_dtype=train_dtype,
        preprocess=preprocess_fn,
        max_cache_size=max_cache_size,
    )
    cls.check_task_requirements(dataset, allow_variable_data_keys)

    batch_sampler = build_batch_sampler(
        type=batch_type,
        shape_files=shape_files,
        fold_lengths=fold_length,
        batch_size=batch_size,
        batch_bins=batch_bins,
        sort_in_batch=sort_in_batch,
        sort_batch=sort_batch,
        drop_last=False,
        # In distributed mode each rank must receive at least one sample
        # per batch (batches are split across ranks below).
        min_batch_size=torch.distributed.get_world_size() if distributed else 1,
    )

    # Materialize all batches once so they can be truncated/split.
    batches = list(batch_sampler)
    if num_batches is not None:
        batches = batches[:num_batches]

    bs_list = [len(batch) for batch in batches]

    logging.info(f"[{name}] dataset:\n{dataset}")
    logging.info(f"[{name}] Batch sampler: {batch_sampler}")
    logging.info(
        f"[{name}] mini-batch sizes summary: N-batch={len(bs_list)}, "
        f"mean={np.mean(bs_list):.1f}, min={np.min(bs_list)}, max={np.max(bs_list)}"
    )

    if distributed:
        # Shard every batch across ranks by striding; requires each batch
        # to hold at least world_size samples.
        world_size = torch.distributed.get_world_size()
        rank = torch.distributed.get_rank()
        for batch in batches:
            if len(batch) < world_size:
                raise RuntimeError(
                    f"The batch-size must be equal or more than world_size: "
                    f"{len(batch)} < {world_size}"
                )
        batches = [batch[rank::world_size] for batch in batches]

    return SequenceIterFactory(
        dataset=dataset,
        batches=batches,
        seed=seed,
        num_iters_per_epoch=num_iters_per_epoch,
        shuffle=train,
        num_workers=num_workers,
        collate_fn=collate_fn,
        pin_memory=ngpu > 0,
    )
@classmethod
def build_chunk_iter_factory(
    cls,
    data_path_and_name_and_type,
    shape_files: Union[Tuple[str, ...], List[str]],
    train: bool,
    preprocess_fn,
    collate_fn,
    train_dtype: str,
    num_workers: int,
    seed: int,
    allow_variable_data_keys: bool,
    batch_size: int,
    ngpu: int,
    chunk_length: Union[int, str],
    chunk_shift_ratio: float,
    num_cache_chunks: int,
    num_batches: Optional[int],
    num_iters_per_epoch: Optional[int],
    max_cache_size: float,
    distributed: bool,
    name: str,
) -> AbsIterFactory:
    """Build a ChunkIterFactory: fixed-length chunk mini-batch iteration."""
    assert check_argument_types()
    # apex opt-levels ("O0".."O3") are treated as float32 for data loading.
    if train_dtype in ("float32", "O0", "O1", "O2", "O3"):
        train_dtype = "float32"

    dataset = ESPnetDataset(
        data_path_and_name_and_type,
        float_dtype=train_dtype,
        preprocess=preprocess_fn,
        max_cache_size=max_cache_size,
    )
    cls.check_task_requirements(dataset, allow_variable_data_keys)

    # Any file listing the sample keys works here; prefer the shape file.
    if len(shape_files) == 0:
        key_file = data_path_and_name_and_type[0][0]
    else:
        key_file = shape_files[0]

    # One sample per "batch": chunking into real batches happens inside
    # ChunkIterFactory.
    batch_sampler = UnsortedBatchSampler(batch_size=1, key_file=key_file)
    batches = list(batch_sampler)
    if num_batches is not None:
        batches = batches[:num_batches]
    logging.info(f"[{name}] dataset:\n{dataset}")

    if distributed:
        world_size = torch.distributed.get_world_size()
        rank = torch.distributed.get_rank()
        if len(batches) < world_size:
            raise RuntimeError("Number of samples is smaller than world_size")
        if batch_size < world_size:
            raise RuntimeError("batch_size must be equal or more than world_size")

        # Divide the per-step batch size across ranks; the first
        # (batch_size % world_size) ranks take one extra sample.
        if rank < batch_size % world_size:
            batch_size = batch_size // world_size + 1
        else:
            batch_size = batch_size // world_size
        num_cache_chunks = num_cache_chunks // world_size
        # NOTE(kamo): Split whole corpus by sample numbers without considering
        #   each of the lengths, therefore the number of iteration counts are not
        #   always equal to each other and the iterations are limitted
        #   by the fewest iterations.
        #   i.e. the samples over the counts are discarded.
        batches = batches[rank::world_size]

    return ChunkIterFactory(
        dataset=dataset,
        batches=batches,
        seed=seed,
        # For chunk iterator,
        # --num_iters_per_epoch doesn't indicate the number of iterations,
        # but indicates the number of samples.
        num_samples_per_epoch=num_iters_per_epoch,
        shuffle=train,
        num_workers=num_workers,
        collate_fn=collate_fn,
        pin_memory=ngpu > 0,
        batch_size=batch_size,
        chunk_length=chunk_length,
        chunk_shift_ratio=chunk_shift_ratio,
        num_cache_chunks=num_cache_chunks,
    )
@classmethod
def build_multiple_iter_factroy(
    cls,
    data_path_and_name_and_type,
    shape_files: Union[Tuple[str, ...], List[str]],
    train: bool,
    num_iters_per_epoch: Optional[int],
    max_cache_size: float,
    seed: int,
    **kwargs,
):
    """Build a MultipleIterFactory over a corpus split into sub-directories.

    Every data/shape path must be a directory containing a "num_splits"
    file and "split.{i}" sub-directories; one sequence/chunk factory is
    built lazily per split and cycled through by MultipleIterFactory.

    Note: the method name is historically misspelled ("factroy"); a
    correctly spelled alias is provided below for new code.
    """
    assert check_argument_types()
    assert len(data_path_and_name_and_type) > 0, len(data_path_and_name_and_type)

    # 1. Sanity check: all dirs exist and agree on the number of splits.
    num_splits = None
    for path in [path for path, _, _ in data_path_and_name_and_type] + list(
        shape_files
    ):
        if not Path(path).is_dir():
            raise RuntimeError(f"{path} is not a directory")
        p = Path(path) / "num_splits"
        if not p.exists():
            raise FileNotFoundError(f"{p} is not found")
        with p.open() as f:
            _num_splits = int(f.read())
            if num_splits is not None and num_splits != _num_splits:
                # BUGFIX: error message typo corrected ("mismathed").
                raise RuntimeError(
                    f"Number of splits are mismatched: "
                    f"{data_path_and_name_and_type[0][0]} and {path}"
                )
            num_splits = _num_splits

        for i in range(num_splits):
            p = Path(path) / f"split.{i}"
            if not p.exists():
                raise FileNotFoundError(f"{p} is not found")

    # 2. Create functions to build an iter factory for each splits
    data_path_and_name_and_type_list = [
        [
            (str(Path(p) / f"split.{i}"), n, t)
            for p, n, t in data_path_and_name_and_type
        ]
        for i in range(num_splits)
    ]
    shape_files_list = [
        [str(Path(s) / f"split.{i}") for s in shape_files]
        for i in range(num_splits)
    ]
    # Distribute --num_iters_per_epoch over the splits, spreading the
    # remainder across the first few splits.
    num_iters_per_epoch_list = [
        (num_iters_per_epoch + i) // num_splits
        if num_iters_per_epoch is not None
        else None
        for i in range(num_splits)
    ]
    # Each split only needs a proportional share of the cache budget.
    max_cache_size = max_cache_size / num_splits

    # Note that iter-factories are built for each epoch at runtime lazily.
    build_funcs = [
        functools.partial(
            cls.build_iter_factory,
            data_path_and_name_and_type=_data_path_and_name_and_type,
            shape_files=_shape_files,
            num_iters_per_epoch=_num_iters_per_epoch,
            max_cache_size=max_cache_size,
            seed=seed,
            train=train,
            **kwargs,
        )
        for (
            _data_path_and_name_and_type,
            _shape_files,
            _num_iters_per_epoch,
        ) in zip(
            data_path_and_name_and_type_list,
            shape_files_list,
            num_iters_per_epoch_list,
        )
    ]

    # 3. Build MultipleIterFactory
    return MultipleIterFactory(build_funcs=build_funcs, shuffle=train, seed=seed,)

# Backward-compatible, correctly spelled alias for the misspelled public
# name above; existing callers of build_multiple_iter_factroy still work.
build_multiple_iter_factory = build_multiple_iter_factroy
@classmethod
def build_streaming_iterator(
    cls,
    data_path_and_name_and_type,
    preprocess_fn,
    collate_fn,
    key_file: str = None,
    batch_size: int = 1,
    dtype: str = np.float32,
    num_workers: int = 1,
    allow_variable_data_keys: bool = False,
    ngpu: int = 0,
    inference: bool = False,
) -> DataLoader:
    """Build DataLoader using iterable dataset.

    Used by collect-stats mode and inference; unlike the epoch-based
    factories, this yields data in a single streaming pass.
    """
    assert check_argument_types()
    # apex opt-levels ("O0".."O3") are treated as float32 for data loading.
    if dtype in ("float32", "O0", "O1", "O2", "O3"):
        dtype = "float32"

    # For backward compatibility for pytorch DataLoader
    if collate_fn is not None:
        kwargs = dict(collate_fn=collate_fn)
    else:
        kwargs = {}

    # IterableDataset is supported from pytorch=1.2
    if LooseVersion(torch.__version__) >= LooseVersion("1.2"):
        dataset = IterableESPnetDataset(
            data_path_and_name_and_type,
            float_dtype=dtype,
            preprocess=preprocess_fn,
            key_file=key_file,
        )
        # Iterable datasets take batch_size directly (no sampler).
        kwargs.update(batch_size=batch_size)
    else:
        # Fallback for old pytorch: map-style dataset + unsorted sampler.
        dataset = ESPnetDataset(
            data_path_and_name_and_type,
            float_dtype=dtype,
            preprocess=preprocess_fn,
        )
        if key_file is None:
            key_file = data_path_and_name_and_type[0][0]
        batch_sampler = UnsortedBatchSampler(
            batch_size=batch_size, key_file=key_file, drop_last=False,
        )
        kwargs.update(batch_sampler=batch_sampler)

    cls.check_task_requirements(dataset, allow_variable_data_keys, inference)

    return DataLoader(
        dataset=dataset, pin_memory=ngpu > 0, num_workers=num_workers, **kwargs,
    )
# ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
@classmethod
def build_model_from_file(
    cls,
    config_file: Union[Path, str],
    model_file: Union[Path, str] = None,
    device: str = "cpu",
) -> Tuple[AbsESPnetModel, argparse.Namespace]:
    """This method is used for inference or fine-tuning.

    Args:
        config_file: The yaml file saved when training.
        model_file: The model file saved when training.
        device: Device onto which the model is moved (e.g. "cpu", "cuda").
    """
    assert check_argument_types()

    # Recreate the training-time argument namespace from config.yaml.
    with Path(config_file).open("r", encoding="utf-8") as stream:
        loaded = yaml.safe_load(stream)
    args = argparse.Namespace(**loaded)

    model = cls.build_model(args)
    if not isinstance(model, AbsESPnetModel):
        raise RuntimeError(
            f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
        )
    model.to(device)

    if model_file is not None:
        if device == "cuda":
            # NOTE(kamo): "cuda" for torch.load always indicates cuda:0
            # in PyTorch<=1.4
            device = f"cuda:{torch.cuda.current_device()}"
        model.load_state_dict(torch.load(model_file, map_location=device))

    return model, args
|
test_dag_serialization.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import copy
import importlib
import importlib.util
import json
import multiprocessing
import os
from datetime import datetime, timedelta
from glob import glob
from unittest import mock
import pendulum
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from airflow.exceptions import SerializationError
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.mappedoperator import MappedOperator
from airflow.models.param import Param, ParamsDict
from airflow.models.xcom import XCom
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import (
SerializedBaseOperator,
SerializedDAG,
SerializedTaskGroup,
)
from airflow.timetables.simple import NullTimetable, OnceTimetable
from airflow.utils import timezone
from airflow.utils.context import Context
from airflow.utils.operator_resources import Resources
from airflow.utils.task_group import TaskGroup
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink, MockOperator
from tests.test_utils.timetables import CustomSerializationTimetable, cron_timetable, delta_timetable
# Pod used as the bash task's ``executor_config`` below; exercises the
# serialization of Kubernetes client objects inside operator attributes.
executor_config_pod = k8s.V1Pod(
    metadata=k8s.V1ObjectMeta(name="my-name"),
    spec=k8s.V1PodSpec(
        containers=[
            k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
        ]
    ),
)

# Expected serialized form of the DAG built by make_simple_dag(); the
# tests compare actual serialization output against this structure.
serialized_simple_dag_ground_truth = {
    "__version": 1,
    "dag": {
        "default_args": {
            "__type": "dict",
            "__var": {
                "depends_on_past": False,
                "retries": 1,
                # timedeltas serialize to total seconds.
                "retry_delay": {"__type": "timedelta", "__var": 300.0},
                "max_retry_delay": {"__type": "timedelta", "__var": 600.0},
                "sla": {"__type": "timedelta", "__var": 100.0},
            },
        },
        # POSIX timestamp of datetime(2019, 8, 1).
        "start_date": 1564617600.0,
        '_task_group': {
            '_group_id': None,
            'prefix_group_id': True,
            'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
            'tooltip': '',
            'ui_color': 'CornflowerBlue',
            'ui_fgcolor': '#000',
            'upstream_group_ids': [],
            'downstream_group_ids': [],
            'upstream_task_ids': [],
            'downstream_task_ids': [],
        },
        "is_paused_upon_creation": False,
        "_dag_id": "simple_dag",
        "doc_md": "### DAG Tutorial Documentation",
        "fileloc": None,
        "tasks": [
            {
                "task_id": "bash_task",
                "owner": "airflow",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "ui_color": "#f0ede4",
                "ui_fgcolor": "#000",
                "template_ext": ['.sh', '.bash'],
                "template_fields": ['bash_command', 'env'],
                "template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
                "bash_command": "echo {{ task.task_id }}",
                "_task_type": "BashOperator",
                "_task_module": "airflow.operators.bash",
                "pool": "default_pool",
                "executor_config": {
                    '__type': 'dict',
                    '__var': {
                        "pod_override": {
                            '__type': 'k8s.V1Pod',
                            '__var': PodGenerator.serialize_pod(executor_config_pod),
                        }
                    },
                },
                "doc_md": "### Task Tutorial Documentation",
            },
            {
                "task_id": "custom_task",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
                "ui_color": "#fff",
                "ui_fgcolor": "#000",
                "template_ext": [],
                "template_fields": ['bash_command'],
                "template_fields_renderers": {},
                "_task_type": "CustomOperator",
                "_task_module": "tests.test_utils.mock_operators",
                "pool": "default_pool",
            },
        ],
        # One day, serialized as seconds.
        "schedule_interval": {"__type": "timedelta", "__var": 86400.0},
        "timezone": "UTC",
        "_access_control": {
            "__type": "dict",
            "__var": {
                "test_role": {
                    "__type": "set",
                    "__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
                }
            },
        },
        "edge_info": {},
        "dag_dependencies": [],
        "params": {},
    },
}

# Repository root (two levels above this test file's directory).
ROOT_FOLDER = os.path.realpath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)

# Expected serialized form of CustomSerializationTimetable("foo").
CUSTOM_TIMETABLE_SERIALIZED = {
    "__type": "tests.test_utils.timetables.CustomSerializationTimetable",
    "__var": {"value": "foo"},
}
def make_example_dags(module_path):
    """Load and return the DAGs found under *module_path* for testing."""
    return DagBag(module_path).dags
def make_simple_dag():
    """Make very simple DAG to verify serialization result.

    Builds the ``simple_dag`` whose serialized form is compared against
    ``serialized_simple_dag_ground_truth`` elsewhere in this module.
    Returns a one-entry mapping of dag_id -> DAG.
    """
    with DAG(
        dag_id='simple_dag',
        default_args={
            "retries": 1,
            "retry_delay": timedelta(minutes=5),
            "max_retry_delay": timedelta(minutes=10),
            "depends_on_past": False,
            "sla": timedelta(seconds=100),
        },
        start_date=datetime(2019, 8, 1),
        is_paused_upon_creation=False,
        access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
        doc_md="### DAG Tutorial Documentation",
    ) as dag:
        # Both operators attach to `dag` via the active DAG context.
        CustomOperator(task_id='custom_task')
        BashOperator(
            task_id='bash_task',
            bash_command='echo {{ task.task_id }}',
            owner='airflow',
            executor_config={"pod_override": executor_config_pod},
            doc_md="### Task Tutorial Documentation",
        )
        return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
    """Make DAGs with user defined macros and filters using locally defined methods.

    For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.

    The examples here test:
        (1) functions can be successfully displayed on UI;
        (2) templates with function macros have been rendered before serialization.
    """

    def compute_next_execution_date(dag, execution_date):
        # Locally defined function used as a macro; not itself serializable.
        return dag.following_schedule(execution_date)

    default_args = {'start_date': datetime(2019, 7, 10)}
    dag = DAG(
        'user_defined_macro_filter_dag',
        default_args=default_args,
        user_defined_macros={
            'next_execution_date': compute_next_execution_date,
        },
        user_defined_filters={'hello': lambda name: f'Hello {name}'},
        catchup=False,
    )
    # The bash_command template references the macro defined above.
    BashOperator(
        task_id='echo',
        bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
        dag=dag,
    )
    return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
    """Collect the DAGs exercised by serialization tests.

    Always includes the locally-built test DAGs; additionally loads example
    DAGs from *dag_folder* (a glob pattern or list of patterns), or from the
    default example-DAG locations when none is given.
    """
    dags = {}
    dags.update(make_simple_dag())
    dags.update(make_user_defined_macro_filter_dag())

    if not dag_folder:
        patterns = [
            "airflow/example_dags",
            "airflow/providers/*/example_dags",
            "airflow/providers/*/*/example_dags",
        ]
    elif isinstance(dag_folder, (list, tuple)):
        patterns = dag_folder
    else:
        patterns = [dag_folder]

    for pattern in patterns:
        for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
            dags.update(make_example_dags(directory))

    # Filter subdags as they are stored in same row in Serialized Dag table
    return {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
def get_timetable_based_simple_dag(timetable):
    """Create a simple_dag variant that uses timetable instead of schedule_interval."""
    simple_dag = collect_dags(["airflow/example_dags"])["simple_dag"]
    simple_dag.schedule_interval = timetable.summary
    simple_dag.timetable = timetable
    return simple_dag
def serialize_subprocess(queue, dag_folder):
    """Serialize each collected DAG to JSON and push it onto *queue*.

    A trailing ``None`` sentinel tells the consumer that all DAGs were sent.
    """
    for dag in collect_dags(dag_folder).values():
        queue.put(SerializedDAG.to_json(dag))
    queue.put(None)
@pytest.fixture()
def timetable_plugin(monkeypatch):
    """Patch plugins manager to always and only return our custom timetable."""
    from airflow import plugins_manager

    # Disable real plugin discovery so only the mapping below is visible.
    monkeypatch.setattr(plugins_manager, "initialize_timetables_plugins", lambda: None)
    monkeypatch.setattr(
        plugins_manager,
        "timetable_classes",
        {"tests.test_utils.timetables.CustomSerializationTimetable": CustomSerializationTimetable},
    )
class TestStringifiedDAGs:
"""Unit tests for stringified DAGs."""
    def setup_method(self):
        """Stub ``BaseHook.get_connection`` so operators that fetch a connection
        during serialization work without a real metadata database."""
        # Keep the original so teardown_method can restore it.
        self.backup_base_hook_get_connection = BaseHook.get_connection
        BaseHook.get_connection = mock.Mock(
            return_value=Connection(
                extra=(
                    '{'
                    '"project_id": "mock", '
                    '"location": "mock", '
                    '"instance": "mock", '
                    '"database_type": "postgres", '
                    '"use_proxy": "False", '
                    '"use_ssl": "False"'
                    '}'
                )
            )
        )
        self.maxDiff = None
    def teardown_method(self):
        """Restore the real ``BaseHook.get_connection`` patched in setup_method."""
        BaseHook.get_connection = self.backup_base_hook_get_connection
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
    @pytest.mark.parametrize(
        "timetable, serialized_timetable",
        [
            (
                cron_timetable("0 0 * * *"),
                {
                    "__type": "airflow.timetables.interval.CronDataIntervalTimetable",
                    "__var": {"expression": "0 0 * * *", "timezone": "UTC"},
                },
            ),
            (
                CustomSerializationTimetable("foo"),
                CUSTOM_TIMETABLE_SERIALIZED,
            ),
        ],
    )
    @pytest.mark.usefixtures("timetable_plugin")
    def test_dag_serialization_to_timetable(self, timetable, serialized_timetable):
        """Verify a timetable-backed schedule_interval is excluded in serialization."""
        dag = get_timetable_based_simple_dag(timetable)
        serialized_dag = SerializedDAG.to_dict(dag)
        SerializedDAG.validate_schema(serialized_dag)

        # The ground truth carries schedule_interval; a timetable replaces it.
        expected = copy.deepcopy(serialized_simple_dag_ground_truth)
        del expected["dag"]["schedule_interval"]
        expected["dag"]["timetable"] = serialized_timetable
        self.validate_serialized_dag(serialized_dag, expected)
    def test_dag_serialization_unregistered_custom_timetable(self):
        """Verify serialization fails without timetable registration."""
        # No timetable_plugin fixture here, so the custom class is unregistered.
        dag = get_timetable_based_simple_dag(CustomSerializationTimetable("bar"))
        with pytest.raises(SerializationError) as ctx:
            SerializedDAG.to_dict(dag)

        message = (
            "Failed to serialize DAG 'simple_dag': Timetable class "
            "'tests.test_utils.timetables.CustomSerializationTimetable' "
            "is not registered"
        )
        assert str(ctx.value) == message
    def validate_serialized_dag(self, json_dag, ground_truth_dag):
        """Verify serialized DAGs match the ground truth.

        NOTE: mutates both arguments (nulls ``fileloc``, sorts tasks and
        access-control entries) before comparing.
        """
        # fileloc depends on the checkout path; verify only the basename, then null it.
        assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
        json_dag['dag']['fileloc'] = None

        def sorted_serialized_dag(dag_dict: dict):
            """
            Sorts the "tasks" list and "access_control" permissions in the
            serialised dag python dictionary. This is needed as the order of
            items should not matter but assertEqual would fail if the order of
            items changes in the dag dictionary
            """
            dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
            dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
                dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
            )
            return dag_dict

        assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
    def test_deserialization_across_process(self):
        """A serialized DAG can be deserialized in another process."""
        # Since we need to parse the dags twice here (once in the subprocess,
        # and once here to get a DAG to compare to) we don't want to load all
        # dags.
        queue = multiprocessing.Queue()
        proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
        proc.daemon = True
        proc.start()

        # Drain the queue; serialize_subprocess sends None as the end sentinel.
        stringified_dags = {}
        while True:
            v = queue.get()
            if v is None:
                break
            dag = SerializedDAG.from_json(v)
            assert isinstance(dag, DAG)
            stringified_dags[dag.dag_id] = dag

        # Parse the same folder locally to have reference DAGs to compare with.
        dags = collect_dags("airflow/example_dags")
        assert set(stringified_dags.keys()) == set(dags.keys())

        # Verify deserialized DAGs.
        for dag_id in stringified_dags:
            self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
    @pytest.mark.parametrize(
        "timetable",
        [cron_timetable("0 0 * * *"), CustomSerializationTimetable("foo")],
    )
    @pytest.mark.usefixtures("timetable_plugin")
    def test_dag_roundtrip_from_timetable(self, timetable):
        """Verify a timetable-backed serialization can be deserialized."""
        dag = get_timetable_based_simple_dag(timetable)
        roundtripped = SerializedDAG.from_json(SerializedDAG.to_json(dag))
        self.validate_deserialized_dag(roundtripped, dag)
    def validate_deserialized_dag(self, serialized_dag, dag):
        """
        Verify that all example DAGs work with DAG Serialization by
        checking fields between Serialized Dags & non-Serialized Dags
        """
        fields_to_check = dag.get_serialized_fields() - {
            # Doesn't implement __eq__ properly. Check manually.
            'timetable',
            'timezone',
            # Need to check fields in it, to exclude functions.
            'default_args',
            "_task_group",
            'params',
        }
        for field in fields_to_check:
            assert getattr(serialized_dag, field) == getattr(
                dag, field
            ), f'{dag.dag_id}.{field} does not match'

        if dag.default_args:
            for k, v in dag.default_args.items():
                if callable(v):
                    # Callables (e.g. callbacks) can't round-trip; check we stored _something_.
                    assert k in serialized_dag.default_args
                else:
                    assert (
                        v == serialized_dag.default_args[k]
                    ), f'{dag.dag_id}.default_args[{k}] does not match'

        # Compare timetable/timezone via their serialized/summary forms (no usable __eq__).
        assert serialized_dag.timetable.summary == dag.timetable.summary
        assert serialized_dag.timetable.serialize() == dag.timetable.serialize()
        assert serialized_dag.timezone.name == dag.timezone.name

        for task_id in dag.task_ids:
            self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
    def validate_deserialized_task(
        self,
        serialized_task,
        task,
    ):
        """Verify non-airflow operators are casted to BaseOperator."""
        assert isinstance(serialized_task, SerializedBaseOperator)
        assert not isinstance(task, SerializedBaseOperator)
        assert isinstance(task, BaseOperator)

        # Every task should have a task_group property -- even if it's the DAG's root task group
        assert serialized_task.task_group

        fields_to_check = task.get_serialized_fields() - {
            # Checked separately
            '_task_type',
            'subdag',
            # Type is excluded, so don't check it
            '_log',
            # List vs tuple. Check separately
            'template_ext',
            'template_fields',
            # We store the string, real dag has the actual code
            'on_failure_callback',
            'on_success_callback',
            'on_retry_callback',
            # Checked separately
            'resources',
            'params',
        }

        assert serialized_task.task_type == task.task_type
        # Compare as sets: serialization may turn tuples into lists.
        assert set(serialized_task.template_ext) == set(task.template_ext)
        assert set(serialized_task.template_fields) == set(task.template_fields)

        assert serialized_task.upstream_task_ids == task.upstream_task_ids
        assert serialized_task.downstream_task_ids == task.downstream_task_ids

        for field in fields_to_check:
            assert getattr(serialized_task, field) == getattr(
                task, field
            ), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'

        if serialized_task.resources is None:
            assert task.resources is None or task.resources == []
        else:
            assert serialized_task.resources == task.resources

        # Ugly hack as some operators override params var in their init
        if isinstance(task.params, ParamsDict):
            assert serialized_task.params.dump() == task.params.dump()

        # Check that for Deserialized task, task.subdag is None for all other Operators
        # except for the SubDagOperator where task.subdag is an instance of DAG object
        if task.task_type == "SubDagOperator":
            assert serialized_task.subdag is not None
            assert isinstance(serialized_task.subdag, DAG)
        else:
            assert serialized_task.subdag is None
    @pytest.mark.parametrize(
        "dag_start_date, task_start_date, expected_task_start_date",
        [
            (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
            ),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
                datetime(2019, 8, 1, tzinfo=timezone.utc),
            ),
            (pendulum.datetime(2019, 8, 1, tz='UTC'), None, pendulum.datetime(2019, 8, 1, tz='UTC')),
        ],
    )
    def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
        """Task start_date is only serialized when it is later than the DAG's."""
        dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
        BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)

        serialized_dag = SerializedDAG.to_dict(dag)
        if not task_start_date or dag_start_date >= task_start_date:
            # If dag.start_date > task.start_date -> task.start_date=dag.start_date
            # because of the logic in dag.add_task()
            assert "start_date" not in serialized_dag["dag"]["tasks"][0]
        else:
            assert "start_date" in serialized_dag["dag"]["tasks"][0]

        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert simple_task.start_date == expected_task_start_date
    def test_deserialization_with_dag_context(self):
        """Serializing while the DAG context manager is still active must not fail."""
        with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
            BaseOperator(task_id='simple_task')
            # should not raise RuntimeError: dictionary changed size during iteration
            SerializedDAG.to_dict(dag)
    @pytest.mark.parametrize(
        "dag_end_date, task_end_date, expected_task_end_date",
        [
            (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
                datetime(2019, 8, 1, tzinfo=timezone.utc),
            ),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
            ),
        ],
    )
    def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
        """Task end_date is only serialized when it is earlier than the DAG's."""
        dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
        BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)

        serialized_dag = SerializedDAG.to_dict(dag)
        if not task_end_date or dag_end_date <= task_end_date:
            # If dag.end_date < task.end_date -> task.end_date=dag.end_date
            # because of the logic in dag.add_task()
            assert "end_date" not in serialized_dag["dag"]["tasks"][0]
        else:
            assert "end_date" in serialized_dag["dag"]["tasks"][0]

        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert simple_task.end_date == expected_task_end_date
    @pytest.mark.parametrize(
        "serialized_timetable, expected_timetable",
        [
            ({"__type": "airflow.timetables.simple.NullTimetable", "__var": {}}, NullTimetable()),
            (
                {
                    "__type": "airflow.timetables.interval.CronDataIntervalTimetable",
                    "__var": {"expression": "@weekly", "timezone": "UTC"},
                },
                cron_timetable("0 0 * * 0"),
            ),
            ({"__type": "airflow.timetables.simple.OnceTimetable", "__var": {}}, OnceTimetable()),
            (
                {
                    "__type": "airflow.timetables.interval.DeltaDataIntervalTimetable",
                    "__var": {"delta": 86400.0},
                },
                delta_timetable(timedelta(days=1)),
            ),
            (CUSTOM_TIMETABLE_SERIALIZED, CustomSerializationTimetable("foo")),
        ],
    )
    @pytest.mark.usefixtures("timetable_plugin")
    def test_deserialization_timetable(
        self,
        serialized_timetable,
        expected_timetable,
    ):
        """A serialized ``timetable`` entry deserializes to the right timetable object."""
        serialized = {
            "__version": 1,
            "dag": {
                "default_args": {"__type": "dict", "__var": {}},
                "_dag_id": "simple_dag",
                "fileloc": __file__,
                "tasks": [],
                "timezone": "UTC",
                "timetable": serialized_timetable,
            },
        }
        SerializedDAG.validate_schema(serialized)
        dag = SerializedDAG.from_dict(serialized)
        assert dag.timetable == expected_timetable
    def test_deserialization_timetable_unregistered(self):
        """Deserializing an unregistered timetable class raises a clear ValueError."""
        # No timetable_plugin fixture, so the custom class is not registered.
        serialized = {
            "__version": 1,
            "dag": {
                "default_args": {"__type": "dict", "__var": {}},
                "_dag_id": "simple_dag",
                "fileloc": __file__,
                "tasks": [],
                "timezone": "UTC",
                "timetable": CUSTOM_TIMETABLE_SERIALIZED,
            },
        }
        SerializedDAG.validate_schema(serialized)
        with pytest.raises(ValueError) as ctx:
            SerializedDAG.from_dict(serialized)

        message = (
            "Timetable class "
            "'tests.test_utils.timetables.CustomSerializationTimetable' "
            "is not registered"
        )
        assert str(ctx.value) == message
    @pytest.mark.parametrize(
        "serialized_schedule_interval, expected_timetable",
        [
            (None, NullTimetable()),
            ("@weekly", cron_timetable("0 0 * * 0")),
            ("@once", OnceTimetable()),
            (
                {"__type": "timedelta", "__var": 86400.0},
                delta_timetable(timedelta(days=1)),
            ),
        ],
    )
    def test_deserialization_schedule_interval(
        self,
        serialized_schedule_interval,
        expected_timetable,
    ):
        """Test DAGs serialized before 2.2 can be correctly deserialized."""
        # Pre-2.2 payloads carry "schedule_interval" rather than "timetable".
        serialized = {
            "__version": 1,
            "dag": {
                "default_args": {"__type": "dict", "__var": {}},
                "_dag_id": "simple_dag",
                "fileloc": __file__,
                "tasks": [],
                "timezone": "UTC",
                "schedule_interval": serialized_schedule_interval,
            },
        }

        SerializedDAG.validate_schema(serialized)
        dag = SerializedDAG.from_dict(serialized)
        assert dag.timetable == expected_timetable
    @pytest.mark.parametrize(
        "val, expected",
        [
            (relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
            (relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
            # Every friday
            (relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
            # Every second friday
            (relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
        ],
    )
    def test_roundtrip_relativedelta(self, val, expected):
        """relativedelta values serialize to the tagged dict form and round-trip intact."""
        serialized = SerializedDAG._serialize(val)
        assert serialized == expected
        round_tripped = SerializedDAG._deserialize(serialized)
        assert val == round_tripped
    @pytest.mark.parametrize(
        "val, expected_val",
        [
            (None, {}),
            ({"param_1": "value_1"}, {"param_1": "value_1"}),
            ({"param_1": {1, 2, 3}}, {"param_1": {1, 2, 3}}),
        ],
    )
    def test_dag_params_roundtrip(self, val, expected_val):
        """
        Test that params work both on Serialized DAGs & Tasks
        """
        dag = DAG(dag_id='simple_dag', params=val)
        BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))

        serialized_dag_json = SerializedDAG.to_json(dag)
        serialized_dag = json.loads(serialized_dag_json)
        # DAG-level params are always present in the serialized payload.
        assert "params" in serialized_dag["dag"]

        deserialized_dag = SerializedDAG.from_dict(serialized_dag)
        deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
        # DAG-level params propagate to the task as well.
        assert expected_val == deserialized_dag.params.dump()
        assert expected_val == deserialized_simple_task.params.dump()
    def test_invalid_params(self):
        """
        Test to make sure that only native Param objects are being passed as dag or task params
        """

        class S3Param(Param):
            # Param subclass: serialization must reject it (only native Param allowed).
            def __init__(self, path: str):
                schema = {"type": "string", "pattern": r"s3:\/\/(.+?)\/(.+)"}
                super().__init__(default=path, schema=schema)

        dag = DAG(dag_id='simple_dag', params={'path': S3Param('s3://my_bucket/my_path')})

        with pytest.raises(SerializationError):
            SerializedDAG.to_dict(dag)

        dag = DAG(dag_id='simple_dag')
        BaseOperator(
            task_id='simple_task',
            dag=dag,
            start_date=datetime(2019, 8, 1),
            params={'path': S3Param('s3://my_bucket/my_path')},
        )
    @pytest.mark.parametrize(
        'param',
        [
            Param('my value', description='hello', schema={'type': 'string'}),
            Param('my value', description='hello'),
            Param(None, description=None),
        ],
    )
    def test_full_param_roundtrip(self, param):
        """
        Test that a Param's value, description and schema all survive a
        serialize/deserialize round trip.
        """
        dag = DAG(dag_id='simple_dag', params={'my_param': param})
        serialized_json = SerializedDAG.to_json(dag)
        serialized = json.loads(serialized_json)
        SerializedDAG.validate_schema(serialized)
        dag = SerializedDAG.from_dict(serialized)

        assert dag.params["my_param"] == param.value
        observed_param = dag.params.get_param('my_param')
        assert isinstance(observed_param, Param)
        assert observed_param.description == param.description
        assert observed_param.schema == param.schema
    @pytest.mark.parametrize(
        "val, expected_val",
        [
            (None, {}),
            ({"param_1": "value_1"}, {"param_1": "value_1"}),
            ({"param_1": {1, 2, 3}}, {"param_1": {1, 2, 3}}),
        ],
    )
    def test_task_params_roundtrip(self, val, expected_val):
        """
        Test that params work both on Serialized DAGs & Tasks
        """
        dag = DAG(dag_id='simple_dag')
        BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))

        serialized_dag = SerializedDAG.to_dict(dag)
        # Unlike DAG-level params, task params are omitted when empty.
        if val:
            assert "params" in serialized_dag["dag"]["tasks"][0]
        else:
            assert "params" not in serialized_dag["dag"]["tasks"][0]

        deserialized_dag = SerializedDAG.from_dict(serialized_dag)
        deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
        assert expected_val == deserialized_simple_task.params.dump()
    def test_extra_serialized_field_and_operator_links(self, dag_maker):
        """
        Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.

        This tests also depends on GoogleLink() registered as a plugin
        in tests/plugins/test_plugin.py

        The function tests that if extra operator links are registered in plugin
        in ``operator_extra_links`` and the same is also defined in
        the Operator in ``BaseOperator.operator_extra_links``, it has the correct
        extra link.
        """
        test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
        with dag_maker(dag_id='simple_dag', start_date=test_date) as dag:
            CustomOperator(task_id='simple_task', bash_command="true")

        serialized_dag = SerializedDAG.to_dict(dag)
        # bash_command is CustomOperator's extra serialized field.
        assert "bash_command" in serialized_dag["dag"]["tasks"][0]

        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert getattr(simple_task, "bash_command") == "true"

        #########################################################
        # Verify Operator Links work with Serialized Operator
        #########################################################
        # Check Serialized version of operator link only contains the inbuilt Op Link
        assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
            {'tests.test_utils.mock_operators.CustomOpLink': {}}
        ]

        # Test all the extra_links are set
        assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'}

        dag_maker.create_dagrun(execution_date=test_date)
        # The link reads its target URL from this XCom value.
        XCom.set(
            key='search_query',
            value="dummy_value_1",
            task_id=simple_task.task_id,
            dag_id=simple_task.dag_id,
            execution_date=test_date,
        )

        # Test Deserialized inbuilt link
        custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
        assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link

        # Test Deserialized link registered via Airflow Plugin
        google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
        assert "https://www.google.com" == google_link_from_plugin
    def test_extra_operator_links_logs_error_for_non_registered_extra_links(self, caplog):
        """
        Assert OperatorLinks not registered via Plugins and if it is not an inbuilt Operator Link,
        it can still deserialize the DAG (does not error) but just logs an error
        """

        class TaskStateLink(BaseOperatorLink):
            """OperatorLink not registered via Plugins nor a built-in OperatorLink"""

            name = 'My Link'

            def get_link(self, operator, dttm):
                return 'https://www.google.com'

        class MyOperator(BaseOperator):
            """Just a DummyOperator using above defined Extra Operator Link"""

            operator_extra_links = [TaskStateLink()]

            def execute(self, context: Context):
                pass

        with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
            MyOperator(task_id='blah')

        serialized_dag = SerializedDAG.to_dict(dag)

        # Deserialization must succeed; only an error log is expected.
        with caplog.at_level("ERROR", logger="airflow.serialization.serialized_objects"):
            SerializedDAG.from_dict(serialized_dag)

        expected_err_msg = (
            "Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' not registered"
        )
        assert expected_err_msg in caplog.text
    def test_extra_serialized_field_and_multiple_operator_links(self, dag_maker):
        """
        Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.

        This tests also depends on GoogleLink() registered as a plugin
        in tests/plugins/test_plugin.py

        The function tests that if extra operator links are registered in plugin
        in ``operator_extra_links`` and the same is also defined in
        the Operator in ``BaseOperator.operator_extra_links``, it has the correct
        extra link.
        """
        test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
        with dag_maker(dag_id='simple_dag', start_date=test_date) as dag:
            # A list-valued bash_command yields one indexed link per element.
            CustomOperator(task_id='simple_task', bash_command=["echo", "true"])

        serialized_dag = SerializedDAG.to_dict(dag)
        assert "bash_command" in serialized_dag["dag"]["tasks"][0]

        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert getattr(simple_task, "bash_command") == ["echo", "true"]

        #########################################################
        # Verify Operator Links work with Serialized Operator
        #########################################################
        # Check Serialized version of operator link only contains the inbuilt Op Link
        assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
            {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
            {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
        ]

        # Test all the extra_links are set
        assert set(simple_task.extra_links) == {
            'BigQuery Console #1',
            'BigQuery Console #2',
            'airflow',
            'github',
            'google',
        }

        dag_maker.create_dagrun(execution_date=test_date)
        # Each link reads its URL component from this XCom list.
        XCom.set(
            key='search_query',
            value=["dummy_value_1", "dummy_value_2"],
            task_id=simple_task.task_id,
            dag_id=simple_task.dag_id,
            execution_date=test_date,
        )

        # Test Deserialized inbuilt link #1
        custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
        assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link

        # Test Deserialized inbuilt link #2
        custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
        assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link

        # Test Deserialized link registered via Airflow Plugin
        google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
        assert "https://www.google.com" == google_link_from_plugin
class ClassWithCustomAttributes:
    """
    Class for testing purpose: allows to create objects with custom attributes in one single statement.
    """

    def __init__(self, **kwargs):
        # Turn every keyword argument into an instance attribute.
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __str__(self):
        return f"{type(self).__name__}({str(self.__dict__)})"

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        # Attribute-wise equality: compare the instance dicts directly.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
@pytest.mark.parametrize(
"templated_field, expected_field",
[
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
"'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
],
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exists for all Operators in Serialized DAG
Since we don't want to inflate arbitrary python objects (it poses a RCE/security risk etc.)
we want check that non-"basic" objects are turned in to strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
assert expected_field == getattr(deserialized_test_task, "bash_command")
    def test_dag_serialized_fields_with_schema(self):
        """
        Additional Properties are disabled on DAGs. This test verifies that all the
        keys in DAG.get_serialized_fields are listed in Schema definition.
        """
        dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]

        # The parameters we add manually in Serialization needs to be ignored
        ignored_keys: set = {
            "is_subdag",
            "tasks",
            "has_on_success_callback",
            "has_on_failure_callback",
            "dag_dependencies",
            "params",
        }

        # Present in the schema only for deserializing older payloads.
        keys_for_backwards_compat: set = {
            "_concurrency",
        }
        dag_params: set = set(dag_schema.keys()) - ignored_keys - keys_for_backwards_compat
        assert set(DAG.get_serialized_fields()) == dag_params
    def test_operator_subclass_changing_base_defaults(self):
        """A subclass overriding a BaseOperator default keeps its value through serialization."""
        assert (
            BaseOperator(task_id='dummy').do_xcom_push is True
        ), "Precondition check! If this fails the test won't make sense"

        class MyOperator(BaseOperator):
            # Flips the BaseOperator default of do_xcom_push=True.
            def __init__(self, do_xcom_push=False, **kwargs):
                super().__init__(**kwargs)
                self.do_xcom_push = do_xcom_push

        op = MyOperator(task_id='dummy')
        assert op.do_xcom_push is False

        blob = SerializedBaseOperator.serialize_operator(op)
        serialized_op = SerializedBaseOperator.deserialize_operator(blob)

        assert serialized_op.do_xcom_push is False
    def test_no_new_fields_added_to_base_operator(self):
        """
        This test verifies that there are no new fields added to BaseOperator. And reminds that
        tests should be added for it.
        """
        base_operator = BaseOperator(task_id="10")
        # Snapshot of every serialized field's default on a freshly built operator.
        fields = {k: v for (k, v) in vars(base_operator).items() if k in BaseOperator.get_serialized_fields()}
        assert fields == {
            '_inlets': [],
            '_log': base_operator.log,
            '_outlets': [],
            '_pre_execute_hook': None,
            '_post_execute_hook': None,
            'depends_on_past': False,
            'downstream_task_ids': set(),
            'do_xcom_push': True,
            'doc': None,
            'doc_json': None,
            'doc_md': None,
            'doc_rst': None,
            'doc_yaml': None,
            'email': None,
            'email_on_failure': True,
            'email_on_retry': True,
            'execution_timeout': None,
            'executor_config': {},
            'max_active_tis_per_dag': None,
            'max_retry_delay': None,
            'on_execute_callback': None,
            'on_failure_callback': None,
            'on_retry_callback': None,
            'on_success_callback': None,
            'owner': 'airflow',
            'params': {},
            'pool': 'default_pool',
            'pool_slots': 1,
            'priority_weight': 1,
            'queue': 'default',
            'resources': None,
            'retries': 0,
            'retry_delay': timedelta(0, 300),
            'retry_exponential_backoff': False,
            'run_as_user': None,
            'sla': None,
            'task_id': '10',
            'trigger_rule': 'all_success',
            'wait_for_downstream': False,
            'weight_rule': 'downstream',
        }, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY

 Some fields were added to the BaseOperator! Please add them to the list above and make sure that
 you add support for DAG serialization - you should add the field to
 `airflow/serialization/schema.json` - they should have correct type defined there.

 Note that we do not support versioning yet so you should only add optional fields to BaseOperator.

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                         """
    def test_operator_deserialize_old_names(self):
        """A payload using the legacy ``_downstream_task_ids`` key still deserializes."""
        blob = {
            "task_id": "custom_task",
            "_downstream_task_ids": ['foo'],
            "template_ext": [],
            "template_fields": ['bash_command'],
            "template_fields_renderers": {},
            "_task_type": "CustomOperator",
            "_task_module": "tests.test_utils.mock_operators",
            "pool": "default_pool",
            "ui_color": "#fff",
            "ui_fgcolor": "#000",
        }

        SerializedDAG._json_schema.validate(blob, _schema=load_dag_schema_dict()['definitions']['operator'])
        serialized_op = SerializedBaseOperator.deserialize_operator(blob)
        # Old key is mapped onto the current downstream_task_ids attribute (as a set).
        assert serialized_op.downstream_task_ids == {'foo'}
    def test_task_resources(self):
        """
        Test task resources serialization/deserialization.
        """
        from airflow.operators.dummy import DummyOperator

        execution_date = datetime(2020, 1, 1)
        task_id = 'task1'
        with DAG("test_task_resources", start_date=execution_date) as dag:
            task = DummyOperator(task_id=task_id, resources={"cpus": 0.1, "ram": 2048})

        SerializedDAG.validate_schema(SerializedDAG.to_dict(dag))

        json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
        deserialized_task = json_dag.get_task(task_id)
        # The resources dict round-trips as a Resources object equal to the original.
        assert deserialized_task.resources == task.resources
        assert isinstance(deserialized_task.resources, Resources)
def test_task_group_serialization(self):
    """
    Test TaskGroup serialization/deserialization.

    Builds a DAG with nested TaskGroups, round-trips it through both the JSON
    and the dict serialization paths, and recursively verifies every leaf task
    re-serializes to the same blob as its round-tripped counterpart.
    """
    from airflow.operators.dummy import DummyOperator

    execution_date = datetime(2020, 1, 1)
    with DAG("test_task_group_serialization", start_date=execution_date) as dag:
        task1 = DummyOperator(task_id="task1")
        with TaskGroup("group234") as group234:
            _ = DummyOperator(task_id="task2")
            with TaskGroup("group34") as group34:
                _ = DummyOperator(task_id="task3")
                _ = DummyOperator(task_id="task4")
        task5 = DummyOperator(task_id="task5")
        task1 >> group234
        group34 >> task5

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))

    assert serialized_dag.task_group.children
    assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()

    def check_task_group(node):
        # EAFP: leaf operators have no ``children`` attribute, so the
        # AttributeError branch is the per-task comparison.
        try:
            children = node.children.values()
        except AttributeError:
            # Round-trip serialization and check the result
            expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
            expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
            expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
            assert node
            assert SerializedBaseOperator.serialize_operator(node) == expected_dict
            return

        # Recurse into nested groups.
        for child in children:
            check_task_group(child)

    check_task_group(serialized_dag.task_group)
def test_deps_sorted(self):
    """serialize_operator must emit the ``deps`` list in sorted order."""
    from airflow.operators.dummy import DummyOperator
    from airflow.sensors.external_task import ExternalTaskSensor

    with DAG(dag_id="test_deps_sorted", start_date=datetime(2020, 1, 1)) as dag:
        sensor = ExternalTaskSensor(
            task_id="task1",
            external_dag_id="external_dag_id",
            mode="reschedule",
        )
        sensor >> DummyOperator(task_id="task2")

    blob = SerializedBaseOperator.serialize_operator(dag.task_dict["task1"])
    # Alphabetical by fully-qualified class path.
    assert blob["deps"] == [
        'airflow.ti_deps.deps.not_in_retry_period_dep.NotInRetryPeriodDep',
        'airflow.ti_deps.deps.not_previously_skipped_dep.NotPreviouslySkippedDep',
        'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep',
        'airflow.ti_deps.deps.ready_to_reschedule.ReadyToRescheduleDep',
        'airflow.ti_deps.deps.trigger_rule_dep.TriggerRuleDep',
    ]
def test_task_group_sorted(self):
    """
    Tests serialize_task_group, make sure the list is in order
    """
    from airflow.operators.dummy import DummyOperator
    from airflow.serialization.serialized_objects import SerializedTaskGroup

    # Diamond-shaped topology exercised below (group names, contained task in
    # parentheses):
    """
    start
    ╱ ╲
    ╱ ╲
    task_group_up1 task_group_up2
    (task_up1) (task_up2)
    ╲ ╱
    task_group_middle
    (task_middle)
    ╱ ╲
    task_group_down1 task_group_down2
    (task_down1) (task_down2)
    ╲ ╱
    ╲ ╱
    end
    """
    execution_date = datetime(2020, 1, 1)
    with DAG(dag_id="test_task_group_sorted", start_date=execution_date) as dag:
        start = DummyOperator(task_id="start")
        with TaskGroup("task_group_up1") as task_group_up1:
            _ = DummyOperator(task_id="task_up1")
        with TaskGroup("task_group_up2") as task_group_up2:
            _ = DummyOperator(task_id="task_up2")
        with TaskGroup("task_group_middle") as task_group_middle:
            _ = DummyOperator(task_id="task_middle")
        with TaskGroup("task_group_down1") as task_group_down1:
            _ = DummyOperator(task_id="task_down1")
        with TaskGroup("task_group_down2") as task_group_down2:
            _ = DummyOperator(task_id="task_down2")
        end = DummyOperator(task_id='end')
        start >> task_group_up1
        start >> task_group_up2
        task_group_up1 >> task_group_middle
        task_group_up2 >> task_group_middle
        task_group_middle >> task_group_down1
        task_group_middle >> task_group_down2
        task_group_down1 >> end
        task_group_down2 >> end

    # All id lists emitted by serialize_task_group must be sorted.
    task_group_middle_dict = SerializedTaskGroup.serialize_task_group(
        dag.task_group.children["task_group_middle"]
    )
    upstream_group_ids = task_group_middle_dict["upstream_group_ids"]
    assert upstream_group_ids == ['task_group_up1', 'task_group_up2']

    upstream_task_ids = task_group_middle_dict["upstream_task_ids"]
    assert upstream_task_ids == ['task_group_up1.task_up1', 'task_group_up2.task_up2']

    downstream_group_ids = task_group_middle_dict["downstream_group_ids"]
    assert downstream_group_ids == ['task_group_down1', 'task_group_down2']

    task_group_down1_dict = SerializedTaskGroup.serialize_task_group(
        dag.task_group.children["task_group_down1"]
    )
    downstream_task_ids = task_group_down1_dict["downstream_task_ids"]
    assert downstream_task_ids == ['end']
def test_edge_info_serialization(self):
    """Edge labels attached via ``Label`` survive DAG round-tripping."""
    from airflow.operators.dummy import DummyOperator
    from airflow.utils.edgemodifier import Label

    with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag:
        first = DummyOperator(task_id="task1")
        second = DummyOperator(task_id="task2")
        first >> Label("test label") >> second

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    round_tripped = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
    assert round_tripped.edge_info == dag.edge_info
@pytest.mark.parametrize(
    "mode, expect_custom_deps",
    [
        ("poke", False),
        ("reschedule", True),
    ],
)
def test_serialize_sensor(self, mode, expect_custom_deps):
    """Only reschedule-mode sensors serialize a custom ``deps`` entry."""
    from airflow.sensors.base import BaseSensorOperator

    class DummySensor(BaseSensorOperator):
        def poke(self, context: Context):
            return False

    sensor = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
    blob = SerializedBaseOperator.serialize_operator(sensor)
    assert ("deps" in blob) == expect_custom_deps

    # Deps must be identical after a round trip regardless of mode.
    restored = SerializedBaseOperator.deserialize_operator(blob)
    assert sensor.deps == restored.deps
@pytest.mark.parametrize(
    "passed_success_callback, expected_value",
    [
        ({"on_success_callback": lambda x: print("hi")}, True),
        ({}, False),
    ],
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
    """
    Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
    in Serialized JSON blob. And when it is de-serialized dag.has_on_success_callback is set to True.

    When the callback is not set, has_on_success_callback should not be stored in Serialized blob
    and so default to False on de-serialization
    """
    dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
    BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))

    serialized_dag = SerializedDAG.to_dict(dag)
    # The flag is only present in the blob when a callback was actually set.
    assert ("has_on_success_callback" in serialized_dag["dag"]) is expected_value

    deserialized_dag = SerializedDAG.from_dict(serialized_dag)
    assert deserialized_dag.has_on_success_callback is expected_value
@pytest.mark.parametrize(
    "passed_failure_callback, expected_value",
    [
        ({"on_failure_callback": lambda x: print("hi")}, True),
        ({}, False),
    ],
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
    """
    Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
    in Serialized JSON blob. And when it is de-serialized dag.has_on_failure_callback is set to True.

    When the callback is not set, has_on_failure_callback should not be stored in Serialized blob
    and so default to False on de-serialization
    """
    dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
    BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))

    serialized_dag = SerializedDAG.to_dict(dag)
    # The flag is only present in the blob when a callback was actually set.
    assert ("has_on_failure_callback" in serialized_dag["dag"]) is expected_value

    deserialized_dag = SerializedDAG.from_dict(serialized_dag)
    assert deserialized_dag.has_on_failure_callback is expected_value
@pytest.mark.parametrize(
    "object_to_serialized, expected_output",
    [
        # Lists preserve order.
        (
            ['task_1', 'task_5', 'task_2', 'task_4'],
            ['task_1', 'task_5', 'task_2', 'task_4'],
        ),
        # Sets are emitted sorted (serialization must be deterministic).
        (
            {'task_1', 'task_5', 'task_2', 'task_4'},
            ['task_1', 'task_2', 'task_4', 'task_5'],
        ),
        # Tuples preserve order (and become lists).
        (
            ('task_1', 'task_5', 'task_2', 'task_4'),
            ['task_1', 'task_5', 'task_2', 'task_4'],
        ),
        # Nested dicts get the {"__type": "dict", "__var": ...} encoding.
        (
            {
                "staging_schema": [
                    {"key:": "foo", "value": "bar"},
                    {"key:": "this", "value": "that"},
                    "test_conf",
                ]
            },
            {
                "staging_schema": [
                    {"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
                    {
                        "__type": "dict",
                        "__var": {"key:": "this", "value": "that"},
                    },
                    "test_conf",
                ]
            },
        ),
        # Dict keys are sorted.
        (
            {"task3": "test3", "task2": "test2", "task1": "test1"},
            {"task1": "test1", "task2": "test2", "task3": "test3"},
        ),
        # Mixed-type tuples also preserve order.
        (
            ('task_1', 'task_5', 'task_2', 3, ["x", "y"]),
            ['task_1', 'task_5', 'task_2', 3, ["x", "y"]],
        ),
    ],
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
    """Test Serialized Sets are sorted while list and tuple preserve order"""
    serialized_obj = SerializedDAG._serialize(object_to_serialized)
    # Unwrap the type envelope when the serializer added one (e.g. for sets).
    if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
        serialized_obj = serialized_obj["__var"]
    assert serialized_obj == expected_output
def test_params_upgrade(self):
    """when pre-2.2.0 param (i.e. primitive) is deserialized we convert to Param"""
    blob = {
        "__version": 1,
        "dag": {
            "_dag_id": "simple_dag",
            "fileloc": '/path/to/file.py',
            "tasks": [],
            "timezone": "UTC",
            "params": {"none": None, "str": "str", "dict": {"a": "b"}},
        },
    }
    dag = SerializedDAG.from_dict(blob)

    # Primitive values keep their resolved value but are wrapped in Param.
    assert dag.params["none"] is None
    assert isinstance(dag.params.get_param("none"), Param)
    assert dag.params["str"] == "str"
def test_params_serialize_default_2_2_0(self):
    """In 2.0.0, param ``default`` was assumed to be json-serializable objects and were not run though
    the standard serializer function. In 2.2.2 we serialize param ``default``. We keep this
    test only to ensure that params stored in 2.2.0 can still be parsed correctly."""
    blob = {
        "__version": 1,
        "dag": {
            "_dag_id": "simple_dag",
            "fileloc": '/path/to/file.py',
            "tasks": [],
            "timezone": "UTC",
            "params": {"str": {"__class": "airflow.models.param.Param", "default": "str"}},
        },
    }
    SerializedDAG.validate_schema(blob)

    dag = SerializedDAG.from_dict(blob)
    param = dag.params.get_param("str")
    assert isinstance(param, Param)
    assert dag.params["str"] == "str"
def test_params_serialize_default(self):
    """A fully-specified serialized Param round-trips value, description and schema."""
    blob = {
        "__version": 1,
        "dag": {
            "_dag_id": "simple_dag",
            "fileloc": '/path/to/file.py',
            "tasks": [],
            "timezone": "UTC",
            "params": {
                "my_param": {
                    "default": "a string value",
                    "description": "hello",
                    "schema": {"__var": {"type": "string"}, "__type": "dict"},
                    "__class": "airflow.models.param.Param",
                }
            },
        },
    }
    SerializedDAG.validate_schema(blob)

    dag = SerializedDAG.from_dict(blob)
    assert dag.params["my_param"] == "a string value"

    my_param = dag.params.get_param('my_param')
    assert isinstance(my_param, Param)
    assert my_param.description == 'hello'
    assert my_param.schema == {'type': 'string'}
def test_kubernetes_optional():
    """Serialisation / deserialisation continues to work without kubernetes installed"""

    def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
        # Simulate a missing kubernetes package: any absolute import of
        # ``kubernetes`` (or a submodule of it) fails; everything else passes
        # through to the real import machinery.
        if level == 0 and name.partition('.')[0] == 'kubernetes':
            raise ImportError("No module named 'kubernetes'")
        return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)

    with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
        # load module from scratch, this does not replace any already imported
        # airflow.serialization.serialized_objects module in sys.modules
        spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

        # if we got this far, the module did not try to load kubernetes, but
        # did it try to access airflow.kubernetes.*?
        imported_airflow = {
            c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
        }
        assert "kubernetes" not in imported_airflow

        # pod loading is not supported when kubernetes is not available
        pod_override = {
            '__type': 'k8s.V1Pod',
            '__var': PodGenerator.serialize_pod(executor_config_pod),
        }

        with pytest.raises(RuntimeError):
            module.BaseSerialization.from_dict(pod_override)

        # basic serialization should succeed
        module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
def test_mapped_operator_serde():
    """A literal-mapped BashOperator serializes with ``_is_mapped`` plus its
    mapped/partial kwargs, and deserializes back into a MappedOperator."""
    literal = [1, 2, {'a': 'b'}]
    real_op = BashOperator.partial(task_id='a', executor_config={'dict': {'sub': 'value'}}).map(
        bash_command=literal
    )

    serialized = SerializedBaseOperator._serialize(real_op)

    # Exact wire format: nested dicts are wrapped as {"__type": "dict", "__var": ...}.
    assert serialized == {
        '_is_dummy': False,
        '_is_mapped': True,
        '_task_module': 'airflow.operators.bash',
        '_task_type': 'BashOperator',
        'downstream_task_ids': [],
        'mapped_kwargs': {
            'bash_command': [
                1,
                2,
                {"__type": "dict", "__var": {'a': 'b'}},
            ]
        },
        'partial_kwargs': {
            'executor_config': {
                '__type': 'dict',
                '__var': {
                    'dict': {"__type": "dict", "__var": {'sub': 'value'}},
                },
            },
        },
        'task_id': 'a',
        'operator_extra_links': [],
        'template_fields': ['bash_command', 'env'],
        'template_ext': ['.sh', '.bash'],
        'ui_color': '#f0ede4',
        'ui_fgcolor': '#000',
    }

    op = SerializedBaseOperator.deserialize_operator(serialized)
    assert isinstance(op, MappedOperator)
    # Deps are shared class-level objects, hence the identity check.
    assert op.deps is MappedOperator.deps_for(BaseOperator)
    assert op.operator_class == "airflow.operators.bash.BashOperator"
    # The wrapped values unwrap back to the original Python objects.
    assert op.mapped_kwargs['bash_command'] == literal
    assert op.partial_kwargs['executor_config'] == {'dict': {'sub': 'value'}}
def test_mapped_operator_xcomarg_serde():
    """An XComArg used as a mapped kwarg serializes to an ``xcomref`` node and
    is re-linked to the producing task after full-DAG deserialization."""
    from airflow.models.xcom_arg import XComArg

    with DAG("test-dag", start_date=datetime(2020, 1, 1)) as dag:
        task1 = BaseOperator(task_id="op1")
        xcomarg = XComArg(task1, "test_key")
        mapped = MockOperator.partial(task_id='task_2').map(arg2=xcomarg)

    serialized = SerializedBaseOperator._serialize(mapped)
    assert serialized == {
        '_is_dummy': False,
        '_is_mapped': True,
        '_task_module': 'tests.test_utils.mock_operators',
        '_task_type': 'MockOperator',
        'downstream_task_ids': [],
        'mapped_kwargs': {'arg2': {'__type': 'xcomref', '__var': {'task_id': 'op1', 'key': 'test_key'}}},
        'partial_kwargs': {},
        'task_id': 'task_2',
        'template_fields': ['arg1', 'arg2'],
        'template_ext': [],
        'operator_extra_links': [],
        'ui_color': '#fff',
        'ui_fgcolor': '#000',
    }

    # Operator-level deserialization yields a placeholder reference...
    op = SerializedBaseOperator.deserialize_operator(serialized)
    assert op.deps is MappedOperator.deps_for(BaseOperator)

    arg = op.mapped_kwargs['arg2']
    assert arg.task_id == 'op1'
    assert arg.key == 'test_key'

    # ...while DAG-level deserialization resolves it to a real XComArg bound
    # to the deserialized producer task.
    serialized_dag: DAG = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))

    xcom_arg = serialized_dag.task_dict['task_2'].mapped_kwargs['arg2']
    assert isinstance(xcom_arg, XComArg)
    assert xcom_arg.operator is serialized_dag.task_dict['op1']
def test_task_resources_serde():
    """
    Test task resources serialization/deserialization.
    """
    from airflow.operators.dummy import DummyOperator

    with DAG("test_task_resources", start_date=datetime(2020, 1, 1)) as _:
        task = DummyOperator(task_id='task1', resources={"cpus": 0.1, "ram": 2048})

    serialized = SerializedBaseOperator._serialize(task)
    # Resources not given explicitly are filled in with their defaults.
    assert serialized['resources'] == {
        "cpus": {"name": "CPU", "qty": 0.1, "units_str": "core(s)"},
        "disk": {"name": "Disk", "qty": 512, "units_str": "MB"},
        "gpus": {"name": "GPU", "qty": 0, "units_str": "gpu(s)"},
        "ram": {"name": "RAM", "qty": 2048, "units_str": "MB"},
    }
def test_mapped_decorator_serde():
    """A ``@task``-decorated callable mapped over kwargs serializes its
    partial/mapped op_kwargs (including an XComArg) and round-trips them."""
    from airflow.decorators import task
    from airflow.models.xcom_arg import XComArg
    from airflow.serialization.serialized_objects import _XComRef

    with DAG("test-dag", start_date=datetime(2020, 1, 1)) as dag:
        op1 = BaseOperator(task_id="op1")
        xcomarg = XComArg(op1, "my_key")

        @task(retry_delay=30)
        def x(arg1, arg2, arg3):
            print(arg1, arg2, arg3)

        x.partial(arg1=[1, 2, {"a": "b"}]).map(arg2={"a": 1, "b": 2}, arg3=xcomarg)

    original = dag.get_task("x")

    serialized = SerializedBaseOperator._serialize(original)
    # Exact wire format; note retry_delay becomes a timedelta node and the
    # XComArg an xcomref node.
    assert serialized == {
        '_is_dummy': False,
        '_is_mapped': True,
        '_task_module': 'airflow.decorators.python',
        '_task_type': '_PythonDecoratedOperator',
        'downstream_task_ids': [],
        'partial_kwargs': {
            'op_args': [],
            'op_kwargs': {'arg1': [1, 2, {"__type": "dict", "__var": {'a': 'b'}}]},
            'retry_delay': {'__type': 'timedelta', '__var': 30.0},
        },
        'mapped_kwargs': {},
        'mapped_op_kwargs': {
            'arg2': {"__type": "dict", "__var": {'a': 1, 'b': 2}},
            'arg3': {'__type': 'xcomref', '__var': {'task_id': 'op1', 'key': 'my_key'}},
        },
        'operator_extra_links': [],
        'ui_color': '#ffefeb',
        'ui_fgcolor': '#000',
        'task_id': 'x',
        'template_ext': [],
        'template_fields': ['op_args', 'op_kwargs'],
    }

    deserialized = SerializedBaseOperator.deserialize_operator(serialized)
    assert isinstance(deserialized, MappedOperator)
    assert deserialized.deps is MappedOperator.deps_for(BaseOperator)
    assert deserialized.upstream_task_ids == set()
    assert deserialized.downstream_task_ids == set()

    # At operator level the xcomref stays a lightweight _XComRef placeholder.
    assert deserialized.mapped_op_kwargs == {
        "arg2": {"a": 1, "b": 2},
        "arg3": _XComRef("op1", "my_key"),
    }

    assert deserialized.partial_kwargs == {
        "op_args": [],
        "op_kwargs": {"arg1": [1, 2, {"a": "b"}]},
        "retry_delay": timedelta(seconds=30),
    }
def test_mapped_task_group_serde():
    """A mapped TaskGroup serializes its mapped argument alongside the usual
    group fields, and can be deserialized inside a fresh DAG context."""
    execution_date = datetime(2020, 1, 1)
    literal = [1, 2, {'a': 'b'}]
    with DAG("test", start_date=execution_date) as dag:
        with TaskGroup("process_one", dag=dag).map(literal) as process_one:
            BaseOperator(task_id='one')

    serialized = SerializedTaskGroup.serialize_task_group(process_one)

    assert serialized == {
        '_group_id': 'process_one',
        # Children map task key -> (node kind, reference).
        'children': {'process_one.one': ('operator', 'process_one.one')},
        'downstream_group_ids': [],
        'downstream_task_ids': [],
        'prefix_group_id': True,
        'tooltip': '',
        'ui_color': 'CornflowerBlue',
        'ui_fgcolor': '#000',
        'upstream_group_ids': [],
        'upstream_task_ids': [],
        'mapped_arg': [
            1,
            2,
            {"__type": "dict", "__var": {'a': 'b'}},
        ],
    }

    # Deserialization only needs to not raise here.
    with DAG("test", start_date=execution_date):
        SerializedTaskGroup.deserialize_task_group(serialized, None, dag.task_dict)
|
EE2CameraController.py | import time
import rospy
import picamera
from rospy import Service
from std_srvs.srv import (Empty, EmptyRequest, EmptyResponse)
from tactics.ee2.EE2ClientDisableable import EE2ClientDisableable
import threading
class CameraController(EE2ClientDisableable):
    """ROS service wrapper around the Raspberry Pi camera.

    Exposes ``camera/start`` / ``camera/stop`` Empty services. The actual
    picamera start/stop calls happen on a dedicated worker thread so the
    service callbacks return immediately; the services merely toggle the
    ``__is_recording`` flag that the worker mirrors.
    """

    # Operational variable
    __camera = None  # picamera.PiCamera instance; None while hardware is disabled

    # Predefined variables
    __is_recording: bool = False  # desired recording state, set by the services
    __start_recording_service: Service
    __stop_recording_service: Service
    __recording_thread: threading.Thread
    __stop_thread: bool = False  # signals the worker loop to exit

    def __init__(self):
        print("Initializing camera controller!")
        # EE2 Disable tactic specific:
        super().__init__(hardware_name='camera', change_event=self.__disable_enable_change_event)
        self.__start_recording_service = rospy.Service('camera/start', Empty, self.__start_recording)
        self.__stop_recording_service = rospy.Service('camera/stop', Empty, self.__stop_recording)
        self.__recording_thread = threading.Thread(target=self.__recording_worker_thread)
        self.__recording_thread.start()

    def __disable_enable_change_event(self) -> None:
        """Create/destroy the camera object when the hardware is (un)disabled."""
        if self.is_disabled():
            if self.__is_recording:
                self.__stop_recording(EmptyRequest())
            # BUG FIX: __camera is None when the camera was never enabled;
            # closing unconditionally raised AttributeError.
            if self.__camera is not None:
                self.__camera.close()
            self.__camera = None
            self.__is_recording = False
        else:
            self.__camera = picamera.PiCamera(framerate=60)
            self.__camera.resolution = (1280, 720)

    def __start_recording(self, msg: EmptyRequest) -> EmptyResponse:
        """ROS handler: request recording (ignored while hardware is disabled)."""
        print("Camera recording STARTED!")
        if not self.is_disabled():
            self.__is_recording = True
        return EmptyResponse()

    def __stop_recording(self, msg: EmptyRequest) -> EmptyResponse:
        """ROS handler: request the worker thread to stop recording."""
        print("Camera recording STOPPED!")
        self.__is_recording = False
        return EmptyResponse()

    def exit(self):
        """Shut down: stop recording, stop and join the worker, release the camera."""
        self.__stop_recording(EmptyRequest())
        time.sleep(1)  # Give time to stop
        self.__stop_thread = True
        self.__recording_thread.join()  # Wait for thread to stop and join current process
        # BUG FIX: __camera is None whenever the hardware is disabled (see
        # __disable_enable_change_event), so the unconditional close() here
        # raised AttributeError on shutdown.
        if self.__camera is not None:
            self.__camera.close()  # Close camera object and release resources
        print("Succesfully stopped thread!")

    def __recording_worker_thread(self):
        """Worker loop that keeps the picamera state in sync with __is_recording."""
        thread_recording: bool = False  # whether picamera is actually recording

        def thread_start_recording():
            nonlocal thread_recording
            self.__camera.start_recording('/home/pi/VIDEO_TEST.h264')
            thread_recording = True

        def thread_stop_recording():
            nonlocal thread_recording
            self.__camera.stop_recording()
            thread_recording = False

        while True:  # Keep thread alive
            while self.__is_recording:  # Record if requested through service
                if not thread_recording:  # If not yet recording (first iteration) start recording
                    thread_start_recording()  # Start recording and prevent second call of start recording
                time.sleep(0.5)
            if thread_recording:  # Broken out of recording loop, if just recorded, stop recording
                thread_stop_recording()  # Stop recording and prevent second call if not recording again
            if self.__stop_thread:
                print("\tThread ordered to stop!")
                break
            time.sleep(0.5)
controller.py | #!/usr/bin/env python3
import os
import time
import math
import atexit
import numpy as np
import threading
import random
import cereal.messaging as messaging
from common.params import Params
from common.realtime import Ratekeeper
from can import can_function, sendcan_function
import queue
pm = messaging.PubMaster(['frame', 'sensorEvents', 'can'])
W,H = 1164, 874
def cam_callback(image):
    """Publish a carla camera frame as an openpilot 'frame' message.

    Converts the raw 4-channel byte buffer into an (H, W, 3) uint8 array
    (alpha dropped) before sending over the 'frame' socket.
    """
    img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    img = np.reshape(img, (H, W, 4))
    img = img[:, :, [0, 1, 2]].copy()  # drop the 4th (alpha) channel
    dat = messaging.new_message()
    dat.init('frame')
    dat.frame = {
        "frameId": image.frame,
        # BUG FIX: ndarray.tostring() is deprecated (removed in NumPy >= 1.23);
        # tobytes() returns the identical byte string.
        "image": img.tobytes(),
    }
    pm.send('frame', dat)
def imu_callback(imu):
    """Publish carla IMU readings as a two-event 'sensorEvents' message
    (accelerometer + uncalibrated gyro)."""
    #print(imu, imu.accelerometer)
    dat = messaging.new_message()
    dat.init('sensorEvents', 2)
    # Event 0: accelerometer.
    # NOTE(review): sensor=4 / type=0x10 presumably match openpilot's sensor
    # enumeration — confirm against locationd (see comment below).
    dat.sensorEvents[0].sensor = 4
    dat.sensorEvents[0].type = 0x10
    dat.sensorEvents[0].init('acceleration')
    dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
    # copied these numbers from locationd
    # Event 1: uncalibrated gyroscope.
    dat.sensorEvents[1].sensor = 5
    dat.sensorEvents[1].type = 0x10
    dat.sensorEvents[1].init('gyroUncalibrated')
    dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
    pm.send('sensorEvents', dat)
def health_function():
    """Publish a fake 'health' message at 1 Hz so openpilot believes a panda
    is connected with ignition on and controls allowed."""
    publisher = messaging.PubMaster(['health'])
    rate = Ratekeeper(1.0)
    while True:
        msg = messaging.new_message()
        msg.init('health')
        msg.valid = True
        msg.health = {
            'ignitionLine': True,
            'hwType': "whitePanda",
            'controlsAllowed': True
        }
        publisher.send('health', msg)
        rate.keep_time()
def fake_driver_monitoring():
    """Publish a 'driverState' with full face confidence at ~10 Hz so the
    driver-monitoring checks always pass."""
    publisher = messaging.PubMaster(['driverState'])
    while True:
        msg = messaging.new_message()
        msg.init('driverState')
        msg.driverState.faceProb = 1.0
        publisher.send('driverState', msg)
        time.sleep(0.1)
def go():
    """Connect to a local carla server, spawn a car with camera + IMU sensors,
    and bridge simulator data to openpilot in a 100 Hz CAN loop (never returns)."""
    import carla
    client = carla.Client("127.0.0.1", 2000)
    client.set_timeout(5.0)
    world = client.load_world('Town03')

    settings = world.get_settings()
    settings.fixed_delta_seconds = 0.05  # fixed 20 Hz simulation step
    world.apply_settings(settings)

    # Clear, windless weather for reproducible camera frames.
    weather = carla.WeatherParameters(
        cloudyness=0.0,
        precipitation=0.0,
        precipitation_deposits=0.0,
        wind_intensity=0.0,
        sun_azimuth_angle=0.0,
        sun_altitude_angle=0.0)
    world.set_weather(weather)

    blueprint_library = world.get_blueprint_library()
    """
    for blueprint in blueprint_library.filter('sensor.*'):
    print(blueprint.id)
    exit(0)
    """
    world_map = world.get_map()

    vehicle_bp = random.choice(blueprint_library.filter('vehicle.bmw.*'))
    vehicle = world.spawn_actor(vehicle_bp, random.choice(world_map.get_spawn_points()))
    #vehicle.set_autopilot(True)

    # Camera matching openpilot's expected frame size (W x H) and FOV.
    blueprint = blueprint_library.find('sensor.camera.rgb')
    blueprint.set_attribute('image_size_x', str(W))
    blueprint.set_attribute('image_size_y', str(H))
    blueprint.set_attribute('fov', '70')
    blueprint.set_attribute('sensor_tick', '0.05')
    transform = carla.Transform(carla.Location(x=0.8, z=1.45))
    camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
    camera.listen(cam_callback)

    # TODO: wait for carla 0.9.7
    imu_bp = blueprint_library.find('sensor.other.imu')
    imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
    imu.listen(imu_callback)

    def destroy():
        # Tear down spawned actors so repeated runs don't leak them on the server.
        print("clean exit")
        imu.destroy()
        camera.destroy()
        vehicle.destroy()
        print("done")
    atexit.register(destroy)

    # can loop
    sendcan = messaging.sub_sock('sendcan')
    rk = Ratekeeper(100)  # 100 Hz CAN loop
    steer_angle = 0
    while 1:
        vel = vehicle.get_velocity()
        speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)
        can_function(pm, speed, steer_angle, rk.frame, rk.frame%500 == 499)
        if rk.frame%5 == 0:  # apply control commands at 20 Hz
            throttle, brake, steer = sendcan_function(sendcan)
            steer_angle += steer/10000.0  # torque
            vc = carla.VehicleControl(throttle=throttle, steer=steer_angle, brake=brake)
            vehicle.apply_control(vc)
            print(speed, steer_angle, vc)
        rk.keep_time()
if __name__ == "__main__":
    # Mark the device as onboarded so openpilot's startup checks pass.
    params = Params()
    params.delete("Offroad_ConnectivityNeeded")
    from selfdrive.version import terms_version, training_version
    params.put("HasAcceptedTerms", terms_version)
    params.put("CompletedTrainingVersion", training_version)

    # Background publishers that fake panda health and driver monitoring.
    threading.Thread(target=health_function).start()
    threading.Thread(target=fake_driver_monitoring).start()

    # no carla, still run
    # Without carla installed, idle forever so the fake publishers keep running.
    try:
        import carla
    except ImportError:
        print("WARNING: NO CARLA")
        while 1:
            time.sleep(1)

    go()
|
temporary.py | """Temporary worker module."""
import logging
import os
import threading
import time
from typing import Optional
from celery import Celery
from celery.utils.nodenames import default_nodename
logger = logging.getLogger(__name__)
class TemporaryWorker:
    """Temporary worker that automatically shuts down when queue is empty."""

    def __init__(  # pylint: disable=too-many-arguments
        self,
        app: Celery,
        timeout: int = 60,
        concurrency: Optional[int] = None,
        loglevel: Optional[str] = None,
        task_events: bool = True,
    ):
        """Construct a worker.

        Arguments:
            app: Celery application instance.
            timeout: Queue timeout in seconds. Worker will be terminated if the
                queue remains empty after timeout.
            concurrency: Worker concurrency.
            loglevel: Worker loglevel.
            task_events: Enable worker task event monitoring.
        """
        self.app = app
        self.timeout = timeout
        self.concurrency = concurrency
        self.loglevel = loglevel or "info"
        self.task_events = task_events

    def start(self, name: str) -> None:
        """Start the worker if it does not already exist.

        Runs the Celery worker main thread in the current process.

        Arguments:
            name: Celery worker name.
        """
        if os.name == "nt":
            # see https://github.com/celery/billiard/issues/247
            os.environ["FORKED_BY_MULTIPROCESSING"] = "1"
        if self.app.control.ping(destination=[name]):
            # A worker with this name is already alive; nothing to do.
            return
        # Watchdog thread that shuts the worker down once its queue drains.
        watchdog = threading.Thread(target=self.monitor, daemon=True, args=(name,))
        watchdog.start()
        argv = [
            "worker",
            f"--loglevel={self.loglevel}",
            f"--hostname={name}",
        ]
        if self.concurrency:
            argv.append(f"--concurrency={self.concurrency}")
        if self.task_events:
            argv.append("-E")
        self.app.worker_main(argv=argv)

    def monitor(self, name: str) -> None:
        """Monitor the worker and stop it when the queue is empty."""
        logger.debug("monitor: waiting for worker to start")
        nodename = default_nodename(name)
        while not self.app.control.ping(destination=[nodename]):
            time.sleep(1)  # worker not up yet

        def _tasksets(nodes):
            # Yield every task collection (active/scheduled/reserved) per node.
            for taskset in (nodes.active(), nodes.scheduled(), nodes.reserved()):
                if taskset is not None:
                    yield from taskset.values()

        logger.info("monitor: watching celery worker '%s'", nodename)
        while self.app.control.ping(destination=[nodename]):
            time.sleep(self.timeout)
            nodes = self.app.control.inspect(  # type: ignore[call-arg]
                destination=[nodename]
            )
            if nodes is None or not any(_tasksets(nodes)):
                logger.info("monitor: shutting down due to empty queue.")
                self.app.control.shutdown(destination=[nodename])
                break
        logger.info("monitor: done")
|
PC_Miner.py | #!/usr/bin/env python3
##########################################
# Duino-Coin Python PC Miner (v2.5.1)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from hashlib import sha1
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir, remove
from os import name as osname
from platform import machine as osprocessor
from os import path, system
from pathlib import Path
from platform import system as plsystem
from re import sub
from signal import SIGINT, signal
from socket import socket
from statistics import mean
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from time import ctime, sleep, strptime, time
from multiprocessing import Lock
import pip
from zipfile import ZipFile
thread_lock = Lock()
def install(package):
    # Install *package* with pip, then restart the miner in-place so the new
    # dependency is importable.
    try:
        pip.main(["install", package])
    except AttributeError:
        # Newer pip releases removed pip.main(); fall back to a subprocess.
        check_call([sys.executable, '-m', 'pip', 'install', package])
    # Replace the current process with a fresh interpreter running the same argv.
    execl(sys.executable, sys.executable, *sys.argv)
def now():
    """Return the current local time as a ``datetime`` object."""
    return datetime.now()
# Self-installing dependency bootstrap: each optional package is imported and,
# if missing, installed via install() — which restarts the miner process.
try:
    # Check if cpuinfo is installed
    import cpuinfo
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + "Cpuinfo is not installed. "
        + "Miner will try to install it. "
        + "If it fails, please manually install \"py-cpuinfo\"."
        + "\nIf you can\'t install it, use the Minimal-PC_Miner.")
    install("py-cpuinfo")

try:
    # Check if colorama is installed
    from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + "Colorama is not installed. "
        + "Miner will try to install it. "
        + "If it fails, please manually install \"colorama\"."
        + "\nIf you can\'t install it, use the Minimal-PC_Miner.")
    install("colorama")

try:
    # Check if requests is installed
    import requests
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + "Requests is not installed. "
        + "Miner will try to install it. "
        + "If it fails, please manually install \"requests\"."
        + "\nIf you can\'t install it, use the Minimal-PC_Miner.")
    install("requests")

try:
    # Check if pypresence is installed
    from pypresence import Presence
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + "Pypresence is not installed. "
        + "Miner will try to install it. "
        + "If it fails, please manually install \"pypresence\"."
        + "\nIf you can\'t install it, use the Minimal-PC_Miner.")
    install("pypresence")

try:
    # Check if xxhash is installed
    # xxhash is optional — the miner degrades gracefully instead of installing it.
    import xxhash
    xxhash_enabled = True
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + "Xxhash is not installed - "
        + "Xxhash support will be disabled")
    xxhash_enabled = False
# Global variables
MINER_VER = "2.51" # Version number
SOC_TIMEOUT = 60 # Socket timeout
RESOURCES_DIR = "PCMiner_" + str(MINER_VER) + "_resources"
donatorrunning = False
debug = "n"
discord_presence = "y"
auto_update = "y"
# NOTE(review): name is misspelled ("identiier") — kept as-is since other
# parts of the miner likely reference this exact name.
rig_identiier = "None"
requested_diff = "NET"
algorithm = "DUCO-S1"
server_ip_file = ("https://raw.githubusercontent.com/"
                  + "revoxhere/"
                  + "duino-coin/gh-pages/"
                  + "serverip.txt") # Serverip file
config = ConfigParser()
donation_level = 0
thread = []
totalhashrate_mean = []
start_time = time()

# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
    mkdir(RESOURCES_DIR)

# Check if languages file exists
# If missing, download the translation strings from the project repository.
if not Path(RESOURCES_DIR + "/langs.json").is_file():
    url = ("https://raw.githubusercontent.com/"
           + "revoxhere/"
           + "duino-coin/master/Resources/"
           + "PC_Miner_langs.json")
    r = requests.get(url)
    with open(RESOURCES_DIR + "/langs.json", "wb") as f:
        f.write(r.content)

# Load language file
with open(RESOURCES_DIR + "/langs.json", "r", encoding="utf8") as lang_file:
    lang_file = jsonload(lang_file)  # rebinds the name to the parsed dict

# OS X invalid locale hack
if plsystem() == "Darwin":
    if getlocale()[0] is None:
        setlocale(LC_ALL, "en_US.UTF-8")

# Check if miner is configured, if it isn't, autodetect language
try:
    if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
        # First run: pick a language from the system locale prefix.
        locale = getdefaultlocale()[0]
        if locale.startswith("es"):
            lang = "spanish"
        elif locale.startswith("pl"):
            lang = "polish"
        elif locale.startswith("fr"):
            lang = "french"
        elif locale.startswith("mt"):
            lang = "maltese"
        elif locale.startswith("ru"):
            lang = "russian"
        elif locale.startswith("de"):
            lang = "german"
        elif locale.startswith("tr"):
            lang = "turkish"
        elif locale.startswith("pr"):
            lang = "portugese"
        elif locale.startswith("it"):
            lang = "italian"
        elif locale.startswith("zh"):
            lang = "chinese_simplified"
        else:
            lang = "english"
    else:
        # Read language variable from configfile
        try:
            config.read(RESOURCES_DIR + "/Miner_config.cfg")
            lang = config["Duino-Coin-PC-Miner"]["language"]
        except Exception:
            # If it fails, fallback to english
            lang = "english"
# NOTE(review): bare except — swallows even KeyboardInterrupt; deliberate
# best-effort fallback kept as-is.
except:
    lang = "english"
def getString(string_name):
    """Return the translated string for *string_name*.

    Looks up the active language first, falls back to English,
    and finally to a visible placeholder.
    """
    localized = lang_file[lang]
    if string_name in localized:
        return localized[string_name]
    if string_name in lang_file["english"]:
        return lang_file["english"][string_name]
    return "String not found: " + string_name
def debug_output(text):
    """Print a timestamped debug line, but only when debug mode is on."""
    if debug != "y":
        return
    print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
    """Set the terminal window title on Windows and ANSI terminals."""
    if osname != "nt":
        # Most standard terminals understand the OSC 0 escape sequence
        print("\33]0;" + title + "\a", end="")
        sys.stdout.flush()
    else:
        # Windows systems use the `title` console command
        system("title " + title)
def handler(signal_received, frame):
    """SIGINT handler: print a goodbye message (main process only) and exit."""
    if current_process().name == "MainProcess":
        prettyPrint(
            "sys0",
            getString("sigint_detected")
            + Style.NORMAL
            + Fore.RESET
            + getString("goodbye"),
            "warning")
    try:
        # Close previous socket connection (if any).
        # NOTE(review): `socket` appears to be the socket *class*
        # (instances are created as `soc = socket()` elsewhere), so this
        # call likely raises and is swallowed below — confirm intent.
        socket.close()
    except Exception:
        pass
    # Hard exit without running cleanup handlers
    _exit(0)
def calculate_uptime(start_time):
    """Return the elapsed time since *start_time* as ``(value, unit)``.

    *start_time* is a ``time.time()`` timestamp.  The unit is
    "hours", "minutes" or "seconds" depending on magnitude.

    Fixed: the original checked ``>= 60`` before ``>= 3600``, making
    the hours branch unreachable, and returned ``None`` for uptimes
    in the open interval (59, 60).
    """
    uptime = time() - start_time
    if uptime >= 3600:
        return round(uptime // 3600), "hours"
    elif uptime >= 60:
        return round(uptime // 60), "minutes"
    else:
        return round(uptime), "seconds"
# Install the SIGINT (Ctrl+C) handler so the miner exits cleanly
signal(SIGINT, handler)
def Greeting():
    """Print the startup banner, version/CPU/settings summary and a
    time-of-day greeting; download the donation executable if enabled."""
    global greeting
    print(Style.RESET_ALL)
    # Short difficulty name for the banner
    if requested_diff == "LOW":
        diffName = getString("low_diff_short")
    elif requested_diff == "MEDIUM":
        diffName = getString("medium_diff_short")
    else:
        diffName = getString("net_diff_short")
    current_hour = strptime(ctime(time())).tm_hour
    if current_hour < 12:
        greeting = getString("greeting_morning")
    elif current_hour == 12:
        greeting = getString("greeting_noon")
    elif current_hour > 12 and current_hour < 18:
        greeting = getString("greeting_afternoon")
    elif current_hour >= 18:
        greeting = getString("greeting_evening")
    else:
        # NOTE(review): unreachable — the branches above cover all hours
        greeting = getString("greeting_back")
    # Banner with version
    print(
        Style.DIM
        + Fore.YELLOW
        + " ‖ "
        + Fore.YELLOW
        + Style.BRIGHT
        + getString("banner")
        + Style.RESET_ALL
        + Fore.MAGENTA
        + " (v"
        + str(MINER_VER)
        + ") "
        + Fore.RESET
        + "2019-2021")
    print(
        Style.DIM
        + Fore.YELLOW
        + " ‖ "
        + Style.NORMAL
        + Fore.YELLOW
        + "https://github.com/revoxhere/duino-coin")
    # Credit the translator for non-English languages
    if lang != "english":
        print(
            Style.DIM
            + Fore.YELLOW
            + " ‖ "
            + Style.NORMAL
            + Fore.RESET
            + lang.capitalize()
            + " translation: "
            + Fore.YELLOW
            + getString("translation_autor"))
    try:
        print(
            Style.DIM
            + Fore.YELLOW
            + " ‖ "
            + Style.NORMAL
            + Fore.RESET
            + "CPU: "
            + Style.BRIGHT
            + Fore.YELLOW
            + str(threadcount)
            + "x "
            + str(cpu["brand_raw"]))
    except Exception as e:
        debug_output("Error displaying CPU message: " + str(e))
    # Donation level is only meaningful on Windows/posix
    if osname == "nt" or osname == "posix":
        print(
            Style.DIM
            + Fore.YELLOW
            + " ‖ "
            + Style.NORMAL
            + Fore.RESET
            + getString("donation_level")
            + Style.BRIGHT
            + Fore.YELLOW
            + str(donation_level))
    print(
        Style.DIM
        + Fore.YELLOW
        + " ‖ "
        + Style.NORMAL
        + Fore.RESET
        + getString("algorithm")
        + Style.BRIGHT
        + Fore.YELLOW
        + algorithm
        + " @ "
        + diffName)
    if rig_identiier != "None":
        print(
            Style.DIM
            + Fore.YELLOW
            + " ‖ "
            + Style.NORMAL
            + Fore.RESET
            + getString("rig_identifier")
            + Style.BRIGHT
            + Fore.YELLOW
            + rig_identiier)
    print(
        Style.DIM
        + Fore.YELLOW
        + " ‖ "
        + Style.NORMAL
        + Fore.RESET
        + str(greeting)
        + ", "
        + Style.BRIGHT
        + Fore.YELLOW
        + str(username)
        + "!\n")
    # Fetch the platform-specific donation executable once, if donating
    if int(donation_level) > 0:
        if osname == "nt":
            if not Path(RESOURCES_DIR + "/Donate_executable.exe").is_file():
                url = ("https://github.com/revoxhere/"
                       + "duino-coin/blob/useful-tools/Donate_executables/"
                       + "DonateExecutableWindows.exe?raw=true")
                r = requests.get(url)
                with open(RESOURCES_DIR + "/Donate_executable.exe", "wb") as f:
                    f.write(r.content)
        elif osname == "posix":
            # Pick the binary matching the CPU architecture
            if osprocessor() == "aarch64":
                url = ("https://github.com/revoxhere/"
                       + "duino-coin/blob/useful-tools/Donate_executables/"
                       + "DonateExecutableAARCH64?raw=true")
            elif osprocessor() == "armv7l":
                url = ("https://github.com/revoxhere/"
                       + "duino-coin/blob/useful-tools/Donate_executables/"
                       + "DonateExecutableAARCH32?raw=true")
            else:
                url = ("https://github.com/revoxhere/"
                       + "duino-coin/blob/useful-tools/Donate_executables/"
                       + "DonateExecutableLinux?raw=true")
            if not Path(RESOURCES_DIR + "/Donate_executable").is_file():
                r = requests.get(url)
                with open(RESOURCES_DIR + "/Donate_executable", "wb") as f:
                    f.write(r.content)
def loadConfig():
    """Load Miner_config.cfg, or create it interactively on first run.

    Sets the module-level configuration globals.  After loading,
    `efficiency` is converted from a 1-100 percentage into a 0.0-0.99
    per-iteration sleep factor used by the hashing loops.
    """
    global username
    global efficiency
    global donation_level
    global debug
    global threadcount
    global requested_diff
    global rig_identiier
    global lang
    global algorithm
    global auto_update
    global discord_presence
    # Initial configuration (first run: no config file yet)
    if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
        print(
            Style.BRIGHT
            + getString("basic_config_tool")
            + RESOURCES_DIR
            + getString("edit_config_file_warning"))
        print(
            Style.RESET_ALL
            + getString("dont_have_account")
            + Fore.YELLOW
            + getString("wallet")
            + Fore.RESET
            + getString("register_warning"))
        username = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_username")
            + Fore.RESET
            + Style.BRIGHT)
        # Offer XXHASH only when the xxhash package is available
        if xxhash_enabled:
            print(
                Style.RESET_ALL
                + Style.BRIGHT
                + Fore.RESET
                + "1"
                + Style.NORMAL
                + " - DUCO-S1 ("
                + getString("recommended")
                + ")")
            print(
                Style.RESET_ALL
                + Style.BRIGHT
                + Fore.RESET
                + "2"
                + Style.NORMAL
                + " - XXHASH")
            algorithm = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_algorithm")
                + Fore.RESET
                + Style.BRIGHT)
        else:
            algorithm = "1"
        efficiency = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_intensity")
            + Fore.RESET
            + Style.BRIGHT)
        threadcount = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_threads")
            + str(cpu_count())
            + "): "
            + Fore.RESET
            + Style.BRIGHT)
        print(
            Style.RESET_ALL
            + Style.BRIGHT
            + Fore.RESET
            + "1"
            + Style.NORMAL
            + " - "
            + getString("low_diff"))
        print(
            Style.RESET_ALL
            + Style.BRIGHT
            + Fore.RESET
            + "2"
            + Style.NORMAL
            + " - "
            + getString("medium_diff"))
        print(
            Style.RESET_ALL
            + Style.BRIGHT
            + Fore.RESET
            + "3"
            + Style.NORMAL
            + " - "
            + getString("net_diff"))
        requested_diff = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_difficulty")
            + Fore.RESET
            + Style.BRIGHT)
        rig_identiier = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_rig_identifier")
            + Fore.RESET
            + Style.BRIGHT)
        if rig_identiier == "y" or rig_identiier == "Y":
            rig_identiier = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_rig_name")
                + Fore.RESET
                + Style.BRIGHT)
        else:
            rig_identiier = "None"
        donation_level = "0"
        if osname == "nt" or osname == "posix":
            donation_level = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_donation_level")
                + Fore.RESET
                + Style.BRIGHT)
        # Sanitize and clamp efficiency to [1, 100] (default 95)
        efficiency = sub(r"\D", "", efficiency)
        if efficiency == "":
            efficiency = 95
        elif float(efficiency) > int(100):
            efficiency = 100
        elif float(efficiency) < int(1):
            efficiency = 1
        # Sanitize and clamp thread count to [1, 8] (default: all cores)
        threadcount = sub(r"\D", "", threadcount)
        if threadcount == "":
            threadcount = cpu_count()
        elif int(threadcount) > int(8):
            threadcount = 8
            print(
                Style.RESET_ALL
                + Style.BRIGHT
                + getString("max_threads_notice"))
        elif int(threadcount) < int(1):
            threadcount = 1
        # Map menu choice to algorithm name
        if algorithm == "2":
            algorithm = "XXHASH"
        else:
            algorithm = "DUCO-S1"
        # Map menu choice to difficulty name
        if requested_diff == "1":
            requested_diff = "LOW"
        elif requested_diff == "2":
            requested_diff = "MEDIUM"
        else:
            requested_diff = "NET"
        # Sanitize and clamp donation level to [0, 5] (default 1)
        donation_level = sub(r"\D", "", donation_level)
        if donation_level == "":
            donation_level = 1
        elif float(donation_level) > int(5):
            donation_level = 5
        elif float(donation_level) < int(0):
            donation_level = 0
        # Format data
        config["Duino-Coin-PC-Miner"] = {
            "username": username,
            "efficiency": efficiency,
            "threads": threadcount,
            "requested_diff": requested_diff,
            "donate": donation_level,
            "identifier": rig_identiier,
            "algorithm": algorithm,
            "language": lang,
            "debug": "n",
            "soc_timeout": 60,
            "discord_presence": "y",
            "auto_update": "y"
        }
        # Write data to configfile
        with open(RESOURCES_DIR + "/Miner_config.cfg", "w") as configfile:
            config.write(configfile)
        # Calculate efficiency for later use with sleep function
        efficiency = (100 - float(efficiency)) * 0.01
        print(Style.RESET_ALL + getString("config_saved"))
    else:
        # If config already exists, load data from it
        config.read(RESOURCES_DIR + "/Miner_config.cfg")
        username = config["Duino-Coin-PC-Miner"]["username"]
        efficiency = config["Duino-Coin-PC-Miner"]["efficiency"]
        threadcount = config["Duino-Coin-PC-Miner"]["threads"]
        requested_diff = config["Duino-Coin-PC-Miner"]["requested_diff"]
        donation_level = config["Duino-Coin-PC-Miner"]["donate"]
        algorithm = config["Duino-Coin-PC-Miner"]["algorithm"]
        rig_identiier = config["Duino-Coin-PC-Miner"]["identifier"]
        debug = config["Duino-Coin-PC-Miner"]["debug"]
        # NOTE(review): SOC_TIMEOUT is not declared global here, so this
        # assignment only creates a local and the module default (60)
        # stays in effect — confirm whether that is intended.
        SOC_TIMEOUT = config["Duino-Coin-PC-Miner"]["soc_timeout"]
        discord_presence = config["Duino-Coin-PC-Miner"]["discord_presence"]
        auto_update = config["Duino-Coin-PC-Miner"]["auto_update"]
        # Calculate efficiency for use with sleep function
        efficiency = (100 - float(efficiency)) * 0.01
def Donate():
    """Launch the bundled donation miner subprocess, or nag if donations
    are disabled.  Intensity flag is derived from donation_level."""
    global donation_level
    global donatorrunning
    global donateExecutable
    # NOTE(review): on platforms that are neither "nt" nor "posix",
    # `cmd` stays undefined — confirm those platforms never reach the
    # `cmd +=` path below.
    if osname == "nt":
        cmd = (
            "cd "
            + RESOURCES_DIR
            + "& Donate_executable.exe "
            + "-o stratum+tcp://xmg.minerclaim.net:3333 "
            + "-u revox.donate "
            + "-p x -s 4 -e ")
    elif osname == "posix":
        cmd = (
            "cd "
            + RESOURCES_DIR
            + "&& chmod +x Donate_executable "
            + "&& ./Donate_executable "
            + "-o stratum+tcp://xmg.minerclaim.net:3333 "
            + "-u revox.donate "
            + "-p x -s 4 -e ")
    if int(donation_level) <= 0:
        # Donations disabled: show a reminder and pause briefly
        prettyPrint(
            "sys0",
            Fore.YELLOW
            + getString("free_network_warning")
            + getString("donate_warning")
            + Fore.GREEN
            + "https://duinocoin.com/donate"
            + Fore.YELLOW
            + getString("learn_more_donate"),
            "warning")
        sleep(10)
    elif donatorrunning == False:
        # Map donation level (1-5) to the executable's intensity flag
        if int(donation_level) == 5:
            cmd += "80"
        elif int(donation_level) == 4:
            cmd += "60"
        elif int(donation_level) == 3:
            cmd += "40"
        elif int(donation_level) == 2:
            cmd += "20"
        elif int(donation_level) == 1:
            cmd += "10"
        if int(donation_level) > 0:
            debug_output(getString("starting_donation"))
            donatorrunning = True
            # Launch CMD as subprocess
            donateExecutable = Popen(
                cmd, shell=True, stderr=DEVNULL)
            prettyPrint(
                "sys0",
                getString("thanks_donation"),
                "warning")
def update():
    """Self-update PC_Miner.py (or the .exe build) from GitHub master."""
    Miner_URL = "https://raw.githubusercontent.com/revoxhere/duino-coin/master/PC_Miner.py"
    request = requests.get(Miner_URL)
    miner_latest_ver = ""
    # NOTE(review): the remote version string is read from fixed
    # character offsets (99-103) of the downloaded source — fragile if
    # the upstream file header changes; confirm offsets against it.
    if request.text[102] != ")":
        if request.text[102] == ".":
            miner_latest_ver = request.text[99]+request.text[100]+request.text[101]+request.text[102]+request.text[103]
        else:
            miner_latest_ver = request.text[99]+request.text[100]+request.text[101]+request.text[102]
    else:
        miner_latest_ver = request.text[99]+request.text[100]+request.text[101]
    if not Path("PC_Miner.py").is_file():
        # Running from the frozen .exe build: delegate to updateEXE
        if Path("PC_Miner.exe").is_file():
            updateEXE(miner_latest_ver)
        else:
            return
    if MINER_VER != miner_latest_ver:
        print("Updating miner...")
        with open("PC_Miner.py", "wb") as f1:
            f1.write(request.content)
        print("PC miner successfully updated.")
        # Exit so the user restarts with the new version
        _exit(0)
def updateEXE(ver):
    """Replace PC_Miner.exe with the GitHub release matching *ver*."""
    if Path("PC_Miner.exe").is_file():
        if ver[3] == ".":
            ver2 = ver[0]+"."+ver[2]+ver[4]  # converts 2.5.1 to 2.51 if needed
        else:
            ver2 = ver
        if ver2 == MINER_VER:
            # Already up to date
            return
        newest_release_windows = ("https://github.com/"
                                  + "revoxhere/"
                                  + "duino-coin/releases/download/"+ver+"/Duino-Coin_"+ver+"_windows.zip")
        request2 = requests.get(newest_release_windows)
        if request2.text == "Not Found":
            # No release asset published for this version
            return
        with open('duino-zip.zip', 'wb') as f2:
            f2.write(request2.content)  # download zip file
        remove("PC_Miner.exe")
        with ZipFile('duino-zip.zip', 'r') as zip_file:
            zip_file.extract('PC_Miner.exe')  # extract PC miner from zip file
        remove("duino-zip.zip")
        print("Miner successfully updated.")
        # Exit so the user restarts with the new binary
        _exit(0)
    else:
        return
def ducos1(
        lastBlockHash,
        expectedHash,
        difficulty,
        efficiency):
    """Solve a DUCO-S1 job: find the nonce whose SHA1(prefix + nonce)
    matches *expectedHash*.

    Returns ``[nonce, hashrate]`` on success, or ``None`` when no nonce
    in ``0..100*difficulty`` matches.  A nonzero *efficiency* inserts
    periodic sleeps to lower CPU usage.
    """
    started = time()
    # Hash of the previous block; each candidate extends a copy of it
    prefix_hash = sha1(str(lastBlockHash).encode('ascii'))
    for nonce in range(100 * int(difficulty) + 1):
        # Throttle every millionth iteration when efficiency < 100%
        if nonce % 1000000 == 0 and float(100 - efficiency * 100) < 100:
            sleep(float(efficiency))
        attempt = prefix_hash.copy()
        attempt.update(str(nonce).encode('ascii'))
        if attempt.hexdigest() == expectedHash:
            elapsed = time() - started
            return [nonce, nonce / elapsed]
def ducos1xxh(
        lastBlockHash,
        expectedHash,
        difficulty,
        efficiency):
    """Solve an XXHASH job: find the nonce whose seeded xxh64 of
    (block hash + nonce) matches *expectedHash*.

    Returns ``[nonce, hashrate]`` on success, or ``None`` when no nonce
    in ``0..100*difficulty`` matches.  A nonzero *efficiency* inserts
    periodic sleeps to lower CPU usage.
    """
    started = time()
    for nonce in range(100 * int(difficulty) + 1):
        # Throttle every millionth iteration when efficiency < 100%
        if nonce % 1000000 == 0 and float(100 - efficiency * 100) < 100:
            sleep(float(efficiency))
        digest = xxhash.xxh64(
            str(lastBlockHash) + str(nonce), seed=2811).hexdigest()
        if digest == expectedHash:
            elapsed = time() - started
            return [nonce, nonce / elapsed]
def Thread(
        threadid,
        accepted,
        rejected,
        requested_diff,
        khashcount,
        username,
        efficiency,
        rig_identiier,
        algorithm,
        hashrates_list,
        totalhashrate_mean):
    """Mining worker body, run in its own process per CPU thread.

    Loop: resolve the pool address from GitHub, connect, request jobs,
    solve them with DUCO-S1 or XXHASH, submit the result and print
    accepted/rejected feedback.  `accepted`/`rejected` are shared
    multiprocessing Values; `hashrates_list`/`totalhashrate_mean` are
    manager-backed containers shared across workers.
    """
    while True:
        # Grab server IP and port
        while True:
            try:
                # Use request to grab data from raw github file
                res = requests.get(server_ip_file, data=None)
                if res.status_code == 200:
                    # Read content and split into lines
                    content = (res.content.decode().splitlines())
                    # Line 1 = IP
                    masterServer_address = content[0]
                    # Line 2 = port (currently hard-coded instead)
                    masterServer_port = 2813  # content[1]
                    debug_output(
                        "Retrieved pool IP: "
                        + masterServer_address
                        + ":"
                        + str(masterServer_port))
                    break
            except Exception as e:
                # If there was an error with grabbing data from GitHub
                prettyPrint(
                    "net"
                    + str(threadid),
                    getString("data_error")
                    + Style.NORMAL
                    + Fore.RESET
                    + " (git err: "
                    + str(e)
                    + ")",
                    "error")
                debug_output("GitHub error: " + str(e))
                sleep(10)
        # Connect to the server
        while True:
            try:
                soc = socket()
                # Establish socket connection to the server
                soc.connect((str(masterServer_address),
                             int(masterServer_port)))
                soc.settimeout(SOC_TIMEOUT)
                serverVersion = soc.recv(3).decode().rstrip(
                    "\n")  # Get server version
                debug_output("Server version: " + serverVersion)
                if threadid == 0:
                    # Only the first worker fetches the message of the day
                    soc.send(bytes("MOTD", encoding="utf8"))
                    motd = soc.recv(1024).decode().rstrip("\n")
                    prettyPrint("net" + str(threadid),
                                " Server message: " + motd,
                                "warning")
                if float(serverVersion) <= float(MINER_VER):
                    # If miner is up-to-date, display a message and continue
                    prettyPrint(
                        "net"
                        + str(threadid),
                        getString("connected")
                        + Fore.RESET
                        + Style.NORMAL
                        + getString("connected_server")
                        + str(serverVersion)
                        + ")",
                        "success")
                    break
                else:
                    # Miner is outdated
                    prettyPrint(
                        "sys"
                        + str(threadid),
                        getString("outdated_miner")
                        + MINER_VER
                        + ") -"
                        + getString("server_is_on_version")
                        + serverVersion
                        + Style.NORMAL
                        + Fore.RESET
                        + getString("update_warning"),
                        "warning")
                    update()
                    break
            except Exception as e:
                # Socket connection error
                prettyPrint(
                    "net"
                    + str(threadid),
                    getString("connecting_error")
                    + Style.NORMAL
                    + Fore.RESET
                    + " (net err: "
                    + str(e)
                    + ")",
                    "error")
                debug_output("Connection error: " + str(e))
                sleep(10)
        if algorithm == "XXHASH":
            using_algo = getString("using_algo_xxh")
        else:
            using_algo = getString("using_algo")
        prettyPrint(
            "sys"
            + str(threadid),
            getString("mining_thread")
            + str(threadid)
            + getString("mining_thread_starting")
            + Style.NORMAL
            + Fore.RESET
            + using_algo
            + Fore.YELLOW
            + str(int(100 - efficiency * 100))
            + "% "
            + getString("efficiency"),
            "success")
        # Mining section
        while True:
            try:
                while True:
                    # Ask the server for job
                    if algorithm == "XXHASH":
                        soc.sendall(bytes(
                            "JOBXX,"
                            + str(username)
                            + ","
                            + str(requested_diff),
                            encoding="utf8"))
                    else:
                        soc.sendall(bytes(
                            "JOB,"
                            + str(username)
                            + ","
                            + str(requested_diff),
                            encoding="utf8"))
                    # Retrieve work
                    job = soc.recv(128).decode().rstrip("\n")
                    job = job.split(",")
                    debug_output("Received: " + str(job))
                    try:
                        diff = int(job[2])
                        debug_output(str(threadid) +
                                     "Correct job received")
                        break
                    except:
                        # Server sent a text message instead of a job
                        prettyPrint("cpu" + str(threadid),
                                    " Server message: "
                                    + job[1],
                                    "warning")
                        sleep(3)
                while True:
                    computetimeStart = time()
                    if algorithm == "XXHASH":
                        algo_back_color = Back.CYAN
                        result = ducos1xxh(job[0], job[1], diff, efficiency)
                    else:
                        algo_back_color = Back.YELLOW
                        result = ducos1(job[0], job[1], diff, efficiency)
                    computetimeStop = time()
                    computetime = computetimeStop - computetimeStart
                    debug_output("Thread "
                                 + str(threadid)
                                 + ": result found: "
                                 + str(result[0]))
                    # Convert to kH/s
                    threadhashcount = int(result[1] / 1000)
                    # Add this thread's hash counter
                    # to the global hashrate counter
                    hashrates_list[threadid] = threadhashcount
                    # Calculate total hashrate of all threads
                    sharehashrate = 0
                    for thread in hashrates_list.keys():
                        sharehashrate += hashrates_list[thread]
                    totalhashrate_mean.append(sharehashrate)
                    # Get average from the last 20 hashrate measurements
                    totalhashrate = mean(totalhashrate_mean[-20:])
                    while True:
                        # Send result of hashing algorithm to the server
                        soc.sendall(bytes(
                            str(result[0])
                            + ","
                            + str(result[1])
                            + ","
                            + "Official PC Miner ("
                            + str(algorithm)
                            + ") v"
                            + str(MINER_VER)
                            + ","
                            + str(rig_identiier),
                            encoding="utf8"))
                        responsetimetart = now()
                        feedback = soc.recv(64).decode().rstrip("\n")
                        responsetimestop = now()
                        # Round-trip time in milliseconds
                        ping = int((responsetimestop - responsetimetart
                                    ).microseconds / 1000)
                        debug_output("Thread "
                                     + str(threadid)
                                     + ": Feedback received: "
                                     + str(feedback)
                                     + " Ping: "
                                     + str(ping))
                        if totalhashrate > 800:
                            # Format hashcount to MH/s
                            formattedhashcount = str(
                                "%03.2f" % round(totalhashrate / 1000, 2)
                                + " MH/s")
                        elif totalhashrate > 100:
                            # Format for >100 kH/s
                            formattedhashcount = str(
                                "%03.0f" % float(totalhashrate)
                                + " kH/s")
                        else:
                            # Format for small hashrates
                            formattedhashcount = str(
                                "%02.1f" % float(totalhashrate)
                                + " kH/s")
                        if (totalhashrate > 1500
                                and accepted.value % 50 == 0):
                            prettyPrint("sys0",
                                        " " + getString("max_hashrate_notice"),
                                        "warning")
                        uptime, uptime_type = calculate_uptime(start_time)
                        if feedback == "GOOD":
                            # If result was correct
                            accepted.value += 1
                            title(
                                getString("duco_python_miner")
                                + str(MINER_VER)
                                + ") - "
                                + str(accepted.value)
                                + "/"
                                + str(accepted.value + rejected.value)
                                + getString("accepted_shares"))
                            with thread_lock:
                                print(
                                    Style.RESET_ALL
                                    + Fore.WHITE
                                    + now().strftime(Style.DIM + "%H:%M:%S ")
                                    + Style.BRIGHT
                                    + algo_back_color
                                    + Fore.RESET
                                    + " cpu"
                                    + str(threadid)
                                    + " "
                                    + Back.RESET
                                    + Fore.GREEN
                                    + " ✓"
                                    + getString("accepted")
                                    + Fore.RESET
                                    + str(int(accepted.value))
                                    + "/"
                                    + str(int(accepted.value + rejected.value))
                                    + Fore.YELLOW
                                    + " ("
                                    + str(int(
                                        (accepted.value
                                         / (accepted.value + rejected.value)
                                         * 100)))
                                    + "%)"
                                    + Style.NORMAL
                                    + Fore.RESET
                                    + " ∙ "
                                    + str("%05.2f" % float(computetime))
                                    + "s"
                                    + Style.NORMAL
                                    + " ∙ "
                                    + Fore.BLUE
                                    + Style.BRIGHT
                                    + str(formattedhashcount)
                                    + Fore.RESET
                                    + Style.NORMAL
                                    + " @ diff "
                                    + str(diff)
                                    + "∙ "
                                    + Fore.CYAN
                                    + "ping "
                                    + str("%02.0f" % int(ping))
                                    + "ms. ∙"
                                    + " uptime "
                                    + str(uptime)
                                    + " "
                                    + uptime_type)
                        elif feedback == "BLOCK":
                            # If block was found
                            accepted.value += 1
                            title(
                                getString("duco_python_miner")
                                + str(MINER_VER)
                                + ") - "
                                + str(accepted.value)
                                + "/"
                                + str(accepted.value + rejected.value)
                                + getString("accepted_shares"))
                            with thread_lock:
                                print(
                                    Style.RESET_ALL
                                    + Fore.WHITE
                                    + now().strftime(Style.DIM + "%H:%M:%S ")
                                    + Style.BRIGHT
                                    + algo_back_color
                                    + Fore.RESET
                                    + " cpu"
                                    + str(threadid)
                                    + " "
                                    + Back.RESET
                                    + Fore.CYAN
                                    + " ✓"
                                    + getString("block_found")
                                    + Fore.RESET
                                    + str(accepted.value)
                                    + "/"
                                    + str(accepted.value + rejected.value)
                                    + Fore.YELLOW
                                    + " ("
                                    + str(int(
                                        (accepted.value
                                         / (accepted.value + rejected.value)
                                         * 100)))
                                    + "%)"
                                    + Style.NORMAL
                                    + Fore.RESET
                                    + " ∙ "
                                    + str("%05.2f" % float(computetime))
                                    + "s"
                                    + Style.NORMAL
                                    + " ∙ "
                                    + Fore.BLUE
                                    + Style.BRIGHT
                                    + str(formattedhashcount)
                                    + Fore.RESET
                                    + Style.NORMAL
                                    + " @ diff "
                                    + str(diff)
                                    + " ∙ "
                                    + Fore.CYAN
                                    + "ping "
                                    + str("%02.0f" % int(ping))
                                    + "ms")
                        else:
                            # If result was incorrect
                            rejected.value += 1
                            title(
                                getString("duco_python_miner")
                                + str(MINER_VER)
                                + ") - "
                                + str(accepted.value)
                                + "/"
                                + str(accepted.value + rejected.value)
                                + getString("accepted_shares"))
                            with thread_lock:
                                print(
                                    Style.RESET_ALL
                                    + Fore.WHITE
                                    + now().strftime(Style.DIM + "%H:%M:%S ")
                                    + Style.BRIGHT
                                    + algo_back_color
                                    + Fore.RESET
                                    + " cpu"
                                    + str(threadid)
                                    + " "
                                    + Style.BRIGHT
                                    + Back.RESET
                                    + Fore.RED
                                    + " ✗"
                                    + getString("rejected")
                                    + Fore.RESET
                                    + str(accepted.value)
                                    + "/"
                                    + str(accepted.value + rejected.value)
                                    + Fore.YELLOW
                                    + " ("
                                    + str(int(
                                        (accepted.value
                                         / (accepted.value + rejected.value)
                                         * 100)))
                                    + "%)"
                                    + Style.NORMAL
                                    + Fore.RESET
                                    + " ∙ "
                                    + str("%05.2f" % float(computetime))
                                    + "s"
                                    + Style.NORMAL
                                    + " ∙ "
                                    + Fore.BLUE
                                    + Style.BRIGHT
                                    + str(formattedhashcount)
                                    + Fore.RESET
                                    + Style.NORMAL
                                    + " @ diff "
                                    + str(diff)
                                    + " ∙ "
                                    + Fore.CYAN
                                    + "ping "
                                    + str("%02.0f" % int(ping))
                                    + "ms")
                        break
                    break
            except Exception as e:
                prettyPrint(
                    "net"
                    + str(threadid),
                    getString("error_while_mining")
                    + Style.NORMAL
                    + Fore.RESET
                    + " (mining err: "
                    + str(e)
                    + ")",
                    "error")
                debug_output("Error while mining: " + str(e))
                sleep(5)
                # Drop back to the outer loop to reconnect
                break
def prettyPrint(messageType, message, state):
    """Print an output message in the DUCO "standard" format.

    messageType: "netN" / "cpuN" / "sysN" — prefix selects the badge
    background colour.
    state: "success", "warning", or anything else (rendered red).

    Fixed: the original used a bare ``if`` for the "sys" branch and left
    ``background`` undefined (NameError) for unrecognized prefixes; the
    chain is now exclusive with a safe default.
    """
    # Usb/net/sys background
    if messageType.startswith("net"):
        background = Back.BLUE
    elif messageType.startswith("cpu"):
        background = Back.YELLOW
    elif messageType.startswith("sys"):
        background = Back.GREEN
    else:
        background = Back.GREEN  # safe default instead of NameError
    # Text color
    if state == "success":
        color = Fore.GREEN
    elif state == "warning":
        color = Fore.YELLOW
    else:
        color = Fore.RED
    with thread_lock:
        print(Style.RESET_ALL
              + Fore.WHITE
              + now().strftime(Style.DIM + "%H:%M:%S ")
              + Style.BRIGHT
              + background
              + " "
              + messageType
              + " "
              + Back.RESET
              + color
              + Style.BRIGHT
              + message
              + Style.NORMAL
              + Fore.RESET)
def initRichPresence():
    """Connect to the Discord Rich Presence client (best-effort)."""
    global RPC
    try:
        RPC = Presence(808045598447632384)
        RPC.connect()
        debug_output("Discord rich presence initialized")
    except Exception as e:
        # Discord not launched — rich presence is optional
        debug_output("Error launching Discord RPC thread: " + str(e))
def updateRichPresence():
    """Periodically push hashrate and share stats to Discord RPC.

    Runs forever; failures (e.g. Discord not running) are logged and
    retried on the next cycle.
    """
    startTime = int(time())
    while True:
        try:
            # Calculate average total hashrate with prefix
            totalhashrate = mean(totalhashrate_mean[-20:])
            if totalhashrate > 800:
                totalhashrate = str(round(totalhashrate / 1000, 2)) + " MH/s"
            else:
                totalhashrate = str(round(totalhashrate, 1)) + " kH/s"
            RPC.update(
                details="Hashrate: " + str(totalhashrate),
                start=startTime,
                state="Acc. shares: "
                + str(accepted.value)
                + "/"
                + str(rejected.value + accepted.value),
                large_image="ducol",
                large_text="Duino-Coin, "
                + "a coin that can be mined with almost everything, "
                + "including AVR boards",
                buttons=[
                    {"label": "Learn more",
                     "url": "https://duinocoin.com"},
                    {"label": "Discord Server",
                     "url": "https://discord.gg/k48Ht5y"}])
            debug_output("Rich presence updated")
        except Exception as e:
            # Discord not launched
            debug_output("Error launching Discord RPC thread: " + str(e))
        sleep(15)  # 15 seconds to respect Discord rate limit
if __name__ == "__main__":
    # Entry point: set up shared state, load config, then launch one
    # mining process per configured thread plus the Discord RPC thread.
    from multiprocessing import freeze_support
    freeze_support()  # required for frozen (PyInstaller) Windows builds
    # Processor info
    cpu = cpuinfo.get_cpu_info()
    # Colorama
    init(autoreset=True)
    title(getString("duco_python_miner") + str(MINER_VER) + ")")
    try:
        from multiprocessing import (
            Manager,
            Process,
            Value,
            cpu_count,
            current_process
        )
        manager = Manager()
        # Multiprocessing globals shared with the worker processes
        khashcount = Value("i", 0)
        accepted = Value("i", 0)
        rejected = Value("i", 0)
        hashrates_list = manager.dict()
        totalhashrate_mean = manager.list()
    except Exception as e:
        print(e)
        prettyPrint(
            "sys0",
            " Multiprocessing is not available. "
            + "Please check permissions and/or your python installation. "
            + "Exiting in 10s.",
            "error")
        sleep(10)
        _exit(1)
    try:
        # Load config file or create new one
        loadConfig()
        debug_output("Config file loaded")
    except Exception as e:
        prettyPrint(
            "sys0",
            getString("load_config_error")
            + RESOURCES_DIR
            + getString("load_config_error_warning")
            + Style.NORMAL
            + Fore.RESET
            + " (config load err: "
            + str(e)
            + ")",
            "error")
        debug_output("Error reading configfile: " + str(e))
        sleep(10)
        _exit(1)
    if auto_update == "y":
        try:
            update()
        except Exception as e:
            debug_output('Error updating miner: ' + str(e))
    try:
        # Display greeting message
        Greeting()
        debug_output("Greeting displayed")
    except Exception as e:
        prettyPrint(
            "sys0",
            "Error displaying greeting message"
            + Style.NORMAL
            + Fore.RESET
            + " (greeting err: "
            + str(e)
            + ")",
            "error")
        debug_output("Error displaying greeting message: " + str(e))
    try:
        # Start donation thread
        Donate()
    except Exception as e:
        debug_output("Error launching donation thread: " + str(e))
    try:
        for x in range(int(threadcount)):
            # Launch duco mining processes (one per thread)
            thread.append(x)
            thread[x] = Process(
                target=Thread,
                args=(
                    x,
                    accepted,
                    rejected,
                    requested_diff,
                    khashcount,
                    username,
                    efficiency,
                    rig_identiier,
                    algorithm,
                    hashrates_list,
                    totalhashrate_mean))
            thread[x].start()
    except Exception as e:
        prettyPrint(
            "sys0",
            "Error launching CPU thread(s)"
            + Style.NORMAL
            + Fore.RESET
            + " (cpu launch err: "
            + str(e)
            + ")",
            "error")
        debug_output("Error launching CPU thead(s): " + str(e))
    if discord_presence == "y":
        try:
            # Discord rich presence threads
            initRichPresence()
            thrThread(
                target=updateRichPresence).start()
        except Exception as e:
            debug_output("Error launching Discord RPC thead: " + str(e))
|
views.py | from django.shortcuts import render, redirect
from django.shortcuts import render, redirect
from datasets.models import Dataset, Modality, Term
from django.template import RequestContext, Context, loader
from django.http import (
HttpResponse,
HttpResponseNotFound,
HttpResponseForbidden)
from models.models import ArtmModel, Topic, TopicRelated, TopicInTopic, TopTerm
from django.contrib.auth.decorators import login_required, permission_required
import visartm.views as general_views
import traceback
from django.conf import settings
import os
from threading import Thread
from datetime import datetime
import numpy as np
import json
from models.bigartm_config import REGULARIZERS
def models_list(request):
    """Render the list of models visible to the current user.

    Shows the user's own models plus all public models; anonymous
    users (whose `request.user` cannot be used as an author filter)
    see public models only.

    Fixed: narrowed ``except BaseException`` to ``except Exception``
    so KeyboardInterrupt/SystemExit are not swallowed.
    """
    try:
        models = (
            ArtmModel.objects.filter(
                author=request.user).order_by("id") | ArtmModel.objects.filter(
                is_public=True))
    except Exception:
        # Fallback for anonymous users
        models = ArtmModel.objects.filter(is_public=True)
    context = {"models": models}
    return render(request, 'models/models_list.html', context)
def visual_model(request):
    """Display a trained model, or the appropriate status page.

    Status handling: 1/11 -> wait page, 2 -> error page with
    delete/reload links, 3 -> redirect to model settings.  With a
    ``matrices`` GET parameter, dumps the head of the phi/theta
    matrices as raw HTML for debugging instead of rendering.

    Fixed: removed an unused ``template = loader.get_template(...)``
    local that was never rendered.
    """
    model = ArtmModel.objects.get(id=request.GET['model'])
    if model.status != 0:
        if model.status == 1 or model.status == 11:
            return general_views.wait(
                request, model.read_log(), model.creation_time)
        if model.status == 2:
            error_message = model.error_message.replace('\n', "<br>")
            return general_views.message(
                request, ("Model is bad. Error occured.<br>%s<br>"
                          "<a href='/models/delete_model?model=%d'>"
                          "Delete this model</a><br>"
                          "<a href='/models/reload_model?model=%d'>"
                          "Reload this model</a><br>") %
                (error_message, model.id, model.id))
        if model.status == 3:
            return redirect('/models/settings?model_id=%d' % model.id)
    if 'matrices' in request.GET:
        # Debug view: dump the heads of the phi and theta matrices
        try:
            head = int(request.GET['matrices'])
        except BaseException:
            head = 10
        import pandas as pd
        ret = ""
        ret += "MATRIX PHI<br>"
        ret += pd.read_pickle(os.path.join(model.get_folder(),
                                           "phi"))[0:head].to_html() + "<br>"
        phi = model.get_phi()
        for i in range(head):
            for j in range(phi.shape[1]):
                ret += ("%.05f " % phi[i][j])
            ret += "<br>"
        ret += "<br><br><br>MATRIX THETA<br>"
        ret += pd.read_pickle(os.path.join(model.get_folder(),
                                           "theta"))[0:head].to_html()
        theta = model.get_theta()
        for i in range(min(theta.shape[0], head)):
            for j in range(min(theta.shape[1], head)):
                ret += ("%.02e " % theta[i][j])
            ret += "<br>"
        return HttpResponse(ret)
    topics_count = model.topics_count.split()
    topics = Topic.objects.filter(model=model)
    topics_layers = [
        {"i": i + 1,
         "topics_count": topics_count[i + 1],
         "topics": topics.filter(layer=i + 1).order_by("spectrum_index")}
        for i in range(0, model.layers_count)
    ]
    # NOTE(review): render() requires a plain dict on Django >= 1.10;
    # passing a Context object only works on old versions — confirm.
    context = Context({'model': model, 'topics_layers': topics_layers})
    from algo.arranging.metrics import metrics_list
    context["metrics"] = metrics_list
    return render(request, 'models/model.html', context)
def model_log(request):
    """Return the raw training log of the requested model as plain HTTP."""
    requested_model = ArtmModel.objects.get(id=request.GET['model_id'])
    return HttpResponse(requested_model.read_log())
@login_required
def reload_model(request):
    """Relaunch model loading in a background daemon thread.

    Accepts the model id in either the ``model`` or legacy ``id`` GET
    parameter.  Author-only; refuses if the model is already locked
    (status 1 = loading/training in progress).
    """
    try:
        model = ArtmModel.objects.get(id=request.GET['model'])
    except BaseException:
        # Fallback to the legacy parameter name
        model = ArtmModel.objects.get(id=request.GET['id'])
    if request.user != model.author:
        return HttpResponseForbidden("You are not the author")
    if model.status == 1:
        return general_views.message(request, "Model is locked.")
    # Mark the model busy before starting the worker
    model.creation_time = datetime.now()
    model.status = 1
    model.save()
    model.prepare_log()
    t = Thread(target=ArtmModel.reload_untrusted, args=(model, ), daemon=True)
    t.start()
    return redirect("/model?model=" + str(model.id))
@login_required
def arrange_topics(request):
    """Launch topic arrangement for a model (author only).

    ``mode`` is required; ``metric`` defaults to "default".  Runs in a
    daemon thread when settings.THREADING is on, synchronously
    otherwise.  Status 11 marks the model as busy arranging.
    """
    model = ArtmModel.objects.get(id=request.GET['model'])
    if request.user != model.author:
        return HttpResponseForbidden("You are not the author")
    if model.status != 0:
        return general_views.message(request, "Model is locked.")
    # Mark the model busy before starting the worker
    model.creation_time = datetime.now()
    model.status = 11
    model.save()
    model.prepare_log()
    mode = request.GET['mode']
    try:
        metric = request.GET['metric']
    except BaseException:
        metric = "default"
    if settings.THREADING:
        t = Thread(
            target=ArtmModel.arrange_topics,
            args=(
                model,
                mode,
                metric,
            ),
            daemon=True)
        t.start()
    else:
        model.arrange_topics(mode, metric)
    return redirect("/model?model=" + str(model.id))
@login_required
def reset_visuals(request):
    """Drop cached visualisation data for a model (author only)."""
    model = ArtmModel.objects.get(id=request.GET['model'])
    if request.user != model.author:
        return HttpResponseForbidden("You are not the author")
    model.reset_visuals()
    back_link = "/model?model=" + str(model.id)
    return general_views.message(
        request,
        "Resetted. <a href ='" + back_link + "'> <br>Return to model</a>.")
@permission_required("models.add_artmmodel")
@login_required
def create_model(request):
    """Show the model-creation form (GET) or create and train a model (POST).

    GET renders the form with the dataset's modalities, available
    scripts and any unregistered model folders found on disk.  POST
    creates an ArtmModel and trains it in a daemon thread (or
    synchronously when settings.THREADING is off).

    Fixed: ``HttpResponceForbidden`` typo (NameError at runtime) ->
    ``HttpResponseForbidden`` in both permission checks.
    """
    if request.method == 'GET':
        dataset = Dataset.get_dataset(request)
        if not dataset:
            return HttpResponseForbidden()
        modalities = Modality.objects.filter(dataset=dataset)
        scripts = os.listdir(os.path.join(settings.DATA_DIR, "scripts"))
        unreg = []
        try:
            # Model folders on disk that have no database record yet
            folders = os.listdir(
                os.path.join(
                    settings.DATA_DIR,
                    "datasets",
                    dataset.text_id,
                    "models"))
            existing_models = [
                model.text_id for model in ArtmModel.objects.filter(
                    dataset=dataset)]
            unreg = [
                i for i in folders if (
                    i not in existing_models) and (
                    not i[0] == '.')]
        except BaseException:
            pass
        # NOTE(review): render() requires a plain dict on Django >= 1.10
        context = Context({'dataset': dataset,
                           'modalities': modalities,
                           'scripts': scripts,
                           'unreg': unreg})
        context['regularizers'] = REGULARIZERS
        if settings.DEBUG:
            context['DEBUG'] = True
        return render(request, 'models/create_model.html', context)
    dataset = Dataset.get_dataset(request)
    if not dataset:
        return HttpResponseForbidden()
    model = ArtmModel()
    model.dataset = dataset
    model.name = request.POST['model_name']
    model.threshold_hier = int(request.POST['threshold_hier'])
    model.threshold_docs = int(request.POST['threshold_docs'])
    model.author = request.user
    model.creation_time = datetime.now()
    model.status = 1
    model.save()
    # model.prepare_log()
    if settings.THREADING:
        t = Thread(
            target=ArtmModel.create_generic,
            args=(
                model,
                request.POST,
            ),
            daemon=True)
        t.start()
    else:
        model.create_generic(request.POST)
    return redirect("/model?model=" + str(model.id))
@login_required
def delete_model(request):
    """Delete a model after an explicit ``sure=yes`` confirmation.

    Author-only; non-authors get a 403 with an admin shortcut link.

    Fixed: removed a second, redundant author check that duplicated
    the one performed immediately after the model lookup.
    """
    model = ArtmModel.objects.get(id=request.GET['model'])
    if request.user != model.author:
        return HttpResponseForbidden(
            ("You are not the author! "
             "(<a href='/admin/models/artmmodel/%d/change/'>"
             "Delete as admin</a>)") %
            model.id)
    dataset_name = model.dataset.text_id
    if 'sure' in request.GET and request.GET['sure'] == 'yes':
        ArtmModel.objects.filter(id=request.GET['model']).delete()
        return general_views.message(
            request,
            "Model was deleted. <a href ='/dataset?dataset=" +
            dataset_name +
            "'> Return to dataset</a>.")
    else:
        return general_views.message(
            request,
            "Are you sure that you want delete model " +
            str(model) +
            " permanently?<br>" +
            "<a href = '/models/delete_model?model=" +
            str(
                model.id) +
            "&sure=yes'>Yes</a><br>" +
            "<a href = '/dataset?dataset=" +
            dataset_name +
            "'>No</a>")
@login_required
def delete_all_models(request):
    """Remove every ARTM model attached to a dataset.

    Restricted to the dataset owner; shows a confirmation page unless the
    request carries ``sure=yes``.
    """
    dataset = Dataset.objects.filter(text_id=request.GET['dataset'])[0]
    if request.user != dataset.owner:
        return HttpResponseForbidden("You are not the owner of the dataset.")
    if request.GET.get('sure') == 'yes':
        ArtmModel.objects.filter(dataset=dataset).delete()
        text = ("All models were deleted. <a href ='/dataset?dataset=" +
                dataset.text_id +
                "'>Return to dataset</a>.")
    else:
        text = ("Are you sure that you want delete ALL models for dataset " +
                str(dataset) +
                " permanently?<br>" +
                "<a href = '/models/delete_all_models?dataset=" +
                dataset.text_id +
                "&sure=yes'>Yes</a><br>" +
                "<a href = '/dataset?dataset=" +
                dataset.text_id +
                "'>No</a>")
    return general_views.message(request, text)
def visual_topic(request):
    """Render one topic page in the requested mode.

    Modes (``?mode=``): ``phi_column`` dumps the raw phi column as plain
    HTML; ``topterms`` lists top terms (optionally filtered by modality);
    ``topics`` lists child topics; ``documents`` flags the template to show
    documents. With no explicit mode, leaf-layer topics default to
    'documents' and inner-layer topics to 'topics'.
    """
    topic = Topic.objects.filter(id=request.GET['id'])[0]
    model = topic.model
    # Topics most related to this one, by ascending weight.
    related_topics = TopicRelated.objects.filter(
        model=topic.model, topic1=topic).order_by("weight")
    context = {'topic': topic, 'related_topics': related_topics}
    if 'mode' in request.GET and request.GET['mode'] == 'phi_column':
        # Debug-style raw dump: one "<term> <modality> <weight>" line per
        # term of the dataset, taken from this topic's phi column.
        terms = Term.objects.filter(dataset=model.dataset).order_by('index_id')
        # Map modality id -> name once, to avoid a query per term.
        mod_index = dict()
        for modality in Modality.objects.filter(dataset=model.dataset):
            mod_index[modality.id] = modality.name
        ans = ""
        phi = model.get_phi()
        for term in terms:
            ans += "%s %s %f<br>" % (term.text,
                                     mod_index[term.modality_id],
                                     phi[term.index_id,
                                         topic.matrix_id])
        return HttpResponse(ans)
    if 'mode' in request.GET:
        mode = request.GET['mode']
    else:
        # Default mode depends on whether this topic sits in the last layer.
        if (topic.layer == model.layers_count):
            mode = 'documents'
        else:
            mode = 'topics'
    if mode == 'topterms':
        context['modalities'] = Modality.objects.filter(dataset=model.dataset)
        top_terms = TopTerm.objects.filter(topic=topic)
        # Optional filter to a single modality ('all' means no filter).
        if 'modality' in request.GET and request.GET['modality'] != 'all':
            modality = Modality.objects.get(
                dataset=model.dataset, name=request.GET["modality"])
            top_terms = top_terms.filter(term__modality=modality)
        top_terms = top_terms.order_by("-weight_normed")
        context['top_terms'] = top_terms
    elif mode == 'topics':
        context['topics'] = TopicInTopic.objects.filter(parent=topic)
    elif mode == 'documents':
        context['documents'] = True
    # Leaf-layer topics get document-centric rendering in the template.
    context['low_level'] = (topic.layer == model.layers_count)
    context['mode'] = mode
    return render(request, 'models/topic.html', Context(context))
def dump_model(request):
    """Stream the model's matrices (theta, phi, psi) to the user as a ZIP
    attachment named ``<dataset>_<model>.zip``.
    """
    model = ArtmModel.get_model(request)
    import zipfile
    import io
    buffer = io.BytesIO()
    source_folder = model.get_folder()
    with zipfile.ZipFile(buffer, 'w') as archive:
        # Matrix files expected on disk for this model.
        names = ["theta"]
        if model.dataset.modalities_count == 1:
            names.append("phi")
        else:
            names.extend(
                "phi_" + modality.name
                for modality in Modality.objects.filter(dataset=model.dataset))
        names.extend(("psi%d" % i) for i in range(1, model.layers_count))
        for entry in names:
            archive.write(os.path.join(source_folder, entry), entry)
    response = HttpResponse(
        buffer.getvalue(),
        content_type='application/octet-stream')
    response['Content-Disposition'] = 'attachment; filename=%s_%s.zip' % (
        str(model.dataset), str(model))
    return response
def related_topics(request):
    """Render the topics closest to the given topic under a chosen metric."""
    topic = Topic.objects.get(id=request.GET["topic_id"])
    model = topic.model
    import algo.arranging.metrics as metrics
    # Fall back to the package's default metric when none is requested.
    metric = request.GET["metric"] if "metric" in request.GET \
        else metrics.default_metric
    context = {
        "topic": topic,
        "metrics": metrics.metrics_list,
        "metric": metric,
        "topics": model.get_related_topics(topic, metric=metric),
    }
    return render(request, 'models/related_topics.html', Context(context))
@login_required
def rename_topic(request):
    """Change a topic's title; only the model's author may do so."""
    topic_id = request.POST['id']
    topic = Topic.objects.get(id=topic_id)
    if request.user != topic.model.author:
        return HttpResponseForbidden("You are not the author of the model.")
    topic.rename(request.POST['new_title'])
    return redirect("/topic?id=" + topic_id)
@login_required
def model_settings(request):
    """Show or update a model's settings (author only).

    GET renders the settings form. POST dispatches on the ``action`` field:
    ``parameters`` updates thresholds and rebuilds derived data,
    ``topic_naming`` updates the naming word count, and ``matrices``
    accepts an uploaded ZIP of matrices and triggers a model reload.
    """
    if request.method == 'POST':
        action = request.POST['action']
        model = ArtmModel.get_model(request)
        if request.user != model.author:
            return HttpResponseForbidden("You are not the author.")
        if action == 'parameters':
            new_threshold_docs = int(request.POST['threshold_docs'])
            new_threshold_hier = int(request.POST['threshold_hier'])
            new_max_parents_hier = int(request.POST['max_parents_hier'])
            # Only re-extract documents when the docs threshold changed.
            if new_threshold_docs != model.threshold_docs:
                model.threshold_docs = new_threshold_docs
                model.save()
                model.extract_docs()
            # Rebuild the hierarchy when either hierarchy parameter changed.
            if (new_threshold_hier != model.threshold_hier or
                    new_max_parents_hier != model.max_parents_hier):
                model.threshold_hier = new_threshold_hier
                model.max_parents_hier = new_max_parents_hier
                model.save()
                model.build_hier()
                # Invalidate cached visualizations built from the old hierarchy.
                model.reset_visuals()
        elif action == 'topic_naming':
            model.topic_naming_top_words = int(
                request.POST["topic_naming_top_words"])
            model.save()
        elif action == 'matrices':
            model.log("Archive uploaded.")
            archive = request.FILES['archive']
            from tools.views import get_temp_folder
            import zipfile
            with get_temp_folder() as folder:
                # Spool the uploaded archive to disk before extraction.
                zip_file_name = os.path.join(folder, "a.zip")
                with open(zip_file_name, 'wb+') as f:
                    for chunk in archive.chunks():
                        f.write(chunk)
                # NOTE(review): extractall() on a user-supplied archive is
                # susceptible to zip-slip ("../" member names); consider
                # validating member paths — TODO confirm threat model.
                zip_ref = zipfile.ZipFile(zip_file_name, 'r')
                zip_ref.extractall(model.get_folder())
                zip_ref.close()
                model.log("Archive unpacked.")
                return redirect('/models/reload_model?model=' + str(model.id))
        return redirect('/model?model=' + str(model.id))
    model = ArtmModel.objects.get(id=request.GET['model_id'])
    if request.user != model.author:
        return HttpResponseForbidden("You are not the author.")
    context = {'model': model}
    return render(request, 'models/model_settings.html', Context(context))
def delete_cached_distances(request):
    """Drop the model's cached pairwise topic distances, then return to it."""
    target = ArtmModel.get_model(request, modify=True)
    target.delete_cached_distances()
    return redirect("/model?model=%d" % target.id)
|
photobooth_test.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os, os.path
import sys
import time
import signal
import traceback
import logging
from logging import handlers
import argparse
import gzip
import gphoto2 as gp
import serial
import threading
import SimpleHTTPServer
import SocketServer
# Project-related imports
import photobooth_serial as pb_serial
import photobooth_camera as pb_camera
import photobooth_image as pb_image
#####################################
# FUNCTIONS
#####################################
# Manage Ctrl+C gracefully
def signal_handler_quit(signal, frame):
    """SIGINT handler: stop the webserver, release the camera, and exit.

    Relies on the module-level ``webserver`` and ``camera`` globals created
    in the main script body. (The ``signal`` parameter shadows the stdlib
    module of the same name inside this function.)
    """
    logging.info("Shutting down requested")
    webserver.shutdown()
    logging.debug("Webserver shutdown")
    gp.check_result(gp.gp_camera_exit(camera))
    logging.info("Releasing camera")
    logging.critical("Photob00th shutdown.\n")
    sys.exit(0)
# Class to create the WebServer and to log the requests in a separate file
class CustomHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """HTTP handler that adds a CORS header and logs requests to its own file."""

    def end_headers(self):
        # Allow any origin so the booth UI can be served from another host.
        self.send_header("Access-Control-Allow-Origin", "*")
        SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)

    # Logs in a separated file.
    # NOTE(review): these class-level statements run at import time, so
    # webserver.log is opened (line-buffered, buffer=1) as soon as this
    # module is loaded, and the handle is shared by all handler instances.
    buffer = 1
    logfile = "webserver.log"
    log_file = open(logfile, 'w', buffer)
    logging.info('Web Server logs are in %s' %logfile )

    def log_message(self, format, *args):
        # Mirror the standard request-log line into the dedicated file.
        self.log_file.write("%s - [%s] : %s\n" %(
            self.client_address[0],self.log_date_time_string(),format%args))
#################################
# LOGGING
#################################
# Convert verbose count in loggin level for the loggin module
def logging_level(verbosity):
    """Map a ``-v`` count to a logging level, clamping out-of-range input.

    0 -> CRITICAL, 1 -> ERROR, 2 -> WARNING, 3 -> INFO, 4+ -> DEBUG;
    negative counts clamp to CRITICAL.
    """
    ladder = (
        logging.CRITICAL,  # 50
        logging.ERROR,     # 40
        logging.WARNING,   # 30
        logging.INFO,      # 20
        logging.DEBUG,     # 10
    )
    index = min(max(verbosity, 0), len(ladder) - 1)
    return ladder[index]
def configureLogging(numeric_level, logfile, console):
    """Configure root logging for the test harness.

    Always logs everything to ``test_debug.log``; optionally also logs to
    the console at the requested verbosity and to a daily-rotating file.

    Args:
        numeric_level: verbosity count (0-4), mapped via logging_level().
        logfile: path for the rotating log file; falsy disables it.
        console: when truthy, also log to stderr at the chosen level.

    Returns:
        The ``logging`` module itself (used by the caller as a handle).
    """
    ## VARIABLES
    format_entry = '%(asctime)s.%(msecs)03d | %(module)-10s [%(levelname)-8s] %(message)s'
    format_date = '%Y-%m-%d %H:%M:%S'
    formatter = logging.Formatter(format_entry, format_date)
    log_lvl = logging_level(numeric_level)
    # Configure root logger to a file (replacing any existing handlers).
    logging.root.handlers = []
    logging.basicConfig(format='%(asctime)s.%(msecs)03d | %(module)-10s [%(levelname)-8s] %(message)s',
                        level=logging.DEBUG ,
                        filename='test_debug.log')
    if console :
        # Logging to sys.stderr
        consolehandler = logging.StreamHandler()
        consolehandler.setLevel(log_lvl)
        consolehandler.setFormatter(formatter)
        logging.getLogger("").addHandler(consolehandler)
    # Logging to file is provided
    if logfile :
        # Use rotating files : 1 per day, and all are kept (no rotation thus)
        filehandler = handlers.TimedRotatingFileHandler(logfile, when='d', interval=1, backupCount=0)
        filehandler.suffix = "%Y-%m-%d"
        # We can set here different log formats for the stderr output !
        filehandler.setLevel(logging.DEBUG)
        # use the same format as the file
        filehandler.setFormatter(formatter)
        # add the handler to the root logger
        logging.getLogger("").addHandler(filehandler)
    logging.info("Logging level set to %s", logging_level(numeric_level))
    # Route gphoto2's logs into the python logging module.
    gp.use_python_logging()
    logging.getLogger('gphoto2').setLevel(logging.DEBUG)
    return logging
#################################
# Program options
#################################
def createParser():
    """Build the command-line argument parser for the photobooth test tool."""
    parser = argparse.ArgumentParser(
        epilog="If you have any questions : https://github.com/3isenHeiM/Photobooth",
        description="This is %(prog)s, the program to test the good processing of the commands sent by the arduino")
    # Repeatable -v flag; default verbosity of 3 maps to INFO.
    parser.add_argument(
        '-v', '--verbose', action='count', default=3,
        help='Increases verbosity of logging (up to -vvvv).')
    parser.add_argument(
        '-o', '--output_file', action='store', default='',
        help='Path/Name of log file (log level of 0). If no file specified, output only to stdout')
    parser.add_argument(
        '-t', '--test', action='store_true',
        help='Do not try to control the camera')
    parser.add_argument(
        '-c', '--console', action='store_true',
        help='Enable logging output on STDOUT')
    # store_false: passing -n turns the filter OFF (default is on).
    parser.add_argument(
        '-n', '--no-filter', action='store_false', dest="noFilter",
        help='Disables the instagram-like filter when processing the images.')
    return parser
def restart():
    """Re-exec this script in place with its original arguments.

    Restores the startup working directory (module-level ``_startup_cwd``)
    so relative paths behave the same, then replaces the current process
    via ``os.execv`` — this function does not return.
    """
    args = sys.argv[:]
    logging.info('Re-spawning %s' % ' '.join(args))
    # Prepend the interpreter: execv does not consult the shebang line.
    args.insert(0, sys.executable)
    time.sleep(1)
    os.chdir(_startup_cwd)
    os.execv(sys.executable, args)
#####################################
# MAIN
#####################################
# Parse CLI options and remember the startup state needed by restart().
parser = createParser()
results = parser.parse_args()
testMode = results.test
_startup_cwd = os.getcwd()
pictureFolder = "images/jpg"
# NOTE(review): batt_lvl is set here but never used in this script —
# presumably a leftover from the full photobooth program; confirm.
batt_lvl = 25
pb_image.enableFilter = results.noFilter
# Init the Gphoto2 objects
context = gp.Context()
camera = gp.Camera()
log = configureLogging(results.verbose, results.output_file, results.console)
# Manage Ctrl+C
signal.signal(signal.SIGINT, signal_handler_quit)
try:
    logging.info("Photob00th started !")
    # Launch the webserver in a daemon thread so it dies with the process.
    PORT = 8000
    webserver = SocketServer.TCPServer(("", PORT), CustomHTTPRequestHandler)
    thread = threading.Thread(target = webserver.serve_forever)
    thread.daemon = True
    thread.start()
    logging.info('Starting webserver on port %d', PORT )
    if not testMode :
        # Init the camera
        pb_camera.initCamera(camera, context)
        if camera == None :
            logging.error("Error initializing camera")
            sys.exit(2)
        else :
            logging.info('Camera initialized')
    else :
        logging.info('Starting program in interactive mode')
    # Get the image count
    pb_image.getImageCount()
    logging.info("Image count : %d" %pb_image.count)
    # Available functions :
    # - takePhoto
    # NOTE(review): 'input' is assigned but unused and shadows the builtin.
    input=1
    # Interactive REPL: read commands from stdin until interrupted.
    # (raw_input: this script targets Python 2.)
    while True :
        # Get keyboard input
        command = raw_input("Enter your command : ")
        logging.debug("Command received : %s" %command)
        if command == 'takePhoto':
            pictureName = pb_camera.takePhoto(camera, pictureFolder)
            logging.info("Triggered postprocessing script")
            pb_image.postProcess(pictureName)
        elif command == "ready" :
            # Do nothing
            logging.info("Ready")
        else :
            logging.info("Unknown command")
except Exception:
    # Catch-all so an unexpected error is logged instead of killing the
    # booth silently; the auto-restart is disabled for testing.
    logging.error("Caught unexpected exception:")
    logging.error(traceback.format_exc())
    logging.warning("Going to sleep 2 seconds and restart")
    time.sleep(2)
    # Cancelled the restart for testing purposes
    #restart()
    sys.exit(0)
|
dataloader.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""This script is responsible for loading data for each GPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import cProfile
import logging
import numpy as np
import pstats
import Queue
import random
import signal
import StringIO
import threading
import time
import uuid
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, scope
from core.config import config as cfg
from datasets.coordinator import Coordinator, coordinated_put, coordinated_get
from datasets.ava import AvaDataset
from datasets.ava_data_input import create_data_input \
as create_ava_data_input
from datasets.charades import CharadesDataset
from datasets.charades_data_input import create_data_input \
as create_charades_data_input
from datasets.epic import EpicDataset
from datasets.epic_data_input import create_data_input as \
create_epic_data_input
# Module logger; configuration is left to the application entry point.
logger = logging.getLogger(__name__)
# Dispatch table: dataset name (cfg.DATASET) -> Dataset wrapper class.
db_loader_map = {
    'charades': CharadesDataset,
    'ava': AvaDataset,
    'epic': EpicDataset,
}
# Matching factory for each dataset's data-input pipeline.
create_data_input_map = {
    'charades': create_charades_data_input,
    'ava': create_ava_data_input,
    'epic': create_epic_data_input,
}
class DataLoader(object):
    """Threaded mini-batch loader that feeds one BlobsQueue per GPU.

    Worker threads build mini-batches on the CPU, tag each with a sequence
    index, and put them on a bounded in-memory queue. A single enqueuer
    thread drains that queue in sequence order (buffering out-of-order
    batches) and feeds every GPU's BlobsQueue.
    """

    def __init__(
        self,
        split,
        input_db,
        batch_size,
        num_workers=4,
        num_processes=12,
        minibatch_queue_size=64,
        blobs_queue_capacity=1,
        node_id=0,
        loader_stats_file=None,
        suffix='',
        crop_size=224,
    ):
        # Debugging tool: when set, workers profile themselves with
        # cProfile and dump stats to this file on shutdown.
        self._loader_stats_file = loader_stats_file
        if loader_stats_file is not None:
            logger.info("Profiling minibatch loader and saving to {}".format(
                loader_stats_file))
        self._node_id = node_id
        self._split = split
        self._input_db = input_db
        self._db_size = input_db.get_db_size()
        # Protects the shared mini-batch sequence counter (_mb_index).
        self._lock = threading.Lock()
        self._batch_size = batch_size
        self._current = 0
        self._perm = np.arange(input_db.get_db_size())
        self.coordinator = Coordinator()
        self._num_gpus = cfg.NUM_GPUS
        self._num_workers = num_workers
        self._num_processes = num_processes
        self._crop_size = crop_size
        # 3 channels * crop_size^2 pixels per frame.
        self._expected_data_size = 3 * self._crop_size ** 2
        self._minibatch_queue_capacity = minibatch_queue_size
        self._minibatch_queue = Queue.Queue(maxsize=minibatch_queue_size)
        self._gpu_blobs_queue_capacity = blobs_queue_capacity
        # Unique per-loader queue name so multiple loaders can coexist.
        self._blobs_queue_name = '{}_blobs_queue_{}'.format(
            cfg.DATASET, str(uuid.uuid4())
        )
        self.suffix = suffix
        self.blobnames = input_db.blobnames
        # Assign indexes to blobs so that they can be queued in the same order.
        self._blobs_idx_map = OrderedDict()
        for i, blobname in enumerate(self.blobnames):
            self._blobs_idx_map[blobname] = i
        if split == 'train':
            self._shuffle_db_indices(self._db_size)
        self._mb_index = 0
        # Sequence index of the next mini-batch to enqueue, plus a buffer
        # for batches that arrive out of order from the workers.
        self._enqueue_mb_index = 0
        self._enqueue_ooo_buf = {}
        self.create_threads()
        self._create_data_input()

    def get_worker_ids(self):
        """Return worker ids: 0.. for train, 100.. for test/val."""
        if self._split == 'train':
            return range(0, self._num_workers)
        else:
            # Keep the two ranges disjoint.
            assert self._num_workers < 100
            return range(100, 100 + self._num_workers)

    def _create_data_input(self):
        """Instantiate the dataset-specific fetch function and its worker
        process context (see create_data_input_map)."""
        create_data_input = create_data_input_map[cfg.DATASET]
        (context_execution, fetch_func) = create_data_input(
            self._input_db, self._expected_data_size, self._num_processes,
            self._num_workers, self._split, self._batch_size,
            crop_size=self._crop_size,
        )
        self._context_execution = context_execution
        self._minibatch_fetch_func = fetch_func
        worker_ids = self.get_worker_ids()
        self._context_execution(worker_ids)

    def get_blob_names(self):
        """Return blob names in the fixed enqueue order."""
        return self._blobs_idx_map.keys()

    def create_blobs_queue(self, queue_name, num_blobs, capacity):
        """
        Create a BlobsQueue in the workspace to hold the mini-batches. Each GPU
        has its own workspace and we chose the namescope per GPU.
        """
        workspace.RunOperatorOnce(
            core.CreateOperator(
                'CreateBlobsQueue',
                [], [queue_name],
                num_blobs=num_blobs,
                capacity=capacity,
            )
        )

    def close_blobs_queue(self):
        """Close a BlobsQueue"""
        workspace.RunOperatorOnce(
            core.CreateOperator(
                'CloseBlobsQueue',
                [self._blobs_queue_name],
                []
            )
        )

    def _shuffle_db_indices(self, db_size):
        """Randomly permute the training roidb"""
        assert(self._split == 'train')
        # list() so this also works on Python 3, where range() is lazy and
        # cannot be shuffled in place (no-op change on Python 2).
        indices = list(range(db_size))
        random.shuffle(indices)
        self._perm = indices
        self._current = 0
        return None

    def _get_next_minibatch_indices(self):
        """
        For single machine training: data can be randomly shuffled in K bins
        For distributed training: data can be either:
            (i) randomly sampled
            (ii) sampled from a global shuffle permutations

        Returns a (db_indices, mb_index) pair where mb_index is the global
        sequence number used to keep enqueueing in order.
        """
        db_size = self._db_size
        with self._lock:
            mb_index = self._mb_index
            self._mb_index += 1
        if self._split == 'train':
            # Reshuffle when the remaining pool cannot fill a batch.
            if ((self._current + self._batch_size) >= db_size):
                self._shuffle_db_indices(db_size)
            db_indices = self._perm[
                self._current:self._current + self._batch_size
            ]
            self._current += self._batch_size
            return db_indices, mb_index
        elif self._split in ['test', 'val']:
            # Wrap around sequentially; the final batch may be short.
            if self._current == db_size:
                self._current = 0
            elif self._current > db_size:
                self._current = 0
            end_idx = self._current + self._batch_size
            db_indices = self._perm[self._current:end_idx]
            self._current += self._batch_size
            return db_indices, mb_index

    def _get_next_minibatch(self, worker_id):
        """
        Returns next blobs to be used for the next mini-batch queue
        """
        db_indices, mb_index = self._get_next_minibatch_indices()
        blobs = self._minibatch_fetch_func(
            self._input_db, worker_id, self._batch_size, db_indices,
            self._crop_size,
        )
        assert len(self.blobnames) == len(blobs), \
            'Expected %d blobs; got %d blobs' % (
                len(self.blobnames), len(blobs))
        minibatch_blobs = {
            name: blob for name, blob in zip(self.blobnames, blobs)
        }
        return minibatch_blobs, mb_index

    def minibatch_loader(self, worker_id):
        """Load mini-batches and put them into a queue in CPU memory"""
        if self._loader_stats_file is not None:
            prof = cProfile.Profile()
            prof.enable()
        with self.coordinator.stop_on_execution():
            while not self.coordinator.should_stop():
                minibatch_blobs, mb_index = self._get_next_minibatch(worker_id)
                # Re-order into the canonical blob order before queueing.
                ordered_minibatch_blobs = OrderedDict()
                for key in self.get_blob_names():
                    ordered_minibatch_blobs[key] = minibatch_blobs[key]
                coordinated_put(
                    self.coordinator,
                    self._minibatch_queue,
                    (mb_index, ordered_minibatch_blobs),
                )
        if self._loader_stats_file is not None:
            prof.disable()
            s = StringIO.StringIO()
            ps = pstats.Stats(prof, stream=s).sort_stats('cumulative')
            ps.print_stats()
            with open(self._loader_stats_file, 'w') as f:
                f.write(s.getvalue())
        logger.debug("Stopping mini-batch loader thread...")

    def enqueue_blobs(
        self,
        gpu_id,
        enqueue_blobs_names,
        blob_values,
    ):
        """Feed one mini-batch's blobs to the given GPU's BlobsQueue."""
        enqueue_blobs_names = [
            'gpu_{}/{}'.format(
                gpu_id, enqueue_blob_name
            ) for enqueue_blob_name in enqueue_blobs_names
        ]
        deviceOption = core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
        for (blob_name, blob) in zip(enqueue_blobs_names, blob_values):
            workspace.FeedBlob(blob_name, blob, device_option=deviceOption)
        queue_name = 'gpu_{}/{}'.format(gpu_id, self._blobs_queue_name)
        workspace.RunOperatorOnce(
            core.CreateOperator(
                'SafeEnqueueBlobs',
                [queue_name] + enqueue_blobs_names,
                enqueue_blobs_names + [queue_name + '_enqueue_status'],
                device_option=deviceOption,
            )
        )

    def enqueue_blobs_thread(self, _gpu_id, enqueue_blobs_names):
        """
        Transfer mini-batches from the CPU mini-batch queue to all GPU
        BlobsQueues, strictly in mini-batch sequence order (out-of-order
        batches from the workers are buffered until their turn).
        """
        with self.coordinator.stop_on_execution():
            while not self.coordinator.should_stop():
                root_gpu_id = cfg.ROOT_GPU_ID
                for gpu_id in range(root_gpu_id, root_gpu_id + self._num_gpus):
                    if self._enqueue_mb_index in self._enqueue_ooo_buf:
                        blobs = self._enqueue_ooo_buf[self._enqueue_mb_index]
                        del self._enqueue_ooo_buf[self._enqueue_mb_index]
                    else:
                        while True:
                            (mb_index, blobs) = coordinated_get(
                                self.coordinator, self._minibatch_queue
                            )
                            if self._enqueue_mb_index == mb_index:
                                break
                            else:
                                self._enqueue_ooo_buf[mb_index] = blobs
                    self.enqueue_blobs(
                        gpu_id,
                        enqueue_blobs_names,
                        blobs.values(),
                    )
                    self._enqueue_mb_index += 1
        logger.debug("Stopping enqueuer thread...")

    # Minibatch loader threads: each thread builds minibatches and places them
    # into a queue in CPU memory.
    def create_threads(self):
        # "worker" threads to construct (partial) minibatches and put them on
        # minibatch queue in CPU memory (limited by queue size).
        self._worker_ids = self.get_worker_ids()
        self._workers = [
            threading.Thread(
                target=self.minibatch_loader,
                name='worker_{}'.format(worker_id),
                args=[worker_id],
            ) for worker_id in self._worker_ids
        ]
        # Create one BlobsQueue per GPU which holds the training data in GPU
        # memory and feeds to the net.
        root_gpu_id = cfg.ROOT_GPU_ID
        for gpu_id in range(root_gpu_id, root_gpu_id + self._num_gpus):
            with core.NameScope('gpu_{}'.format(gpu_id)):
                self.create_blobs_queue(
                    queue_name=self._blobs_queue_name,
                    num_blobs=len(self._blobs_idx_map),
                    capacity=self._gpu_blobs_queue_capacity
                )
        # Launch enqueuer threads.
        blob_names = self._blobs_idx_map.keys()
        enqueue_blobs_names = [
            '{}_{}_enqueue'.format(self._split, blob_name)
            for blob_name in blob_names
        ]
        for gpu_id in range(root_gpu_id, root_gpu_id + self._num_gpus):
            with core.NameScope('gpu_{}'.format(gpu_id)):
                with core.DeviceScope(
                    core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
                ):
                    # Bug fix: enqueue_blobs_names is a flat list of name
                    # strings; the previous nested loop iterated each
                    # *string*, creating one blob per character. Create one
                    # blob per enqueue blob name instead.
                    for blob in enqueue_blobs_names:
                        scoped_blob_name = scope.CurrentNameScope() + blob
                        workspace.CreateBlob(scoped_blob_name)
        self._enqueuer = threading.Thread(
            target=self.enqueue_blobs_thread, args=(0, enqueue_blobs_names)
        )

    def prefill_minibatch_queue(self):
        """Block until the CPU mini-batch queue is full."""
        logger.info('Pre-filling {} minibatch queue'.format(self._split))
        while(self.minibatch_queue_size() < self._minibatch_queue_capacity):
            time.sleep(1.0)
        logger.info("{} minibatch queue pre-filled.".format(self._split))

    def start(self, prefill=False):
        """Start worker and enqueuer threads (daemonized); optionally wait
        for the mini-batch queue to fill first."""
        for w in self._workers + [self._enqueuer]:
            w.daemon = True
            w.start()
        if prefill:
            self.prefill_minibatch_queue()

    def join(self):
        """Join all loader threads."""
        for w in self._workers + [self._enqueuer]:
            w.join()

    def shutdown_dataloader(self):
        """Request a coordinated stop, close all GPU queues, and join."""
        self.coordinator.request_stop()
        self.coordinator.wait_for_stop()
        root_gpu_id = cfg.ROOT_GPU_ID
        for idx in range(root_gpu_id, root_gpu_id + self._num_gpus):
            with core.NameScope("gpu_{}".format(idx)):
                self.close_blobs_queue()
        self.join()

    def register_sigint_handler(self):
        """Install a SIGINT handler that shuts the loader down cleanly."""
        def signal_handler(signal, frame):
            logger.info(
                "SIGINT: shutting down data loader threads and exiting")
            self.shutdown_dataloader()
        signal.signal(signal.SIGINT, signal_handler)

    def minibatch_queue_size(self):
        """Current number of mini-batches waiting in the CPU queue."""
        return self._minibatch_queue.qsize()
def get_input_db(dataset, data_type, model,
                 lfb_infer_only=False,
                 shift=None, lfb=None, suffix=''):
    """Look up and construct the Dataset wrapper for the named dataset."""
    assert dataset in db_loader_map.keys(), \
        "Unknown dataset: {}".format(dataset)
    dataset_cls = db_loader_map[dataset]
    return dataset_cls(
        split=data_type,
        lfb_infer_only=lfb_infer_only,
        shift=shift, lfb=lfb, suffix=suffix)
|
followers.py | """
Calls the Github API to populate User and Follower CSV data to build the
followers relationship graph
Users aren't distinct in the outputted CSV
User-Follows are distinct
"""
import os
import json
import csv
import requests
from queue import Queue
from threading import Thread
from _base import DATA_DIR, argparse_required_username
# Username whose follower graph we crawl (required CLI argument).
USERNAME = argparse_required_username()
# Github Auth header (requires GITHUB_API_TOKEN in the environment).
headers = {'Authorization': 'token {}'.format(os.environ['GITHUB_API_TOKEN'])}
# file destination to save output
user_data_dir = os.path.join(DATA_DIR, USERNAME)
output_file = os.path.join(user_data_dir, 'followers.csv')
# Github user data that will be saved (also the CSV column order)
columns = [
    # info
    'login',
    'name',
    'email',
    'company',
    'created_at',
    'updated_at',
    'avatar_url',
    'location',
    # status
    'followers',
    'following',
    'public_gists',
    'public_repos',
    # linking
    'follows'
]
def get_enpoint_data(url):
    """GET *url* with the auth header and return the decoded JSON payload.

    Raises:
        AssertionError: if the response status is not 200 (kept as
        AssertionError for compatibility with existing callers).
    """
    r = requests.get(url, headers=headers)
    # (Removed a leftover no-op bare `r.status_code` expression statement.)
    if r.status_code != 200:
        raise AssertionError('bad request')
    return json.loads(r.content.decode('utf8'))
def get_user_data(username, follows=''):
    """Fetch a user's profile dict, tagged with whom they follow."""
    profile = get_enpoint_data(
        'https://api.github.com/users/{}'.format(username))
    profile['follows'] = follows
    return profile
def get_follower_data(username):
    """Return the raw follower list for *username* from the Github API."""
    endpoint = 'https://api.github.com/users/{}/followers'.format(username)
    return get_enpoint_data(endpoint)
def put_user_data_on_queue(queue, username, follows):
    """Thread target: fetch one user's profile and push it onto *queue*."""
    data = get_user_data(username, follows)
    queue.put(data)
def get_user_and_followers_data(username):
    """
    Returns a list of Github User dict objects
    """
    results = Queue()
    # One fetch thread per follower, all writing into the shared queue.
    follower_logins = [entry['login'] for entry in get_follower_data(username)]
    workers = [
        Thread(target=put_user_data_on_queue, args=(results, login, username))
        for login in follower_logins
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # All threads have finished, so qsize() is the final item count.
    return [results.get(1) for _ in range(results.qsize())]
def main():
    """Crawl USERNAME plus two levels of followers and write followers.csv."""
    main_user_data = get_user_data(USERNAME)
    follower_data = get_user_and_followers_data(USERNAME)
    # Second level: the followers of each direct follower.
    next_follower_data = []
    for f in follower_data:
        next_follower_data.append(get_user_and_followers_data(f['login']))
    # create output dir if it doesn't exist
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    with open(output_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        # header
        writer.writerow(columns)
        # main_user data
        writer.writerow([main_user_data[x] for x in columns])
        # follower data
        for data in follower_data:
            writer.writerow([data[c] for c in columns])
        # 2nd level follower data
        for nf in next_follower_data:
            for data in nf:
                writer.writerow([data[c] for c in columns])
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
MeterRead.py | #!/usr/bin/env python
#
# Query the metersvc on a SmartHUB directly.
#
# Pre-requisites:
#
# # apt install python-is-python3 python3-pip python3-virtualenv
# # pip3 install aiohttp==3.7.4.post0 pytz requests Sphinx sphinx_rtd_theme
# # mkdir -m 775 /var/log/metersummary
import os, sys, signal, threading, queue, socket, requests, json, pytz, configparser
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from client.onem2m.OneM2MPrimitive import OneM2MPrimitive
from client.onem2m.http.OneM2MRequest import OneM2MRequest
from client.onem2m.resource.Container import Container
from client.onem2m.resource.ContentInstance import ContentInstance
from client.cse.CSE import CSE
from client.ae.AE import AE
from client.ae.AsyncResponseListener import AsyncResponseListenerFactory
from client.Utility import Utility
from threading import Lock
from datetime import datetime, timedelta
from typing import Final
from aiohttp import web
################# Configure the following for your environment #################
# The AE App and credential IDs, as generated in PolicyNet via More -> System settings -> AE Registration Credentials.
APP_ID: Final = 'Nmeterread'
AE_ID: Final = 'XXXXXXXXXXXXXXXX'
# Address of the IN-CSE running in your cloud environment.
CSE_PROTOCOL: Final = 'http'
CSE_HOST: Final = 'dev9.usw1.aws.corp.grid-net.com'
CSE_PORT: Final = 21300
# Identification of this IN-AE.
RESOURCE_NAME: Final = APP_ID[1:]
APP_NAME: Final = 'com.grid-net.' + RESOURCE_NAME
# Timezone for log rotation and subscriptions. A new log file is started at midnight in this timezone.
tz = pytz.timezone('Australia/Sydney')
############################## End of site config ##############################
# MN-AE metersvc reading frequency content instance.
CONFIG_RESOURCE_NAME: Final = 'reportInterval'
# Details of the (usually local) listener that the IN-CSE will send notifications to.
NOTIFICATION_PROTOCOL: Final = 'http'
NOTIFICATION_HOST: Final = Utility.myIpAddress()
NOTIFICATION_PORT: Final = 8082
NOTIFICATION_CONTAINER: Final = 'cnt-00001'
NOTIFICATION_SUBSCRIPTION: Final = 'sub-00001'
# Reading period passed as 'recper' in the read policy — presumably
# seconds; TODO confirm against the metersvc schema.
NOTIFICATION_INTERVAL: Final = 1
NOTIFICATION_CONTAINER_TIME: Final = 300
NOTIFICATION_CONTAINER_MAX_AGE: Final = 900
# Where incoming notification JSON is appended (one file per day).
NOTIFICATION_LOG_DIR: Final = '/var/log/meterread'
NOTIFICATION_LOG_PREFIX: Final = 'notification_log_'
NOTIFICATION_LOG_SUFFIX: Final = '.json'
SETTINGS_FILE: Final = '/var/tmp/meterread.ini'
# Create an instance of the CSE to send requests to.
pn_cse = CSE(CSE_HOST, CSE_PORT)
# Persistent settings via INI file.
settings = configparser.ConfigParser()
# Queue used to control the configWorker thread.
configQueue = queue.Queue()
# Mutex to enforce atomicity on log file writes.
logMutex = Lock()
def saveConfig(ri):
    """Persist *ri* as DEFAULT/ri_persistent so a crashed run can be
    cleaned up (deregistered) on the next start."""
    settings.set('DEFAULT', 'ri_persistent', ri)
    with open(SETTINGS_FILE, 'w') as fp:
        settings.write(fp)
# Term signal handler to perform deregistration at shutdown.
def handleSignalTerm(signal, frame):
    """SIGTERM handler: deregister the AE (if registered) and exit cleanly.

    Clearing the persisted resource id means the next start skips the
    stale-registration cleanup path.
    """
    if pn_cse.ae is not None:
        del_res = pn_cse.delete_ae()
        del_res.dump('Delete AE')
        saveConfig('')
    sys.exit(0)
def main():
try:
signal.signal(signal.SIGTERM, handleSignalTerm)
sys.stdout.reconfigure(line_buffering=True, encoding="utf-8")
# Confirm that there isn't already an instance running, using the HTTP listening port as a lock.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
bindres = sock.bind(('', NOTIFICATION_PORT))
if bindres is not None and bindres != 0:
print('Error binding to port {}: {}'.format(NOTIFICATION_PORT, os.strerror(bindres)))
sys.exit(-1)
except socket.error as msg:
print('Error binding to port {}: {}'.format(NOTIFICATION_PORT, msg))
sys.exit(-1)
sock.close()
# Open persistent setting file, or create if it doesn't exist.
if settings.read(SETTINGS_FILE) == []:
with open(SETTINGS_FILE, 'w') as fp:
print('[DEFAULT]\nri_persistent = ', file=fp)
fp.close()
settings.read(SETTINGS_FILE)
# If we did not cleanly exit last time, clean up the previous registration before continuing.
ri_persistent = settings.get('DEFAULT', 'ri_persistent')
if ri_persistent is not None and ri_persistent != '' and ri_persistent != "":
print('Deregistering AE "{}" with CSE @ {}'.format(ri_persistent, CSE_HOST))
to_ae = '{}://{}:{}/PN_CSE/{}'.format(pn_cse.transport_protocol, pn_cse.host, pn_cse.port, ri_persistent)
res = pn_cse.delete_ae(to_ae, ri_persistent)
res.dump('Deregister AE')
saveConfig('')
# Create an AE instance to register with the CSE.
NOTIFICATION_URI: Final = '{}://{}:{}'.format(NOTIFICATION_PROTOCOL, NOTIFICATION_HOST, NOTIFICATION_PORT)
req_ae = AE(
{
AE.M2M_ATTR_APP_ID : APP_ID,
AE.M2M_ATTR_APP_NAME : APP_NAME,
AE.M2M_ATTR_AE_ID : AE_ID,
AE.M2M_ATTR_POINT_OF_ACCESS: [NOTIFICATION_URI],
}
)
# Start the configuration worker thread.
threading.Thread(target=configWorker, daemon=True).start()
print('Registering AE "{}" with CSE @ {}'.format(req_ae.aei, CSE_HOST))
# Register with the specified resourceName (or, if it is None, let the IN-CSE allocate one).
res = pn_cse.register_ae(req_ae, RESOURCE_NAME)
res.dump('Register AE')
if res.rsc != OneM2MPrimitive.M2M_RSC_CREATED:
print('Could not register AE\nExiting...')
sys.exit(-2)
# Save the name and RI we registered as.
rn = res.pc["m2m:ae"]["rn"]
saveConfig(res.pc["m2m:ae"]["ri"])
ri_persistent = res.pc["m2m:ae"]["ri"]
print('AE registration successful: {}'.format(rn))
# Create a new container.
print('Creating container {}/{}'.format(rn, NOTIFICATION_CONTAINER))
content = Container({'rn': NOTIFICATION_CONTAINER, 'mia': NOTIFICATION_CONTAINER_MAX_AGE})
res = pn_cse.create_resource(rn, None, content, OneM2MRequest.M2M_RCN_HIERARCHICAL_ADDRESS)
res.dump('Create Container')
# Create a subscription to the container.
container_url = '~/355808100064390/metersvc/reads'
print('Subscribing to container: {}'.format(container_url))
sub_res = pn_cse.create_subscription(container_url, NOTIFICATION_SUBSCRIPTION, '/PN_CSE/' + ri_persistent, [3],
OneM2MRequest.M2M_RCN_HIERARCHICAL_ADDRESS, False)
sub_res.dump('Create Subscription')
# Get the request ID to register with the async response handler.
# NOTE The key we actually need isn't the RI, but rather the subscription URI.
request_id = sub_res.pc["m2m:uri"]
# Create the meter reading policy.
container_url = '/~/355808100064390/metersvc/policies'
print('Creating configuration content instance {}'.format(container_url))
end_time = datetime.now(tz) + timedelta(seconds=NOTIFICATION_CONTAINER_TIME)
read_policy = {
'read': {
'rtype': 'powerQuality',
'tsched': {
'recper': NOTIFICATION_INTERVAL,
'sched': {
'start': '2020-01-01T00:00:00',
'end': end_time.strftime('%Y-%m-%dT%H:%M:%S'),
},
},
},
}
content = ContentInstance({'rn': CONFIG_RESOURCE_NAME, 'con': read_policy})
to = '{}://{}:{}{}'.format(CSE_PROTOCOL, CSE_HOST, CSE_PORT, container_url)
params = {
OneM2MPrimitive.M2M_PARAM_FROM: pn_cse.ae.ri,
OneM2MPrimitive.M2M_PARAM_RESULT_CONTENT: 2,
OneM2MPrimitive.M2M_PARAM_RESOURCE_TYPE: OneM2MPrimitive.M2M_RESOURCE_TYPES.ContentInstance.value,
}
content_instance = content
oneM2MRequest = OneM2MRequest()
try:
response = oneM2MRequest.create(to, params, content_instance)
response.dump('Configuration Content Instance')
except requests.exceptions.HTTPError as e:
print("Error: Configuration content instance creation failed with error {}".format(e.response.status_code))
# Callback that will be execute whenever an HTTP request is sent to localhost:8082
# and X-M2M-RI header is set. The handler functions should process the request and
# return the appropriate HTTP response orginator.
# @todo AsyncResponseListener needs further refinement. It should work with OneM2M primitives, not
# HTTP messages directly.
# Params are aiohttp request and response instance.
# https://docs.aiohttp.org/en/stable/web_reference.html?highlight=Request#request-and-base-request
# https://docs.aiohttp.org/en/stable/web_reference.html?highlight=Response#response-classes
async def request_handler(req: web.Request, res: web.Response):
    """aiohttp callback for asynchronous oneM2M notifications.

    Acknowledges the notification (X-M2M-RSC 2000, echoing the request's
    X-M2M-RI) and appends the JSON body to a per-day NDJSON log file.
    """
    # Process request.
    if req.method == 'POST' or req.body_exists():
        # Modify response.
        res.headers.popall('Content-Type', "")
        res.headers['X-M2M-RSC'] = '2000'
        res.headers['X-M2M-RI'] = req.headers.get('X-M2M-RI')
        # Print and log the JSON.
        body = await req.json()
        if body is not None:
            # Create a new log file every day, starting at 00:00:00 in the local timezone.
            day_now = datetime.now(tz).strftime('%Y-%m-%d')
            logFileName = NOTIFICATION_LOG_DIR + '/' + NOTIFICATION_LOG_PREFIX + day_now + NOTIFICATION_LOG_SUFFIX
            # logMutex serialises writers so concurrent notifications don't interleave.
            with logMutex:
                logFile = open(logFileName, 'a')
                json.dump(body, logFile, separators=(',', ':'))
                # DEBUG Append the reception time for comparison with 'rtl' and 'ct'.
                logFile.write('{{"received":"{}"}}\n'.format(datetime.now(tz).strftime("%Y-%m-%dT%H:%M:%S.%f")))
                # logFile.write('\n') # Newline-terminate, i.e. create valid NDJSON
                logFile.close()
    return res
print('IN-AE started')
handlerFactory = (
AsyncResponseListenerFactory(NOTIFICATION_HOST, NOTIFICATION_PORT)
)
handler = handlerFactory.get_instance()
handler.set_rqi_cb(
request_id, request_handler
) # Map request ID to corresponding handler function.
handler.run()
except Exception as err:
print('Exception raised...\n')
print(err)
if err.response is not None and err.response.text is not None:
print(err.response.text)
finally:
print('Cleaning up...')
# Clean up AE.
if pn_cse.ae is not None:
del_res = pn_cse.delete_ae()
del_res.dump('Delete AE')
saveConfig('')
if __name__ == '__main__':
main()
|
conftest.py | import asyncio
from functools import partial
from multiprocessing import Process
import pytest
from server.tcp import start_server
from tests.integration.tcp.bot import TcpBot
@pytest.fixture
def tcp_bot_factory(running_server):
    """Return a TcpBot factory pre-bound to the running server's address."""
    server_host, server_port = running_server
    return partial(TcpBot, server_host, server_port)
@pytest.fixture
async def running_server(unused_tcp_port, game_play):
    """Run the TCP game server in a child process; yield its (host, port)."""
    bind_host = '127.0.0.1'
    bind_port = unused_tcp_port

    def _serve():
        # Child-process entry point: run the server until the process is killed.
        asyncio.run(
            start_server(
                game_play,
                bind_host,
                bind_port
            )
        )

    server_process = Process(target=_serve)
    server_process.start()
    # Give the child a moment to bind before handing the address to tests.
    await asyncio.sleep(1)
    yield bind_host, bind_port
    server_process.kill()
|
threads.py | from queue import Queue, Empty
import threading
import time
class ThreadingMixin(object):
    """Mixin that fans work items out to a pool of daemon worker threads.

    Host classes must provide ``module_thread`` (the per-item worker),
    ``debug``/``error``/``print_exception`` logging helpers, and a
    ``_global_options`` mapping with ``verbosity`` and ``threads`` keys.
    """

    def _thread_wrapper(self, *args):
        '''Wrapper for the worker method defined in the module. Handles calling the actual worker, cleanly exiting upon
        interrupt, and passing exceptions back to the main process.'''
        thread_name = threading.current_thread().name
        self.debug(f"THREAD => {thread_name} started.")
        while not self.stopped.is_set():
            try:
                # use the get_nowait() method for retrieving a queued item to
                # prevent the thread from blocking when the queue is empty
                obj = self.q.get_nowait()
            except Empty:
                continue
            try:
                # launch the public module_thread method
                self.module_thread(obj, *args)
            except Exception:
                # handle exceptions local to the thread; Exception (not a bare
                # except) so SystemExit/KeyboardInterrupt are not swallowed
                self.print_exception(f"(thread={thread_name}, object={repr(obj)})")
            finally:
                self.q.task_done()
        self.debug(f"THREAD => {thread_name} exited.")

    # sometimes a keyboardinterrupt causes a race condition between when the self.q.task_done() call above and the
    # self.q.empty() call below, causing all the threads to hang. introducing the time.sleep(.7) call below reduces
    # the likelihood of encountering the race condition.
    def thread(self, *args):
        """Process args[0] items, passing args[1:] to module_thread for each.

        Runs serially when verbosity >= 2 (debug mode), otherwise spreads the
        items over `_global_options['threads']` worker threads and blocks
        until the queue drains. Re-raises KeyboardInterrupt after a clean
        worker shutdown.
        """
        # disable threading in debug mode
        if self._global_options['verbosity'] >= 2:
            # call the thread method in serial for each input
            for item in args[0]:
                self.module_thread(item, *args[1:])
            return
        # begin threading code
        thread_count = self._global_options['threads']
        self.stopped = threading.Event()
        self.exc_info = None
        self.q = Queue()
        # populate the queue from the user-defined iterable. should be done
        # before the threads start so they have something to process right away
        for item in args[0]:
            self.q.put(item)
        # launch the threads
        threads = []
        for i in range(thread_count):
            t = threading.Thread(target=self._thread_wrapper, args=args[1:])
            threads.append(t)
            # daemon workers never block interpreter exit;
            # Thread.setDaemon() is deprecated since Python 3.10
            t.daemon = True
            t.start()
        # hack to catch keyboard interrupts
        try:
            while not self.q.empty():
                time.sleep(.7)
        except KeyboardInterrupt:
            self.error('Ok. Waiting for threads to exit...')
            # set the event flag to trigger an exit for all threads (interrupt condition)
            self.stopped.set()
            # prevent the module from returning to the interpreter until all threads have exited
            for t in threads:
                t.join()
            raise
        self.q.join()
        # set the event flag to trigger an exit for all threads (normal condition)
        # the threads are no longer needed once all the data has been processed
        self.stopped.set()
|
__init__.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
__all__ = ['ToastNotifier']
# #############################################################################
# ########## Libraries #############
# ##################################
# standard library
import logging
import threading
from os import path
from time import sleep
from pkg_resources import Requirement
from pkg_resources import resource_filename
# 3rd party modules
from win32api import GetModuleHandle
from win32api import PostQuitMessage
from win32con import CW_USEDEFAULT
from win32con import IDI_APPLICATION
from win32con import IMAGE_ICON
from win32con import LR_DEFAULTSIZE
from win32con import LR_LOADFROMFILE
from win32con import WM_DESTROY
from win32con import WM_USER
from win32con import WS_OVERLAPPED
from win32con import WS_SYSMENU
from win32gui import CreateWindow
from win32gui import DestroyWindow
from win32gui import LoadIcon
from win32gui import LoadImage
from win32gui import NIF_ICON
from win32gui import NIF_INFO
from win32gui import NIF_MESSAGE
from win32gui import NIF_TIP
from win32gui import NIM_ADD
from win32gui import NIM_DELETE
from win32gui import NIM_MODIFY
from win32gui import RegisterClass
from win32gui import UnregisterClass
from win32gui import Shell_NotifyIcon
from win32gui import UpdateWindow
from win32gui import WNDCLASS
# ############################################################################
# ########### Class ToastNotifier##############
# ####################################################################
#Add comments on new line
class ToastNotifier(object):
    """Create a Windows 10 toast notification.
    from: https://github.com/jithurjacob/Windows-10-Toast-Notifications
    """

    def __init__(self):
        """Initialize."""
        # Worker thread used for non-blocking (threaded=True) notifications.
        self._thread = None

    def _show_toast(self, title, msg,
                    icon_path, duration):
        """Notification settings.

        :title: notification title
        :msg: notification message
        :icon_path: path to the .ico file to custom notification
        :duration: delay in seconds before notification self-destruction
        """
        message_map = {WM_DESTROY: self.on_destroy, }
        # Register the window class.
        self.wc = WNDCLASS()
        self.hinst = self.wc.hInstance = GetModuleHandle(None)
        self.wc.lpszClassName = str("PythonTaskbar")  # must be a string
        self.wc.lpfnWndProc = message_map  # could also specify a wndproc.
        try:
            self.classAtom = RegisterClass(self.wc)
        except Exception:
            # The class may already be registered by a previous toast;
            # narrowed from a bare except so fatal signals still propagate.
            pass
        style = WS_OVERLAPPED | WS_SYSMENU
        self.hwnd = CreateWindow(self.classAtom, "Taskbar", style,
                                 0, 0, CW_USEDEFAULT,
                                 CW_USEDEFAULT,
                                 0, 0, self.hinst, None)
        UpdateWindow(self.hwnd)
        # icon: fall back to the bundled python.ico when none is supplied
        if icon_path is not None:
            icon_path = path.realpath(icon_path)
        else:
            icon_path = resource_filename(Requirement.parse("win10toast"), "win10toast/data/python.ico")
        icon_flags = LR_LOADFROMFILE | LR_DEFAULTSIZE
        try:
            hicon = LoadImage(self.hinst, icon_path,
                              IMAGE_ICON, 0, 0, icon_flags)
        except Exception as e:
            logging.error("Some trouble with the icon ({}): {}"
                          .format(icon_path, e))
            # fall back to the stock application icon
            hicon = LoadIcon(0, IDI_APPLICATION)
        # Taskbar icon
        flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
        nid = (self.hwnd, 0, flags, WM_USER + 20, hicon, "Tooltip")
        Shell_NotifyIcon(NIM_ADD, nid)
        Shell_NotifyIcon(NIM_MODIFY, (self.hwnd, 0, NIF_INFO,
                                      WM_USER + 20,
                                      hicon, "Balloon Tooltip", msg, 200,
                                      title))
        # take a rest then destroy
        sleep(duration)
        DestroyWindow(self.hwnd)
        UnregisterClass(self.wc.lpszClassName, None)
        return None

    def show_toast(self, title="Notification", msg="Here comes the message",
                   icon_path=None, duration=5, threaded=False):
        """Show a toast, optionally on a background thread.

        :title: notification title
        :msg: notification message
        :icon_path: path to the .ico file to custom notification
        :duration: delay in seconds before notification self-destruction
        :threaded: when True, return immediately and show on a worker thread
        :returns: False if a threaded toast is already active, else True/None
        """
        if not threaded:
            self._show_toast(title, msg, icon_path, duration)
        else:
            if self.notification_active():
                # We have an active notification, let it finish so we don't spam them
                return False
            self._thread = threading.Thread(target=self._show_toast, args=(title, msg, icon_path, duration))
            self._thread.start()
        return True

    def notification_active(self):
        """See if we have an active notification showing"""
        # `is not None` instead of `!= None`: identity check for the singleton
        return self._thread is not None and self._thread.is_alive()

    def on_destroy(self, hwnd, msg, wparam, lparam):
        """Clean after notification ended.

        :hwnd:
        :msg:
        :wparam:
        :lparam:
        """
        nid = (self.hwnd, 0)
        Shell_NotifyIcon(NIM_DELETE, nid)
        PostQuitMessage(0)
        return None
|
conftest.py | # Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014-2015 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import traceback
import pytest
import xcffib
import xcffib.testing
import xcffib.xproto
import libqtile.config
from libqtile import command_client, command_interface, ipc
from libqtile.backend.x11.core import Core
from libqtile.confreader import Config
from libqtile.core.session_manager import SessionManager
from libqtile.lazy import lazy
from libqtile.log_utils import init_log
from libqtile.resources import default_config
# the default sizes for the Xephyr windows
WIDTH = 800
HEIGHT = 600
SECOND_WIDTH = 640
SECOND_HEIGHT = 480

# Polling parameters used as the Retry helper's defaults: retry for at most
# max_sleep seconds, starting with sleep_time between attempts.
max_sleep = 5.0
sleep_time = 0.1
def pytest_addoption(parser):
    """Register the test suite's extra command line options with pytest."""
    parser.addoption(
        "--debuglog",
        action="store_true",
        default=False,
        help="enable debug output",
    )
class Retry:
    """Decorator factory that retries the wrapped callable until it succeeds.

    The wrapped function is invoked repeatedly, with exponential backoff
    starting at *dt*, until it returns without raising, until *tmax*
    seconds have elapsed, or until it raises AssertionError (which aborts
    retrying immediately).  Exceptions listed in *ignore_exceptions*
    trigger another attempt.  On failure the wrapper either returns False
    (*return_on_fail*) or raises AssertionError with *fail_msg*.
    """

    def __init__(self, fail_msg='retry failed!', ignore_exceptions=(),
                 dt=sleep_time, tmax=max_sleep, return_on_fail=False):
        self.fail_msg = fail_msg
        self.ignore_exceptions = ignore_exceptions
        self.dt = dt
        self.tmax = tmax
        self.return_on_fail = return_on_fail

    def __call__(self, fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            deadline = time.time() + self.tmax
            delay = self.dt
            while time.time() <= deadline:
                try:
                    return fn(*args, **kwargs)
                except self.ignore_exceptions:
                    pass
                except AssertionError:
                    break
                time.sleep(delay)
                delay *= 1.5
            if self.return_on_fail:
                return False
            raise AssertionError(self.fail_msg)
        return wrapper
@Retry(ignore_exceptions=(xcffib.ConnectionException,), return_on_fail=True)
def can_connect_x11(disp=':0', *, ok=None):
    """Return True once an X11 connection to *disp* succeeds.

    *ok* is an optional liveness probe; if it returns falsy the attempt is
    abandoned immediately (AssertionError stops the Retry loop).
    """
    if ok is not None and not ok():
        raise AssertionError()
    connection = xcffib.connect(display=disp)
    connection.disconnect()
    return True
@Retry(ignore_exceptions=(ipc.IPCError,), return_on_fail=True)
def can_connect_qtile(socket_path, *, ok=None):
    """Return True once the qtile IPC server at *socket_path* answers 'OK'."""
    if ok is not None and not ok():
        raise AssertionError()
    interface = command_interface.IPCCommandInterface(ipc.Client(socket_path))
    client = command_client.InteractiveCommandClient(interface)
    return client.status() == 'OK'
def whereis(program):
    """Search PATH for executable"""
    for directory in os.environ.get('PATH', '').split(':'):
        candidate = os.path.join(directory, program)
        if os.path.exists(candidate) and not os.path.isdir(candidate):
            return candidate
    return None
class BareConfig(Config):
    """Minimal qtile configuration used as the default for the manager fixtures."""
    auto_fullscreen = True
    # Four empty groups to switch between.
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("b"),
        libqtile.config.Group("c"),
        libqtile.config.Group("d")
    ]
    # Two stack layouts differing only in stack count.
    layouts = [
        libqtile.layout.stack.Stack(num_stacks=1),
        libqtile.layout.stack.Stack(num_stacks=2)
    ]
    floating_layout = libqtile.resources.default_config.floating_layout
    # Minimal keybindings: ctrl+k / ctrl+j move within the layout.
    keys = [
        libqtile.config.Key(
            ["control"],
            "k",
            lazy.layout.up(),
        ),
        libqtile.config.Key(
            ["control"],
            "j",
            lazy.layout.down(),
        ),
    ]
    mouse = []
    screens = [libqtile.config.Screen()]
    follow_mouse_focus = False
class Xephyr:
    """Spawn Xephyr instance

    Set-up a Xephyr instance with the given parameters. The Xephyr instance
    must be started, and then stopped.
    """
    def __init__(self,
                 xinerama=True,
                 randr=False,
                 two_screens=True,
                 width=WIDTH,
                 height=HEIGHT,
                 xoffset=None):
        self.xinerama = xinerama
        self.randr = randr
        self.two_screens = two_screens
        self.width = width
        self.height = height
        # By default the second screen sits immediately right of the first.
        if xoffset is None:
            self.xoffset = width
        else:
            self.xoffset = xoffset
        self.proc = None  # Handle to Xephyr instance, subprocess.Popen object
        self.display = None
        self.display_file = None

    def __enter__(self):
        try:
            self.start_xephyr()
        except:  # noqa: E722
            self.stop_xephyr()
            raise
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop_xephyr()

    def start_xephyr(self):
        """Start Xephyr instance

        Starts the Xephyr instance and sets the `self.display` to the display
        which is used to setup the instance.
        """
        # get a new display
        display, self.display_file = xcffib.testing.find_display()
        self.display = ":{}".format(display)
        # build up arguments
        args = [
            "Xephyr",
            "-name",
            "qtile_test",
            self.display,
            "-ac",
            "-screen",
            "{}x{}".format(self.width, self.height),
        ]
        if self.two_screens:
            args.extend(["-origin", "%s,0" % self.xoffset, "-screen",
                         "%sx%s" % (SECOND_WIDTH, SECOND_HEIGHT)])
        if self.xinerama:
            args.extend(["+xinerama"])
        if self.randr:
            args.extend(["+extension", "RANDR"])
        self.proc = subprocess.Popen(args)
        if can_connect_x11(self.display, ok=lambda: self.proc.poll() is None):
            return
        # we weren't able to get a display up
        if self.proc.poll() is None:
            # process is alive but never accepted a connection
            # (error message typo "conncet" fixed)
            raise AssertionError("Unable to connect to running Xephyr")
        else:
            raise AssertionError("Unable to start Xephyr, quit with return code {:d}".format(
                self.proc.returncode
            ))

    def stop_xephyr(self):
        """Stop the Xephyr instance"""
        # Xephyr must be started first
        if self.proc is None:
            return
        # Kill xephyr only if it is running
        if self.proc.poll() is None:
            # We should always be able to kill xephyr nicely
            self.proc.terminate()
            self.proc.wait()
        self.proc = None
        # clean up the lock file for the display we allocated
        try:
            self.display_file.close()
            os.remove(xcffib.testing.lock_path(int(self.display[1:])))
        except OSError:
            pass
class TestManager:
    """Spawn a Qtile instance

    Setup a Qtile server instance on the given display, with the given socket
    and log files. The Qtile server must be started, and then stopped when it
    is done. Windows can be spawned for the Qtile instance to interact with
    with various `.test_*` methods.
    """
    def __init__(self, sockfile, display, debug_log):
        self.sockfile = sockfile
        self.display = display
        self.log_level = logging.DEBUG if debug_log else logging.INFO
        self.proc = None        # multiprocessing.Process running qtile
        self.c = None           # command client connected to the instance
        self.testwindows = []   # subprocesses spawned as test windows

    def start(self, config_class):
        """Start qtile in a child process and connect a command client."""
        rpipe, wpipe = multiprocessing.Pipe()

        def run_qtile():
            try:
                kore = Core(display_name=self.display)
                init_log(self.log_level, log_path=None, log_color=False)
                q = SessionManager(kore, config_class(), socket_path=self.sockfile)
                q.loop()
            except Exception:
                # report the traceback to the parent through the pipe
                wpipe.send(traceback.format_exc())

        self.proc = multiprocessing.Process(target=run_qtile)
        self.proc.start()

        # First, wait for socket to appear
        if can_connect_qtile(self.sockfile, ok=lambda: not rpipe.poll()):
            ipc_client = ipc.Client(self.sockfile)
            ipc_command = command_interface.IPCCommandInterface(ipc_client)
            self.c = command_client.InteractiveCommandClient(ipc_command)
            return
        if rpipe.poll(sleep_time):
            error = rpipe.recv()
            raise AssertionError("Error launching qtile, traceback:\n%s" % error)
        raise AssertionError("Error launching qtile")

    def create_manager(self, config_class):
        """Create a Qtile manager instance in this thread

        This should only be used when it is known that the manager will throw
        an error and the returned manager should not be started, otherwise this
        will likely block the thread.
        """
        init_log(self.log_level, log_path=None, log_color=False)
        kore = Core(display_name=self.display)
        config = config_class()
        # fill attributes missing from the config with the defaults
        for attr in dir(default_config):
            if not hasattr(config, attr):
                setattr(config, attr, getattr(default_config, attr))
        return SessionManager(kore, config, socket_path=self.sockfile)

    def terminate(self):
        """Terminate the qtile process and every spawned test window."""
        if self.proc is None:
            print("qtile is not alive", file=sys.stderr)
        else:
            # try to send SIGTERM and wait up to 10 sec to quit
            self.proc.terminate()
            self.proc.join(10)
            if self.proc.is_alive():
                print("Killing qtile forcefully", file=sys.stderr)
                # desperate times... this probably messes with multiprocessing...
                try:
                    os.kill(self.proc.pid, 9)
                    self.proc.join()
                except OSError:
                    # The process may have died due to some other error
                    pass
            if self.proc.exitcode:
                print("qtile exited with exitcode: %d" % self.proc.exitcode, file=sys.stderr)
            self.proc = None
        for proc in self.testwindows[:]:
            proc.terminate()
            proc.wait()
            self.testwindows.remove(proc)

    def create_window(self, create, failed=None):
        """
        Uses the function `create` to create a window.

        Waits until qtile actually maps the window and then returns.
        (Docstring typo "fucntion" and stale parameter name fixed.)
        """
        client = self.c
        start = len(client.windows())
        create()

        @Retry(ignore_exceptions=(RuntimeError,), fail_msg='Window never appeared...')
        def success():
            while failed is None or not failed():
                if len(client.windows()) > start:
                    return True
            raise RuntimeError("not here yet")

        return success()

    def _spawn_window(self, *args):
        """Starts a program which opens a window

        Spawns a new subprocess for a command that opens a window, given by the
        arguments to this method. Spawns the new process and checks that qtile
        maps the new window.
        """
        if not args:
            raise AssertionError("Trying to run nothing! (missing arguments)")
        proc = None

        def spawn():
            nonlocal proc
            proc = subprocess.Popen(args, env={"DISPLAY": self.display})

        def failed():
            if proc.poll() is not None:
                return True
            return False

        self.create_window(spawn, failed=failed)
        self.testwindows.append(proc)
        return proc

    def _spawn_script(self, script, *args):
        """Run a helper from the tests' scripts/ directory as a test window."""
        d = os.path.dirname(os.path.realpath(__file__))
        # the original assigned sys.executable twice; once is enough
        python = sys.executable
        path = os.path.join(d, "scripts", script)
        return self._spawn_window(python, path, *args)

    def kill_window(self, proc):
        """Kill a window and check that qtile unmaps it

        Kills a window created by calling one of the `self.test*` methods,
        ensuring that qtile removes it from the `windows` attribute.
        """
        assert proc in self.testwindows, "Given process is not a spawned window"
        start = len(self.c.windows())
        proc.terminate()
        proc.wait()
        self.testwindows.remove(proc)

        @Retry(ignore_exceptions=(ValueError,))
        def success():
            if len(self.c.windows()) < start:
                return True
            raise ValueError('window is still in client list!')

        if not success():
            raise AssertionError("Window could not be killed...")

    def test_window(self, name):
        return self._spawn_script("window.py", self.display, name)

    def test_tkwindow(self, name, wm_type):
        return self._spawn_script("tkwindow.py", name, wm_type)

    def test_dialog(self, name="dialog"):
        return self.test_tkwindow(name, "dialog")

    def test_notification(self, name="notification"):
        """
        Simulate a notification window. Note that, for testing purposes, this
        process must be killed explicitly, unlike actual notifications which
        are sent to a notification server and then expire after a timeout.
        """
        # Don't use a real notification, e.g. notify-send or
        # zenity --notification, since we want to keep the process on until
        # explicitly killed
        return self.test_tkwindow(name, "notification")

    def test_xclock(self):
        path = whereis("xclock")
        return self._spawn_window(path)

    def test_xeyes(self):
        path = whereis("xeyes")
        return self._spawn_window(path)

    def test_xcalc(self):
        path = whereis("xcalc")
        return self._spawn_window(path)

    def groupconsistency(self):
        """Assert that groups and screens agree about their attachments."""
        groups = self.c.groups()
        screens = self.c.screens()
        seen = set()
        for g in groups.values():
            scrn = g["screen"]
            if scrn is not None:
                if scrn in seen:
                    raise AssertionError(
                        "Screen referenced from more than one group.")
                seen.add(scrn)
                assert screens[scrn]["group"] == g["name"]
        # message rewritten as a single literal: the original backslash
        # continuation embedded a run of indentation spaces in the string
        assert len(seen) == len(screens), "Not all screens had an attached group."
@pytest.fixture(scope="session")
def xvfb():
    """Session-scoped Xvfb display; fails fast if X never comes up."""
    with xcffib.testing.XvfbTest():
        disp = os.environ["DISPLAY"]
        if can_connect_x11(disp):
            yield
        else:
            raise OSError("Xvfb did not come up")
@pytest.fixture(scope="function")
def xephyr(request, xvfb):
    """Function-scoped Xephyr instance, configured from request.param."""
    with Xephyr(**getattr(request, "param", {})) as instance:
        yield instance
@pytest.fixture(scope="function")
def manager(request, xephyr):
    """Yield a started TestManager running the config from request.param."""
    config = getattr(request, "param", BareConfig)
    # fill attributes missing from the test config with the defaults
    for attr in dir(default_config):
        if not hasattr(config, attr):
            setattr(config, attr, getattr(default_config, attr))

    with tempfile.NamedTemporaryFile() as f:
        sockfile = f.name
        # Construct before the try: the original referenced `manager` in the
        # finally block even when the constructor itself had raised, turning
        # the real error into a NameError.
        manager = TestManager(sockfile, xephyr.display, request.config.getoption("--debuglog"))
        try:
            manager.start(config)
            yield manager
        finally:
            manager.terminate()
@pytest.fixture(scope="function")
def manager_nospawn(request, xephyr):
    """Yield an unstarted TestManager; the test starts qtile itself."""
    with tempfile.NamedTemporaryFile() as f:
        sockfile = f.name
        # Construct before the try so the finally never hits an unbound name
        # when the constructor raises.
        manager = TestManager(sockfile, xephyr.display, request.config.getoption("--debuglog"))
        try:
            yield manager
        finally:
            manager.terminate()
# Reusable marker: run the test against a Xephyr started without Xinerama.
no_xinerama = pytest.mark.parametrize("xephyr", [{"xinerama": False}], indirect=True)
|
dns.py | #Servidor intermediario para acesso indireto de Felipe Gemmal, Carlos Henrique Rorato Souza
# -*- coding: utf-8 -*-
import socket
import sys
import threading

# Registered services kept in dicts:
#   names maps service name -> (ip, port), e.g. {"1": ("localhost", "1234")}
#   keys  maps service name -> list of search keywords, e.g. {"1": listaS1}
names = {}
keys = {}
# For each service, a list of its keywords, for example:
#   listaS1 = ["video", "engraçado", "youtube", "comedia"]
#   listaS2 = ["audio", "musica"]
#   listaS3 = ["imagem", "foto"]
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# this name server's own ip and port
ip = 'localhost'
porta = 12351  # default port that will be advertised
# middleware ip and port
midIp = 'localhost'
midPorta = 12388
def cliente(connection, porta):
    """Handle a 'request' connection: look a service up by exact name.

    Protocol: send "OK", receive the service name, then reply either
    "N" (empty request / not found) or "<ip> <port>".
    """
    print("Thread criada em busca por nome")
    connection.send("OK".encode('utf-8'))
    nomeServico = str(connection.recv(1024).decode('utf-8')).split(" ")
    if nomeServico[0] == "":
        print("Sem dados")
        connection.send(("N").encode('utf-8'))
        connection.close()
        return
    print(nomeServico)
    # look the service up by name; -1 marks a miss
    endereco = names.get(nomeServico[0], -1)
    # service not found
    if (endereco == -1):
        connection.send(("N").encode('utf-8'))
    # service name found: reply with its address
    else:
        returnip, returnporta = endereco
        connection.send((str(returnip) + " " + returnporta).encode('utf-8'))
    connection.close()
    return
def clienteKey(connection, porta):
    """Handle a 'keyRequest' connection: look a service up by keyword.

    Protocol: send "OK", receive a keyword, then reply either "N"
    (empty request / not found) or "<ip> <port>" of the first service
    whose keyword list contains it.
    """
    print("Thread criada em busca por chave")
    connection.send("OK".encode('utf-8'))
    nomeServico = str(connection.recv(1024).decode('utf-8')).split(" ")
    if nomeServico[0] == "":
        print("Sem dados")
        connection.send(("N").encode('utf-8'))
        connection.close()
        return
    print("Procurando por:" + nomeServico[0])
    # search every service's keyword list for the requested keyword
    encontrou = 0
    for x in keys.keys():  # for each service
        y = keys.get(x)  # fetch its keyword list
        for z in range(len(y)):  # and compare each keyword with the query
            if (y[z] == nomeServico[0]):
                encontrou = 1
                servico = x
                break
        if (encontrou == 1):
            break
    # service not found
    if (encontrou == 0):
        print("Nome nao existe")
        connection.send(("N").encode('utf-8'))
    # service found: reply with its registered address
    else:
        endereco = names.get(servico, -1)
        returnip, returnporta = endereco
        connection.send((returnip + " " + returnporta).encode('utf-8'))
    connection.close()
    return
def addService(connection, cliente):
    """Handle an 'addService' connection: register a new service.

    Receives "name ip port", then the space-separated keyword list, and
    stores both in the module-level names/keys dicts.
    """
    print("Thread criada")
    connection.send("OK".encode('utf-8'))
    # receive the service's name, ip and port
    novoServico = str(connection.recv(1024).decode('utf-8')).split(" ")
    connection.send("OK".encode('utf-8'))
    # receive the new service's search keywords
    chavesServico = str(connection.recv(1024).decode('utf-8')).split(" ")
    print(chavesServico)
    # register address and keywords under the service name
    names.update({novoServico[0]: (novoServico[1], novoServico[2])})
    keys.update({novoServico[0]: chavesServico})
    print("Novo servico adicionado: " + str(novoServico[0]) + " " + str(names.get(novoServico[0])))
    connection.send("Novo servico adicionado".encode('utf-8'))
    connection.close()
    return
def connectMiddleware(ip, porta, minhaPorta):
    """Register this name server's port with the middleware at (ip, porta)."""
    middle = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    middle.connect((ip, porta))
    middle.send("addAddress".encode('utf-8'))
    resposta = str(middle.recv(1024).decode('utf-8'))
    print(resposta)
    middle.send(str(minhaPorta).encode('utf-8'))
    resposta = str(middle.recv(1024).decode('utf-8'))
    print(resposta)
    middle.close()
    return
# Register with the middleware, then serve lookups forever, one thread
# per accepted connection.
connectMiddleware(midIp, midPorta, porta)
server.bind((ip, porta))
server.listen(10)
print("Esperando conexao")
while True:
    co, endCliente = server.accept()
    print("Conexao aceita")
    # the first message selects the operation for this connection
    tipo = str(co.recv(1024).decode('utf-8'))
    print(tipo + ' Criando thread')
    if (tipo == "request"):
        linha = threading.Thread(target=cliente, args=(co, porta))
        linha.start()
    elif (tipo == "addService"):
        linha = threading.Thread(target=addService, args=(co, endCliente))
        linha.start()
    elif (tipo == "keyRequest"):
        linha = threading.Thread(target=clienteKey, args=(co, porta))
        linha.start()
# NOTE(review): unreachable — the `while True` above never breaks.
server.close()
|
mainfile.py | from tkinter import *
import os
from pygame import mixer
import tkinter.messagebox
from tkinter import filedialog
import time
import threading
from tkinter import ttk
from ttkthemes import themed_tk as tk
from mutagen.mp3 import MP3
import pyttsx3
import datetime
# Text-to-speech setup: SAPI5 engine using the second installed voice.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)

def speak(audio):
    """Speak *audio* aloud and block until the utterance finishes."""
    engine.say(audio)
    engine.runAndWait()

# NOTE(review): `rate` is read but never used afterwards.
rate = engine.getProperty('rate')
engine.setProperty('rate', 155)
def wishMe():
    """Greet the user with a time-of-day appropriate spoken message."""
    hour = int(datetime.datetime.now().hour)
    # Inclusive lower bounds so midnight (0) and the boundary hours 12/18
    # land in the intended buckets; the original `hour>0 and hour<12`
    # sent hour 0 and hour 12 to the "Good Evening" branch.
    if 0 <= hour < 12:
        speak("Good Morning You can Listen to some morning music")
    elif 12 <= hour < 18:
        speak("Good Day You can listen to some energetic music")
    else:
        speak("Good Evening You can hear some relaxing music ")
wishMe()

# Themed root window for the player UI.
root = tk.ThemedTk()
root.geometry("800x500")
root.get_themes()
root.set_theme("clearlooks")
root.configure(bg='light blue')

# Status bar pinned to the bottom of the window.
statusbar = ttk.Label(root, text="Musical", relief=SUNKEN, anchor=W, font='lucida 10 italic')
statusbar.pack(side=BOTTOM, fill=X)

menubar = Menu(root)
root.config(menu=menubar)
subMenu = Menu(menubar, tearoff=0)

# Full paths of the songs added so far (parallel to the playlist listbox).
playlist = []
def search_file():
    """Ask the user for a song file, add it to the playlist and queue it."""
    global filename_path
    filename_path = filedialog.askopenfilename()
    add_to_playlist(filename_path)
    mixer.music.queue(filename_path)
def add_to_playlist(filename):
    """Insert a song at the top of the listbox and the playlist.

    The listbox shows only the basename; the parallel ``playlist`` list
    stores the full path taken from the module-level ``filename_path``.
    """
    filename = os.path.basename(filename)
    index = 0  # always insert at the top
    playlistbox.insert(index, filename)
    # NOTE(review): stores the global filename_path, not the argument —
    # callers must set filename_path before calling.
    playlist.insert(index, filename_path)
    index += 1  # no effect: index is local and reset on every call
# File menu: open a song or quit.
menubar.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="Open", command=search_file)
subMenu.add_command(label="Exit", command=root.destroy)

def about():
    """Show the 'about' message box."""
    tkinter.messagebox.showinfo('Its a music player.For any help Contact Madhura,Imdad or Janvi')

# Help menu.
subMenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Help", menu=subMenu)
subMenu.add_command(label="About Us", command=about)
mixer.init()
root.title("Musical")
root.iconbitmap(r'a.ico')

# Right-hand frame holding the playlist box and add/remove buttons.
add_del_frame = Frame(root, relief=RAISED, borderwidth=4)
add_del_frame.pack(side=RIGHT, padx=29, pady=30)
playlistbox = Listbox(add_del_frame)
playlistbox.pack()
addBtn = ttk.Button(add_del_frame, text="+ Add", command=search_file)
addBtn.pack(side=LEFT)

def delete_song():
    """Remove the selected song from both the listbox and the playlist."""
    song_chosen = playlistbox.curselection()
    song_chosen = int(song_chosen[0])
    playlistbox.delete(song_chosen)
    playlist.pop(song_chosen)

del_Btn = ttk.Button(add_del_frame, text="- Remove", command=delete_song)
del_Btn.pack(side=LEFT)

# Central frame with track-length and current-time labels.
right_frame = Frame(root, relief=RAISED, borderwidth=7)
right_frame.pack(pady=50)
topframe = Frame(right_frame)
topframe.pack()
lengthlabel = ttk.Label(topframe, text='Full Length : --:--',)
lengthlabel.pack(pady=5.5)
currenttimelabel = ttk.Label(topframe, text='Current Time : --:--', relief=GROOVE)
currenttimelabel.pack()
def show_details(play_song):
    """Display the track's total length and start the elapsed-time counter.

    Uses mutagen's MP3 metadata for .mp3 files and pygame's Sound length
    for anything else.
    """
    file_data = os.path.splitext(play_song)
    if file_data[1] == '.mp3':
        audio = MP3(play_song)
        total_length = audio.info.length
    else:
        a = mixer.Sound(play_song)
        total_length = a.get_length()
    # div - total_length/60, mod - total_length % 60
    minutes, seconds = divmod(total_length, 60)
    minutes = round(minutes)
    seconds = round(seconds)
    timeformat = '{:02d}:{:02d}'.format(minutes, seconds)
    lengthlabel['text'] = "Full Length" + ' - ' + timeformat
    # count elapsed time on a background thread so the UI stays responsive
    t1 = threading.Thread(target=start_count, args=(total_length,))
    t1.start()
def start_count(t):
    """Tick the current-time label once a second while music plays.

    Runs on a background thread; *t* is the track length in seconds.
    Pausing freezes the counter without advancing it.
    """
    global paused
    current_time = 0
    while current_time <= t and mixer.music.get_busy():
        if paused:
            # Sleep briefly instead of spinning: the original bare
            # `continue` busy-waited a full CPU core while paused.
            time.sleep(0.2)
        else:
            minutes, seconds = divmod(current_time, 60)
            minutes = round(minutes)
            seconds = round(seconds)
            timeformat = '{:02d}:{:02d}'.format(minutes, seconds)
            currenttimelabel['text'] = "Present Time" + ' - ' + timeformat
            time.sleep(1)
            current_time += 1
def play_music():
    """Resume if paused, otherwise play the song selected in the listbox."""
    global paused
    # `paused` is only ever assigned inside handlers, so guard the first
    # call: the original `if paused:` raised NameError when play was
    # pressed before anything had set the flag.
    if globals().get('paused', False):
        mixer.music.unpause()
        statusbar['text'] = "Music has been Resumed"
        paused = FALSE
    else:
        try:
            stop_music()
            time.sleep(1)
            selected_song = playlistbox.curselection()
            selected_song = int(selected_song[0])
            play_it = playlist[selected_song]
            mixer.music.load(play_it)
            mixer.music.play()
            statusbar['text'] = "Playing your music" + ' - ' + os.path.basename(play_it)
            show_details(play_it)
        except Exception:
            # No selection, missing file or mixer failure: tell the user.
            tkinter.messagebox.showerror('File not found','Musical was unable to find this file. Try Again.')
def stop_music():
    """Stop playback and clear the paused flag."""
    # `global` was missing in the original, so `paused = FALSE` only bound
    # a local: stopping while paused left the player believing it was
    # still paused.
    global paused
    mixer.music.stop()
    statusbar['text'] = "Music has been Stopped"
    paused = FALSE
def pause_music():
    """Pause playback and set the module-level paused flag."""
    global paused
    paused = TRUE
    mixer.music.pause()
    statusbar['text'] = "Music has been Paused"
def rewind_music():
    """Restart the selected song from the beginning via play_music()."""
    play_music()
    statusbar['text'] = "Music has been Rewinded"
def set_vol(val):
    """Volume-slider callback: map the 0-100 scale value to 0.0-1.0."""
    mixer.music.set_volume(float(val) / 100)
# Tracks whether the player is currently muted.
muted = FALSE

def mute_music():
    """Toggle mute: volume 0 <-> 90%, updating the button icon and slider."""
    global muted
    if muted:
        mixer.music.set_volume(0.9)
        volumeBtn.configure(image=volume_Photo)
        scale.set(90)
        muted = FALSE
    else:
        mixer.music.set_volume(0)
        volumeBtn.configure(image=mute_Photo)
        scale.set(0)
        muted = TRUE
# Transport controls: play / pause / stop.
middleframe = Frame(right_frame)
middleframe.pack(pady=30, padx=30)
play_Photo = PhotoImage(file='play.png')
playBtn = ttk.Button(middleframe, image=play_Photo, command=play_music)
playBtn.grid(row=0, column=0, padx=10)
pause_Photo = PhotoImage(file='pause.png')
pauseBtn = ttk.Button(middleframe, image=pause_Photo, command=pause_music)
pauseBtn.grid(row=0, column=1, padx=10)
stop_Photo = PhotoImage(file='stop.png')
stopBtn = ttk.Button(middleframe, image=stop_Photo, command=stop_music)
stopBtn.grid(row=0, column=2, padx=10)

# Bottom row: rewind, mute toggle and volume slider.
bottomframe = Frame(right_frame)
bottomframe.pack()
rewind_Photo = PhotoImage(file='rewind.png')
rewindBtn = ttk.Button(bottomframe, image=rewind_Photo, command=rewind_music)
rewindBtn.grid(row=0, column=0)
mute_Photo = PhotoImage(file='mute.png')
volume_Photo = PhotoImage(file='volume.png')
volumeBtn = ttk.Button(bottomframe, image=volume_Photo, command=mute_music)
volumeBtn.grid(row=0, column=1)
scale = ttk.Scale(bottomframe, from_=0, to=100, orient=VERTICAL, command=set_vol)
scale.set(60)  # start at 60% volume, kept in sync with the mixer below
mixer.music.set_volume(0.6)
scale.grid(row=0, column=2, pady=15, padx=30)
def on_close():
    """Window-manager close handler: halt playback, then tear down the Tk root."""
    stop_music()
    root.destroy()
# route the window-manager close button through on_close, then run the UI loop
root.protocol("WM_DELETE_WINDOW", on_close)
root.mainloop()
|
server.py | import asyncio
import os
import traceback
from functools import partial
from inspect import isawaitable
from multiprocessing import Process
from signal import SIG_IGN, SIGINT, SIGTERM, Signals
from signal import signal as signal_func
from socket import SO_REUSEADDR, SOL_SOCKET, socket
from time import time
from httptools import HttpRequestParser
from httptools.parser.errors import HttpParserError
from sanic.compat import Header
from sanic.exceptions import (
HeaderExpectationFailed,
InvalidUsage,
PayloadTooLarge,
RequestTimeout,
ServerError,
ServiceUnavailable,
)
from sanic.log import access_logger, logger
from sanic.request import EXPECT_HEADER, Request, StreamBuffer
from sanic.response import HTTPResponse
# Prefer uvloop's faster event loop policy when the package is installed;
# silently fall back to the stdlib asyncio loop otherwise.
try:
    import uvloop
    if not isinstance(asyncio.get_event_loop_policy(), uvloop.EventLoopPolicy):
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
    pass
class Signal:
    """Shared shutdown flag.

    ``serve`` flips ``stopped`` to True during graceful shutdown so that
    protocol instances stop keeping connections alive.
    """
    stopped = False
class HttpProtocol(asyncio.Protocol):
    """
    This class provides a basic HTTP implementation of the sanic framework.

    One instance is created per accepted TCP connection (see the
    ``functools.partial`` factory in ``serve``). asyncio drives the
    ``connection_made`` / ``data_received`` / ``connection_lost`` callbacks,
    and httptools' ``HttpRequestParser`` drives the ``on_*`` callbacks.
    """
    __slots__ = (
        # app
        "app",
        # event loop, connection
        "loop",
        "transport",
        "connections",
        "signal",
        # request params
        "parser",
        "request",
        "url",
        "headers",
        # request config
        "request_handler",
        "request_timeout",
        "response_timeout",
        "keep_alive_timeout",
        "request_max_size",
        "request_buffer_queue_size",
        "request_class",
        "is_request_stream",
        "router",
        "error_handler",
        # enable or disable access log purpose
        "access_log",
        # connection management
        "_total_request_size",
        "_request_timeout_handler",
        "_response_timeout_handler",
        "_keep_alive_timeout_handler",
        "_last_request_time",
        "_last_response_time",
        "_is_stream_handler",
        "_not_paused",
        "_request_handler_task",
        "_request_stream_task",
        "_keep_alive",
        "_header_fragment",
        "state",
        "_debug",
    )
    def __init__(
        self,
        *,
        loop,
        app,
        request_handler,
        error_handler,
        signal=Signal(),
        connections=None,
        request_timeout=60,
        response_timeout=60,
        keep_alive_timeout=5,
        request_max_size=None,
        request_buffer_queue_size=100,
        request_class=None,
        access_log=True,
        keep_alive=True,
        is_request_stream=False,
        router=None,
        state=None,
        debug=False,
        **kwargs
    ):
        """Initialize per-connection state.

        ``**kwargs`` absorbs extra options that this protocol does not use
        (``serve`` also passes the websocket_* settings through the factory).
        """
        self.loop = loop
        self.app = app
        self.transport = None
        self.request = None
        self.parser = None
        self.url = None
        self.headers = None
        self.router = router
        self.signal = signal
        self.access_log = access_log
        self.connections = connections if connections is not None else set()
        self.request_handler = request_handler
        self.error_handler = error_handler
        self.request_timeout = request_timeout
        self.request_buffer_queue_size = request_buffer_queue_size
        self.response_timeout = response_timeout
        self.keep_alive_timeout = keep_alive_timeout
        self.request_max_size = request_max_size
        self.request_class = request_class or Request
        self.is_request_stream = is_request_stream
        self._is_stream_handler = False
        # NOTE(review): the explicit loop= argument to asyncio.Event was
        # deprecated in Python 3.8 and removed in 3.10 - confirm the
        # targeted Python version.
        self._not_paused = asyncio.Event(loop=loop)
        self._total_request_size = 0
        self._request_timeout_handler = None
        self._response_timeout_handler = None
        self._keep_alive_timeout_handler = None
        self._last_request_time = None
        self._last_response_time = None
        self._request_handler_task = None
        self._request_stream_task = None
        self._keep_alive = keep_alive
        self._header_fragment = b""
        # state dict is shared across connections (passed in by the server)
        self.state = state if state else {}
        if "requests_count" not in self.state:
            self.state["requests_count"] = 0
        self._debug = debug
        self._not_paused.set()
    @property
    def keep_alive(self):
        """
        Check if the connection needs to be kept alive based on the params
        attached to the `_keep_alive` attribute, :attr:`Signal.stopped`
        and :func:`HttpProtocol.parser.should_keep_alive`
        :return: ``True`` if connection is to be kept alive ``False`` else
        """
        return (
            self._keep_alive
            and not self.signal.stopped
            and self.parser.should_keep_alive()
        )
    # -------------------------------------------- #
    # Connection
    # -------------------------------------------- #
    def connection_made(self, transport):
        # asyncio callback: a client connected; start the request timer.
        self.connections.add(self)
        self._request_timeout_handler = self.loop.call_later(
            self.request_timeout, self.request_timeout_callback
        )
        self.transport = transport
        self._last_request_time = time()
    def connection_lost(self, exc):
        # asyncio callback: cancel all outstanding tasks and timers
        # belonging to this connection.
        self.connections.discard(self)
        if self._request_handler_task:
            self._request_handler_task.cancel()
        if self._request_stream_task:
            self._request_stream_task.cancel()
        if self._request_timeout_handler:
            self._request_timeout_handler.cancel()
        if self._response_timeout_handler:
            self._response_timeout_handler.cancel()
        if self._keep_alive_timeout_handler:
            self._keep_alive_timeout_handler.cancel()
    def pause_writing(self):
        # transport write buffer above high-water mark: block drain() callers
        self._not_paused.clear()
    def resume_writing(self):
        # transport write buffer drained: release drain() callers
        self._not_paused.set()
    def request_timeout_callback(self):
        # See the docstring in the RequestTimeout exception, to see
        # exactly what this timeout is checking for.
        # Check if elapsed time since request initiated exceeds our
        # configured maximum request timeout value
        time_elapsed = time() - self._last_request_time
        if time_elapsed < self.request_timeout:
            # not expired yet - re-arm the timer for the remaining time
            time_left = self.request_timeout - time_elapsed
            self._request_timeout_handler = self.loop.call_later(
                time_left, self.request_timeout_callback
            )
        else:
            if self._request_stream_task:
                self._request_stream_task.cancel()
            if self._request_handler_task:
                self._request_handler_task.cancel()
            self.write_error(RequestTimeout("Request Timeout"))
    def response_timeout_callback(self):
        # Check if elapsed time since response was initiated exceeds our
        # configured maximum request timeout value
        time_elapsed = time() - self._last_request_time
        if time_elapsed < self.response_timeout:
            time_left = self.response_timeout - time_elapsed
            self._response_timeout_handler = self.loop.call_later(
                time_left, self.response_timeout_callback
            )
        else:
            if self._request_stream_task:
                self._request_stream_task.cancel()
            if self._request_handler_task:
                self._request_handler_task.cancel()
            self.write_error(ServiceUnavailable("Response Timeout"))
    def keep_alive_timeout_callback(self):
        """
        Check if elapsed time since last response exceeds our configured
        maximum keep alive timeout value and if so, close the transport
        pipe and let the response writer handle the error.
        :return: None
        """
        time_elapsed = time() - self._last_response_time
        if time_elapsed < self.keep_alive_timeout:
            time_left = self.keep_alive_timeout - time_elapsed
            self._keep_alive_timeout_handler = self.loop.call_later(
                time_left, self.keep_alive_timeout_callback
            )
        else:
            logger.debug("KeepAlive Timeout. Closing connection.")
            self.transport.close()
            self.transport = None
    # -------------------------------------------- #
    # Parsing
    # -------------------------------------------- #
    def data_received(self, data):
        # Check for the request itself getting too large and exceeding
        # memory limits
        self._total_request_size += len(data)
        # NOTE(review): request_max_size defaults to None, which would make
        # this comparison raise TypeError - confirm callers always supply a
        # concrete integer limit.
        if self._total_request_size > self.request_max_size:
            self.write_error(PayloadTooLarge("Payload Too Large"))
        # Create parser if this is the first time we're receiving data
        if self.parser is None:
            assert self.request is None
            self.headers = []
            self.parser = HttpRequestParser(self)
            # requests count
            self.state["requests_count"] = self.state["requests_count"] + 1
        # Parse request chunk or close connection
        try:
            self.parser.feed_data(data)
        except HttpParserError:
            message = "Bad Request"
            if self._debug:
                message += "\n" + traceback.format_exc()
            self.write_error(InvalidUsage(message))
    def on_url(self, url):
        # httptools callback: the URL may arrive in several fragments
        if not self.url:
            self.url = url
        else:
            self.url += url
    def on_header(self, name, value):
        # httptools callback: header names may also arrive fragmented, so
        # accumulate into _header_fragment until a value is seen.
        self._header_fragment += name
        if value is not None:
            if (
                self._header_fragment == b"Content-Length"
                and int(value) > self.request_max_size
            ):
                self.write_error(PayloadTooLarge("Payload Too Large"))
            try:
                value = value.decode()
            except UnicodeDecodeError:
                # fall back for non-UTF-8 header bytes
                value = value.decode("latin_1")
            self.headers.append(
                (self._header_fragment.decode().casefold(), value)
            )
            self._header_fragment = b""
    def on_headers_complete(self):
        # httptools callback: all headers parsed - build the Request object.
        self.request = self.request_class(
            url_bytes=self.url,
            headers=Header(self.headers),
            version=self.parser.get_http_version(),
            method=self.parser.get_method().decode(),
            transport=self.transport,
            app=self.app,
        )
        # Remove any existing KeepAlive handler here,
        # It will be recreated if required on the new request.
        if self._keep_alive_timeout_handler:
            self._keep_alive_timeout_handler.cancel()
            self._keep_alive_timeout_handler = None
        if self.request.headers.get(EXPECT_HEADER):
            self.expect_handler()
        if self.is_request_stream:
            self._is_stream_handler = self.router.is_stream_handler(
                self.request
            )
            if self._is_stream_handler:
                # streaming handlers start running before the body arrives
                self.request.stream = StreamBuffer(
                    self.request_buffer_queue_size
                )
                self.execute_request_handler()
    def expect_handler(self):
        """
        Handler for Expect Header.
        """
        expect = self.request.headers.get(EXPECT_HEADER)
        if self.request.version == "1.1":
            if expect.lower() == "100-continue":
                self.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
            else:
                self.write_error(
                    HeaderExpectationFailed(
                        "Unknown Expect: {expect}".format(expect=expect)
                    )
                )
    def on_body(self, body):
        # httptools callback: either feed the stream buffer (streaming
        # handlers) or append to the in-memory request body.
        if self.is_request_stream and self._is_stream_handler:
            self._request_stream_task = self.loop.create_task(
                self.body_append(body)
            )
        else:
            self.request.body_push(body)
    async def body_append(self, body):
        # Back-pressure: pause the transport while the stream buffer is full.
        if self.request.stream.is_full():
            self.transport.pause_reading()
            await self.request.stream.put(body)
            self.transport.resume_reading()
        else:
            await self.request.stream.put(body)
    def on_message_complete(self):
        # Entire request (headers and whole body) is received.
        # We can cancel and remove the request timeout handler now.
        if self._request_timeout_handler:
            self._request_timeout_handler.cancel()
            self._request_timeout_handler = None
        if self.is_request_stream and self._is_stream_handler:
            # None is the end-of-stream sentinel for the stream buffer
            self._request_stream_task = self.loop.create_task(
                self.request.stream.put(None)
            )
            return
        self.request.body_finish()
        self.execute_request_handler()
    def execute_request_handler(self):
        """
        Invoke the request handler defined by the
        :func:`sanic.app.Sanic.handle_request` method
        :return: None
        """
        self._response_timeout_handler = self.loop.call_later(
            self.response_timeout, self.response_timeout_callback
        )
        self._last_request_time = time()
        self._request_handler_task = self.loop.create_task(
            self.request_handler(
                self.request, self.write_response, self.stream_response
            )
        )
    # -------------------------------------------- #
    # Responding
    # -------------------------------------------- #
    def log_response(self, response):
        """
        Helper method provided to enable the logging of responses in case if
        the :attr:`HttpProtocol.access_log` is enabled.
        :param response: Response generated for the current request
        :type response: :class:`sanic.response.HTTPResponse` or
            :class:`sanic.response.StreamingHTTPResponse`
        :return: None
        """
        if self.access_log:
            extra = {"status": getattr(response, "status", 0)}
            if isinstance(response, HTTPResponse):
                extra["byte"] = len(response.body)
            else:
                # streaming responses have no materialized body
                extra["byte"] = -1
            extra["host"] = "UNKNOWN"
            if self.request is not None:
                if self.request.ip:
                    extra["host"] = "{0}:{1}".format(
                        self.request.ip, self.request.port
                    )
                extra["request"] = "{0} {1}".format(
                    self.request.method, self.request.url
                )
            else:
                extra["request"] = "nil"
            access_logger.info("", extra=extra)
    def write_response(self, response):
        """
        Writes response content synchronously to the transport.
        """
        if self._response_timeout_handler:
            self._response_timeout_handler.cancel()
            self._response_timeout_handler = None
        # NOTE(review): if reading self.keep_alive raised before the local
        # `keep_alive` is bound, the finally block below would hit a
        # NameError - confirm this path cannot occur.
        try:
            keep_alive = self.keep_alive
            self.transport.write(
                response.output(
                    self.request.version, keep_alive, self.keep_alive_timeout
                )
            )
            self.log_response(response)
        except AttributeError:
            logger.error(
                "Invalid response object for url %s, "
                "Expected Type: HTTPResponse, Actual Type: %s",
                self.url,
                type(response),
            )
            self.write_error(ServerError("Invalid response type"))
        except RuntimeError:
            if self._debug:
                logger.error(
                    "Connection lost before response written @ %s",
                    self.request.ip,
                )
            keep_alive = False
        except Exception as e:
            self.bail_out(
                "Writing response failed, connection closed {}".format(repr(e))
            )
        finally:
            if not keep_alive:
                self.transport.close()
                self.transport = None
            else:
                # arm keep-alive timer and reset state for the next request
                self._keep_alive_timeout_handler = self.loop.call_later(
                    self.keep_alive_timeout, self.keep_alive_timeout_callback
                )
                self._last_response_time = time()
                self.cleanup()
    async def drain(self):
        """Wait until the transport's write buffer is below the high-water mark."""
        await self._not_paused.wait()
    async def push_data(self, data):
        """Write raw bytes to the transport (used by streaming responses)."""
        self.transport.write(data)
    async def stream_response(self, response):
        """
        Streams a response to the client asynchronously. Attaches
        the transport to the response so the response consumer can
        write to the response as needed.
        """
        if self._response_timeout_handler:
            self._response_timeout_handler.cancel()
            self._response_timeout_handler = None
        # same caveat as write_response about `keep_alive` binding
        try:
            keep_alive = self.keep_alive
            response.protocol = self
            await response.stream(
                self.request.version, keep_alive, self.keep_alive_timeout
            )
            self.log_response(response)
        except AttributeError:
            logger.error(
                "Invalid response object for url %s, "
                "Expected Type: HTTPResponse, Actual Type: %s",
                self.url,
                type(response),
            )
            self.write_error(ServerError("Invalid response type"))
        except RuntimeError:
            if self._debug:
                logger.error(
                    "Connection lost before response written @ %s",
                    self.request.ip,
                )
            keep_alive = False
        except Exception as e:
            self.bail_out(
                "Writing response failed, connection closed {}".format(repr(e))
            )
        finally:
            if not keep_alive:
                self.transport.close()
                self.transport = None
            else:
                self._keep_alive_timeout_handler = self.loop.call_later(
                    self.keep_alive_timeout, self.keep_alive_timeout_callback
                )
                self._last_response_time = time()
                self.cleanup()
    def write_error(self, exception):
        # An error _is_ a response.
        # Don't throw a response timeout, when a response _is_ given.
        if self._response_timeout_handler:
            self._response_timeout_handler.cancel()
            self._response_timeout_handler = None
        response = None
        try:
            response = self.error_handler.response(self.request, exception)
            version = self.request.version if self.request else "1.1"
            self.transport.write(response.output(version))
        except RuntimeError:
            if self._debug:
                logger.error(
                    "Connection lost before error written @ %s",
                    self.request.ip if self.request else "Unknown",
                )
        except Exception as e:
            self.bail_out(
                "Writing error failed, connection closed {}".format(repr(e)),
                from_error=True,
            )
        finally:
            if self.parser and (
                self.keep_alive or getattr(response, "status", 0) == 408
            ):
                self.log_response(response)
            try:
                self.transport.close()
            except AttributeError:
                logger.debug("Connection lost before server could close it.")
    def bail_out(self, message, from_error=False):
        """
        In case if the transport pipes are closed and the sanic app encounters
        an error while writing data to the transport pipe, we log the error
        with proper details.
        :param message: Error message to display
        :param from_error: If the bail out was invoked while handling an
            exception scenario.
        :type message: str
        :type from_error: bool
        :return: None
        """
        if from_error or self.transport is None or self.transport.is_closing():
            logger.error(
                "Transport closed @ %s and exception "
                "experienced during error handling",
                (
                    self.transport.get_extra_info("peername")
                    if self.transport is not None
                    else "N/A"
                ),
            )
            logger.debug("Exception:", exc_info=True)
        else:
            self.write_error(ServerError(message))
            logger.error(message)
    def cleanup(self):
        """This is called when KeepAlive feature is used,
        it resets the connection in order for it to be able
        to handle receiving another request on the same connection."""
        self.parser = None
        self.request = None
        self.url = None
        self.headers = None
        self._request_handler_task = None
        self._request_stream_task = None
        self._total_request_size = 0
        self._is_stream_handler = False
    def close_if_idle(self):
        """Close the connection if a request is not being sent or received
        :return: boolean - True if closed, false if staying open
        """
        if not self.parser:
            self.transport.close()
            return True
        return False
    def close(self):
        """
        Force close the connection.
        """
        if self.transport is not None:
            self.transport.close()
            self.transport = None
def trigger_events(events, loop):
    """Trigger event callbacks (functions or async).

    :param events: iterable of sync or async callables taking the loop,
        or ``None`` / empty to do nothing. ``serve`` defaults all of its
        lifecycle hooks (before_start etc.) to ``None``, so the guard
        below prevents a TypeError when no hooks were registered.
    :param loop: event loop used to run awaitable results to completion
    """
    for event in events or ():
        result = event(loop)
        if isawaitable(result):
            loop.run_until_complete(result)
def serve(
    host,
    port,
    app,
    request_handler,
    error_handler,
    before_start=None,
    after_start=None,
    before_stop=None,
    after_stop=None,
    debug=False,
    request_timeout=60,
    response_timeout=60,
    keep_alive_timeout=5,
    ssl=None,
    sock=None,
    request_max_size=None,
    request_buffer_queue_size=100,
    reuse_port=False,
    loop=None,
    protocol=HttpProtocol,
    backlog=100,
    register_sys_signals=True,
    run_multiple=False,
    run_async=False,
    connections=None,
    signal=Signal(),
    request_class=None,
    access_log=True,
    keep_alive=True,
    is_request_stream=False,
    router=None,
    websocket_max_size=None,
    websocket_max_queue=None,
    websocket_read_limit=2 ** 16,
    websocket_write_limit=2 ** 16,
    state=None,
    graceful_shutdown_timeout=15.0,
    asyncio_server_kwargs=None,
):
    """Start asynchronous HTTP Server on an individual process.
    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
        listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
        received before it is respected. Takes arguments
        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
        received after it is respected. Takes arguments
        `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param response_timeout: time in seconds
    :param keep_alive_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param websocket_max_size: enforces the maximum size for
        incoming messages in bytes.
    :param websocket_max_queue: sets the maximum length of the queue
        that holds incoming messages.
    :param websocket_read_limit: sets the high-water limit of the buffer for
        incoming bytes, the low-water limit is half
        the high-water limit.
    :param websocket_write_limit: sets the high-water limit of the buffer for
        outgoing bytes, the low-water limit is a
        quarter of the high-water limit.
    :param is_request_stream: disable/enable Request.stream
    :param request_buffer_queue_size: streaming request buffer queue size
    :param router: Router object
    :param graceful_shutdown_timeout: How long take to Force close non-idle
        connection
    :param asyncio_server_kwargs: key-value args for asyncio/uvloop
        create_server method
    :return: Nothing
    """
    if not run_async:
        # create new event_loop after fork
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    if debug:
        loop.set_debug(debug)
    app.asgi = False
    connections = connections if connections is not None else set()
    # factory producing one protocol instance per accepted connection;
    # the websocket_* options are absorbed by the protocol's **kwargs when
    # a plain HttpProtocol is used
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        app=app,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        websocket_read_limit=websocket_read_limit,
        websocket_write_limit=websocket_write_limit,
        state=state,
        debug=debug,
    )
    asyncio_server_kwargs = (
        asyncio_server_kwargs if asyncio_server_kwargs else {}
    )
    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog,
        **asyncio_server_kwargs
    )
    if run_async:
        # caller awaits/runs the coroutine itself; skip loop management
        return server_coroutine
    trigger_events(before_start, loop)
    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return
    trigger_events(after_start, loop)
    # Ignore SIGINT when run_multiple
    if run_multiple:
        signal_func(SIGINT, SIG_IGN)
    # Register signals for graceful termination
    if register_sys_signals:
        # (sic: "_singals" is misspelled in the original source)
        _singals = (SIGTERM,) if run_multiple else (SIGINT, SIGTERM)
        for _signal in _singals:
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning(
                    "Sanic tried to use loop.add_signal_handler "
                    "but it is not implemented on this platform."
                )
    pid = os.getpid()
    try:
        logger.info("Starting worker [%s]", pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)
        # Run the on_stop function if provided
        trigger_events(before_stop, loop)
        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())
        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()
        # Gracefully shutdown timeout.
        # We should provide graceful_shutdown_timeout,
        # instead of letting connection hangs forever.
        # Let's roughly calculate time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1
        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection())
            else:
                conn.close()
        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)
        trigger_events(after_stop, loop)
    loop.close()
def serve_multiple(server_settings, workers):
    """Start multiple server processes simultaneously. Stop on interrupt
    and terminate signals, and drain connections when complete.
    :param server_settings: kw arguments to be passed to the serve function
    :param workers: number of workers to launch
    :param stop_event: if provided, is used as a stop signal
    :return:
    """
    server_settings["reuse_port"] = True
    server_settings["run_multiple"] = True
    # Handling when custom socket is not provided.
    if server_settings.get("sock") is None:
        # bind one shared, inheritable socket so every forked worker can
        # accept on it; host/port are cleared since the socket replaces them
        sock = socket()
        sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        sock.bind((server_settings["host"], server_settings["port"]))
        sock.set_inheritable(True)
        server_settings["sock"] = sock
        server_settings["host"] = None
        server_settings["port"] = None
    processes = []
    def sig_handler(signal, frame):
        # forward the termination signal to every worker process
        logger.info("Received signal %s. Shutting down.", Signals(signal).name)
        for process in processes:
            os.kill(process.pid, SIGTERM)
    signal_func(SIGINT, lambda s, f: sig_handler(s, f))
    signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
    for _ in range(workers):
        process = Process(target=serve, kwargs=server_settings)
        process.daemon = True
        process.start()
        processes.append(process)
    for process in processes:
        process.join()
    # the above processes will block this until they're stopped
    for process in processes:
        process.terminate()
    server_settings.get("sock").close()
|
__init__.py | #coding=utf-8
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
import multiprocessing
import concurrent.futures
import os, queue
from queue import Queue
import subprocess
import sys
import traceback
from util import Log
def queueThread(taskQueue, errorQueue, tcnt=4):
    """Drain taskQueue using a pool of *tcnt* worker threads.

    Each worker runs runThread, which pushes failed tasks onto errorQueue.
    """
    print('threads : ' + str(tcnt))
    with ThreadPoolExecutor(tcnt) as pool:
        for worker_id in range(1, tcnt + 1):
            pool.submit(runThread, worker_id, taskQueue, errorQueue)
def runThread(tid, taskQueue, errorQueue):
    """Worker-thread body: pop (func, *args) tuples off taskQueue until it
    is empty, calling func(tid, *args); tasks whose func returns a falsy
    value are recorded on errorQueue for later inspection/retry.

    Bugfix: the original used a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    try:
        while True:
            func, *args = taskQueue.get_nowait()
            if not func(tid, *args):
                errorQueue.put((func, *args))
    except queue.Empty:
        # queue drained - normal termination
        pass
    except Exception:
        t, v, tb = sys.exc_info()
        print(t, v)
        traceback.print_tb(tb)
        Log.printDetailln('线程异常,' + str(sys.exc_info()))
    finally:
        Log.printDetailln('线程结束, tid : ' + str(tid))
def queueProcess(taskQueue, errorQueue, tcnt=4):
    """Drain taskQueue using *tcnt* worker processes running runProcess,
    then wait for all of them to finish."""
    print('proces : ', tcnt)
    workers = [
        multiprocessing.Process(target=runProcess, args=(wid, taskQueue, errorQueue))
        for wid in range(1, tcnt + 1)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def runProcess(tid, taskQueue, errorQueue):
    """Worker-process body: pop (func, *args) tuples off taskQueue until it
    is empty, calling func(tid, *args); tasks whose func returns a falsy
    value are recorded on errorQueue.

    Bugfix: the original looped on ``taskQueue.qsize() > 0`` followed by a
    blocking ``get()`` - with several workers the queue can drain between
    those two calls and the late worker blocks forever. A non-blocking get
    with queue.Empty treated as normal termination removes the race. The
    bare ``except:`` is also narrowed to ``except Exception``.
    """
    try:
        while True:
            try:
                func, *args = taskQueue.get_nowait()
            except queue.Empty:
                break
            if not func(tid, *args):
                errorQueue.put((func, *args))
    except Exception:
        t, v, tb = sys.exc_info()
        print(t, v)
        traceback.print_tb(tb)
        Log.printDetailln('线程异常,' + str(sys.exc_info()))
    finally:
        Log.printDetailln('线程结束, tid : ' + str(tid))
def execCmd(cmd):
    """Run *cmd* through the shell and return its exit status.

    Bugfix: the original called ``p.wait()`` while stdout was a PIPE that
    nobody read; once the child produces more output than the OS pipe
    buffer holds, both sides deadlock. ``communicate()`` drains the pipe
    before waiting for the process to exit.
    """
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    p.communicate()  # drain stdout/stderr so the child can always exit
    return p.returncode
|
__init__.py | import sys
import struct
import abc
import queue
import threading
# constants
RMF_CMD_START_ADDR = 0x3FFFFC00  # high address range reserved for command messages
RMF_FILE_TYPE_FIXED = 0
RMF_FILE_TYPE_DYNAMIC = 1
RMF_FILE_TYPE_STREAM = 2
RMF_CMD_ACK = 0  # reserved for future use
RMF_CMD_NACK = 1  # reserved for future use
RMF_CMD_EOT = 2  # reserved for future use
RMF_CMD_FILE_INFO = 3
RMF_CMD_FILE_OPEN = 10
RMF_CMD_FILE_CLOSE = 11
RMF_DIGEST_TYPE_NONE = 0
# internal message-queue message types used by FileManager's worker thread
RMF_MSG_CONNECT = 0
RMF_MSG_FILEINFO = 1
RMF_MSG_FILEOPEN = 2
RMF_MSG_FILECLOSE = 3
RMF_MSG_WRITE_DATA = 4
RMF_FILEINFO_BASE_LEN = 48  # fixed-size part of a FILEINFO payload (3I2H32s)
# header byte masks: bit 7 selects the 4-byte (30-bit address) form,
# bit 6 is the "more" flag, low 6 bits are the address high bits
RMF_MORE_BIT_MASK = 0x40
RMF_HIGH_BIT_MASK = 0x80
RMF_ADDR_BIT_MASK = 0x3F
def unpackHeader(data):
    """
    Parse a RemoteFile message header from *data*.

    Returns tuple (bytes_parsed, address, more_bit); bytes_parsed is 0 and
    address is None when data does not hold a complete header.

    Bugfix: the long-form (30-bit address) branch required
    ``(i+2) < len(data)`` - one byte more than the two it actually
    consumes - so a message that was exactly a 4-byte header failed to
    parse even though packHeader produces exactly 4 bytes. The check now
    mirrors the short-form branch and only demands the two bytes needed.
    """
    i = 0
    bytes_parsed = 0
    more_bit = False
    address = None
    if (i + 1) < len(data):  # at least the two leading header bytes present
        b1, b2 = data[i:i+2]
        i += 2
        if b1 & RMF_MORE_BIT_MASK:
            more_bit = True
        if b1 & RMF_HIGH_BIT_MASK:
            # Parse next 16 bits to form a full 30-bit address
            b1 &= RMF_ADDR_BIT_MASK
            if len(data) >= i + 2:  # was (i+2) < len(data): off-by-one
                b3, b4 = data[i:i+2]
                i += 2
                address = (b1 << 24) | (b2 << 16) | (b3 << 8) | b4
        else:
            # short form: 14-bit address in the two bytes already read
            b1 &= RMF_ADDR_BIT_MASK
            address = (b1 << 8) | (b2)
    if address is not None:
        bytes_parsed = i
    return (bytes_parsed, address, more_bit)
def packHeader(address, more_bit=False):
    """
    Encode *address* (plus the optional more_bit flag) into the 2-byte or
    4-byte RemoteFile header form; returns bytes.
    """
    if address >= 1073741824:
        raise ValueError('address must be less than 1073741824')
    flags = RMF_MORE_BIT_MASK if more_bit else 0
    if address < 16384:
        # short form: 14-bit address in two bytes
        return bytes([flags | ((address >> 8) & RMF_ADDR_BIT_MASK),
                      address & 0xFF])
    # long form: 30-bit address in four bytes; high bit marks the format
    return bytes([
        RMF_HIGH_BIT_MASK | flags | ((address >> 24) & RMF_ADDR_BIT_MASK),
        (address >> 16) & 0xFF,
        (address >> 8) & 0xFF,
        address & 0xFF,
    ])
def packFileInfo(file, byte_order='<'):
    """
    packs FileInfo object into bytearray b

    :param file: File/FileInfo-like object exposing name, address, length,
        fileType, digestType and digestData attributes
    :param byte_order: struct byte-order character, '<' or '>'
    :raises ValueError: if the file has no address or byte_order is unknown
    """
    if file.address is None:
        raise ValueError('FileInfo object does not have an address')
    if (byte_order != '<') and (byte_order != '>'):
        raise ValueError('unknown byte_order: '+str(byte_order))
    # layout: u32 cmd, u32 address, u32 length, u16 fileType, u16 digestType,
    # 32-byte digest, then the file name bytes
    fmt = '%s3I2H32s%ds' % (byte_order, len(file.name))
    # NOTE(review): the name is encoded as ascii here but decoded as utf-8
    # in unpackFileInfo, and no NUL terminator is appended even though the
    # unpacker scans for one - confirm against the C implementation.
    return struct.pack(fmt, RMF_CMD_FILE_INFO, file.address,
                       file.length, file.fileType, file.digestType,
                       file.digestData, bytes(file.name, encoding='ascii'))
def unpackFileInfo(data, byte_order='<'):
    """
    Unpack a File object from a FILEINFO command payload.

    Returns None when *data* is shorter than the fixed-size part.

    Bugfix: the original sanity assert compared RMF_FILEINFO_BASE_LEN with
    itself (always true); it now verifies the struct layout actually has
    that size. The manual NUL-scan over the name bytes is replaced with
    ``bytes.find`` (same semantics: name ends at the first NUL, or takes
    the whole remainder when no NUL is present).
    """
    if (byte_order != '<') and (byte_order != '>'):
        raise ValueError('unknown byte_order: '+str(byte_order))
    fmt = '%s3I2H32s' % (byte_order)
    assert struct.calcsize(fmt) == RMF_FILEINFO_BASE_LEN
    if len(data) < RMF_FILEINFO_BASE_LEN:
        return None
    part1 = data[0:RMF_FILEINFO_BASE_LEN]
    part2 = data[RMF_FILEINFO_BASE_LEN:]
    # cmdType is presumably pre-validated by the caller (_processCmd)
    (cmdType, address, length, fileType,
     digestType, digestData) = struct.unpack(fmt, part1)
    endOffset = part2.find(0)
    if endOffset >= 0:
        name = str(part2[0:endOffset], encoding='utf-8')
    else:
        name = str(part2[0:], encoding='utf-8')
    file = File(name, length, fileType, address)
    file.digestType = digestType
    file.digestData = digestData
    return file
def packFileOpen(address, byte_order='<'):
    """Encode a FILE_OPEN command for *address*; returns bytes."""
    if byte_order not in ('<', '>'):
        raise ValueError('unknown byte_order: '+str(byte_order))
    return struct.pack(byte_order + '2I', RMF_CMD_FILE_OPEN, address)
def unpackFileOpen(data, byte_order='<'):
    """Decode a FILE_OPEN command payload; returns the file address."""
    if byte_order not in ('<', '>'):
        raise ValueError('unknown byte_order: '+str(byte_order))
    cmdType, address = struct.unpack(byte_order + '2I', data)
    if cmdType != RMF_CMD_FILE_OPEN:
        raise ValueError('expected RMF_CMD_FILE_OPEN')
    return address
def packFileClose(address, byte_order='<'):
    """Encode a FILE_CLOSE command for *address*; returns bytes."""
    if byte_order not in ('<', '>'):
        raise ValueError('unknown byte_order: '+str(byte_order))
    return struct.pack(byte_order + '2I', RMF_CMD_FILE_CLOSE, address)
def unpackFileClose(data, byte_order='<'):
    """Decode a FILE_CLOSE command payload; returns the file address.

    Bugfix: the original compared against the misspelled constant
    ``RMF_CMD_FILE_ClOSE`` (lowercase L), so any non-matching command
    raised NameError instead of the intended ValueError; the error
    message carried the same typo.
    """
    if (byte_order != '<') and (byte_order != '>'):
        raise ValueError('unknown byte_order: '+str(byte_order))
    fmt = '%s2I' % (byte_order)
    cmdType, address = struct.unpack(fmt, data)
    if cmdType != RMF_CMD_FILE_CLOSE:
        raise ValueError('expected RMF_CMD_FILE_CLOSE')
    return address
class TransmitHandler(metaclass=abc.ABCMeta):
    """Abstract sender-side interface used to push raw bytes to the peer."""
    def getSendAvail(self):
        """Return how much can currently be sent; base implementation
        returns None."""
        return None
    @abc.abstractmethod
    def send(self, data: bytes):
        """
        send data bytes
        """
class ReceiveHandler(metaclass=abc.ABCMeta):
    """Abstract receiver-side callbacks invoked by a socket adapter."""
    @abc.abstractmethod
    def onMsgReceived(self, msg):
        """Called by the socket adapter when a message has been received."""
    @abc.abstractmethod
    def onConnected(self, transmitHandler):
        """Called by the socket adapter on a new connection; the adapter
        passes a reference to itself as the argument."""
class File:
    """
    Base class for File (called FileInfo_t in the C implementation).
    Inherit from it when extra properties are needed (e.g. APX).
    """
    def __init__(self, name, length,
                 fileType=RMF_FILE_TYPE_FIXED, address=None):
        # fields that are part of the FILEINFO struct
        self.name = str(name)
        self.length = int(length)
        self.fileType = int(fileType)
        self.address = address
        self.digestType = RMF_DIGEST_TYPE_NONE
        self.digestData = bytes(32)
        # bookkeeping fields, not part of the FILEINFO struct
        self.isRemoteFile = False
        self.isOpen = False
    def open(self):
        """Mark the file as opened."""
        self.isOpen = True
    def close(self):
        """Mark the file as closed."""
        self.isOpen = False
class FileMap(metaclass=abc.ABCMeta):
    """
    Abstract base class of FileMap, used by the FileManager class.
    Implementations are expected to keep files sorted by address
    (ascending).
    """
    @abc.abstractmethod
    def insert(self, file):
        """Insert *file* into the map, assigning it an address."""
    @abc.abstractmethod
    def remove(self, file):
        """Remove *file* from the map."""
@ReceiveHandler.register
class FileManager:
    """
    The FileManager manages local and remote files
    mapped to a point-to-point connection.

    Outgoing traffic (file info, file-open requests, data writes) is
    serialized through a single worker thread fed by an internal queue.
    """
    def __init__(self, localFileMap, remoteFileMap):
        assert(isinstance(localFileMap, FileMap))
        assert(isinstance(remoteFileMap, FileMap))
        self.localFileMap = localFileMap
        self.remoteFileMap = remoteFileMap
        # name -> File objects we have asked the remote side to publish
        self.requestedFiles = {}
        self.byteOrder = '<'  # use '<' for little endian, '>' for big endian

        def worker():
            """
            this is the worker thread.
            It awaits commands in the message queue.
            When the special message None arrives it quits.
            """
            transmitHandler = None
            while True:
                msg = self.msgQueue.get()
                if msg is None:
                    break
                msgType = msg[0]
                if msgType == RMF_MSG_CONNECT:
                    transmitHandler = msg[1]
                elif msgType == RMF_MSG_FILEINFO:
                    fileInfo = msg[1]
                    header = packHeader(RMF_CMD_START_ADDR)
                    if transmitHandler is not None:
                        transmitHandler.send(header+fileInfo)
                elif msgType == RMF_MSG_WRITE_DATA:
                    (address, data) = msg[1:3]
                    header = packHeader(address)
                    if transmitHandler is not None:
                        transmitHandler.send(header+data)
                elif msgType == RMF_MSG_FILEOPEN:
                    data = packFileOpen(msg[1])
                    header = packHeader(RMF_CMD_START_ADDR)
                    if transmitHandler is not None:
                        transmitHandler.send(header+data)
                else:
                    raise NotImplementedError(msgType)
        self.msgQueue = queue.Queue()
        self.worker_active = False
        self.worker = threading.Thread(target=worker)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.stop()

    def start(self):
        """Start the transmit worker thread."""
        self.worker_active = True
        self.worker.start()

    def stop(self):
        """Stop and join the worker thread (idempotent once stopped).

        NOTE(review): the thread object is discarded, so a FileManager
        cannot be restarted after stop() — confirm callers never do that.
        """
        if self.worker_active:
            # send special message None to stop the worker thread
            self.msgQueue.put(None)
            self.worker.join()
            self.worker = None
            self.worker_active = False

    def _FileInfo_handler(self, msg):
        print("_FileInfo_handler")

    def attachLocalFile(self, file):
        """Publish *file* locally; the map assigns its address."""
        self.localFileMap.insert(file)
        file.fileManager = self

    def requestRemoteFile(self, file):
        """Remember *file* so it is matched when the remote FILEINFO arrives."""
        self.requestedFiles[file.name] = file

    # ReceiveHandler API
    def onMsgReceived(self, msg):
        """Dispatch a received message: command channel or file-data write."""
        bytes_parsed, address, more_bit = unpackHeader(msg)
        if bytes_parsed > 0:
            if address == RMF_CMD_START_ADDR:
                self._processCmd(msg[bytes_parsed:])
            elif address < RMF_CMD_START_ADDR:
                self._processFileWrite(address, more_bit, msg[bytes_parsed:])
            else:
                raise ValueError("invalid address %d" % address)

    def onConnected(self, transmitHandler):
        """
        called on new connection; announces all local files to the peer
        """
        print("FileManager.onConnected")
        self.msgQueue.put((RMF_MSG_CONNECT, transmitHandler))
        for file in self.localFileMap:
            self.msgQueue.put((RMF_MSG_FILEINFO, packFileInfo(file)))

    def _processCmd(self, data):
        """Handle a command-channel message (FILE_INFO/FILE_OPEN/FILE_CLOSE)."""
        fmt = self.byteOrder+'I'
        size = struct.calcsize(fmt)
        if len(data) >= size:
            (cmd,) = struct.unpack(fmt, data[0:size])
            if cmd == RMF_CMD_FILE_INFO:
                remoteFile = unpackFileInfo(data, self.byteOrder)
                # check if this is a requested file
                if remoteFile.name in self.requestedFiles:
                    requestedFile = self.requestedFiles[remoteFile.name]
                    if requestedFile.length == remoteFile.length:
                        del self.requestedFiles[requestedFile.name]
                        requestedFile.address = remoteFile.address
                        requestedFile.fileType = remoteFile.fileType
                        requestedFile.digestType = remoteFile.digestType
                        requestedFile.digestData = remoteFile.digestData
                        requestedFile.open()
                        self.remoteFileMap.insert(requestedFile)
                        print("sending request to open file %s" %
                              requestedFile.name)
                        msg = (RMF_MSG_FILEOPEN, requestedFile.address)
                        self.msgQueue.put(msg)
                    else:
                        # BUGFIX: was 'requstedFile' (NameError on this path)
                        print("[remoteFile.FileManager] FileInfo received for %s but with length=%d, expected length=%d" % (requestedFile.name, remoteFile.length, requestedFile.length))
                else:
                    self.remoteFileMap.insert(remoteFile)
            elif cmd == RMF_CMD_FILE_OPEN:
                address = unpackFileOpen(data, self.byteOrder)
                file = self.localFileMap.findByAddress(address)
                if file is not None:
                    print("FileManager.open(%s)" % file.name)
                    file.open()
                    # peer opened our file: send its full current contents
                    fileContent = file.read(0, file.length)
                    if fileContent is not None:
                        msg = (RMF_MSG_WRITE_DATA, file.address, fileContent)
                        self.msgQueue.put(msg)
            elif cmd == RMF_CMD_FILE_CLOSE:
                address = unpackFileClose(data, self.byteOrder)
                file = self.localFileMap.findByAddress(address)
                if file is not None:
                    print("FileManager.close(%s)" % file.name)
                    file.close()
            else:
                print("[remotefile] unknown command %d" % cmd, file=sys.stderr)

    def _processFileWrite(self, address, more_bit, data):
        """Apply an incoming data write to the remote file covering *address*."""
        remoteFile = self.remoteFileMap.findByAddress(address)
        if remoteFile is not None and remoteFile.isOpen:
            offset = address-remoteFile.address
            # ignore writes that fall outside the file's mapped range
            if (offset >= 0) and ((offset+len(data)) <= remoteFile.length):
                remoteFile.write(offset, data, more_bit)

    def outPortDataWriteNotify(self, file: File, offset: int, length: int):
        """Queue transmission of *length* bytes of *file* starting at *offset*."""
        assert(file.address is not None)
        fileContent = file.read(offset, length)
        if fileContent is not None:
            msg = (RMF_MSG_WRITE_DATA, file.address+offset, fileContent)
            self.msgQueue.put(msg)
from remotefile.socket_adapter import TcpSocketAdapter
from remotefile.proto import readLine
|
apt_tdc_roetdec.py | """
This is the main script for controlling the experiment.
It contains the main control loop of experiment.
"""
import time
import datetime
import multiprocessing
from multiprocessing.queues import Queue
import threading
import numpy as np
import logging
import sys
# Serial ports and NI
import serial.tools.list_ports
# Local project scripts
from pyccapt.tdc_roentdec import tdc_roentdec
from pyccapt.devices import email_send, initialize_devices
from pyccapt.control_tools import experiment_statistics, variables, hdf5_creator, loggi
class APT_SIMPLE:
    """
    Main class for controlling a laser atom probe with a RoentDec TDC.
    """
    def __init__(self, queue_x, queue_y, queue_tof, queue_AbsoluteTimeStamp,
                 queue_ch0, queue_ch1, queue_ch2, queue_ch3,
                 queue_ch4, queue_ch5, queue_ch6, queue_ch7,
                 queue_stop_measurement, lock1, conf):
        """
        Constructor. Accepts the multiprocessing queues used to share
        dld/TDC data with the TDC reader process, a lock that guards the
        shared 'variables' module state, and the experiment configuration
        dict (conf).
        """
        # Queues for sharing data between tdc and main process
        # dld queues
        self.com_port_v_dc = None  # serial.Serial handle; set by initialize_v_dc()
        self.queue_x = queue_x
        self.queue_y = queue_y
        self.queue_tof = queue_tof
        self.queue_AbsoluteTimeStamp = queue_AbsoluteTimeStamp
        self.queue_ch0 = queue_ch0
        self.queue_ch1 = queue_ch1
        self.queue_ch2 = queue_ch2
        self.queue_ch3 = queue_ch3
        self.queue_ch4 = queue_ch4
        self.queue_ch5 = queue_ch5
        self.queue_ch6 = queue_ch6
        self.queue_ch7 = queue_ch7
        self.queue_stop_measurement = queue_stop_measurement
        self.lock1 = lock1  # guards concurrent access from the reader thread
        self.conf = conf
        # per-module logger writing to apt_tdc_roetdec.log
        self.log_apt_tdc_roetdec = loggi.logger_creator('apt_tdc_roetdec', 'apt_tdc_roetdec.log')
def initialize_v_dc(self):
"""
This class method initializes the high voltage device:.
The function utilizes the serial library to communicate over the
COM port serially and read the corresponding v_dc parameter.
The COM port number has to be enter in the config file.
It exits if it is not able to connect on the COM Port.
Attributes:
Accepts only the self (class object)
Returns:
Does not return anything
"""
try:
# Setting the com port of V_dc
self.log_apt_tdc_roetdec.info("Function - initialize_v_dc | Port selection -> {}".format(initialize_devices.com_ports[variables.COM_PORT_V_dc].device))
self.com_port_v_dc = serial.Serial(
port=initialize_devices.com_ports[variables.COM_PORT_V_dc].device, # chosen COM port
baudrate=115200, # 115200
bytesize=serial.EIGHTBITS, # 8
parity=serial.PARITY_NONE, # N
stopbits=serial.STOPBITS_ONE # 1
)
self.log_apt_tdc_roetdec.info("Function - initialize_v_dc | Successful Port Open - O/p of serial variable - > {}".format(self.com_port_v_dc))
# configure the COM port to talk to. Default values: 115200,8,N,1
if self.com_port_v_dc.is_open:
self.com_port_v_dc.flushInput()
self.com_port_v_dc.flushOutput()
cmd_list = [">S1 3.0e-4", ">S0B 0", ">S0 %s" % variables.vdc_min, "F0", ">S0?", ">DON?",
">S0A?"]
for cmd in range(len(cmd_list)):
self.command_v_dc(cmd_list[cmd])
except Exception as e:
self.log_apt_tdc_roetdec.error("Function - initialize_v_dc | Port error - O/p of serial variable - > {} ".format(self.com_port_v_dc))
print("Couldn't open Port!")
print(e)
self.log_apt_tdc_roetdec.error("Function - initialize_v_dc | Port error - Error stack - > {} ".format(e))
# apply command to the V_dc
def command_v_dc(self, cmd):
"""
This class method is used to send commands on the high voltage parameter: v_dc.
The function utilizes the serial library to communicate over the
COM port serially and read the corresponding v_dc parameter.
Attributes:
Accepts only the self (class object)
Returns:
Returns the response code after executing the command.
"""
self.com_port_v_dc.write(
(cmd + '\r\n').encode()) # send cmd to device # might not work with older devices -> "LF" only needed!
time.sleep(0.005) # small sleep for response
# Intialize the response to returned as string
response = ''
# Read the response code after execution(command write).
while self.com_port_v_dc.in_waiting > 0:
response = self.com_port_v_dc.readline() # all characters received, read line till '\r\n'
response = response.decode("utf-8")
self.log_apt_tdc_roetdec.info("Function - command_v_dc | response - {} ".format(response))
return response
    def reader_queue_dld(self):
        """
        This class method runs in an infinite loop and listens and reads dld queues.
        over the queues for the group: dld
        This function is called continuously by a separate thread in the main function.
        The values read from the queues are updates in imported "variables" file
        Attributes:
            Accepts only the self (class object)
        Returns:
            Does not return anything
        """
        while True:
            # Check if any value is present in queue to read from
            while not self.queue_x.empty() or not self.queue_y.empty() or not self.queue_tof.empty() or not self.queue_AbsoluteTimeStamp.empty() \
                    or not self.queue_ch0.empty() or not self.queue_ch1.empty() or not self.queue_ch2.empty() or not self.queue_ch3.empty() \
                    or not self.queue_ch4.empty() or not self.queue_ch5.empty() or not self.queue_ch6.empty() or not self.queue_ch7.empty():
                # Utilize locking mechanism to avoid concurrent use of resources and dirty reads
                with self.lock1:
                    # NOTE(review): each queue item is assumed to be an
                    # array-like chunk of events (len() is taken below) and
                    # all twelve queues are assumed to stay in lockstep —
                    # confirm against the TDC producer process.
                    length = self.queue_x.get()
                    variables.x = np.append(variables.x, length)
                    variables.y = np.append(variables.y, self.queue_y.get())
                    variables.t = np.append(variables.t, self.queue_tof.get())
                    variables.time_stamp = np.append(variables.time_stamp,
                                                     self.queue_AbsoluteTimeStamp.get())
                    variables.ch0 = np.append(variables.ch0, self.queue_ch0.get())
                    variables.ch1 = np.append(variables.ch1, self.queue_ch1.get())
                    variables.ch2 = np.append(variables.ch2, self.queue_ch2.get())
                    variables.ch3 = np.append(variables.ch3, self.queue_ch3.get())
                    variables.ch4 = np.append(variables.ch4, self.queue_ch4.get())
                    variables.ch5 = np.append(variables.ch5, self.queue_ch5.get())
                    variables.ch6 = np.append(variables.ch6, self.queue_ch6.get())
                    variables.ch7 = np.append(variables.ch7, self.queue_ch7.get())
                    # record the specimen voltage once per event in this chunk
                    variables.main_v_dc_dld = np.append(variables.main_v_dc_dld,
                                                        np.tile(variables.specimen_voltage, len(length)))
            # If end of experiment flag is set break the while loop
            if variables.end_experiment:
                break
    def main_ex_loop(self, counts_target):
        """
        One iteration of the experiment control loop. This class method:
        1. Read the number of detected Ions (in TDC or Counter mode)
        2. Calculate the error of detection rate against the desired rate
        3. Regulate the high voltage accordingly (proportional control)
        This function is called in each loop of main function.
        Attributes:
            counts_target: Calculated parameter(((detection_rate/100)* pulse_frequency)/pulse_frequency)
        Returns:
            Does not return anything
        """
        if variables.counter_source == 'TDC':
            variables.total_ions = len(variables.x)
        # ions detected since the previous iteration
        variables.count_temp = variables.total_ions - variables.count_last
        variables.count_last = variables.total_ions
        # saving the values of high dc voltage, pulse, and current iteration ions
        variables.main_v_dc = np.append(variables.main_v_dc, variables.specimen_voltage)
        variables.main_counter = np.append(variables.main_counter, variables.count_temp)
        # averaging count rate of N_averg counts
        variables.avg_n_count = variables.ex_freq * (
                sum(variables.main_counter[-variables.cycle_avg:]) / variables.cycle_avg)
        # counts per pulse; the +1 in the denominator avoids division by zero
        counts_measured = variables.avg_n_count / (1 + variables.pulse_frequency * 1000)
        counts_error = counts_target - counts_measured  # deviation from setpoint
        self.log_apt_tdc_roetdec.info("Function - main_ex_loop | count_temp | value - {}| type - {}".format(variables.count_temp,type(variables.count_temp)))
        self.log_apt_tdc_roetdec.info("Function - main_ex_loop | count_last | value - {}| type - {}".format(variables.count_last,type(variables.count_last)))
        self.log_apt_tdc_roetdec.info("Function - main_ex_loop | main_v_dc | value - {}| type - {}".format(variables.main_v_dc,type(variables.main_v_dc)))
        self.log_apt_tdc_roetdec.info("Function - main_ex_loop | main_counter | value - {}| type - {}".format(variables.main_counter,type(variables.main_counter)))
        self.log_apt_tdc_roetdec.info("Function - main_ex_loop | avg_n_count | value - {}| type - {}".format(variables.avg_n_count,type(variables.avg_n_count)))
        self.log_apt_tdc_roetdec.info("Function - main_ex_loop | ex_freq | value - {}| type - {}".format(variables.ex_freq,type(variables.ex_freq)))
        self.log_apt_tdc_roetdec.info("Function - main_ex_loop | counts_measured | value - {}| type - {}".format(counts_measured,type(counts_measured)))
        self.log_apt_tdc_roetdec.info("Function - main_ex_loop | counts_error | value - {}| type - {}".format(counts_error,type(counts_error)))
        # simple proportional control with averaging
        rate = ((variables.avg_n_count * 100) / (1 + variables.pulse_frequency * 1000))
        # ramp faster while the detection rate is essentially zero at low voltage
        if rate < 0.01 and variables.specimen_voltage < 5000:
            ramp_speed_factor = 2.5
        else:
            ramp_speed_factor = 1
        if counts_error > 0:
            voltage_step = counts_error * variables.vdc_step_up * ramp_speed_factor
        elif counts_error <= 0:
            voltage_step = counts_error * variables.vdc_step_down * ramp_speed_factor
        # update v_dc, clamped between vdc_min and vdc_max; the voltage is
        # only sent to the device when it would increase
        if variables.specimen_voltage < variables.vdc_max:
            if variables.specimen_voltage >= variables.vdc_min:
                specimen_voltage_temp = variables.specimen_voltage + voltage_step
                if specimen_voltage_temp > variables.specimen_voltage:
                    if self.conf['v_dc'] != "off":
                        # sending VDC via serial
                        self.command_v_dc(">S0 %s" % (specimen_voltage_temp))
                    variables.specimen_voltage = specimen_voltage_temp
def clear_up(self, ):
"""
This function clears global variables and deinitialize high voltage and pulser function
and clear up global variables
Attributes:
Does not accept any arguments
Returns:
Does not return anything
"""
def cleanup_variables():
"""
Clear up all the global variables
"""
variables.stop_flag = False
variables.end_experiment = False
variables.start_flag = False
variables.detection_rate = 0.0
variables.detection_rate_elapsed = 0.0
variables.count = 0
variables.count_temp = 0
variables.count_last = 0
variables.index_plot = 0
variables.index_wait_on_plot_start = 0
variables.index_plot_save = 0
variables.index_plot = 0
variables.x = np.zeros(0)
variables.y = np.zeros(0)
variables.t = np.zeros(0)
variables.time_stamp = np.zeros(0)
variables.ch0 = np.zeros(0)
variables.ch1 = np.zeros(0)
variables.ch2 = np.zeros(0)
variables.ch3 = np.zeros(0)
variables.ch4 = np.zeros(0)
variables.ch5 = np.zeros(0)
variables.ch6 = np.zeros(0)
variables.ch7 = np.zeros(0)
#
variables.main_v_dc = np.zeros(0)
variables.main_counter = np.zeros(0)
variables.main_v_dc_dld = np.zeros(0)
variables.main_v_dc_tdc = np.zeros(0)
self.log_apt_tdc_roetdec.info("Function - cleanup_variables | ch1 | value - {}| type - {}".format(variables.count_temp,type(variables.count_temp)))
self.log_apt_tdc_roetdec.info("Function - cleanup_variables | main_v_dc_tdc | value - {}| type - {}".format(variables.main_v_dc_tdc,type(variables.main_v_dc_tdc)))
print('starting to clean up')
# save the data to the HDF5
if self.conf['v_dc'] != "off":
# Switch off the v_dc
self.command_v_dc('F0')
self.com_port_v_dc.close()
# Zero variables
cleanup_variables()
print('Clean up is finished')
def main(conf):
    """
    Main function for doing experiments
    1- Initialize all the devices (High voltage, pulser, TDC or Edge-Counter)
    2- Create and start reader DLD and TDC thread
    3- Create and start the TDC process if TDC is selected in GUI
    4- Iterate over the main loop of experiments and control the experiment frequency
    5- Stop the experiment if stop condition is achieved
    6- Deinitialize devices
    7- Save the data
    8- Send email and tweet
    """
    # BUGFIX: the original used an undefined global `logger` (NameError at
    # first use). Create it the same way the class creates its logger.
    # TODO(review): confirm loggi.logger_creator does not duplicate handlers.
    logger = loggi.logger_creator('apt_tdc_roetdec', 'apt_tdc_roetdec.log')
    variables.start_time = datetime.datetime.now().strftime("%d/%m/%Y %H:%M")
    # BUGFIX: pre-declare all queues; the original left them unbound when
    # conf['tdc'] == "off", crashing at APT_SIMPLE(...) below.
    queue_x = None
    queue_y = None
    queue_tof = None
    queue_AbsoluteTimeStamp = None
    queue_ch0 = None
    queue_ch1 = None
    queue_ch2 = None
    queue_ch3 = None
    queue_ch4 = None
    queue_ch5 = None
    queue_ch6 = None
    queue_ch7 = None
    queue_stop_measurement = None
    if conf['tdc'] != "off":
        # Create and start the TDC process and related queues
        if variables.counter_source == 'TDC' or variables.counter_source == 'TDC_Raw':
            queue_x = Queue(maxsize=-1, ctx=multiprocessing.get_context())
            queue_y = Queue(maxsize=-1, ctx=multiprocessing.get_context())
            queue_tof = Queue(maxsize=-1, ctx=multiprocessing.get_context())
            queue_AbsoluteTimeStamp = Queue(maxsize=-1, ctx=multiprocessing.get_context())
            queue_ch0 = Queue(maxsize=1, ctx=multiprocessing.get_context())
            queue_ch1 = Queue(maxsize=1, ctx=multiprocessing.get_context())
            queue_ch2 = Queue(maxsize=1, ctx=multiprocessing.get_context())
            queue_ch3 = Queue(maxsize=1, ctx=multiprocessing.get_context())
            queue_ch4 = Queue(maxsize=1, ctx=multiprocessing.get_context())
            queue_ch5 = Queue(maxsize=1, ctx=multiprocessing.get_context())
            queue_ch6 = Queue(maxsize=1, ctx=multiprocessing.get_context())
            queue_ch7 = Queue(maxsize=1, ctx=multiprocessing.get_context())
            queue_stop_measurement = Queue(maxsize=1, ctx=multiprocessing.get_context())
            tdc_process = multiprocessing.Process(target=tdc_roentdec.experiment_measure,
                                                  args=(queue_x, queue_y, queue_tof, queue_AbsoluteTimeStamp,
                                                        queue_ch0, queue_ch1, queue_ch2, queue_ch3,
                                                        queue_ch4, queue_ch5, queue_ch6, queue_ch7,
                                                        queue_stop_measurement))
            tdc_process.daemon = True
            tdc_process.start()
    # Initialize lock that is used by TDC and DLD threads
    # Module used: threading
    lock1 = threading.Lock()
    # Create the experiment object
    experiment = APT_SIMPLE(queue_x, queue_y, queue_tof, queue_AbsoluteTimeStamp,
                            queue_ch0, queue_ch1, queue_ch2, queue_ch3,
                            queue_ch4, queue_ch5, queue_ch6, queue_ch7,
                            queue_stop_measurement, lock1, conf)
    if conf['v_dc'] != "off":
        # Initialize high voltage
        experiment.initialize_v_dc()
        logger.info('High voltage is initialized')
    # start the timer for main experiment
    time_ex_s = np.zeros(0)
    time_ex_m = np.zeros(0)
    time_ex_h = np.zeros(0)
    time_counter = np.zeros(0)
    counts_target = ((variables.detection_rate / 100) * variables.pulse_frequency) / variables.pulse_frequency
    logger.info('Starting the main loop')
    if conf['tdc'] != "off":
        # Initialize threads that will read from the queue for the group: dld
        if variables.counter_source == 'TDC':
            read_dld_queue_thread = threading.Thread(target=experiment.reader_queue_dld)
            # MODERNIZATION: setDaemon() is deprecated; use the attribute
            read_dld_queue_thread.daemon = True
            read_dld_queue_thread.start()
    total_steps = variables.ex_time * variables.ex_freq
    steps = 0
    flag_achieved_high_voltage = 0
    index_time = 0
    ex_time_temp = variables.ex_time
    # Main loop of experiment
    while steps < total_steps:
        # Only for initializing every thing at first iteration
        if steps == 0:
            if conf['v_dc'] != "off":
                # Turn on the v_dc
                experiment.command_v_dc("F1")
                time.sleep(0.5)
            variables.start_flag = True
            # Wait for 4 second to all devices get ready
            time.sleep(4)
            # Total experiment time variable
            start_main_ex = time.time()
            logger.info('Experiment is started')
            # set the start specimen_voltage
            variables.specimen_voltage = variables.vdc_min
        # Measure time
        start = datetime.datetime.now()
        # main loop function
        experiment.main_ex_loop(counts_target)
        end = datetime.datetime.now()
        # If the main experiment function takes less than experiment frequency we have to wait
        if (1000 / variables.ex_freq) > ((end - start).microseconds / 1000):  # time in milliseconds
            sleep_time = ((1000 / variables.ex_freq) - ((end - start).microseconds / 1000))
            time.sleep(sleep_time / 1000)
        else:
            print(
                f"{initialize_devices.bcolors.WARNING}Warning: Experiment loop takes longer than %s Millisecond{initialize_devices.bcolors.ENDC}" % (
                    int(1000 / variables.ex_freq)))
            logger.error('Experiment loop takes longer than %s Millisecond' % (int(1000 / variables.ex_freq)))
        print('%s- The iteration time:' % index_time, ((end - start).microseconds / 1000))
        index_time += 1
        time_ex_s = np.append(time_ex_s, int(end.strftime("%S")))
        time_ex_m = np.append(time_ex_m, int(end.strftime("%M")))
        time_ex_h = np.append(time_ex_h, int(end.strftime("%H")))
        end_main_ex_loop = time.time()
        variables.elapsed_time = end_main_ex_loop - start_main_ex
        # Counter of iteration
        time_counter = np.append(time_counter, steps)
        steps += 1
        if variables.stop_flag:
            logger.info('Experiment is stopped by user')
            if conf['tdc'] != "off":
                if variables.counter_source == 'TDC':
                    queue_stop_measurement.put(True)
            time.sleep(1)
            break
        if variables.criteria_ions:
            if variables.max_ions <= variables.total_ions:
                logger.info('Total number of Ions is achieved')
                if variables.counter_source == 'TDC' or variables.counter_source == 'TDC_Raw':
                    queue_stop_measurement.put(True)
                time.sleep(1)
                break
        if variables.criteria_vdc:
            if variables.vdc_max <= variables.specimen_voltage:
                # only stop after the max voltage has been held ~10 seconds
                if flag_achieved_high_voltage > variables.ex_freq * 10:
                    logger.info('High Voltage Max. is achieved')
                    time.sleep(1)
                    break
                flag_achieved_high_voltage += 1
        if variables.ex_time != ex_time_temp:
            total_steps = variables.ex_time * variables.ex_freq - steps
            ex_time_temp = variables.ex_time
        # Because experiment time is not a stop criteria, increase total_steps
        if not variables.criteria_time and steps + 1 == total_steps:
            total_steps += 1
    if conf['tdc'] != "off":
        # Stop the TDC process
        try:
            if variables.counter_source == 'TDC':
                tdc_process.join(3)
                if tdc_process.is_alive():
                    tdc_process.terminate()
                    tdc_process.join(1)
                # Release all the resources of the TDC process
                tdc_process.close()
        except Exception as e:
            print(
                f"{initialize_devices.bcolors.WARNING}Warning: The TDC process cannot be terminated properly{initialize_devices.bcolors.ENDC}")
            print(e)
    variables.end_experiment = True
    time.sleep(1)
    if conf['tdc'] != "off":
        # Stop the TDC and DLD thread
        if variables.counter_source == 'TDC':
            read_dld_queue_thread.join(1)
    if variables.counter_source == 'TDC':
        variables.total_ions = len(variables.x)
    time.sleep(1)
    logger.info('Experiment is finished')
    # Check the length of arrays to be equal
    if variables.counter_source == 'TDC':
        # BUGFIX: the original warned when all lengths DID match (missing
        # `not`), and listed variables.ch0 twice in the checked list.
        if not all(len(lst) == len(variables.x) for lst in [variables.x, variables.y,
                                                            variables.t, variables.time_stamp,
                                                            variables.main_v_dc_dld,
                                                            variables.ch0, variables.ch1, variables.ch2,
                                                            variables.ch3, variables.ch4, variables.ch5,
                                                            variables.ch6, variables.ch7]):
            logger.warning('dld data have not same length')
    # save data in hdf5 file
    hdf5_creator.hdf_creator_physic(time_counter, time_ex_s, time_ex_m, time_ex_h)
    logger.info('HDF5 file is created')
    variables.end_time = datetime.datetime.now().strftime("%d/%m/%Y %H:%M")
    # Save new value of experiment counter
    with open('./files/counter_physic.txt', 'w') as f:
        f.write(str(variables.counter + 1))
    logger.info('Experiment counter is increased')
    # Adding results of the experiment to the log file
    logger.info('Total number of Ions is: %s' % variables.total_ions)
    # send an email
    subject = 'apt_simple Experiment {} Report'.format(variables.hdf5_path)
    elapsed_time_temp = float("{:.3f}".format(variables.elapsed_time))
    message = 'The experiment was started at: {}\n' \
              'The experiment was ended at: {}\n' \
              'Experiment duration: {}\n' \
              'Total number of ions: {}\n'.format(variables.start_time,
                                                  variables.end_time, elapsed_time_temp, variables.total_ions)
    if len(variables.email) > 3:
        logger.info('Email is sent')
        email_send.send_email(variables.email, subject, message)
    # save setup parameters and run statistics in a txt file
    experiment_statistics.save_statistics_apt_physic()
    # Clear up all the variables and deinitialize devices
    experiment.clear_up()
    logger.info('Variables and devices is cleared')
|
moduleinspect.py | """Basic introspection of modules."""
from typing import List, Optional, Union
from types import ModuleType
from multiprocessing import Process, Queue
import importlib
import inspect
import os
import pkgutil
import queue
import sys
class ModuleProperties:
    """Container for the runtime-introspected attributes of a module/package."""
    def __init__(self,
                 name: str,
                 file: Optional[str],
                 path: Optional[List[str]],
                 all: Optional[List[str]],
                 is_c_module: bool,
                 subpackages: List[str]) -> None:
        # Mirror the module's dunder attributes plus derived information.
        self.name = name                  # __name__ attribute
        self.file = file                  # __file__ attribute
        self.path = path                  # __path__ attribute
        self.all = all                    # __all__ attribute
        self.is_c_module = is_c_module    # True for C extensions / namespace pkgs
        self.subpackages = subpackages    # fully qualified child module names
def is_c_module(module: ModuleType) -> bool:
    """Return True if *module* is a C extension (or has no source file)."""
    source = module.__dict__.get('__file__')
    if source is None:
        # Could be a namespace package. These must be handled through
        # introspection, since there is no source file.
        return True
    extension = os.path.splitext(source)[-1]
    return extension in ('.so', '.pyd')
class InspectError(Exception):
    """Raised when importing or introspecting a module fails."""
def get_package_properties(package_id: str) -> ModuleProperties:
    """Use runtime introspection to get information about a module/package."""
    try:
        module = importlib.import_module(package_id)
    except BaseException as exc:
        # Importing arbitrary code can raise anything, including SystemExit.
        raise InspectError(str(exc)) from exc
    mod_name = getattr(module, '__name__', package_id)
    mod_file = getattr(module, '__file__', None)
    search_path = getattr(module, '__path__', None)  # type: Optional[List[str]]
    if not isinstance(search_path, list):
        search_path = None
    exported = getattr(module, '__all__', None)
    if exported is not None:
        try:
            exported = list(exported)
        except Exception:
            exported = None
    c_module = is_c_module(module)
    if search_path is not None:
        # Regular package: enumerate children via pkgutil.
        walked = pkgutil.walk_packages(search_path, prefix=module.__name__ + ".",
                                       onerror=lambda r: None)
        children = [qualified_name for _, qualified_name, _ in walked]
    elif c_module:
        # Object has no path but is a C extension package: discover
        # sub-modules through the inspect module instead.
        prefix = module.__name__ + "."
        children = [prefix + attr
                    for attr, value in inspect.getmembers(module)
                    if inspect.ismodule(value)
                    and value.__name__ == prefix + attr]
    else:
        # Plain module inside a package: nothing to walk.
        children = []
    return ModuleProperties(name=mod_name,
                            file=mod_file,
                            path=search_path,
                            all=exported,
                            is_c_module=c_module,
                            subpackages=children)
def worker(tasks: 'Queue[str]',
           results: 'Queue[Union[str, ModuleProperties]]',
           sys_path: List[str]) -> None:
    """The main loop of a worker introspection process.

    Reads module ids from *tasks* forever; for each one, puts either the
    introspected ModuleProperties or the error message string on *results*.
    """
    sys.path = sys_path  # mirror the parent's import path
    while True:
        module_id = tasks.get()
        try:
            properties = get_package_properties(module_id)
        except InspectError as err:
            results.put(str(err))
            continue
        results.put(properties)
class ModuleInspect:
    """Perform runtime introspection of modules in a separate process.
    Reuse the process for multiple modules for efficiency. However, if there is an
    error, retry using a fresh process to avoid cross-contamination of state between
    modules.
    We use a separate process to isolate us from many side effects. For example, the
    import of a module may kill the current process, and we want to recover from that.
    Always use in a with statement for proper clean-up:
        with ModuleInspect() as m:
            p = m.get_package_properties('urllib.parse')
    """
    def __init__(self) -> None:
        self._start()

    def _start(self) -> None:
        """Spawn a fresh worker process with new task/result queues."""
        self.tasks = Queue()  # type: Queue[str]
        self.results = Queue()  # type: Queue[Union[ModuleProperties, str]]
        self.proc = Process(target=worker, args=(self.tasks, self.results, sys.path))
        self.proc.start()
        self.counter = 0  # Number of successful roundtrips

    def close(self) -> None:
        """Free any resources used."""
        self.proc.terminate()
        # BUGFIX: reap the terminated child; terminate() alone leaves a
        # zombie process until the parent exits.
        self.proc.join()

    def get_package_properties(self, package_id: str) -> ModuleProperties:
        """Return some properties of a module/package using runtime introspection.
        Raise InspectError if the target couldn't be imported.
        """
        self.tasks.put(package_id)
        res = self._get_from_queue()
        if res is None:
            # The process died; recover and report error.
            self._start()
            raise InspectError('Process died when importing %r' % package_id)
        if isinstance(res, str):
            # Error importing module
            if self.counter > 0:
                # Also try with a fresh process. Maybe one of the previous imports has
                # corrupted some global state.
                self.close()
                self._start()
                return self.get_package_properties(package_id)
            raise InspectError(res)
        self.counter += 1
        return res

    def _get_from_queue(self) -> Union[ModuleProperties, str, None]:
        """Get value from the queue.
        Return the value read from the queue, or None if the process unexpectedly died.
        """
        # Poll for up to ~5 seconds (100 * 0.05s) while the worker is alive.
        max_iter = 100
        n = 0
        while True:
            if n == max_iter:
                raise RuntimeError('Timeout waiting for subprocess')
            try:
                return self.results.get(timeout=0.05)
            except queue.Empty:
                if not self.proc.is_alive():
                    return None
                n += 1

    def __enter__(self) -> 'ModuleInspect':
        return self

    def __exit__(self, *args: object) -> None:
        self.close()
|
email.py | from threading import Thread
from flask import render_template
from flask_mail import Message
from app import app, mail
def send_async_email(app, msg):
    """Send *msg* from a worker thread, inside an application context
    (Flask-Mail reads its configuration from the active app)."""
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Build an email message and dispatch it on a background thread."""
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    message.html = html_body
    # Fire-and-forget: sending happens asynchronously in send_async_email.
    worker = Thread(target=send_async_email, args=(app, message))
    worker.start()
def send_password_reset_email(user):
    """Email *user* a password-reset link containing a signed token."""
    reset_token = user.get_reset_password_token()
    context = {'user': user, 'token': reset_token}
    send_email('WBL Reset Your Password',
               sender=app.config['ADMINS'][0],
               recipients=[user.email],
               text_body=render_template('email/reset_password.txt', **context),
               html_body=render_template('email/reset_password.html', **context))
|
test_tcp.py | """Test tcp connection.
Connection will happen on localhost and at a random free port.
"""
import socket
import threading
import socketserver
import pytest
from src import nuke_tools
LOCALHOST = '127.0.0.1'
# Bind port 0 so the OS picks a free ephemeral port, record its number, and
# close the probe server again so the tests can reuse the port.
# NOTE(review): there is a small race window — another process could grab
# the port between this probe and the test server binding it.
with socketserver.TCPServer((LOCALHOST, 0), None) as s:
    FREE_PORT = s.server_address[1]
def socket_server():
    """Serve exactly one client: echo back whatever it sends, then shut down.

    ROBUSTNESS FIX: the original closed `conn` unconditionally after the
    loop, which raised NameError/errors if accept() or recv() failed; both
    sockets are now closed in a finally block.
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind((LOCALHOST, FREE_PORT))
    listener.listen(1)
    conn = None
    try:
        conn, _ = listener.accept()
        data = conn.recv(2048)
        conn.sendall(data)
    except Exception:  # skipcq: PYL-W0703 - best-effort test helper
        pass
    finally:
        listener.close()
        if conn is not None:
            conn.close()
@pytest.fixture()
def tcp_server():
    """Run the echo server in a daemon thread for the duration of a test."""
    worker = threading.Thread(target=socket_server, daemon=True)
    worker.start()
    yield worker
    worker.join()
def test_send_data(tcp_server):
    """Round-trip a message through the echo server and check the reply."""
    reply = nuke_tools.send_data(LOCALHOST, FREE_PORT, 'hello')
    assert isinstance(reply, str)
    assert '[NukeTools] hello' in reply
def test_connection_refused():
    """Sending data while no server is listening must report a refusal."""
    reply = nuke_tools.send_data(LOCALHOST, FREE_PORT, 'hello')
    assert 'ConnectionRefusedError. {}:{}'.format(LOCALHOST, FREE_PORT) in reply
def test_connection_timeout():
    """Test connection timeout.

    CONSISTENCY FIX: the original duplicated the literal '192.168.1.99' in
    the send_data call instead of using the `hostname` variable, so the
    call and the assertion could silently drift apart.
    """
    hostname = '192.168.1.99'
    data = nuke_tools.send_data(hostname, FREE_PORT, 'hello', 0.1)
    assert 'ConnectionTimeoutError. {}:{}'.format(hostname, FREE_PORT) in data
def test_connection_socket_error():
    """Force a low-level socket error with an unroutable host and port 0."""
    reply = nuke_tools.send_data('111.111.1.11', 0, 'hello')
    assert 'UnknownError:' in reply
def test_connection_generic_exception():
    """Force the generic exception path by passing the port as a string."""
    reply = nuke_tools.send_data(LOCALHOST, str(FREE_PORT), 'hello')
    assert 'UnknownException:' in reply
|
machine.py | from contextlib import _GeneratorContextManager
from pathlib import Path
from queue import Queue
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import base64
import io
import os
import queue
import re
import shlex
import shutil
import socket
import subprocess
import sys
import tempfile
import threading
import time
from test_driver.logger import rootlog
# Map printable characters (plus newline) to key names used when synthesizing
# key events for typing text into the VM. Shifted characters are encoded as
# "shift-<key>"; punctuation without a symbolic name uses raw hex key codes
# (e.g. "0x0C"). Presumably these are QEMU sendkey names — TODO confirm.
CHAR_TO_KEY = {
    "A": "shift-a",
    "N": "shift-n",
    "-": "0x0C",
    "_": "shift-0x0C",
    "B": "shift-b",
    "O": "shift-o",
    "=": "0x0D",
    "+": "shift-0x0D",
    "C": "shift-c",
    "P": "shift-p",
    "[": "0x1A",
    "{": "shift-0x1A",
    "D": "shift-d",
    "Q": "shift-q",
    "]": "0x1B",
    "}": "shift-0x1B",
    "E": "shift-e",
    "R": "shift-r",
    ";": "0x27",
    ":": "shift-0x27",
    "F": "shift-f",
    "S": "shift-s",
    "'": "0x28",
    '"': "shift-0x28",
    "G": "shift-g",
    "T": "shift-t",
    "`": "0x29",
    "~": "shift-0x29",
    "H": "shift-h",
    "U": "shift-u",
    "\\": "0x2B",
    "|": "shift-0x2B",
    "I": "shift-i",
    "V": "shift-v",
    ",": "0x33",
    "<": "shift-0x33",
    "J": "shift-j",
    "W": "shift-w",
    ".": "0x34",
    ">": "shift-0x34",
    "K": "shift-k",
    "X": "shift-x",
    "/": "0x35",
    "?": "shift-0x35",
    "L": "shift-l",
    "Y": "shift-y",
    " ": "spc",
    "M": "shift-m",
    "Z": "shift-z",
    "\n": "ret",
    "!": "shift-0x02",
    "@": "shift-0x03",
    "#": "shift-0x04",
    "$": "shift-0x05",
    "%": "shift-0x06",
    "^": "shift-0x07",
    "&": "shift-0x08",
    "*": "shift-0x09",
    "(": "shift-0x0A",
    ")": "shift-0x0B",
}
def make_command(args: list) -> str:
    """Shell-quote every argument (stringified first) and join with spaces."""
    quoted = [shlex.quote(str(arg)) for arg in args]
    return " ".join(quoted)
def _perform_ocr_on_screenshot(
    screenshot_path: str, model_ids: Iterable[int]
) -> List[str]:
    """Run tesseract OCR over a PPM screenshot, once per OCR engine model.

    The screenshot is first upscaled and cleaned with ImageMagick so that
    tesseract has a high-contrast grayscale TIFF to work on.

    Args:
        screenshot_path: path to the PPM screendump taken from QEMU.
        model_ids: tesseract ``--oem`` engine-model ids to try (e.g. 0-2).

    Returns:
        One decoded text blob per model id, in the same order.

    Raises:
        Exception: if tesseract is unavailable, or either the ImageMagick
            conversion or an OCR invocation exits non-zero.
    """
    if shutil.which("tesseract") is None:
        raise Exception("OCR requested but enableOCR is false")

    magick_args = (
        "-filter Catrom -density 72 -resample 300 "
        + "-contrast -normalize -despeckle -type grayscale "
        + "-sharpen 1 -posterize 3 -negate -gamma 100 "
        + "-blur 1x65535"
    )

    # Fix: this was a pointless f-string (no placeholders).
    tess_args = "-c debug_file=/dev/null --psm 11"

    cmd = f"convert {magick_args} {screenshot_path} tiff:{screenshot_path}.tiff"
    ret = subprocess.run(cmd, shell=True, capture_output=True)
    if ret.returncode != 0:
        raise Exception(f"TIFF conversion failed with exit code {ret.returncode}")

    model_results = []
    for model_id in model_ids:
        cmd = f"tesseract {screenshot_path}.tiff - {tess_args} --oem {model_id}"
        ret = subprocess.run(cmd, shell=True, capture_output=True)
        if ret.returncode != 0:
            raise Exception(f"OCR failed with exit code {ret.returncode}")
        model_results.append(ret.stdout.decode("utf-8"))

    return model_results
def retry(fn: Callable, timeout: int = 900) -> None:
    """Poll *fn* once per second until it returns True.

    *fn* receives ``False`` on every regular attempt and ``True`` on the
    single final attempt after *timeout* polls, so it can emit extra
    diagnostics before the action is declared failed.

    Raises:
        Exception: if the final attempt still returns a falsy value.
    """
    remaining = timeout
    while remaining > 0:
        if fn(False):
            return
        time.sleep(1)
        remaining -= 1

    if not fn(True):
        raise Exception(f"action timed out after {timeout} seconds")
class StartCommand:
    """The Base Start Command knows how to append the necesary
    runtime qemu options as determined by a particular test driver
    run. Any such start command is expected to happily receive and
    append additional qemu args.
    """

    # Base qemu invocation; subclasses set this (script path or qemu binary
    # plus flags) before cmd() is called.
    _cmd: str

    def cmd(
        self,
        monitor_socket_path: Path,
        shell_socket_path: Path,
        allow_reboot: bool = False,  # TODO: unused, legacy?
    ) -> str:
        """Return the full qemu command line as a single shell string.

        Wires the qemu monitor and the guest root shell to the two given
        unix sockets, and appends display / serial options.
        """
        display_opts = ""
        display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"])
        if not display_available:
            # Headless environment: disable graphical output entirely.
            display_opts += " -nographic"

        # qemu options
        qemu_opts = ""
        qemu_opts += (
            ""
            if allow_reboot
            else " -no-reboot"
            " -device virtio-serial"
            " -device virtconsole,chardev=shell"
            " -device virtio-rng-pci"
            " -serial stdio"
        )
        # TODO: qemu script already catpures this env variable, legacy?
        qemu_opts += " " + os.environ.get("QEMU_OPTS", "")

        return (
            f"{self._cmd}"
            f" -monitor unix:{monitor_socket_path}"
            f" -chardev socket,id=shell,path={shell_socket_path}"
            f"{qemu_opts}"
            f"{display_opts}"
        )

    @staticmethod
    def build_environment(
        state_dir: Path,
        shared_dir: Path,
    ) -> dict:
        """Return the environment dict the VM process should run with.

        Points TMPDIR at the per-VM state dir and exposes the host/guest
        shared directory via SHARED_DIR.
        """
        # We make a copy to not update the current environment
        env = dict(os.environ)
        env.update(
            {
                "TMPDIR": str(state_dir),
                "SHARED_DIR": str(shared_dir),
                "USE_TMPDIR": "1",
            }
        )
        return env

    def run(
        self,
        state_dir: Path,
        shared_dir: Path,
        monitor_socket_path: Path,
        shell_socket_path: Path,
    ) -> subprocess.Popen:
        """Launch qemu and return the Popen handle.

        stdout/stderr are merged and piped so the driver can stream the
        serial console output.
        """
        return subprocess.Popen(
            self.cmd(monitor_socket_path, shell_socket_path),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True,
            cwd=state_dir,
            env=self.build_environment(state_dir, shared_dir),
        )
class NixStartScript(StartCommand):
    """Start command backed by a Nix-generated VM start script.

    The wrapped script comes from nixos/modules/virtualiation/qemu-vm.nix
    and is expected to be named ``run-<name>-vm``, which lets the machine
    name be recovered from it via a regex match.
    (Admittedly a _very_ implicit contract, evtl. TODO fix)
    """

    def __init__(self, script: str):
        self._cmd = script

    @property
    def machine_name(self) -> str:
        """Machine name parsed from the script path, or "machine" as fallback."""
        match = re.search("run-(.+)-vm$", self._cmd)
        return match.group(1) if match else "machine"
class LegacyStartCommand(StartCommand):
    """Used in some places to create an ad-hoc machine instead of
    using nix test instrumentation + module system for that purpose.
    Legacy.
    """

    def __init__(
        self,
        netBackendArgs: Optional[str] = None,
        netFrontendArgs: Optional[str] = None,
        hda: Optional[Tuple[Path, str]] = None,
        cdrom: Optional[str] = None,
        usb: Optional[str] = None,
        bios: Optional[str] = None,
        qemuBinary: Optional[str] = None,
        qemuFlags: Optional[str] = None,
    ):
        """Assemble a qemu command line from the given ad-hoc options.

        netBackendArgs/netFrontendArgs are appended verbatim to the default
        user-mode network devices; hda is a (disk image path, interface)
        pair; the remaining arguments map directly to qemu flags.
        """
        if qemuBinary is not None:
            self._cmd = qemuBinary
        else:
            self._cmd = "qemu-kvm"
        self._cmd += " -m 384"

        # networking
        net_backend = "-netdev user,id=net0"
        net_frontend = "-device virtio-net-pci,netdev=net0"
        if netBackendArgs is not None:
            net_backend += "," + netBackendArgs
        if netFrontendArgs is not None:
            net_frontend += "," + netFrontendArgs
        self._cmd += f" {net_backend} {net_frontend}"

        # hda
        hda_cmd = ""
        if hda is not None:
            hda_path = hda[0].resolve()
            hda_interface = hda[1]
            if hda_interface == "scsi":
                # SCSI disks need an explicit scsi-hd device bound to the drive.
                hda_cmd += (
                    f" -drive id=hda,file={hda_path},werror=report,if=none"
                    " -device scsi-hd,drive=hda"
                )
            else:
                hda_cmd += f" -drive file={hda_path},if={hda_interface},werror=report"
        self._cmd += hda_cmd

        # cdrom
        if cdrom is not None:
            self._cmd += f" -cdrom {cdrom}"

        # usb
        usb_cmd = ""
        if usb is not None:
            # https://github.com/qemu/qemu/blob/master/docs/usb2.txt
            usb_cmd += (
                " -device usb-ehci"
                f" -drive id=usbdisk,file={usb},if=none,readonly"
                " -device usb-storage,drive=usbdisk "
            )
        self._cmd += usb_cmd

        # bios
        if bios is not None:
            self._cmd += f" -bios {bios}"

        # qemu flags
        if qemuFlags is not None:
            self._cmd += f" {qemuFlags}"
class Machine:
    """A handle to the machine with this name, that also knows how to manage
    the machine lifecycle with the help of a start script / command."""

    name: str
    out_dir: Path
    tmp_dir: Path
    shared_dir: Path
    state_dir: Path
    monitor_path: Path
    shell_path: Path

    start_command: StartCommand
    keep_vm_state: bool
    allow_reboot: bool

    process: Optional[subprocess.Popen]
    pid: Optional[int]
    monitor: Optional[socket.socket]
    shell: Optional[socket.socket]
    serial_thread: Optional[threading.Thread]

    booted: bool
    connected: bool

    # Store last serial console lines for use of wait_for_console_text.
    # NOTE(review): this class-level Queue is shared between instances until
    # start() replaces it with a per-instance queue — confirm nothing reads
    # it before start() runs.
    last_lines: Queue = Queue()
    callbacks: List[Callable]

    def __repr__(self) -> str:
        return f"<Machine '{self.name}'>"

    def __init__(
        self,
        out_dir: Path,
        tmp_dir: Path,
        start_command: StartCommand,
        name: str = "machine",
        keep_vm_state: bool = False,
        allow_reboot: bool = False,
        callbacks: Optional[List[Callable]] = None,
    ) -> None:
        self.out_dir = out_dir
        self.tmp_dir = tmp_dir
        self.keep_vm_state = keep_vm_state
        self.allow_reboot = allow_reboot
        self.name = name
        self.start_command = start_command
        self.callbacks = callbacks if callbacks is not None else []

        # set up directories
        self.shared_dir = self.tmp_dir / "shared-xchg"
        self.shared_dir.mkdir(mode=0o700, exist_ok=True)

        self.state_dir = self.tmp_dir / f"vm-state-{self.name}"
        self.monitor_path = self.state_dir / "monitor"
        self.shell_path = self.state_dir / "shell"
        if (not self.keep_vm_state) and self.state_dir.exists():
            self.cleanup_statedir()
        self.state_dir.mkdir(mode=0o700, exist_ok=True)

        self.process = None
        self.pid = None
        self.monitor = None
        self.shell = None
        self.serial_thread = None

        self.booted = False
        self.connected = False

    @staticmethod
    def create_startcommand(args: Dict[str, str]) -> StartCommand:
        """Build a LegacyStartCommand from a loose dict of qemu options."""
        rootlog.warning(
            "Using legacy create_startcommand(),"
            "please use proper nix test vm instrumentation, instead"
            "to generate the appropriate nixos test vm qemu startup script"
        )
        hda = None
        if args.get("hda"):
            hda_arg: str = args.get("hda", "")
            hda_arg_path: Path = Path(hda_arg)
            hda = (hda_arg_path, args.get("hdaInterface", ""))
        return LegacyStartCommand(
            netBackendArgs=args.get("netBackendArgs"),
            netFrontendArgs=args.get("netFrontendArgs"),
            hda=hda,
            cdrom=args.get("cdrom"),
            usb=args.get("usb"),
            bios=args.get("bios"),
            qemuBinary=args.get("qemuBinary"),
            qemuFlags=args.get("qemuFlags"),
        )

    def is_up(self) -> bool:
        """True once the VM has booted and the root shell is connected."""
        return self.booted and self.connected

    def log(self, msg: str) -> None:
        rootlog.log(msg, {"machine": self.name})

    def log_serial(self, msg: str) -> None:
        rootlog.log_serial(msg, self.name)

    def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager:
        """Context manager that logs *msg* as a nested section for this machine."""
        my_attrs = {"machine": self.name}
        my_attrs.update(attrs)
        return rootlog.nested(msg, my_attrs)

    def wait_for_monitor_prompt(self) -> str:
        """Read from the monitor socket until the "(qemu) " prompt appears."""
        with self.nested("waiting for monitor prompt"):
            assert self.monitor is not None
            answer = ""
            while True:
                undecoded_answer = self.monitor.recv(1024)
                if not undecoded_answer:
                    break
                answer += undecoded_answer.decode()
                if answer.endswith("(qemu) "):
                    break
            return answer

    def send_monitor_command(self, command: str) -> str:
        """Send a command to the qemu monitor and return its full reply."""
        self.run_callbacks()
        with self.nested("sending monitor command: {}".format(command)):
            message = ("{}\n".format(command)).encode()
            assert self.monitor is not None
            self.monitor.send(message)
            return self.wait_for_monitor_prompt()

    def wait_for_unit(self, unit: str, user: Optional[str] = None) -> None:
        """Wait for a systemd unit to get into "active" state.
        Throws exceptions on "failed" and "inactive" states as well as
        after timing out.
        """

        def check_active(_: Any) -> bool:
            info = self.get_unit_info(unit, user)
            state = info["ActiveState"]
            if state == "failed":
                raise Exception('unit "{}" reached state "{}"'.format(unit, state))

            if state == "inactive":
                status, jobs = self.systemctl("list-jobs --full 2>&1", user)
                if "No jobs" in jobs:
                    # Re-check: inactive with no pending jobs means the unit
                    # will never become active on its own.
                    info = self.get_unit_info(unit, user)
                    if info["ActiveState"] == state:
                        raise Exception(
                            (
                                'unit "{}" is inactive and there ' "are no pending jobs"
                            ).format(unit)
                        )

            return state == "active"

        with self.nested(
            "waiting for unit {}{}".format(
                unit, f" with user {user}" if user is not None else ""
            )
        ):
            retry(check_active)

    def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]:
        """Return `systemctl show` output for *unit* as a key/value dict."""
        status, lines = self.systemctl('--no-pager show "{}"'.format(unit), user)
        if status != 0:
            raise Exception(
                'retrieving systemctl info for unit "{}" {} failed with exit code {}'.format(
                    unit, "" if user is None else 'under user "{}"'.format(user), status
                )
            )

        line_pattern = re.compile(r"^([^=]+)=(.*)$")

        def tuple_from_line(line: str) -> Tuple[str, str]:
            match = line_pattern.match(line)
            assert match is not None
            return match[1], match[2]

        return dict(
            tuple_from_line(line)
            for line in lines.split("\n")
            if line_pattern.match(line)
        )

    def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
        """Run `systemctl <q>`, optionally as *user* via a login shell."""
        if user is not None:
            q = q.replace("'", "\\'")
            return self.execute(
                (
                    "su -l {} --shell /bin/sh -c "
                    "$'XDG_RUNTIME_DIR=/run/user/`id -u` "
                    "systemctl --user {}'"
                ).format(user, q)
            )
        return self.execute("systemctl {}".format(q))

    def require_unit_state(self, unit: str, require_state: str = "active") -> None:
        """Assert that *unit* is currently in *require_state*, else raise."""
        with self.nested(
            "checking if unit ‘{}’ has reached state '{}'".format(unit, require_state)
        ):
            info = self.get_unit_info(unit)
            state = info["ActiveState"]
            if state != require_state:
                raise Exception(
                    "Expected unit ‘{}’ to to be in state ".format(unit)
                    + "'{}' but it is in state ‘{}’".format(require_state, state)
                )

    def _next_newline_closed_block_from_shell(self) -> str:
        """Read from the shell socket until a chunk ends with a newline."""
        assert self.shell
        output_buffer = []
        while True:
            # This receives up to 4096 bytes from the socket
            chunk = self.shell.recv(4096)
            if not chunk:
                # Probably a broken pipe, return the output we have
                break

            decoded = chunk.decode()
            output_buffer += [decoded]
            if decoded[-1] == "\n":
                break
        return "".join(output_buffer)

    def execute(
        self, command: str, check_return: bool = True, timeout: Optional[int] = 900
    ) -> Tuple[int, str]:
        """Run *command* in the guest shell; return (exit status, output).

        The command output is base64-encoded on the guest side so binary
        data survives the serial shell transport.  When check_return is
        False the status is reported as -1 without being queried.
        """
        self.run_callbacks()
        self.connect()

        # Always run command with shell opts
        command = f"set -euo pipefail; {command}"

        timeout_str = ""
        if timeout is not None:
            timeout_str = f"timeout {timeout}"

        out_command = (
            f"{timeout_str} sh -c {shlex.quote(command)} | (base64 --wrap 0; echo)\n"
        )

        assert self.shell
        self.shell.send(out_command.encode())

        # Get the output
        output = base64.b64decode(self._next_newline_closed_block_from_shell())

        if not check_return:
            return (-1, output.decode())

        # Get the return code
        self.shell.send("echo ${PIPESTATUS[0]}\n".encode())
        rc = int(self._next_newline_closed_block_from_shell().strip())

        return (rc, output.decode())

    def shell_interact(self) -> None:
        """Allows you to interact with the guest shell
        Should only be used during test development, not in the production test."""
        self.connect()
        self.log("Terminal is ready (there is no initial prompt):")

        assert self.shell
        subprocess.run(
            ["socat", "READLINE,prompt=$ ", f"FD:{self.shell.fileno()}"],
            pass_fds=[self.shell.fileno()],
        )

    def console_interact(self) -> None:
        """Allows you to interact with QEMU's stdin
        The shell can be exited with Ctrl+D. Note that Ctrl+C is not allowed to be used.
        QEMU's stdout is read line-wise.
        Should only be used during test development, not in the production test."""
        self.log("Terminal is ready (there is no prompt):")

        assert self.process
        assert self.process.stdin

        while True:
            try:
                char = sys.stdin.buffer.read(1)
            except KeyboardInterrupt:
                break
            if char == b"":  # ctrl+d
                self.log("Closing connection to the console")
                break
            self.send_console(char.decode())

    def succeed(self, *commands: str, timeout: Optional[int] = None) -> str:
        """Execute each command and check that it succeeds."""
        output = ""
        for command in commands:
            with self.nested("must succeed: {}".format(command)):
                (status, out) = self.execute(command, timeout=timeout)
                if status != 0:
                    self.log("output: {}".format(out))
                    raise Exception(
                        "command `{}` failed (exit code {})".format(command, status)
                    )
                output += out
        return output

    def fail(self, *commands: str, timeout: Optional[int] = None) -> str:
        """Execute each command and check that it fails."""
        output = ""
        for command in commands:
            with self.nested("must fail: {}".format(command)):
                (status, out) = self.execute(command, timeout=timeout)
                if status == 0:
                    raise Exception(
                        "command `{}` unexpectedly succeeded".format(command)
                    )
                output += out
        return output

    def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
        """Wait until a command returns success and return its output.
        Throws an exception on timeout.
        """
        output = ""

        def check_success(_: Any) -> bool:
            nonlocal output
            status, output = self.execute(command, timeout=timeout)
            return status == 0

        with self.nested("waiting for success: {}".format(command)):
            retry(check_success, timeout)
        return output

    def wait_until_fails(self, command: str, timeout: int = 900) -> str:
        """Wait until a command returns failure.
        Throws an exception on timeout.
        """
        output = ""

        def check_failure(_: Any) -> bool:
            nonlocal output
            status, output = self.execute(command, timeout=timeout)
            return status != 0

        with self.nested("waiting for failure: {}".format(command)):
            # Fix: pass the caller-supplied timeout to retry (it previously
            # fell back to the 900 s default, unlike wait_until_succeeds).
            retry(check_failure, timeout)
        return output

    def wait_for_shutdown(self) -> None:
        """Block until the qemu process exits, then reset the boot state."""
        if not self.booted:
            return

        with self.nested("waiting for the VM to power off"):
            sys.stdout.flush()
            assert self.process
            self.process.wait()

            self.pid = None
            self.booted = False
            self.connected = False

    def get_tty_text(self, tty: str) -> str:
        """Return the visible text contents of virtual console *tty*."""
        status, output = self.execute(
            "fold -w$(stty -F /dev/tty{0} size | "
            "awk '{{print $2}}') /dev/vcs{0}".format(tty)
        )
        return output

    def wait_until_tty_matches(self, tty: str, regexp: str) -> None:
        """Wait until the visible output on the chosen TTY matches regular
        expression. Throws an exception on timeout.
        """
        matcher = re.compile(regexp)

        def tty_matches(last: bool) -> bool:
            text = self.get_tty_text(tty)
            if last:
                self.log(
                    f"Last chance to match /{regexp}/ on TTY{tty}, "
                    f"which currently contains: {text}"
                )
            return len(matcher.findall(text)) > 0

        with self.nested("waiting for {} to appear on tty {}".format(regexp, tty)):
            retry(tty_matches)

    def send_chars(self, chars: str) -> None:
        """Type *chars* into the VM one keystroke at a time."""
        with self.nested("sending keys ‘{}‘".format(chars)):
            for char in chars:
                self.send_key(char)

    def wait_for_file(self, filename: str) -> None:
        """Waits until the file exists in machine's file system."""

        def check_file(_: Any) -> bool:
            status, _ = self.execute("test -e {}".format(filename))
            return status == 0

        with self.nested("waiting for file ‘{}‘".format(filename)):
            retry(check_file)

    def wait_for_open_port(self, port: int) -> None:
        """Wait until something listens on TCP *port* on the guest."""

        def port_is_open(_: Any) -> bool:
            status, _ = self.execute("nc -z localhost {}".format(port))
            return status == 0

        with self.nested("waiting for TCP port {}".format(port)):
            retry(port_is_open)

    def wait_for_closed_port(self, port: int) -> None:
        """Wait until nothing listens on TCP *port* on the guest."""

        def port_is_closed(_: Any) -> bool:
            status, _ = self.execute("nc -z localhost {}".format(port))
            return status != 0

        # Fix: the message previously lacked .format(port) and logged a
        # literal "{}" placeholder.
        with self.nested("waiting for TCP port {} to be closed".format(port)):
            retry(port_is_closed)

    def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
        return self.systemctl("start {}".format(jobname), user)

    def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
        return self.systemctl("stop {}".format(jobname), user)

    def wait_for_job(self, jobname: str) -> None:
        self.wait_for_unit(jobname)

    def connect(self) -> None:
        """Boot the VM if needed and wait for the guest root shell."""
        if self.connected:
            return

        with self.nested("waiting for the VM to finish booting"):
            self.start()

            assert self.shell

            tic = time.time()
            # The guest shell announces itself with an initial write.
            self.shell.recv(1024)
            # TODO: Timeout
            toc = time.time()

            self.log("connected to guest root shell")
            self.log("(connecting took {:.2f} seconds)".format(toc - tic))
            self.connected = True

    def screenshot(self, filename: str) -> None:
        """Take a screendump and convert it to a PNG under out_dir."""
        word_pattern = re.compile(r"^\w+$")
        if word_pattern.match(filename):
            filename = os.path.join(self.out_dir, "{}.png".format(filename))
        tmp = "{}.ppm".format(filename)

        with self.nested(
            "making screenshot {}".format(filename),
            {"image": os.path.basename(filename)},
        ):
            self.send_monitor_command("screendump {}".format(tmp))
            ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True)
            os.unlink(tmp)
            if ret.returncode != 0:
                raise Exception("Cannot convert screenshot")

    def copy_from_host_via_shell(self, source: str, target: str) -> None:
        """Copy a file from the host into the guest by piping it over the
        shell into the destination file. Works without host-guest shared folder.
        Prefer copy_from_host for whenever possible.
        """
        with open(source, "rb") as fh:
            content_b64 = base64.b64encode(fh.read()).decode()
            self.succeed(
                f"mkdir -p $(dirname {target})",
                f"echo -n {content_b64} | base64 -d > {target}",
            )

    def copy_from_host(self, source: str, target: str) -> None:
        """Copy a file from the host into the guest via the `shared_dir` shared
        among all the VMs (using a temporary directory).
        """
        host_src = Path(source)
        vm_target = Path(target)
        with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
            shared_temp = Path(shared_td)
            host_intermediate = shared_temp / host_src.name
            vm_shared_temp = Path("/tmp/shared") / shared_temp.name
            vm_intermediate = vm_shared_temp / host_src.name

            self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
            if host_src.is_dir():
                shutil.copytree(host_src, host_intermediate)
            else:
                shutil.copy(host_src, host_intermediate)
            self.succeed(make_command(["mkdir", "-p", vm_target.parent]))
            self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target]))

    def copy_from_vm(self, source: str, target_dir: str = "") -> None:
        """Copy a file from the VM (specified by an in-VM source path) to a path
        relative to `$out`. The file is copied via the `shared_dir` shared among
        all the VMs (using a temporary directory).
        """
        # Compute the source, target, and intermediate shared file names
        vm_src = Path(source)
        with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
            shared_temp = Path(shared_td)
            vm_shared_temp = Path("/tmp/shared") / shared_temp.name
            vm_intermediate = vm_shared_temp / vm_src.name
            intermediate = shared_temp / vm_src.name
            # Copy the file to the shared directory inside VM
            self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
            self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate]))
            abs_target = self.out_dir / target_dir / vm_src.name
            abs_target.parent.mkdir(exist_ok=True, parents=True)
            # Copy the file from the shared directory outside VM
            if intermediate.is_dir():
                shutil.copytree(intermediate, abs_target)
            else:
                shutil.copy(intermediate, abs_target)

    def dump_tty_contents(self, tty: str) -> None:
        """Debugging: Dump the contents of the TTY<n>"""
        self.execute("fold -w 80 /dev/vcs{} | systemd-cat".format(tty))

    def _get_screen_text_variants(self, model_ids: Iterable[int]) -> List[str]:
        """Screendump and OCR with each requested tesseract engine model."""
        with tempfile.TemporaryDirectory() as tmpdir:
            screenshot_path = os.path.join(tmpdir, "ppm")
            self.send_monitor_command(f"screendump {screenshot_path}")
            return _perform_ocr_on_screenshot(screenshot_path, model_ids)

    def get_screen_text_variants(self) -> List[str]:
        return self._get_screen_text_variants([0, 1, 2])

    def get_screen_text(self) -> str:
        return self._get_screen_text_variants([2])[0]

    def wait_for_text(self, regex: str) -> None:
        """Wait until OCR of the screen matches *regex*."""

        def screen_matches(last: bool) -> bool:
            variants = self.get_screen_text_variants()
            for text in variants:
                if re.search(regex, text) is not None:
                    return True

            if last:
                self.log("Last OCR attempt failed. Text was: {}".format(variants))

            return False

        with self.nested("waiting for {} to appear on screen".format(regex)):
            retry(screen_matches)

    def wait_for_console_text(self, regex: str) -> None:
        """Block until the accumulated serial console output matches *regex*."""
        with self.nested("waiting for {} to appear on console".format(regex)):
            # Buffer the console output, this is needed
            # to match multiline regexes.
            console = io.StringIO()
            while True:
                try:
                    # NOTE(review): a bare get() blocks forever, so the
                    # queue.Empty branch below appears unreachable — confirm
                    # whether a timeout was intended here.
                    console.write(self.last_lines.get())
                except queue.Empty:
                    self.sleep(1)
                    continue
                console.seek(0)
                matches = re.search(regex, console.read())
                if matches is not None:
                    return

    def send_key(self, key: str) -> None:
        """Press one key in the guest, translating via CHAR_TO_KEY."""
        key = CHAR_TO_KEY.get(key, key)
        self.send_monitor_command("sendkey {}".format(key))
        time.sleep(0.01)

    def send_console(self, chars: str) -> None:
        """Write raw characters to QEMU's stdin (the serial console)."""
        assert self.process
        assert self.process.stdin
        self.process.stdin.write(chars.encode())
        self.process.stdin.flush()

    def start(self) -> None:
        """Launch qemu, accept the monitor/shell sockets and start the
        serial-console reader thread.  Idempotent once booted."""
        if self.booted:
            return

        self.log("starting vm")

        def clear(path: Path) -> Path:
            if path.exists():
                path.unlink()
            return path

        def create_socket(path: Path) -> socket.socket:
            s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
            s.bind(str(path))
            s.listen(1)
            return s

        monitor_socket = create_socket(clear(self.monitor_path))
        shell_socket = create_socket(clear(self.shell_path))
        self.process = self.start_command.run(
            self.state_dir,
            self.shared_dir,
            self.monitor_path,
            self.shell_path,
        )
        self.monitor, _ = monitor_socket.accept()
        self.shell, _ = shell_socket.accept()

        # Store last serial console lines for use
        # of wait_for_console_text
        self.last_lines: Queue = Queue()

        def process_serial_output() -> None:
            assert self.process
            assert self.process.stdout
            for _line in self.process.stdout:
                # Ignore undecodable bytes that may occur in boot menus
                line = _line.decode(errors="ignore").replace("\r", "").rstrip()
                self.last_lines.put(line)
                self.log_serial(line)

        self.serial_thread = threading.Thread(target=process_serial_output)
        self.serial_thread.start()

        self.wait_for_monitor_prompt()

        self.pid = self.process.pid
        self.booted = True

        self.log("QEMU running (pid {})".format(self.pid))

    def cleanup_statedir(self) -> None:
        """Delete the per-VM state directory (disk state, sockets)."""
        shutil.rmtree(self.state_dir)
        rootlog.log(f"deleting VM state directory {self.state_dir}")
        rootlog.log("if you want to keep the VM state, pass --keep-vm-state")

    def shutdown(self) -> None:
        """Politely power off the guest via its root shell."""
        if not self.booted:
            return

        assert self.shell
        self.shell.send("poweroff\n".encode())
        self.wait_for_shutdown()

    def crash(self) -> None:
        """Simulate a sudden power loss by quitting qemu outright."""
        if not self.booted:
            return

        self.log("forced crash")
        self.send_monitor_command("quit")
        self.wait_for_shutdown()

    def wait_for_x(self) -> None:
        """Wait until it is possible to connect to the X server. Note that
        testing the existence of /tmp/.X11-unix/X0 is insufficient.
        """

        def check_x(_: Any) -> bool:
            cmd = (
                "journalctl -b SYSLOG_IDENTIFIER=systemd | "
                + 'grep "Reached target Current graphical"'
            )
            status, _ = self.execute(cmd)
            if status != 0:
                return False
            status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]")
            return status == 0

        with self.nested("waiting for the X11 server"):
            retry(check_x)

    def get_window_names(self) -> List[str]:
        """Return the titles of all windows known to the X server."""
        return self.succeed(
            r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
        ).splitlines()

    def wait_for_window(self, regexp: str) -> None:
        """Wait until some X window title matches *regexp*."""
        pattern = re.compile(regexp)

        def window_is_visible(last_try: bool) -> bool:
            names = self.get_window_names()
            if last_try:
                self.log(
                    "Last chance to match {} on the window list,".format(regexp)
                    + " which currently contains: "
                    + ", ".join(names)
                )
            return any(pattern.search(name) for name in names)

        with self.nested("waiting for a window to appear"):
            retry(window_is_visible)

    def sleep(self, secs: int) -> None:
        # We want to sleep in *guest* time, not *host* time.
        self.succeed(f"sleep {secs}")

    def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None:
        """Forward a TCP port on the host to a TCP port on the guest.
        Useful during interactive testing.
        """
        self.send_monitor_command(
            "hostfwd_add tcp::{}-:{}".format(host_port, guest_port)
        )

    def block(self) -> None:
        """Make the machine unreachable by shutting down eth1 (the multicast
        interface used to talk to the other VMs). We keep eth0 up so that
        the test driver can continue to talk to the machine.
        """
        self.send_monitor_command("set_link virtio-net-pci.1 off")

    def unblock(self) -> None:
        """Make the machine reachable."""
        self.send_monitor_command("set_link virtio-net-pci.1 on")

    def release(self) -> None:
        """Terminate qemu, close the control sockets and join the reader thread."""
        if self.pid is None:
            return
        rootlog.info(f"kill machine (pid {self.pid})")
        assert self.process
        assert self.shell
        assert self.monitor
        assert self.serial_thread

        self.process.terminate()
        self.shell.close()
        self.monitor.close()
        self.serial_thread.join()

    def run_callbacks(self) -> None:
        """Invoke every registered per-operation callback."""
        for callback in self.callbacks:
            callback()
|
rss_feed.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import PyRSS2Gen
import threading
import requests
from all_channel import get_all_channel
def rss_parse(thread_id, r_ids):
    """Fetch the latest articles of one AcFun channel and write an RSS file.

    Args:
        thread_id: numeric channel id, used in the API query, the feed link
            and the output file name.
        r_ids: "name$$realmIds" string — the part before "$$" is the human
            readable channel name, the part after it the realm-id filter.

    Side effects:
        Writes ./rrs/feed_<thread_id>.xml (the directory must exist).
    """
    headers = {
        "acPlatform": "ANDROID_PHONE",
        "User-agent": "acvideo core/5.13.0.635(Xiaomi;MI 5;7.0)",
        "deviceType": "1",
        "uuid": "326f52e8-63ad-448d-8be7-5b3c75db85d3",
        "Cookie": "did=ddd824e2-dc17-38c2-97b1-6de8d1f44af2",
        "appVersion": "5.13.0.635",
        "market": "tencent",
        "productId": "2000",
        "uid": "0",
        "resolution": "1080x1920",
        "udid": "ddd824e2-dc17-38c2-97b1-6de8d1f44af2",
        "requestTime": "2019-01-30 16:36:16.928",
        "Host": "apipc.app.acfun.cn",
        "Connection": "Keep-Alive",
        "Accept-Encoding": "gzip"
    }
    params = {
        "channelId": thread_id,
        "day": -1,
        "sort": 5,
        "realmIds": r_ids.split("$$")[1],
        "pageNo": 1,
        "pageSize": 10,
    }
    resp = requests.get("http://apipc.app.acfun.cn/v3/regions/search",
                        headers=headers,
                        params=params)
    # Renamed locals: the old code shadowed the builtin `list` and used the
    # confusing name `json` for the decoded payload.
    payload = resp.json()
    items = []
    for entry in payload["vdata"]["list"]:
        items.append(PyRSS2Gen.RSSItem(
            title=entry["title"],
            # NOTE(review): link uses acfun.tv while guid uses acfun.cn —
            # kept as-is, confirm which domain is intended.
            link="http://www.acfun.tv/a/ac" + entry["href"],
            guid=PyRSS2Gen.Guid("http://www.acfun.cn/a/ac" + entry["href"]),
            # Fix: entry["time"] is epoch milliseconds; PyRSS2Gen needs a
            # datetime to emit a valid RFC 822 pubDate (the old code passed
            # a pre-formatted string, producing invalid RSS dates).
            pubDate=datetime.datetime.fromtimestamp(entry["time"] / 1000)))
    rss = PyRSS2Gen.RSS2(
        title="Acfun-文章区-%s" % r_ids.split("$$")[0],
        link="http://www.acfun.cm/v/list%d/index.htm" % thread_id,
        lastBuildDate=datetime.datetime.now(),
        description="Acfun-文章区-%s" % r_ids.split("$$")[0],
        items=items)
    save_file = "./rrs/feed_%d.xml" % thread_id
    # Fix: close the output file deterministically instead of leaking the
    # handle returned by open().
    with open(save_file, "w", encoding="utf-8") as fp:
        rss.write_xml(fp, encoding="utf-8")
if __name__ == "__main__":
    # Fetch every known channel concurrently: one worker thread per channel,
    # each writing its own feed_<id>.xml file via rss_parse.
    channel = get_all_channel()
    for k, v in channel.items():
        threading.Thread(target=rss_parse, args=(k, v,)).start()
|
sleepycat.py | from rdflib.store import Store, VALID_STORE, CORRUPTED_STORE, NO_STORE, UNKNOWN
from rdflib.term import URIRef
from rdflib.py3compat import b
def bb(u):
    """Return *u* encoded as UTF-8 bytes (shorthand used for DB keys)."""
    return u.encode('utf-8')
try:
from bsddb import db
has_bsddb = True
except ImportError:
try:
from bsddb3 import db
has_bsddb = True
except ImportError:
has_bsddb = False
from os import mkdir
from os.path import exists, abspath
from urllib import pathname2url
from threading import Thread
import logging
_logger = logging.getLogger(__name__)
__all__ = ['Sleepycat']
class Sleepycat(Store):
context_aware = True
formula_aware = True
transaction_aware = False
db_env = None
    def __init__(self, configuration=None, identifier=None):
        """Create the store; fails fast when no bsddb binding is importable."""
        if not has_bsddb: raise ImportError("Unable to import bsddb/bsddb3, store is unusable.")
        self.__open = False
        self.__identifier = identifier
        super(Sleepycat, self).__init__(configuration)
        # Shortcuts to the (un)pickling helpers supplied by the base Store.
        self._loads = self.node_pickler.loads
        self._dumps = self.node_pickler.dumps
    def __get_identifier(self):
        # Read-only accessor backing the public ``identifier`` property below.
        return self.__identifier
    identifier = property(__get_identifier)
    def _init_db_environment(self, homeDir, create=True):
        """Open (and optionally create) the Berkeley DB environment at homeDir.

        Returns the opened DBEnv, or NO_STORE when the directory is missing
        and create is false.
        """
        envsetflags = db.DB_CDB_ALLDB
        envflags = db.DB_INIT_MPOOL | db.DB_INIT_CDB | db.DB_THREAD
        if not exists(homeDir):
            if create==True:
                mkdir(homeDir) # TODO: implement create method and refactor this to it
                self.create(homeDir)
            else:
                return NO_STORE
        db_env = db.DBEnv()
        db_env.set_cachesize(0, 1024*1024*50) # TODO
        #db_env.set_lg_max(1024*1024)
        db_env.set_flags(envsetflags, 1)
        db_env.open(homeDir, envflags | db.DB_CREATE)
        return db_env
    def is_open(self):
        """Return True while the store is open (between open() and close())."""
        return self.__open
    def open(self, path, create=True):
        """Open the store rooted at *path*, creating it when *create* is true.

        Opens the three triple indices (cspo/cpos/cosp), the contexts,
        namespace/prefix and key<->id databases, precomputes the lookup
        table used to answer triple-pattern queries, and starts the
        background sync thread.  Returns VALID_STORE or NO_STORE.
        """
        if not has_bsddb: return NO_STORE
        homeDir = path
        if self.__identifier is None:
            # Default identifier: file URL of the store directory.
            self.__identifier = URIRef(pathname2url(abspath(homeDir)))
        db_env = self._init_db_environment(homeDir, create)
        if db_env == NO_STORE:
            return NO_STORE
        self.db_env = db_env
        self.__open = True
        dbname = None
        dbtype = db.DB_BTREE
        # auto-commit ensures that the open-call commits when transactions are enabled
        dbopenflags = db.DB_THREAD
        if self.transaction_aware == True:
            dbopenflags |= db.DB_AUTO_COMMIT
        dbmode = 0660
        dbsetflags = 0
        # create and open the DBs
        self.__indicies = [None,] * 3
        self.__indicies_info = [None,] * 3
        for i in xrange(0, 3):
            # Index file names are derived from the key layout, e.g. "c^s^p^o^".
            index_name = to_key_func(i)((b("s"), b("p"), b("o")), b("c")).decode()
            index = db.DB(db_env)
            index.set_flags(dbsetflags)
            index.open(index_name, dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
            self.__indicies[i] = index
            self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))
        # For each of the 8 possible bound/unbound (s, p, o) patterns, pick
        # the index matching the longest contiguous run of bound terms
        # (longer runs score higher, ties broken by earlier start position).
        lookup = {}
        for i in xrange(0, 8):
            results = []
            for start in xrange(0, 3):
                score = 1
                len = 0  # NOTE: shadows the builtin len() inside this loop
                for j in xrange(start, start+3):
                    if i & (1<<(j%3)):
                        score = score << 1
                        len += 1
                    else:
                        break
                tie_break = 2-start
                results.append(((score, tie_break), start, len))
            results.sort()
            score, start, len = results[-1]
            def get_prefix_func(start, end):
                # Factory (rather than a plain closure) so each lookup entry
                # captures its own start/end values.
                def get_prefix(triple, context):
                    if context is None:
                        yield ""
                    else:
                        yield context
                    i = start
                    while i<end:
                        yield triple[i%3]
                        i += 1
                    yield ""
                return get_prefix
            lookup[i] = (self.__indicies[start], get_prefix_func(start, start + len), from_key_func(start), results_from_key_func(start, self._from_string))
        self.__lookup_dict = lookup
        self.__contexts = db.DB(db_env)
        self.__contexts.set_flags(dbsetflags)
        self.__contexts.open("contexts", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
        self.__namespace = db.DB(db_env)
        self.__namespace.set_flags(dbsetflags)
        self.__namespace.open("namespace", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
        self.__prefix = db.DB(db_env)
        self.__prefix.set_flags(dbsetflags)
        self.__prefix.open("prefix", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
        self.__k2i = db.DB(db_env)
        self.__k2i.set_flags(dbsetflags)
        self.__k2i.open("k2i", dbname, db.DB_HASH, dbopenflags|db.DB_CREATE, dbmode)
        self.__i2k = db.DB(db_env)
        self.__i2k.set_flags(dbsetflags)
        self.__i2k.open("i2k", dbname, db.DB_RECNO, dbopenflags|db.DB_CREATE, dbmode)
        self.__needs_sync = False
        # Daemon thread that periodically flushes dirty DBs (see __sync_run).
        t = Thread(target=self.__sync_run)
        t.setDaemon(True)
        t.start()
        self.__sync_thread = t
        return VALID_STORE
    def __sync_run(self):
        """Background loop coalescing sync() calls triggered by writers.

        Once a write sets __needs_sync, wait for a quiet period of
        min_seconds (capped at max_seconds since the first dirty mark)
        before flushing, so bursts of writes result in a single sync.
        """
        from time import sleep, time
        try:
            min_seconds, max_seconds = 10, 300
            while self.__open:
                if self.__needs_sync:
                    # t0: first dirty mark of this burst; t1: most recent one.
                    t0 = t1 = time()
                    self.__needs_sync = False
                    while self.__open:
                        sleep(.1)
                        if self.__needs_sync:
                            t1 = time()
                            self.__needs_sync = False
                        if time()-t1 > min_seconds or time()-t0 > max_seconds:
                            self.__needs_sync = False
                            _logger.debug("sync")
                            self.sync()
                            break
                else:
                    sleep(1)
        except Exception, e:
            _logger.exception(e)
    def sync(self):
        """Flush every database to disk (no-op when the store is closed)."""
        if self.__open:
            for i in self.__indicies:
                i.sync()
            self.__contexts.sync()
            self.__namespace.sync()
            self.__prefix.sync()
            self.__i2k.sync()
            self.__k2i.sync()
    def close(self, commit_pending_transaction=False):
        """Shut the store down: stop the sync thread and close all DB handles."""
        self.__open = False
        # Clearing __open makes the __sync_run loop exit; wait for it first.
        self.__sync_thread.join()
        for i in self.__indicies:
            i.close()
        self.__contexts.close()
        self.__namespace.close()
        self.__prefix.close()
        self.__i2k.close()
        self.__k2i.close()
        self.db_env.close()
def add(self, (subject, predicate, object), context, quoted=False, txn=None):
    """\
    Add a triple to the store of triples.

    Writes the triple into all three permutation indices (cspo, cpos,
    cosp) under context c and -- unless quoted -- into the conjunctive
    ("" context) rows, whose value is the ^-joined set of contexts that
    contain the triple.  Marks the store dirty for the sync thread.
    """
    assert self.__open, "The Store must be open."
    assert context!=self, "Can not add triple directly to store"
    Store.add(self, (subject, predicate, object), context, quoted)
    _to_string = self._to_string
    # terms are interned to integer-index strings before keying
    s = _to_string(subject, txn=txn)
    p = _to_string(predicate, txn=txn)
    o = _to_string(object, txn=txn)
    c = _to_string(context, txn=txn)
    cspo, cpos, cosp = self.__indicies
    value = cspo.get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
    if value is None:
        # new triple for this context: record the context itself, then
        # merge c into the triple's context list
        self.__contexts.put(bb(c), "", txn=txn)
        contexts_value = cspo.get(bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn) or b("")
        contexts = set(contexts_value.split(b("^")))
        contexts.add(bb(c))
        contexts_value = b("^").join(contexts)
        assert contexts_value!=None
        cspo.put(bb("%s^%s^%s^%s^" % (c, s, p, o)), "", txn=txn)
        cpos.put(bb("%s^%s^%s^%s^" % (c, p, o, s)), "", txn=txn)
        cosp.put(bb("%s^%s^%s^%s^" % (c, o, s, p)), "", txn=txn)
        if not quoted:
            # conjunctive rows carry the merged context list as value
            cspo.put(bb("%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
            cpos.put(bb("%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
            cosp.put(bb("%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
        self.__needs_sync = True
def __remove(self, (s, p, o), c, quoted=False, txn=None):
    # Internal helper: delete an already-stringified triple from every
    # index under context c, and update (or drop, when no context holds
    # it anymore) its conjunctive ("" context) rows.
    cspo, cpos, cosp = self.__indicies
    contexts_value = cspo.get(b("^").join([b(""), s, p, o, b("")]), txn=txn) or b("")
    contexts = set(contexts_value.split(b("^")))
    contexts.discard(c)
    contexts_value = b("^").join(contexts)
    for i, _to_key, _from_key in self.__indicies_info:
        i.delete(_to_key((s, p, o), c), txn=txn)
    if not quoted:
        if contexts_value:
            # triple still lives in other contexts: rewrite the list
            for i, _to_key, _from_key in self.__indicies_info:
                i.put(_to_key((s, p, o), b("")), contexts_value, txn=txn)
        else:
            # no contexts left: drop the conjunctive rows entirely
            for i, _to_key, _from_key in self.__indicies_info:
                try:
                    i.delete(_to_key((s, p, o), b("")), txn=txn)
                except db.DBNotFoundError, e:
                    pass # TODO: is it okay to ignore these?
def remove(self, (subject, predicate, object), context, txn=None):
    """Remove triples matching the pattern; None fields are wildcards.

    A fully-bound (s, p, o, context) pattern is a single direct delete;
    otherwise the best-matching index is walked by key prefix.
    """
    assert self.__open, "The Store must be open."
    Store.remove(self, (subject, predicate, object), context)
    _to_string = self._to_string
    if context is not None:
        if context == self:
            # the store itself stands for "any context"
            context = None
    if subject is not None and predicate is not None and object is not None and context is not None:
        s = _to_string(subject, txn=txn)
        p = _to_string(predicate, txn=txn)
        o = _to_string(object, txn=txn)
        c = _to_string(context, txn=txn)
        value = self.__indicies[0].get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
        if value is not None:
            self.__remove((bb(s), bb(p), bb(o)), bb(c), txn=txn)
            self.__needs_sync = True
    else:
        cspo, cpos, cosp = self.__indicies
        index, prefix, from_key, results_from_key = self.__lookup((subject, predicate, object), context, txn=txn)
        cursor = index.cursor(txn=txn)
        try:
            current = cursor.set_range(prefix)
            needs_sync = True
        except db.DBNotFoundError:
            # nothing at or after the prefix: no matches, nothing dirtied
            current = None
            needs_sync = False
        cursor.close()
        while current:
            key, value = current
            # fresh cursor per step: deletes happen between iterations
            cursor = index.cursor(txn=txn)
            try:
                cursor.set_range(key)
                # Hack to stop 2to3 converting this to next(cursor)
                current = getattr(cursor, 'next')()
            except db.DBNotFoundError:
                current = None
            cursor.close()
            if key.startswith(prefix):
                c, s, p, o = from_key(key)
                if context is None:
                    contexts_value = index.get(key, txn=txn) or b("")
                    contexts = set(contexts_value.split(b("^"))) # remove triple from all non quoted contexts
                    contexts.add(b("")) # and from the conjunctive index
                    for c in contexts:
                        for i, _to_key, _ in self.__indicies_info:
                            i.delete(_to_key((s, p, o), c), txn=txn)
                else:
                    self.__remove((s, p, o), c, txn=txn)
            else:
                break
        if context is not None:
            if subject is None and predicate is None and object is None:
                # TODO: also if context becomes empty and not just on remove((None, None, None), c)
                try:
                    self.__contexts.delete(bb(_to_string(context, txn=txn)), txn=txn)
                except db.DBNotFoundError, e:
                    pass
        self.__needs_sync = needs_sync
def triples(self, (subject, predicate, object), context=None, txn=None):
    """A generator over all the triples matching the pattern.

    Yields results_from_key(...) tuples; None fields are wildcards.
    A cursor is re-opened per step so no BDB cursor is pinned while the
    generator is suspended at a yield.
    """
    assert self.__open, "The Store must be open."
    if context is not None:
        if context == self:
            # the store itself stands for "any context"
            context = None
    _from_string = self._from_string
    index, prefix, from_key, results_from_key = self.__lookup((subject, predicate, object), context, txn=txn)
    cursor = index.cursor(txn=txn)
    try:
        current = cursor.set_range(prefix)
    except db.DBNotFoundError:
        current = None
    cursor.close()
    while current:
        key, value = current
        cursor = index.cursor(txn=txn)
        try:
            cursor.set_range(key)
            # Cheap hack so 2to3 doesn't convert to next(cursor)
            current = getattr(cursor, 'next')()
        except db.DBNotFoundError:
            current = None
        cursor.close()
        if key and key.startswith(prefix):
            contexts_value = index.get(key, txn=txn)
            yield results_from_key(key, subject, predicate, object, contexts_value)
        else:
            break
def __len__(self, context=None):
    """Return the number of triples, optionally restricted to *context*.

    Counts keys in the cspo index sharing the context prefix; the
    empty-context prefix ("^") selects the conjunctive (all-contexts)
    rows.
    """
    assert self.__open, "The Store must be open."
    if context is not None:
        if context == self:
            context = None
    if context is None:
        prefix = b("^")
    else:
        prefix = bb("%s^" % self._to_string(context))
    index = self.__indicies[0]
    cursor = index.cursor()
    # set_range raises DBNotFoundError when no key >= prefix exists
    # (e.g. an empty store); every other cursor walk in this class
    # guards it -- treat as zero matches instead of propagating.
    try:
        current = cursor.set_range(prefix)
    except db.DBNotFoundError:
        current = None
    count = 0
    while current:
        key, value = current
        if key.startswith(prefix):
            count += 1
            # Hack to stop 2to3 converting this to next(cursor)
            current = getattr(cursor, 'next')()
        else:
            break
    cursor.close()
    return count
def bind(self, prefix, namespace):
    """Bind *prefix* to *namespace* (both stored UTF-8 encoded).

    If the namespace was already bound to another prefix, that old
    prefix's reverse entry is removed first so the mapping stays 1:1.
    """
    prefix = prefix.encode("utf-8")
    namespace = namespace.encode("utf-8")
    bound_prefix = self.__prefix.get(namespace)
    if bound_prefix:
        self.__namespace.delete(bound_prefix)
    self.__prefix[namespace] = prefix
    self.__namespace[prefix] = namespace
def namespace(self, prefix):
    """Return the namespace bound to *prefix* as a URIRef, or None."""
    encoded = prefix.encode("utf-8")
    stored = self.__namespace.get(encoded, None)
    if stored is None:
        return None
    return URIRef(stored.decode('utf-8'))
def prefix(self, namespace):
    """Return the prefix bound to *namespace* (decoded), or None."""
    key = namespace.encode("utf-8")
    bound = self.__prefix.get(key, None)
    return bound.decode('utf-8') if bound is not None else None
def namespaces(self):
    """Generator over (prefix, URIRef(namespace)) bindings.

    Results are materialized into a list first so the BDB cursor is
    closed promptly rather than held open across consumer code.
    """
    cursor = self.__namespace.cursor()
    results = []
    current = cursor.first()
    while current:
        prefix, namespace = current
        results.append((prefix.decode('utf-8'), namespace.decode('utf-8')))
        # Hack to stop 2to3 converting this to next(cursor)
        current = getattr(cursor, 'next')()
    cursor.close()
    for prefix, namespace in results:
        yield prefix, URIRef(namespace)
def contexts(self, triple=None):
    """Generator over contexts: those containing *triple*, or every
    context in the store when triple is None."""
    _from_string = self._from_string
    _to_string = self._to_string
    if triple:
        s, p, o = triple
        s = _to_string(s)
        p = _to_string(p)
        o = _to_string(o)
        # the conjunctive ("" context) row's value is the ^-joined
        # list of contexts holding the triple
        contexts = self.__indicies[0].get(bb("%s^%s^%s^%s^" % ("", s, p, o)))
        if contexts:
            for c in contexts.split(b("^")):
                if c:
                    yield _from_string(c)
    else:
        index = self.__contexts
        cursor = index.cursor()
        current = cursor.first()
        cursor.close()
        while current:
            key, value = current
            context = _from_string(key)
            yield context
            # re-open a cursor per step so none is pinned across yields
            cursor = index.cursor()
            try:
                cursor.set_range(key)
                # Hack to stop 2to3 converting this to next(cursor)
                current = getattr(cursor, 'next')()
            except db.DBNotFoundError:
                current = None
            cursor.close()
def _from_string(self, i):
    """Rehydrate a term from its integer index via the i2k table."""
    k = self.__i2k.get(int(i))
    return self._loads(k)
def _to_string(self, term, txn=None):
    """Intern *term*: return its integer index as a string, assigning a
    fresh i2k/k2i pair on first sight of the term."""
    k = self._dumps(term)
    i = self.__k2i.get(k, txn=txn)
    if i is None:
        # weird behavior from bsddb not taking a txn as a keyword argument
        # for append
        if self.transaction_aware:
            i = "%s" % self.__i2k.append(k, txn)
        else:
            i = "%s" % self.__i2k.append(k)
        self.__k2i.put(k, i, txn=txn)
    else:
        # stored values come back as bytes; callers expect text
        i = i.decode()
    return i
def __lookup(self, (subject, predicate, object), context, txn=None):
    """Pick the best index for a triple pattern.

    Builds a bitmask (1=subject, 2=predicate, 4=object bound), interns
    the bound terms, and returns the pre-computed
    (index, concrete key prefix, from_key, results_from_key) entry for
    that mask.
    """
    _to_string = self._to_string
    if context is not None:
        context = _to_string(context, txn=txn)
    i = 0
    if subject is not None:
        i += 1
        subject = _to_string(subject, txn=txn)
    if predicate is not None:
        i += 2
        predicate = _to_string(predicate, txn=txn)
    if object is not None:
        i += 4
        object = _to_string(object, txn=txn)
    index, prefix_func, from_key, results_from_key = self.__lookup_dict[i]
    #print (subject, predicate, object), context, prefix_func, index #DEBUG
    prefix = bb("^".join(prefix_func((subject, predicate, object), context)))
    return index, prefix, from_key, results_from_key
def to_key_func(i):
    """Build a key encoder that rotates a triple by *i* positions.

    The returned callable joins context plus the rotated triple parts
    with '^', ending with a trailing '^' (the empty final element).
    """
    def to_key(triple, context):
        "Takes a string; returns key"
        parts = (context,
                 triple[i % 3],
                 triple[(i + 1) % 3],
                 triple[(i + 2) % 3],
                 b(""))  # empty tail tacks on the trailing ^
        return b("^").join(parts)
    return to_key
def from_key_func(i):
    """Build a key decoder inverting the rotation applied by to_key_func(i)."""
    def from_key(key):
        "Takes a key; returns string"
        parts = key.split(b("^"))
        # parts[0] is the context; the arithmetic undoes the rotation so
        # the remaining parts come back in (subject, predicate, object) order
        return parts[0], parts[(3-i+0)%3+1], parts[(3-i+1)%3+1], parts[(3-i+2)%3+1]
    return from_key
def results_from_key_func(i, from_string):
    """Build a decoder yielding ((s, p, o), contexts-generator) for a key.

    Bound terms supplied by the caller are reused as-is; only wildcard
    positions are rehydrated from their interned string form.
    """
    def from_key(key, subject, predicate, object, contexts_value):
        "Takes a key and subject, predicate, object; returns tuple for yield"
        parts = key.split(b("^"))
        if subject is None:
            # TODO: i & 1: # dis assemble and/or measure to see which is faster
            # subject is None or i & 1
            s = from_string(parts[(3-i+0)%3+1])
        else:
            s = subject
        if predicate is None:#i & 2:
            p = from_string(parts[(3-i+1)%3+1])
        else:
            p = predicate
        if object is None:#i & 4:
            o = from_string(parts[(3-i+2)%3+1])
        else:
            o = object
        # contexts are produced lazily from the ^-joined value
        return (s, p, o), (from_string(c) for c in contexts_value.split(b("^")) if c)
    return from_key
def readable_index(i):
    """Render index bitmask *i* as "s,p,o", using "?" for unbound slots."""
    labels = []
    for bit, name in ((1, "s"), (2, "p"), (4, "o")):
        labels.append(name if i & bit else "?")
    return "%s,%s,%s" % tuple(labels)
|
__init__.py | import os
import sys
import cmd
import time
import serial
import select
import struct
import threading
import math
import cPickle as pickle
from cancat import iso_tp
# defaults for Linux:
serialdev = '/dev/ttyACM0' # FIXME: if Windows: "COM10" is default
baud = 4000000
# command constants (used to identify messages between
# python client and the CanCat transceiver
# 0x2e-0x3C: messages originating from the transceiver
CMD_LOG = 0x2f
CMD_LOG_HEX = 0x2e
CMD_CAN_RECV = 0x30
CMD_PING_RESPONSE = 0x31
CMD_CHANGE_BAUD_RESULT = 0x32
CMD_CAN_BAUD_RESULT = 0x33
CMD_CAN_SEND_RESULT = 0x34
CMD_ISO_RECV = 0x35
CMD_SET_FILT_MASK = 0x36
CMD_CAN_MODE_RESULT = 0x37
CMD_CAN_SEND_ISOTP_RESULT = 0x38
CMD_CAN_RECV_ISOTP_RESULT = 0x39
CMD_CAN_SENDRECV_ISOTP_RESULT = 0x3A
CMD_SET_FILT_MASK_RESULT = 0x3B
CMD_PRINT_CAN_REGS = 0x3C
# 0x41+: commands sent from the python client to the transceiver
CMD_PING = 0x41
CMD_CHANGE_BAUD = 0x42
CMD_CAN_BAUD = 0x43
CMD_CAN_SEND = 0x44
CMD_CAN_MODE = 0x45
CMD_CAN_MODE_SNIFF_CAN0 = 0x00 # Start sniffing on can 0
CMD_CAN_MODE_SNIFF_CAN1 = 0x01 # Start sniffing on can 1
CMD_CAN_MODE_CITM = 0x02 # Start CITM between can1 and can2
CMD_CAN_SEND_ISOTP = 0x46
CMD_CAN_RECV_ISOTP = 0x47
CMD_CAN_SENDRECV_ISOTP = 0x48
# single-byte result codes returned for CAN operations
CAN_RESP_OK = (0)
CAN_RESP_FAILINIT = (1)
CAN_RESP_FAILTX = (2)
CAN_RESP_MSGAVAIL = (3)
CAN_RESP_NOMSG = (4)
CAN_RESP_CTRLERROR = (5)
CAN_RESP_GETTXBFTIMEOUT = (6)
CAN_RESP_SENDMSGTIMEOUT = (7)
CAN_RESP_FAIL = (0xff)
# reverse map: result code -> CAN_RESP_* name, used in error messages
CAN_RESPS = { v: k for k,v in globals().items() if k.startswith('CAN_RESP') }
# constants for setting baudrate for the CAN bus
CAN_AUTOBPS = 0
CAN_5KBPS = 1
CAN_10KBPS = 2
CAN_20KBPS = 3
CAN_25KBPS = 4
CAN_31K25BPS = 5
CAN_33KBPS = 6
CAN_40KBPS = 7
CAN_50KBPS = 8
CAN_80KBPS = 9
CAN_83K3BPS = 10
CAN_95KBPS = 11
CAN_100KBPS = 12
CAN_125KBPS = 13
CAN_200KBPS = 14
CAN_250KBPS = 15
CAN_500KBPS = 16
CAN_666KBPS = 17
CAN_1000KBPS = 18
# state constants for the Receiver thread
RXTX_DISCONN = -1
RXTX_SYNC = 0
RXTX_GO = 1
# constants for CANreplay mode
TIMING_FAST = 0
TIMING_REAL = 1
TIMING_INTERACTIVE = 2
TIMING_SEARCH = 3
# constants for VIEW settings (bit flags, OR-able)
VIEW_ASCII = 1<<0
VIEW_COMPARE = 1<<1
VIEW_BOOKMARKS = 1<<2
VIEW_TS_DELTA = 1<<3
VIEW_ENDSUM = 1<<4
VIEW_ALL = VIEW_ASCII | VIEW_COMPARE | VIEW_BOOKMARKS | VIEW_TS_DELTA | VIEW_ENDSUM
# message id's and metadata (soon to be moved into modules)
# NOTE: all vendor tables below are currently empty placeholders
GM_messages = {
    }
Ford_messages = {
    }
Chrysler_messages = {
    }
Toyota_messages = {
    }
Honda_messages = {
    }
VW_messages = {
    }
Nissan_messages = {
    }
Mitsubishi_messages = {
    }
Hyundai_messages = {
    }
Kia_messages = {
    }
Suzuki_messages = {
    }
Harley_messages = {
    }
# helper functions for printing log messages from the CanCat Transceiver
def handleLogToScreen(message, canbuf):
    """Default CMD_LOG handler: echo the raw log payload to stdout.

    *canbuf* (the CanInterface) is unused; kept for the handler signature.
    """
    print('LOG: ' + repr(message))
def handleLogHexToScreen(message, canbuf):
    """Default CMD_LOG_HEX handler: print a little-endian u32 payload as hex.

    *canbuf* (the CanInterface) is unused; kept for the handler signature.
    """
    (value,) = struct.unpack("<L", message)
    print('LOG: %x' % value)
def handleCanMsgsDuringSniff(message, canbuf, arbids=None):
    # CMD_CAN_RECV handler used while sniffing: file the message into the
    # interface's mailbox, then echo it, optionally filtered to *arbids*.
    idx, ts = canbuf._submitMessage(CMD_CAN_RECV, message)
    # NOTE(review): the stored timestamp returned above is discarded and
    # replaced with "now" for display -- confirm this is intentional.
    ts = time.time()
    arbid, data = canbuf._splitCanMsg(message)
    if arbids:
        if arbid in arbids:
            print reprCanMsg(idx, ts, arbid, data)
    else:
        print reprCanMsg(idx, ts, arbid, data)
# Handlers installed on every CanInterface unless the caller supplies its
# own dict: transceiver log messages go straight to the screen.
default_cmdhandlers = {
    CMD_LOG : handleLogToScreen,
    CMD_LOG_HEX: handleLogHexToScreen,
    }
def loadCanBuffer(filename):
    """Load a pickled CAN analysis session from *filename*.

    Uses open() in a context manager instead of the py2-only bare
    file() builtin, so the handle is always closed (and binary mode
    keeps pickle correct on py3 as well).
    """
    with open(filename, 'rb') as f:
        return pickle.load(f)
def keystop(delay=0):
    """Return truthy when input is waiting on stdin (poll for *delay* secs).

    POSIX: select() on stdin.  Windows: poll msvcrt.kbhit(); msvcrt is
    imported lazily here because it only exists on Windows and was
    previously referenced without ever being imported (NameError).
    """
    if os.name == 'posix':
        return len(select.select([sys.stdin],[],[],delay)[0])
    else:
        import msvcrt
        return msvcrt.kbhit()
class SPECIAL_CASE(object):
    # Out-of-band marker type: guaranteed distinct from any real message
    # value.
    pass
# Sentinel -- presumably returned by a handler to suppress printing of a
# message; TODO(review): confirm against the consumers of handler results.
DONT_PRINT_THIS_MESSAGE = SPECIAL_CASE
class CanInterface(object):
def __init__(self, port=serialdev, baud=baud, verbose=False, cmdhandlers=None, comment='', load_filename=None, orig_iface=None, max_msgs=None):
    '''
    CAN Analysis Workspace
    This can be subclassed by vendor to allow more vendor-specific code
    based on the way each vendor uses the varios Buses

    port          - serial device of the CanCat transceiver (guessed via
                    getDeviceFile() when omitted and no file is loaded)
    cmdhandlers   - cmd-byte -> callable(message, canbuf) map; defaults
                    to the module-level log handlers
    load_filename - previously saved session to restore instead of (or
                    in addition to) a live connection
    orig_iface    - adopt the full state of an existing interface
                    instead of initializing fresh
    '''
    if orig_iface != None:
        # take over another interface's state wholesale and bail out
        self._consumeInterface(orig_iface)
        return
    self._go = False
    self._inbuf = ''
    self._trash = []
    self._messages = {}
    self._msg_events = {}
    self._queuelock = threading.Lock()
    self._max_msgs = max_msgs
    self._shutdown = False
    self.verbose = verbose
    self.port = port
    self._baud = baud
    self._io = None
    self._in_lock = None
    self._out_lock = None
    self.name = port
    self._commsthread = None
    self._last_can_msg = None
    self.bookmarks = []
    self.bookmark_info = {}
    self.comments = []
    if cmdhandlers == None:
        # NOTE(review): this shares the module-level dict; register_handler
        # on one default-configured instance mutates it for all of them.
        cmdhandlers = default_cmdhandlers
    self._cmdhandlers = cmdhandlers
    if load_filename != None:
        self.loadFromFile(load_filename)
    # If we specify a file and no port, assume we just want to read the file, only try to guess
    # ports if there is no file specified
    if self.port == None and load_filename == None:
        self.port = getDeviceFile()
    # No filename, can't guess the port, whatcha gonna do?
    if self.port == None and load_filename == None:
        raise Exception("Cannot find device, and no filename specified. Please try again.")
    if self.port != None:
        self._reconnect()
        self._startRxThread()
def _startRxThread(self):
    """Spawn the daemonized receiver thread running _rxtx()."""
    self._go = True
    rxthread = threading.Thread(target=self._rxtx)
    rxthread.setDaemon(True)
    self._commsthread = rxthread
    rxthread.start()
def register_handler(self, cmd, handler):
    # Install handler(message, canbuf) as the callback for command *cmd*;
    # messages with a handler bypass the mailbox in _rxtx().
    self._cmdhandlers[cmd] = handler
def remove_handler(self, cmd):
    # Disable the handler for *cmd* (set to None rather than deleted, so
    # _rxtx's .get() falls back to mailbox delivery).
    self._cmdhandlers[cmd] = None
def _consumeInterface(self, other):
    # Adopt another CanInterface's entire state (the orig_iface
    # constructor path): stop its receiver loop, copy every attribute
    # over, then restart reception here if it had a comms thread.
    other._go = False
    for k,v in vars(other).items():
        setattr(self, k, v)
    if other._commsthread != None:
        self._startRxThread()
def _reconnect(self, port=None, baud=None):
    '''
    Attempt to connect/reconnect to the CanCat Transceiver

    NOTE(review): the port/baud parameters are accepted but the open
    below always uses self.port/self._baud -- confirm intended.
    '''
    if self.port == None and port == None:
        print "cannot connect to an unspecified port"
        return
    if self._io != None:
        self._io.close()
    self._io = serial.Serial(port=self.port, baudrate=self._baud, dsrdtr=True)
    self._io.setDTR(True)
    # clear all locks and free anything waiting for them
    if self._in_lock != None:
        while self._in_lock.locked_lock():
            self._in_lock.release()
            time.sleep(.01)
    self._in_lock = threading.Lock()
    if self._out_lock != None:
        while self._out_lock.locked_lock():
            self._out_lock.release()
            time.sleep(.01)
    self._out_lock = threading.Lock()
    # give the device a moment to settle after DTR toggle
    time.sleep(1)
    return self._io
def __del__(self):
'''
Destructor, called when the CanInterface object is being garbage collected
'''
if self._io and isinstance(self._io, serial.Serial):
print "shutting down serial connection"
self._io.close()
self._shutdown = True
if self._commsthread != None:
self._commsthread.wait()
def clearCanMsgs(self):
    '''
    Drain and return every CAN message captured so far, giving a fresh
    analysis session without creating a new object/connection.
    '''
    drained = self.recvall(CMD_CAN_RECV)
    # bookmarks index into the message list we just emptied, so they
    # are meaningless now -- reset them too
    self.bookmarks = []
    self.bookmark_info = {}
    return drained
def _rxtx(self):
    '''
    Receiver thread runner. Internal use only.
    Processes data from the CanCat transceiver, parses and places messages
    into correct mailboxes and/or hands off to pre-configured handlers.

    State machine: RXTX_SYNC (hunt for the '@' frame marker) ->
    RXTX_GO (consume one '@<size><cmd><payload>' frame) ->
    RXTX_DISCONN (serial dropped; try to reconnect).
    '''
    self._rxtx_state = RXTX_SYNC
    while not self._shutdown:
        try:
            if not self._go:
                time.sleep(.04)
                continue
            if self.verbose > 4:
                if self.verbose > 5:
                    print "STATE: %s" % self._rxtx_state
                else:
                    sys.stderr.write('.')
            # try to reconnect to disconnected unit (FIXME: not working right yet)
            if self._rxtx_state == RXTX_DISCONN:
                print "FIXME: reconnect disconnected serial port..."
                time.sleep(1)
                self._reconnect()
                self._rxtx_state = RXTX_SYNC
                continue
            # fill the queue ##########################################
            self._in_lock.acquire()
            try:
                char = self._io.read()
            except serial.serialutil.SerialException, e:
                self.errorcode = e
                self.log("serial exception")
                if "disconnected" in e.message:
                    self._io.close()
                    self._rxtx_state = RXTX_DISCONN
                continue
            finally:
                if self._in_lock.locked_lock():
                    self._in_lock.release()
            self._inbuf += char
            #self.log("RECV: %s" % repr(self._inbuf), 4)
            ##########################################################
            # FIXME: should we make the rest of this a separate thread, so we're not keeping messages from flowing?
            # ====== it would require more locking/synchronizing...
            # make sure we're synced
            if self._rxtx_state == RXTX_SYNC:
                if self._inbuf[0] != "@":
                    # drop leading garbage up to the next frame marker
                    self._queuelock.acquire()
                    try:
                        idx = self._inbuf.find('@')
                        if idx == -1:
                            self.log("sitting on garbage...", 3)
                            continue
                        trash = self._inbuf[:idx]
                        self._trash.append(trash)
                        self._inbuf = self._inbuf[idx:]
                    finally:
                        self._queuelock.release()
                self._rxtx_state = RXTX_GO
            # handle buffer if we have anything in it
            if self._rxtx_state == RXTX_GO:
                if len(self._inbuf) < 3: continue
                if self._inbuf[0] != '@':
                    self._rxtx_state = RXTX_SYNC
                    continue
                pktlen = ord(self._inbuf[1]) + 2 # <size>, doesn't include "@"
                if len(self._inbuf) >= pktlen:
                    # we have a full frame: pull it off the buffer
                    self._queuelock.acquire()
                    try:
                        cmd = ord(self._inbuf[2]) # first bytes are @<size>
                        message = self._inbuf[3:pktlen]
                        self._inbuf = self._inbuf[pktlen:]
                    finally:
                        self._queuelock.release()
                    #if we have a handler, use it
                    cmdhandler = self._cmdhandlers.get(cmd)
                    if cmdhandler != None:
                        cmdhandler(message, self)
                    # otherwise, file it
                    else:
                        self._submitMessage(cmd, message)
                    self._rxtx_state = RXTX_SYNC
        except:
            # keep the receiver alive no matter what; surface when verbose
            if self.verbose:
                sys.excepthook(*sys.exc_info())
def _submitMessage(self, cmd, message):
    '''
    submits a message to the cmd mailbox. creates mbox if doesn't exist.
    *threadsafe*

    Returns (index of the new message in its mailbox, timestamp).
    '''
    timestamp = time.time()
    self._queuelock.acquire()
    try:
        mbox = self._messages.get(cmd)
        if mbox == None:
            mbox = []
            self._messages[cmd] = mbox
            self._msg_events[cmd] = threading.Event()
        mbox.append((timestamp, message))
        # wake any genCanMsgs() tailer blocked on this mailbox's event
        self._msg_events[cmd].set()
    except Exception, e:
        # NOTE(review): if this ever fires before mbox is bound, the
        # return below raises NameError -- confirm acceptable.
        self.log("_submitMessage: ERROR: %r" % e, -1)
    finally:
        self._queuelock.release()
    return len(mbox)-1, timestamp
def log(self, message, verbose=2):
    '''
    print a log message. Only prints if CanCat's verbose setting >=verbose
    '''
    if self.verbose < verbose:
        return
    print("%.2f %s: %s" % (time.time(), self.name, message))
def recv(self, cmd, wait=None):
    '''
    Warning: Destructive:
    removes a message from a mailbox and returns it.
    For CMD_CAN_RECV mailbox, this will alter analysis results!

    wait is the poll timeout in seconds; None (the default) now means
    "check once without blocking".  Previously the loop condition
    compared a float against None, which was always false on py2 and a
    TypeError on py3, so wait=None callers never even saw a queued
    message.  Returns (timestamp, message) or (None, None).
    '''
    if wait is None:
        wait = 0
    start = time.time()
    while True:
        mbox = self._messages.get(cmd)
        if mbox != None and len(mbox):
            self._queuelock.acquire()
            try:
                timestamp, message = mbox.pop(0)
            finally:
                self._queuelock.release()
            return timestamp, message
        if (time.time() - start) >= wait:
            break
        time.sleep(.01)
    return None, None
def recvall(self, cmd):
    '''
    Warning: Destructive:
    removes ALL messages from a mailbox and returns them.
    For CMD_CAN_RECV mailbox, this is like getting a new
    analysis session
    '''
    mbox = self._messages.get(cmd)
    if mbox is None:
        return []
    with self._queuelock:
        drained = list(mbox)
        self._messages[cmd] = []
    return drained
def _inWaiting(self, cmd):
    '''
    Number of messages currently queued in the *cmd* mailbox
    (0 when the mailbox was never created).
    '''
    mbox = self._messages.get(cmd)
    return 0 if mbox is None else len(mbox)
def _send(self, cmd, message):
    '''
    Frame and ship one command to the CanCat transceiver itself
    (nothing is placed on the CAN bus here).
    '''
    # frame layout: 2-byte big-endian length (payload + 3), command
    # byte, then the payload
    header = struct.pack(">H", len(message) + 3)
    frame = header + chr(cmd) + message
    self.log("XMIT: %s" % repr(frame), 4)
    with self._out_lock:
        self._io.write(frame)
    # FIXME: wait for response?
def CANrecv(self, count=1):
    '''
    Warning: Destructive:
    yields *count* messages popped from the received-CAN mailbox
    (count=-1 means everything currently captured).
    == This will alter analysis results! ==
    '''
    remaining = self.getCanMsgCount() if count == -1 else count
    for _ in range(remaining):
        yield self.recv(CMD_CAN_RECV)
def CANxmit(self, arbid, message, extflag=0, timeout=3, count=1):
    '''
    Transmit a CAN message on the attached CAN bus
    Currently returns the *last* result

    Returns the transceiver's result code (0 == OK), or None when no
    result arrived within *timeout* seconds.
    '''
    # frame: big-endian u32 arbid, extended-id flag byte, data payload
    msg = struct.pack('>I', arbid) + chr(extflag) + message
    for i in range(count):
        self._send(CMD_CAN_SEND, msg)
        ts, result = self.recv(CMD_CAN_SEND_RESULT, timeout)
    if result == None:
        print "CANxmit: Return is None!?"
        return None
    resval = ord(result)
    if resval != 0:
        print "CANxmit() failed: %s" % CAN_RESPS.get(resval)
    return resval
def ISOTPxmit(self, tx_arbid, rx_arbid, message, extflag=0, timeout=3, count=1):
'''
Transmit an ISOTP can message. tx_arbid is the arbid we're transmitting,
and rx_arbid is the arbid we're listening for
'''
msg = struct.pack('>IIB', tx_arbid, rx_arbid, extflag) + message
for i in range(count):
self._send(CMD_CAN_SEND_ISOTP, msg)
ts, result = self.recv(CMD_CAN_SEND_ISOTP_RESULT, timeout)
if result == None:
print "ISOTPxmit: Return is None!?"
resval = ord(result)
if resval != 0:
print "ISOTPxmit() failed: %s" % CAN_RESPS.get(resval)
return resval
def ISOTPrecv(self, tx_arbid, rx_arbid, extflag=0, timeout=3, count=1, start_msg_idx=None):
    '''
    Receives an ISOTP can message. This function just causes
    the hardware to send the appropriate flow control command
    when an ISOTP frame is received from rx_arbid, using
    tx_arbid for the flow control frame. The ISOTP frame
    itself needs to be extracted from the received can messages

    Returns the reassembled message (None on timeout).
    '''
    if start_msg_idx is None:
        start_msg_idx = self.getCanMsgCount()
    # set the CANCat to respond to Flow Control messages
    resval = self._isotp_enable_flowcontrol(tx_arbid, rx_arbid, extflag)
    # this previously called self._getIsoTpMsg(), which does not exist
    # (AttributeError); the reassembler is _isotp_get_msg and it
    # returns a (msg, last_index) pair
    msg, idx = self._isotp_get_msg(rx_arbid, start_index=start_msg_idx, timeout=timeout)
    return msg
def _isotp_enable_flowcontrol(self, tx_arbid, rx_arbid, extflag):
msg = struct.pack('>IIB', tx_arbid, rx_arbid, extflag)
self._send(CMD_CAN_RECV_ISOTP, msg)
ts, result = self.recv(CMD_CAN_RECV_ISOTP_RESULT, timeout)
if result == None:
print "_isotp_enable_flowcontrol: Return is None!?"
resval = ord(result)
if resval != 0:
print "_isotp_enable_flowcontrol() failed: %s" % CAN_RESPS.get(resval)
return resval
def ISOTPxmit_recv(self, tx_arbid, rx_arbid, message, extflag=0, timeout=3, count=1, service=None):
'''
Transmit an ISOTP can message, then wait for a response.
tx_arbid is the arbid we're transmitting, and rx_arbid
is the arbid we're listening for
'''
currIdx = self.getCanMsgCount()
msg = struct.pack('>II', tx_arbid, rx_arbid) + chr(extflag) + message
for i in range(count):
self._send(CMD_CAN_SENDRECV_ISOTP, msg)
ts, result = self.recv(CMD_CAN_SENDRECV_ISOTP_RESULT, timeout)
if result == None:
print "ISOTPxmit: Return is None!?"
resval = ord(result)
if resval != 0:
print "ISOTPxmit() failed: %s" % CAN_RESPS.get(resval)
msg, idx = self._isotp_get_msg(rx_arbid, start_index = currIdx, service = service, timeout = timeout)
return msg, idx
def _isotp_get_msg(self, rx_arbid, start_index=0, service=None, timeout=None):
    '''
    Internal Method to piece together a valid ISO-TP message from received CAN packets.

    Polls the capture buffer from start_index for frames on rx_arbid,
    skipping TesterPresent (0x7e) responses and, when *service* is set,
    frames for other services.  Returns (msg, index of last CAN frame
    used) on success, or (None, start_index) on timeout.
    '''
    # found / msg_found / complete are vestigial; the loop exits via
    # return or via the timeout condition
    found = False
    complete = False
    starttime = lasttime = time.time()
    while not complete and (not timeout or (lasttime-starttime < timeout)):
        time.sleep(0.01)
        msgs = [msg for msg in self.genCanMsgs(start=start_index, arbids=[rx_arbid])]
        if len(msgs):
            try:
                # Check that the message is for the expected service, if specified
                arbid, msg, count = iso_tp.msg_decode(msgs)
                if ord(msg[0]) == 0x7e: # response for TesterPresent... ignore
                    start_index = msgs[count-1][0] + 1
                elif service is not None:
                    # Check if this is the right service, or there was an error
                    if ord(msg[0]) == service or ord(msg[0]) == 0x7f:
                        msg_found = True
                        return msg, msgs[count-1][0]
                    print "Hey, we got here, wrong service code?"
                    start_index = msgs[count-1][0] + 1
                else:
                    msg_found = True
                    return msg, msgs[count-1][0]
            except iso_tp.IncompleteIsoTpMsg, e:
                #print e # debugging only, this is expected
                pass
        lasttime = time.time()
        #print "_isotp_get_msg: status: %r - %r (%r) > %r" % (lasttime, starttime, (lasttime-starttime), timeout)
    if self.verbose:
        print "_isotp_get_msg: Timeout: %r - %r (%r) > %r" % (lasttime, starttime, (lasttime-starttime), timeout)
    return None, start_index
def CANsniff(self, start_msg=None, arbids=None, advfilters=[], maxmsgs=None):
    '''
    Print messages in real time.
    start_msg - first message to print
    (None: the next message captured, 0: first message since starting CanCat)
    arbids - list of arbids to print (others will be ignored)
    advfilters - list of python code to eval for each message (message context provided)
    eg. ['pf==0xeb', 'sa==0', 'ps & 0xf']
    will print TP data message from source address 0 if the top 4 bits of PS
    are set.
    Expressions are evaluated from left to right in a "and" like fashion. If any
    expression evaluates to "False" and the message will be ignored.
    Variables mapped into default namespace:
    'arbid'
    'id'
    'ts'
    'data'
    J1939 adds 'pgn', 'pf', 'ps', 'edp', 'dp', 'sa'
    (this description is true for all advfilters, not specifically CANsniff)

    NOTE(review): advfilters=[] is a shared mutable default; it is only
    passed through here, but callers should not mutate it.
    '''
    count = 0
    msg_gen = self.reprCanMsgsLines(start_msg=start_msg, arbids=arbids, advfilters=advfilters, tail=True)
    while True:
        if maxmsgs != None and maxmsgs < count:
            return
        # .next(): py2 generator protocol (next(msg_gen) on py3)
        line = msg_gen.next()
        print line
        count += 1
        # stop as soon as the user hits a key
        if keystop():
            break
def CANreplay(self, start_bkmk=None, stop_bkmk=None, start_msg=0, stop_msg=None, arbids=None, timing=TIMING_FAST):
    '''
    Replay packets between two bookmarks.
    timing = TIMING_FAST: just slam them down the CAN bus as fast as possible
    timing = TIMING_READ: send the messages using similar timing to how they
    were received
    timing = TIMING_INTERACTIVE: wait for the user to press Enter between each
    message being transmitted
    timing = TIMING_SEARCH: wait for the user to respond (binary search)
    '''
    # bookmarks, when given, override the raw message indexes
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmark(start_bkmk)
    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
    if timing == TIMING_SEARCH:
        # bisect the range; replay the upper half and ask the user
        # whether the effect occurred to home in on one message
        diff = stop_msg - start_msg
        if diff == 1:
            mid_msg = stop_msg
            start_tmp = start_msg
        else:
            mid_msg = int(start_msg + math.floor((stop_msg - start_msg) / 2))
            start_tmp = start_msg
            start_msg = mid_msg
    last_time = -1
    newstamp = time.time()
    for idx,ts,arbid,data in self.genCanMsgs(start_msg, stop_msg, arbids=arbids):
        laststamp = newstamp
        newstamp = time.time()
        # time we spent between iterations, subtracted from replay delays
        delta_correction = newstamp - laststamp
        if timing == TIMING_INTERACTIVE:
            char = raw_input("Transmit this message? %s (Y/n)" % reprCanMsg(idx, ts, arbid, data))
            if char is not None and len(char) > 0 and char[0] == 'n':
                return
        elif timing == TIMING_SEARCH:
            self.CANreplay(start_msg=mid_msg, stop_msg=stop_msg)
            char = raw_input("Expected outcome? start_msg = %s, stop_msg = %s (Y/n/q)" % (mid_msg, stop_msg))
            if char is not None and len(char) > 0 and char[0] == 'q':
                return
            if diff > 1:
                # recurse into whichever half the user indicated
                if char is not None and len(char) > 0 and char[0] == 'y':
                    return self.CANreplay(start_msg=mid_msg, stop_msg=stop_msg, timing=TIMING_SEARCH)
                elif char is not None and len(char) > 0 and char[0] == 'n':
                    return self.CANreplay(start_msg=start_tmp, stop_msg=mid_msg, timing=TIMING_SEARCH)
            else:
                # range narrowed to a single candidate message
                if char is not None and len(char) > 0 and char[0] == 'y':
                    print "Target message: %s" % (stop_msg)
                    return
                elif char is not None and len(char) > 0 and char[0] == 'n':
                    print "Target message: %s" % (start_tmp)
                    return
        elif timing == TIMING_REAL:
            if last_time != -1:
                delta = ts - last_time - delta_correction
                if delta >= 0:
                    time.sleep(delta)
            last_time = ts
        self.CANxmit(arbid, data)
        if timing == TIMING_INTERACTIVE:
            print "Message transmitted"
def setCanBaud(self, baud_const=CAN_500KBPS):
    '''
    set the baud rate for the CAN bus. this has nothing to do with the
    connection from the computer to the tool

    baud_const is one of the CAN_*BPS constants.  Retries until the
    transceiver reports success.
    '''
    self._send(CMD_CAN_BAUD, chr(baud_const))
    response = self.recv(CMD_CAN_BAUD_RESULT, wait=30)
    # '\x01' from the transceiver signals a successful (re)init
    while(response[1] != '\x01'):
        print "CAN INIT FAILED: Retrying"
        response = self.recv(CMD_CAN_BAUD_RESULT, wait=30)
def setCanMode(self, mode):
'''
Sets the desired operation mode. Note that just setting the operational mode
does not change anything on the hardware, after changing the mode you must change
the baud rate in order to properly configure the hardware
'''
CAN_MODES = { v: k for k,v in globals().items() if k.startswith('CMD_CAN_MODE_') and k is not 'CMD_CAN_MODE_RESULT' }
if mode not in CAN_MODES:
print "{} is not a valid can mode. Valid modes are:".format(mode)
for k in CAN_MODES:
print "{} ({})".format(CAN_MODES[k], k)
else:
self._send(CMD_CAN_MODE, chr(mode))
response = self.recv(CMD_CAN_MODE_RESULT, wait=30)
while(response[1] != '\x01'):
print "CAN INIT FAILED: Retrying"
response = self.recv(CMD_CAN_MODE_RESULT, wait=30)
def ping(self, buf='ABCDEFGHIJKL'):
    '''
    Utility function, only to send and receive data from the
    CanCat Transceiver. Has no effect on the CAN bus.
    Returns the (timestamp, payload) response tuple.
    '''
    self._send(CMD_PING, buf)
    return self.recv(CMD_PING_RESPONSE, wait=3)
def genCanMsgs(self, start=0, stop=None, arbids=None, tail=False, maxsecs=None):
    '''
    CAN message generator. takes in start/stop indexes as well as a list
    of desired arbids (list)
    maxsecs limits the number of seconds this generator will go for. it's intended
    for use with tail

    Yields (index, timestamp offset from first capture, arbid, data).
    With tail=True the generator blocks on the mailbox event and keeps
    yielding as new messages arrive.
    '''
    messages = self._messages.get(CMD_CAN_RECV, None)
    # get the ts of the first received message
    if messages != None and len(messages):
        startts = messages[0][0]
    else:
        startts = time.time()
    if start == None:
        start = self.getCanMsgCount()
    if messages == None:
        stop = 0
    elif stop == None or tail:
        stop = len(messages)
    else:
        stop = stop + 1 # This makes the stop index inclusive if specified
    starttime = time.time()
    idx = start
    while tail or idx < stop:
        # obey our time restrictions
        # placed here to ensure checking whether we're receiving messages or not
        if maxsecs != None and time.time() > maxsecs+starttime:
            return
        # If we start sniffing before we receive any messages,
        # messages will be "None". In this case, each time through
        # this loop, check to see if we have messages, and if so,
        # re-create the messages handle
        if messages == None:
            messages = self._messages.get(CMD_CAN_RECV, None)
        # if we're off the end of the original request, and "tailing"
        if messages != None:
            if tail and idx >= stop:
                msglen = len(messages)
                self.log("stop=%d len=%d" % (stop, msglen), 3)
                if stop == msglen:
                    self.log("waiting for messages", 3)
                    # wait for trigger event so we're not constantly polling
                    self._msg_events[CMD_CAN_RECV].wait(1)
                    self._msg_events[CMD_CAN_RECV].clear()
                    self.log("received 'new messages' event trigger", 3)
                # we've gained some messages since last check...
                stop = len(messages)
                continue # to the big message loop.
            # now actually handle messages
            ts, msg = messages[idx]
            # make ts an offset instead of the real time.
            ts -= startts
            arbid, data = self._splitCanMsg(msg)
            if arbids != None and arbid not in arbids:
                # allow filtering of arbids
                idx += 1
                continue
            yield((idx, ts, arbid, data))
            idx += 1
def _splitCanMsg(self, msg):
    '''
    Split a captured message into (arbid, data).

    Does not validate length: msg MUST be at least 4 bytes, since the
    tool always sends a big-endian u32 arbid before the payload.
    '''
    (arbid,) = struct.unpack(">I", msg[:4])
    return arbid, msg[4:]
def getCanMsgCount(self):
    '''
    the number of CAN messages we've received this session
    '''
    return len(self._messages.get(CMD_CAN_RECV, []))
def printSessionStatsByBookmark(self, start=None, stop=None):
    '''
    Display session statistics restricted to the span between two
    bookmarks.
    '''
    stats = self.getSessionStatsByBookmark(start, stop)
    print(stats)
def printSessionStats(self, start=0, stop=None):
    '''
    Display session stats by Arbitration ID (aka WID/PID/CANID/etc...)
    between two message indexes (their positions in the CMD_CAN_RECV
    mailbox).
    '''
    stats = self.getSessionStats(start, stop)
    print(stats)
def getSessionStatsByBookmark(self, start=None, stop=None):
    '''
    Return session stats for the span between two bookmarks.
    Either end may be None: start defaults to the beginning of the
    session, stop to the last message received so far.
    '''
    start_msg = 0 if start is None else self.getMsgIndexFromBookmark(start)
    stop_msg = self.getCanMsgCount() if stop is None else self.getMsgIndexFromBookmark(stop)
    return self.getSessionStats(start=start_msg, stop=stop_msg)
def getArbitrationIds(self, start=0, stop=None, reverse=False):
    '''
    Return a list of (count, arbid, [(ts, data), ...]) tuples, one per
    Arbitration ID seen, sorted by message count (ascending unless
    reverse=True).
    '''
    grouped = {}
    for idx, ts, arbid, data in self.genCanMsgs(start, stop):
        grouped.setdefault(arbid, []).append((ts, data))

    summary = [(len(msgs), arbid, msgs) for arbid, msgs in grouped.items()]
    summary.sort(reverse=reverse)
    return summary
def getSessionStats(self, start=0, stop=None):
    # Build per-arbid inter-message timing statistics (mean/median/high/low
    # deltas) for messages between indexes start and stop, most-seen first.
    out = []
    arbid_list = self.getArbitrationIds(start=start, stop=stop, reverse=True)
    for datalen, arbid, msgs in arbid_list:
        last = 0
        high = 0
        low = 0xffffffff
        for ts, data in msgs:
            # first message only primes 'last'; there is no delta yet
            if last == 0:
                last = ts
                continue

            # calculate the high and low
            delta = ts - last
            if delta > high:
                high = delta
            if delta < low:
                low = delta

            # track repeated values (rounded to nearest .001 sec)
            last = ts

        if datalen > 1:
            # mean of deltas == total time span / (n-1) intervals
            mean = (msgs[-1][0] - msgs[0][0]) / (datalen-1)
            # NOTE(review): this is the midrange of the deltas, not a true
            # statistical median
            median = low + (high-low) / 2
        else:
            # a single message has no intervals; zero everything out
            low = 0
            mean = 0
            median = mean

        out.append("id: 0x%x\tcount: %d\ttiming:: mean: %.3f\tmedian: %.3f\thigh: %.3f\tlow: %.3f" % \
                (arbid, datalen, mean, median, high, low))

    msg_count = self.getCanMsgCount()
    out.append("Total Uniq IDs: %d\nTotal Messages: %d" % (len(arbid_list), msg_count))
    return '\n'.join(out)
def loadFromFile(self, filename, force=False):
    '''
    Load a previous analysis session from a saved file.
    see: saveSessionToFile()

    force is handed through to restoreSession() to allow reloading
    over an active interface.
    '''
    # BUGFIX: open in binary mode and close deterministically.  The old
    # py2-only file(filename) call opened in text mode (corrupting pickles
    # on some platforms) and leaked the handle.
    with open(filename, 'rb') as infile:
        me = pickle.load(infile)
    self.restoreSession(me, force=force)
    self._filename = filename
def restoreSession(self, me, force=False):
    '''
    Load a previous analysis session from a python dictionary object
    see: saveSession()
    '''
    # refuse to clobber a live capture unless explicitly forced
    if isinstance(self._io, serial.Serial) and force==False:
        print("Refusing to reload a session while active session! use 'force=True' option")
        return

    self._messages = me.get('messages')
    self.bookmarks = me.get('bookmarks')
    self.bookmark_info = me.get('bookmark_info')
    self.comments = me.get('comments')

    # recreate the per-command trigger events for the restored mailboxes
    # (Event objects are not part of the pickled session)
    for cmd in self._messages:
        self._msg_events[cmd] = threading.Event()
def saveSessionToFile(self, filename=None):
    '''
    Saves the current analysis session to the filename given.
    If saved previously, the name will already be cached, so it is
    unnecessary to provide it again.

    Raises Exception if no filename is given and none is cached.
    '''
    if filename != None:
        self._filename = filename
    elif self._filename == None:
        raise Exception('Cannot save to file when no filename given (and first time save)')
    else:
        filename = self._filename

    savegame = self.saveSession()
    me = pickle.dumps(savegame)
    # BUGFIX: pickle data is binary, not text; the old file(filename, 'w')
    # used text mode and never guaranteed the handle was closed on error
    with open(filename, 'wb') as outfile:
        outfile.write(me)
def saveSession(self):
    '''
    Snapshot the current analysis session as a plain dictionary.
    What you do with it from there is your own business.
    This function is called by saveSessionToFile() to get the data
    to save to the file.
    '''
    return {
        'messages': self._messages,
        'bookmarks': self.bookmarks,
        'bookmark_info': self.bookmark_info,
        'comments': self.comments,
    }
# bookmark subsystem
def placeCanBookmark(self, name=None, comment=None):
    '''
    Save a named bookmark (with optional comment).
    This stores the message index number from the
    CMD_CAN_RECV mailbox.

    DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
    '''
    mbox = self._messages.get(CMD_CAN_RECV)
    # no mailbox yet means no messages received: bookmark index 0
    msg_index = 0 if mbox is None else len(mbox)

    bkmk_index = len(self.bookmarks)
    self.bookmarks.append(msg_index)
    # keyed by bookmark index (should this be msg_index? benefit either way?)
    self.bookmark_info[bkmk_index] = {'name': name, 'comment': comment}
    return bkmk_index
def getMsgIndexFromBookmark(self, bkmk_index):
    # map a bookmark index to the CMD_CAN_RECV message index it marks
    return self.bookmarks[bkmk_index]
def getBookmarkFromMsgIndex(self, msg_index):
    # reverse lookup: index of the first bookmark placed at msg_index
    # (raises ValueError if no bookmark marks that message index)
    return self.bookmarks.index(msg_index)
def setCanBookmarkName(self, bkmk_index, name):
    '''Rename the bookmark at bkmk_index.'''
    # BUGFIX: was info[name] = name, which created a bogus key keyed by the
    # new name instead of updating the 'name' field
    info = self.bookmark_info[bkmk_index]
    info['name'] = name
def setCanBookmarkComment(self, bkmk_index, comment):
    '''Set the comment on the bookmark at bkmk_index.'''
    # BUGFIX: was info[name] = name, which raised NameError ('name' is not a
    # parameter here) and never stored the comment
    info = self.bookmark_info[bkmk_index]
    info['comment'] = comment
def setCanBookmarkNameByMsgIndex(self, msg_index, name):
    '''Rename the bookmark that marks message index msg_index.'''
    bkmk_index = self.bookmarks.index(msg_index)
    # BUGFIX: was info[name] = name; now updates the 'name' field
    info = self.bookmark_info[bkmk_index]
    info['name'] = name
def setCanBookmarkCommentByMsgIndex(self, msg_index, comment):
    '''Set the comment on the bookmark that marks message index msg_index.'''
    bkmk_index = self.bookmarks.index(msg_index)
    # BUGFIX: was info[name] = name (NameError, and the comment argument was
    # silently ignored); now stores the comment under the 'comment' field
    info = self.bookmark_info[bkmk_index]
    info['comment'] = comment
def snapshotCanMessages(self, name=None, comment=None):
    '''
    Save bookmarks at the start and end of some event you are about to do
    Bookmarks are named "Start_" + name and "Stop_" + name

    Returns (start_bkmk, stop_bkmk) so callers can use the bookmark
    indexes (previously they were computed and silently discarded).

    DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
    '''
    start_bkmk = self.placeCanBookmark("Start_" + name, comment)
    # blocks until the operator signals the event is finished (py2 raw_input)
    raw_input("Press Enter When Done...")
    stop_bkmk = self.placeCanBookmark("Stop_" + name, comment)
    return start_bkmk, stop_bkmk
def filterCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
                arbids=None, ignore=[], advfilters=[]):
    '''
    Bookmark-based wrapper around filterCanMsgs().  Each bookmark is
    resolved to its message index; None means start/end of session.
    '''
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmark(start_bkmk)
    else:
        start_msg = 0

    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
    else:
        # BUGFIX: previously assigned stop_bkmk = -1 here, which left
        # stop_msg unbound and raised NameError whenever stop_bkmk was None
        stop_msg = None

    if start_baseline_bkmk != None:
        start_baseline_msg = self.getMsgIndexFromBookmark(start_baseline_bkmk)
    else:
        start_baseline_msg = None

    if stop_baseline_bkmk != None:
        stop_baseline_msg = self.getMsgIndexFromBookmark(stop_baseline_bkmk)
    else:
        stop_baseline_msg = None

    return self.filterCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def _getLocals(self, idx, ts, arbid, data):
return {'idx':idx, 'ts':ts, 'arbid':arbid, 'data':data}
def filterCanMsgs(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], tail=False, maxsecs=None):
    '''
    returns the received CAN messages between indexes "start_msg" and "stop_msg"
    but only messages to ID's that *do not* appear in the the baseline indicated
    by "start_baseline_msg" and "stop_baseline_msg".

    for message indexes, you *will* want to look into the bookmarking subsystem!
    '''
    self.log("starting filtering messages...")
    if stop_baseline_msg != None:
        self.log("ignoring arbids from baseline...")
        # get a list of baseline arbids
        filter_ids = { arbid:1 for idx,ts,arbid,data in self.genCanMsgs(start_baseline_msg, stop_baseline_msg)
                }.keys()
    else:
        filter_ids = None
    self.log("filtering messages...")

    # accept a single arbid as shorthand for a one-element list
    if arbids != None and type(arbids) != list:
        arbids = [arbids]

    for idx,ts,arbid,msg in self.genCanMsgs(start_msg, stop_msg, arbids=arbids, tail=tail, maxsecs=maxsecs):
        # keep a message if it's explicitly requested via arbids, or if it is
        # neither in the ignore list nor present in the baseline
        if not ((arbids != None and arbid in arbids) or arbid not in ignore and (filter_ids==None or arbid not in filter_ids)):
            self.log("skipping message: (%r, %r, %r, %r)" % ((idx, ts, arbid, msg)))
            continue

        # advanced filters allow python code to be handed in. if any of the python code snippits result in "False" or 0, skip this message
        # SECURITY NOTE(review): advfilters strings are eval()'d verbatim --
        # never pass untrusted input here
        skip = False
        for advf in advfilters:
            lcls = self._getLocals(idx, ts, arbid, msg)
            if not eval(advf, lcls):
                skip = True
        if skip:
            self.log("skipping message(adv): (%r, %r, %r, %r)" % ((idx, ts, arbid, msg)))
            continue

        yield (idx, ts, arbid, msg)
def printCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
                arbids=None, ignore=[], advfilters=[]):
    '''
    deprecated: use printCanMsgs(start_bkmk=foo, stop_bkmk=bar)
    '''
    # py2 print of the bookmark-resolved representation
    print self.reprCanMsgsByBookmark(start_bkmk, stop_bkmk, start_baseline_bkmk, stop_baseline_bkmk, arbids, ignore, advfilters)
def reprCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None, arbids=None, ignore=[], advfilters=[]):
    '''
    deprecated: use reprCanMsgs(start_bkmk=foo, stop_bkmk=bar)

    Resolves each bookmark to its message index (None means start/end of
    session) and delegates to reprCanMsgs().
    '''
    # (removed dead `out = []` local)
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmark(start_bkmk)
    else:
        start_msg = 0

    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
    else:
        # BUGFIX: previously assigned stop_bkmk = -1 here, which left
        # stop_msg unbound and raised NameError whenever stop_bkmk was None
        stop_msg = None

    if start_baseline_bkmk != None:
        start_baseline_msg = self.getMsgIndexFromBookmark(start_baseline_bkmk)
    else:
        start_baseline_msg = None

    if stop_baseline_bkmk != None:
        stop_baseline_msg = self.getMsgIndexFromBookmark(stop_baseline_bkmk)
    else:
        stop_baseline_msg = None

    return self.reprCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def printCanMsgs(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, paginate=None, viewbits=VIEW_ALL):
    # Stream reprCanMsgsLines() to stdout, optionally pausing every
    # `paginate` printed lines until the operator presses Enter.
    data = self.reprCanMsgsLines(start_msg, stop_msg, start_bkmk, stop_bkmk, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters, pretty, viewbits=viewbits)
    pidx = 0
    try:
        while True:
            # py2 generator protocol (.next()); exhaustion raises StopIteration
            line = data.next()
            lines = line.split('\n')
            for thing in lines:
                print thing
                pidx += 1
                if paginate != None and pidx % paginate == 0:
                    inp = raw_input("PRESS ENTER TO CONTINUE")
    except StopIteration:
        pass
def reprCanMsgsLines(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, tail=False, viewbits=VIEW_ALL):
    # FIXME: make different stats selectable using a bitfield arg (eg. REPR_TIME_DELTA | REPR_ASCII)
    '''
    String representation of a set of CAN Messages.
    These can be filtered by start and stop message indexes, as well as
    use a baseline (defined by start/stop message indexes),
    by a list of "desired" arbids as well as a list of
    ignored arbids

    Many functions wrap this one.

    viewbits is a bitfield made up of VIEW_* options OR'd together:
        ... viewbits=VIEW_ASCII|VIEW_COMPARE)

    Yields one string per output line (a generator, so huge sessions
    can be streamed).
    '''
    # bookmarks, when given, override the raw message indexes
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmark(start_bkmk)
    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)

    if (viewbits & VIEW_BOOKMARKS) and start_msg in self.bookmarks:
        bkmk = self.bookmarks.index(start_msg)
        yield ("starting from bookmark %d: '%s'" %
                (bkmk,
                self.bookmark_info[bkmk].get('name'))
                )

    if (viewbits & VIEW_BOOKMARKS) and stop_msg in self.bookmarks:
        bkmk = self.bookmarks.index(stop_msg)
        yield ("stoppng at bookmark %d: '%s'" %
                (bkmk,
                self.bookmark_info[bkmk].get('name'))
                )

    # per-stream running state
    last_msg = None
    next_bkmk = 0
    next_bkmk_idx = 0
    msg_count = 0
    last_ts = None
    tot_delta_ts = 0
    counted_msgs = 0    # used for calculating averages, excluding outliers
    data_delta = None
    data_repeat = 0
    data_similar = 0

    for idx, ts, arbid, msg in self.filterCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids=arbids, ignore=ignore, advfilters=advfilters, tail=tail):
        # insert bookmark names/comments in appropriate places
        while next_bkmk_idx < len(self.bookmarks) and idx >= self.bookmarks[next_bkmk_idx]:
            yield (self.reprBookmark(next_bkmk_idx))
            next_bkmk_idx += 1

        msg_count += 1
        diff = []

        # check data
        byte_cnt_diff = 0
        if (viewbits & VIEW_COMPARE) and last_msg != None:
            # only compare payloads of equal length
            if len(last_msg) == len(msg):
                for bidx in range(len(msg)):
                    if last_msg[bidx] != msg[bidx]:
                        byte_cnt_diff += 1

                if byte_cnt_diff == 0:
                    diff.append("REPEAT")
                    data_repeat += 1
                elif byte_cnt_diff <=4:
                    diff.append("Similar")
                    data_similar += 1
                # FIXME: make some better heuristic to identify "out of norm"

        # look for ASCII data (4+ consecutive bytes)
        if (viewbits & VIEW_ASCII) and hasAscii(msg):
            diff.append("ASCII: %s" % repr(msg))

        # calculate timestamp delta and comment if out of whack
        if last_ts == None:
            last_ts = ts

        delta_ts = ts - last_ts
        if counted_msgs:
            avg_delta_ts = tot_delta_ts / counted_msgs
        else:
            avg_delta_ts = delta_ts

        # fold "typical" deltas into the running average; flag outliers
        if abs(delta_ts - avg_delta_ts) <= delta_ts:
            tot_delta_ts += delta_ts
            counted_msgs += 1
        elif (viewbits & VIEW_TS_DELTA):
            diff.append("TS_delta: %.3f" % delta_ts)

        if pretty:
            # visually separate bursts with a blank line after long gaps
            if delta_ts >= .95:
                yield ('')

        msgrepr = self._reprCanMsg(idx, ts, arbid, msg, comment='\t'.join(diff))
        # allow _reprCanMsg to return None to skip printing the message
        if msgrepr != DONT_PRINT_THIS_MESSAGE:
            yield msgrepr

        last_ts = ts
        last_msg = msg

    if viewbits & VIEW_ENDSUM:
        yield ("Total Messages: %d (repeat: %d / similar: %d)" % (msg_count, data_repeat, data_similar))
def reprCanMsgs(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, tail=False, viewbits=VIEW_ALL):
    '''
    Join the line generator reprCanMsgsLines() into a single
    newline-separated string.
    '''
    lines = self.reprCanMsgsLines(start_msg, stop_msg, start_bkmk, stop_bkmk, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters, pretty, tail, viewbits)
    return "\n".join(lines)
def _reprCanMsg(self, idx, ts, arbid, msg, comment=None):
    # subclass hook: override to customize per-message rendering, or return
    # DONT_PRINT_THIS_MESSAGE to suppress a message entirely
    return reprCanMsg(idx, ts, arbid, msg, comment=comment)
def printCanSessions(self, arbid_list=None, advfilters=[]):
    '''
    Split CAN messages into Arbitration ID's and prints entire
    sessions for each CAN id.
    Defaults to printing by least number of messages, including all IDs
    Or... provide your own list of ArbIDs in whatever order you like
    '''
    if arbid_list == None:
        arbids = self.getArbitrationIds()
    else:
        # keep only the requested arbids (arbdata is (count, arbid, msgs))
        arbids = [arbdata for arbdata in self.getArbitrationIds() if arbdata[1] in arbid_list]

    for datalen,arbid,msgs in arbids:
        print self.reprCanMsgs(arbids=[arbid], advfilters=advfilters)
        # interactive menu (py2 raw_input): replay this arbid's traffic at
        # various timings, skip to the next arbid, or quit
        cmd = raw_input("\n[N]ext, R)eplay, F)astReplay, I)nteractiveReplay, S)earchReplay, Q)uit: ").upper()
        while len(cmd) and cmd != 'N':
            if cmd == 'R':
                self.CANreplay(arbids=[arbid], timing=TIMING_REAL)
            elif cmd == 'F':
                self.CANreplay(arbids=[arbid], timing=TIMING_FAST)
            elif cmd == 'I':
                self.CANreplay(arbids=[arbid], timing=TIMING_INTERACTIVE)
            elif cmd == 'S':
                self.CANreplay(arbids=[arbid], timing=TIMING_SEARCH)
            elif cmd == 'Q':
                return
            cmd = raw_input("\n[N]ext, R)eplay, F)astReplay, I)nteractiveReplay, S)earchReplay, Q)uit: ").upper()
        print
def printBookmarks(self):
    '''
    Print out the list of current Bookmarks and where they sit
    '''
    print(self.reprBookmarks())
def printAsciiStrings(self, minbytes=4, strict=True):
    '''
    Search through messages looking for ASCII strings
    '''
    # prints every captured message whose payload passes hasAscii(),
    # appending the payload repr as the comment column
    for idx, ts, arbid, msg in self.genCanMsgs():
        if hasAscii(msg, minbytes=minbytes, strict=strict):
            print reprCanMsg(idx, ts, arbid, msg, repr(msg))
def reprBookmarks(self):
    '''
    Return the string representation of every bookmark, one per line.
    '''
    return '\n'.join(self.reprBookmark(bid) for bid in range(len(self.bookmarks)))
def reprBookmark(self, bid):
    '''
    Return the string representation of a single bookmark; the comment
    column is appended only when a comment was set.
    '''
    msgidx = self.bookmarks[bid]
    info = self.bookmark_info.get(bid)
    base = "bkmkidx: %d\tmsgidx: %d\tbkmk: %s" % (bid, msgidx, info.get('name'))
    if info.get('comment') == None:
        return base
    return base + " \tcomment: %s" % info.get('comment')
def setMaskAndFilter(self,
        mask0=0,
        mask1=0,
        filter0=0,
        filter1=0,
        filter2=0,
        filter3=0,
        filter4=0,
        filter5=0):
    '''
    Set the filters and masks. The mask determines which bits matter for the filter following the
    below truth table:
     _____________________________________________________________________________
    | Mask Bit n | Filter Bit n | Arbitration ID bit n | Accept or Reject |
    |      0         |        X          |              X                |      Accept       |
    |      1         |        0          |              0                |      Accept       |
    |      1         |        0          |              1                |      Reject       |
    |      1         |        1          |              0                |      Reject       |
    |      1         |        1          |              1                |      Accept       |
     -----------------------------------------------------------------------------

    There are two RX buffers. mask0 and filters 0 and 1 apply to buffer 0. mask1 and the other four filters
    apply to buffer 1.
    '''
    # pack all eight 32-bit values big-endian, in the order listed above
    msg = struct.pack('>IIIIIIII', mask0, mask1, filter0, filter1, filter2, filter3, filter4, filter5)
    return self._send(CMD_SET_FILT_MASK, msg)
def clearMaskAndFilter(self):
    '''
    Clears all masks and filters
    '''
    # eight zeroed 32-bit fields: both masks plus all six filters
    zeroed = struct.pack('>8I', 0, 0, 0, 0, 0, 0, 0, 0)
    return self._send(CMD_SET_FILT_MASK, zeroed)
def _test_throughput(self):
'''
Use in conjuction with the M2_TEST_FW to test throughput
Connect one CanCat up to another M2 or Arduino DUE device runing the M2_TEST_FW firmware
and run this function to perform a throughput test. No other device should be connected
to allow the test to run unimpeded by other CAN traffic.
'''
self.clearCanMsgs()
self.CANxmit(0x0010, "TEST")
for i in range(6, 3, -1):
print "Time remaining: ", i*10, " seconds"
time.sleep(10)
self.CANxmit(0x810, "TEST", extflag=True)
for i in range(3, 0, -1):
print "Time remaining: ", i*10, " seconds"
time.sleep(10)
out_of_order_count = 0
msg_count = 0
prev_val = 0xFF
for foo in self.genCanMsgs(arbids=[0x00]):
msg_count += 1
prev_val += 1
if prev_val > 0xff:
prev_val = 0
if prev_val != ord(foo[3]):
out_of_order_count += 1
prev_val = ord(foo[3])
if (out_of_order_count > 0):
print "ERROR: 11 bit IDs, 1 byte messages, ", out_of_order_count, " Messages received out of order"
elif (msg_count != 181810):
print "ERROR: Received ", msg_count, " out of expected 181810 message"
else:
print "PASS: 11 bit IDs, 1 byte messages"
out_of_order_count = 0
msg_count = 0
prev_val = 0xFF
for foo in self.genCanMsgs(arbids=[0x01]):
msg_count += 1
prev_val += 1
if prev_val > 0xff:
prev_val = 0
if prev_val != ord(foo[3][0]):
out_of_order_count += 1
prev_val = ord(foo[3][0])
if (out_of_order_count > 0):
print "ERROR: 11 bit IDs, 8 byte messages, ", out_of_order_count, " Messages received out of order"
elif (msg_count != 90090):
print "ERROR: Received ", msg_count, " out of expected 90090 message"
else:
print "PASS: 11 bit IDs, 8 byte messages"
out_of_order_count = 0
msg_count = 0
prev_val = 0xFF
for foo in self.genCanMsgs(arbids=[0x800]):
msg_count += 1
prev_val += 1
if prev_val > 0xff:
prev_val = 0
if prev_val != ord(foo[3]):
out_of_order_count += 1
prev_val = ord(foo[3])
if (out_of_order_count > 0):
print "ERROR: 29 bit IDs, 1 byte messages, ", out_of_order_count, " Messages received out of order"
elif (msg_count != 133330):
print "ERROR: Received ", msg_count, " out of expected 133330 message"
else:
print "PASS: 29 bit IDs, 1 byte messages"
out_of_order_count = 0
msg_count = 0
prev_val = 0xFF
for foo in self.genCanMsgs(arbids=[0x801]):
msg_count += 1
prev_val += 1
if prev_val > 0xff:
prev_val = 0
if prev_val != ord(foo[3][0]):
out_of_order_count += 1
prev_val = ord(foo[3][0])
if (out_of_order_count > 0):
print "ERROR: 29 bit IDs, 8 byte messages, ", out_of_order_count, " Messages received out of order"
elif (msg_count != 76330):
print "ERROR: Received ", msg_count, " out of expected 76330 message"
else:
print "PASS: 29 bit IDs, 8 byte messages"
def _printCanRegs(self):
    # debug helper: ask the firmware to dump its CAN controller registers
    self._send(CMD_PRINT_CAN_REGS, "")
class CanControl(cmd.Cmd):
    '''
    Command User Interface (as if ipython wasn't enough!)
    '''
    def __init__(self, serialdev=serialdev, baud=baud):
        cmd.Cmd.__init__(self)
        self.serialdev = serialdev
        # BUGFIX: self._baud was read below but never assigned, so
        # constructing CanControl raised AttributeError
        self._baud = baud
        self.canbuf = CanBuffer(self.serialdev, self._baud)
def getAscii(msg, minbytes=3):
    '''
    Return a list of printable-ASCII substrings of msg that are at
    least `minbytes` characters long.

    (docstring fixed: the old text described a `strict` mode this
    function never had -- that parameter belongs to hasAscii())
    '''
    # (removed unused `ascii_match` local)
    strings = []
    ascii_count = 0
    startidx = None

    for bidx in range(len(msg)):
        byte = msg[bidx]
        if 0x20 <= ord(byte) < 0x7f:
            # start of (or continuation of) a printable run
            if startidx == None:
                startidx = bidx
            ascii_count += 1
        else:
            # non printable char: flush the current run if long enough
            if ascii_count >= minbytes:
                strings.append(msg[startidx:bidx])
            # reset counters on every non-printable byte so short runs
            # don't bleed into later ones
            ascii_count = 0
            startidx = None

    # in case we have a string all the way to the end
    if ascii_count >= minbytes:
        strings.append(msg[startidx:])

    return strings
def hasAscii(msg, minbytes=3, strict=False):
    '''
    Return 1 if msg contains a run of at least `minbytes` consecutive
    printable ASCII characters, else 0.

    If strict, the entire message must be printable ASCII: the first
    non-printable byte returns 0 immediately.

    (docstring fixed: the old text described a "minbytes == -1" mode
    that was never implemented)
    '''
    ascii_match = 0
    ascii_count = 0
    for byte in msg:
        if 0x20 <= ord(byte) < 0x7f:
            ascii_count += 1
            if ascii_count >= minbytes:
                ascii_match = 1
        else:
            if strict:
                return 0
            # reset the run on any non-printable byte
            ascii_count = 0
    return ascii_match
def reprCanMsg(idx, ts, arbid, data, comment=None):
    #TODO: make some repr magic that spits out known ARBID's and other subdata
    # One-line rendering: message index, timestamp, arbid, payload length
    # and hex payload.  NOTE: str.encode('hex') is py2-only.
    if comment == None:
        comment = ''
    return "%.8d %8.3f ID: %.3x, Len: %.2x, Data: %-18s\t%s" % (idx, ts, arbid, len(data), data.encode('hex'), comment)
class FordInterface(CanInterface):
    # Ford-specific convenience wrappers: preset baud rates for the CAN
    # buses found on Ford vehicles.
    def setCanBaudHSCAN(self):
        # High Speed CAN - 500kbps
        self.setCanBaud(CAN_500KBPS)
    def setCanBaudMSCAN(self):
        # Medium Speed CAN - 125kbps
        self.setCanBaud(CAN_125KBPS)
    def setCanBaudICAN(self):
        # I-CAN - 500kbps
        self.setCanBaud(CAN_500KBPS)
class GMInterface(CanInterface):
    '''
    DLC port:
        SW-LS-CAN   - pin 1                 33kbps
        MS-CAN      - pins 3+ and 11-       95kbps
        DW-FT-CAN   - pins 1+ and 9-        <125kbps
        HS-CAN      - pins 6+ and 14-       500kbps
    '''
    # GM-specific convenience wrappers for the bus speeds listed above
    def setCanBaudHSCAN(self):
        # High Speed CAN - 500kbps (DLC pins 6+/14-)
        self.setCanBaud(CAN_500KBPS)
    def setCanBaudMSCAN(self):
        # Medium Speed CAN - 95kbps (DLC pins 3+/11-)
        self.setCanBaud(CAN_95KBPS)
    def setCanBaudLSCAN(self):
        # Single-Wire Low Speed CAN - 33kbps (DLC pin 1)
        self.setCanBaud(CAN_33KBPS)
class CanInTheMiddleInterface(CanInterface):
def __init__(self, port=serialdev, baud=baud, verbose=False, cmdhandlers=None, comment='', load_filename=None, orig_iface=None):
    '''
    CAN in the middle. Allows the user to determine what CAN messages are being
    sent by a device by isolating a device from the CAN network and using two
    Can shields on one Arduino to relay the CAN messages to each other.

    Device<----->Isolation CanCat<----->Arduino<----->Vehicle CanCat<----->Vehicle
                  CAN      SPI                 |   SPI        CAN
                                               |
                                               | < Serial
                                               PC

    This solves the problem of not being able to determine which device is sending
    which CAN message, since CAN messages have no source information and all messages
    are broadcast.

    The Can shield connected to the device is referred to as the isolation CanCat.
    This CanCat should be modified so that the CS SPI pin is connected to D10, rather
    than the default of D9. This is accomplished by cutting a trace on the circuit
    board and bridging the CS pad to the D10 pad. Seeedstudio has instructions
    on their Wiki, but there shield differed slightly from my board. The CanCat
    connected to the vehicle is referred to as the vehicle CanCat and should be unmodified.
    '''
    # isolation-side bookmark state, parallel to the inherited
    # bookmarks/bookmark_info used for the vehicle side
    self.bookmarks_iso = []
    self.bookmark_info_iso = {}
    CanInterface.__init__(self, port=port, baud=baud, verbose=verbose, cmdhandlers=cmdhandlers, comment=comment, load_filename=load_filename, orig_iface=orig_iface)
    # only switch the hardware into CITM mode for a live session,
    # not when replaying a saved file
    if load_filename is None:
        self.setCanMode(CMD_CAN_MODE_CITM)
def genCanMsgsIso(self, start=0, stop=None, arbids=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Isolation-side CAN message generator: yields (idx, ts, arbid, data)
    for messages in the CMD_ISO_RECV mailbox between start and stop
    (stop is inclusive), optionally restricted to a list of arbids.
    '''
    messages = self._messages.get(CMD_ISO_RECV, [])
    stop = len(messages) if stop is None else stop + 1
    for idx in xrange(start, stop):
        ts, msg = messages[idx]
        arbid, data = self._splitCanMsg(msg)
        # honor the arbid filter, if one was given
        if arbids is not None and arbid not in arbids:
            continue
        yield (idx, ts, arbid, data)
def getCanMsgCountIso(self):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Return the number of CAN messages received on the isolation side
    this session (length of the CMD_ISO_RECV mailbox).
    '''
    return len(self._messages.get(CMD_ISO_RECV, []))
def printSessionStatsByBookmarkIso(self, start=None, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Prints session stats only for messages between two bookmarks
    '''
    # py2 print; isolation-side counterpart of printSessionStatsByBookmark()
    print self.getSessionStatsByBookmarkIso(start, stop)
def printSessionStatsIso(self, start=0, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Print session stats by Arbitration ID (aka WID/PID/CANID/etc...)
    between two message indexes (where they sit in the CMD_CAN_RECV
    mailbox)
    '''
    # py2 print; isolation-side counterpart of printSessionStats()
    print self.getSessionStatsIso(start, stop)
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
def getSessionStatsByBookmarkIso(self, start=None, stop=None):
    '''
    Return isolation-side session stats between two bookmarks.  Either
    end may be None: start defaults to the beginning of the session,
    stop to the last isolation message received so far.
    '''
    start_msg = 0 if start is None else self.getMsgIndexFromBookmarkIso(start)
    stop_msg = self.getCanMsgCountIso() if stop is None else self.getMsgIndexFromBookmarkIso(stop)
    return self.getSessionStatsIso(start=start_msg, stop=stop_msg)
def getArbitrationIdsIso(self, start=0, stop=None, reverse=False):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Return a list of (count, arbid, [(ts, data), ...]) tuples for the
    isolation side, sorted by message count (ascending unless
    reverse=True).
    '''
    grouped = {}
    for idx, ts, arbid, data in self.genCanMsgsIso(start, stop):
        grouped.setdefault(arbid, []).append((ts, data))

    summary = [(len(msgs), arbid, msgs) for arbid, msgs in grouped.items()]
    summary.sort(reverse=reverse)
    return summary
def getSessionStatsIso(self, start=0, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    # Isolation-side mirror of getSessionStats(): per-arbid timing stats
    # (mean/median/high/low deltas) between indexes start and stop.
    out = []
    arbid_list = self.getArbitrationIdsIso(start=start, stop=stop, reverse=True)
    for datalen, arbid, msgs in arbid_list:
        last = 0
        high = 0
        low = 0xffffffff
        for ts, data in msgs:
            # first message only primes 'last'; there is no delta yet
            if last == 0:
                last = ts
                continue

            # calculate the high and low
            delta = ts - last
            if delta > high:
                high = delta
            if delta < low:
                low = delta

            # track repeated values (rounded to nearest .001 sec)
            last = ts

        if datalen > 1:
            # mean of deltas == total time span / (n-1) intervals
            mean = (msgs[-1][0] - msgs[0][0]) / (datalen-1)
            # NOTE(review): midrange of the deltas, not a true median
            median = low + (high-low) / 2
        else:
            low = 0
            mean = 0
            median = mean

        out.append("id: 0x%x\tcount: %d\ttiming:: mean: %.3f\tmedian: %.3f\thigh: %.3f\tlow: %.3f" % \
                (arbid, datalen, mean, median, high, low))

    msg_count = self.getCanMsgCountIso()
    out.append("Total Uniq IDs: %d\nTotal Messages: %d" % (len(arbid_list), msg_count))
    return '\n'.join(out)
# bookmark subsystem
def placeCanBookmark(self, name=None, comment=None):
    '''
    Save a named bookmark (with optional comment).
    This stores the message index number from the
    CMD_ISO_RECV mailbox.
    This also places a bookmark in the normal CAN message
    stream.

    DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
    '''
    # isolation-side message index at the time of bookmarking
    mbox = self._messages.get(CMD_ISO_RECV)
    if mbox == None:
        msg_index = 0
    else:
        msg_index = len(mbox)

    bkmk_index = len(self.bookmarks_iso)
    self.bookmarks_iso.append(msg_index)

    info = { 'name' : name,
            'comment' : comment }

    self.bookmark_info_iso[bkmk_index] = info #should this be msg_index? benefit either way?
    # also record the bookmark on the vehicle-side (inherited) stream
    CanInterface.placeCanBookmark(self, name=name, comment=comment)
    return bkmk_index
def getMsgIndexFromBookmarkIso(self, bkmk_index):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    # map an isolation-side bookmark index to its CMD_ISO_RECV message index
    return self.bookmarks_iso[bkmk_index]
def getBookmarkFromMsgIndexIso(self, msg_index):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    # reverse lookup: first isolation-side bookmark placed at msg_index
    # (raises ValueError if no bookmark marks that message index)
    return self.bookmarks_iso.index(msg_index)
def setCanBookmarkNameIso(self, bkmk_index, name):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''Rename the isolation-side bookmark at bkmk_index.'''
    # BUGFIX: was info[name] = name; now updates the 'name' field
    info = self.bookmark_info_iso[bkmk_index]
    info['name'] = name
def setCanBookmarkCommentIso(self, bkmk_index, comment):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''Set the comment on the isolation-side bookmark at bkmk_index.'''
    # BUGFIX: was info[name] = name (NameError, comment ignored);
    # now stores the comment under the 'comment' field
    info = self.bookmark_info_iso[bkmk_index]
    info['comment'] = comment
def setCanBookmarkNameByMsgIndexIso(self, msg_index, name):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''Rename the isolation-side bookmark that marks msg_index.'''
    bkmk_index = self.bookmarks_iso.index(msg_index)
    # BUGFIX: was info[name] = name; now updates the 'name' field
    info = self.bookmark_info_iso[bkmk_index]
    info['name'] = name
def setCanBookmarkCommentByMsgIndexIso(self, msg_index, comment):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''Set the comment on the isolation-side bookmark that marks msg_index.'''
    bkmk_index = self.bookmarks_iso.index(msg_index)
    # BUGFIX: was info[name] = name (NameError, comment ignored);
    # now stores the comment under the 'comment' field
    info = self.bookmark_info_iso[bkmk_index]
    info['comment'] = comment
def snapshotCanMessagesIso(self, name=None, comment=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Save bookmarks at the start and end of some event you are about to do
    Bookmarks are named "Start_" + name and "Stop_" + name

    Returns (start_bkmk, stop_bkmk).

    DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
    '''
    # BUGFIX(review): previously called self.placeCanBookmarkIso(), which is
    # not defined anywhere visible; this class's placeCanBookmark() override
    # already records the isolation-side bookmark.  TODO confirm no
    # placeCanBookmarkIso exists elsewhere in the file.
    start_bkmk = self.placeCanBookmark("Start_" + name, comment)
    raw_input("Press Enter When Done...")
    stop_bkmk = self.placeCanBookmark("Stop_" + name, comment)
    return start_bkmk, stop_bkmk
def filterCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
                arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Bookmark-based wrapper around filterCanMsgsIso().  Each isolation-side
    bookmark is resolved to its message index; None means start/end.
    '''
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmarkIso(start_bkmk)
    else:
        start_msg = 0

    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmarkIso(stop_bkmk)
    else:
        # BUGFIX: previously assigned stop_bkmk = -1 here, which left
        # stop_msg unbound and raised NameError whenever stop_bkmk was None
        stop_msg = None

    if start_baseline_bkmk != None:
        start_baseline_msg = self.getMsgIndexFromBookmarkIso(start_baseline_bkmk)
    else:
        start_baseline_msg = None

    if stop_baseline_bkmk != None:
        stop_baseline_msg = self.getMsgIndexFromBookmarkIso(stop_baseline_bkmk)
    else:
        stop_baseline_msg = None

    return self.filterCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def filterCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Iso means the second CAN bus (M2's and DUE_CAN models have two CAN interfaces)
    returns the received CAN messages between indexes "start_msg" and "stop_msg"
    but only messages to ID's that *do not* appear in the the baseline indicated
    by "start_baseline_msg" and "stop_baseline_msg".

    for message indexes, you *will* want to look into the bookmarking subsystem!
    '''
    self.log("starting filtering messages...")
    if stop_baseline_msg != None:
        self.log("ignoring arbids from baseline...")
        # BUGFIX: the baseline must come from the isolation-side generator
        # (genCanMsgsIso), and its items are 4-tuples (idx, ts, arbid, data);
        # the old code used genCanMsgs() and unpacked 3-tuples (ValueError)
        filter_ids = { arbid:1 for idx, ts, arbid, data in self.genCanMsgsIso(start_baseline_msg, stop_baseline_msg) }.keys()
    else:
        filter_ids = None
    self.log("filtering messages...")

    # BUGFIX: only wrap a *single* arbid; the old unconditional wrap turned
    # arbids=None into [None], silently breaking "no filter"
    if arbids != None and type(arbids) != list:
        arbids = [arbids]

    # BUGFIX: iterate the isolation-side mailbox, not the vehicle side
    for idx, ts, arbid, msg in self.genCanMsgsIso(start_msg, stop_msg, arbids=arbids):
        # keep a message if explicitly requested via arbids, or if it is
        # neither ignored nor present in the baseline
        if not ((arbids != None and arbid in arbids) or arbid not in ignore and (filter_ids==None or arbid not in filter_ids)):
            continue

        # advanced filters allow python code to be handed in. if any of the python code snippits result in "False" or 0, skip this message
        # SECURITY NOTE(review): advfilters strings are eval()'d verbatim
        skip = False
        for advf in advfilters:
            # BUGFIX: was self._locals(), a method that doesn't exist;
            # the inherited helper is _getLocals()
            lcls = self._getLocals(idx, ts, arbid, msg)
            if not eval(advf, lcls):
                skip = True
        if skip:
            continue

        yield (idx, ts, arbid, msg)
def printCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
                arbids=None, ignore=[], advfilters=[]):
    '''
    deprecated: use printCanMsgs(start_bkmk=foo, stop_bkmk=bar)
    '''
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    # py2 print of the isolation-side bookmark-resolved representation
    print self.reprCanMsgsByBookmarkIso(start_bkmk, stop_bkmk, start_baseline_bkmk, stop_baseline_bkmk, arbids, ignore, advfilters)
def reprCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    deprecated: use reprCanMsgs(start_bkmk=foo, stop_bkmk=bar)

    Resolves each isolation-side bookmark to its message index (None
    means start/end of session) and delegates to reprCanMsgsIso().
    '''
    # (removed dead `out = []` local)
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmarkIso(start_bkmk)
    else:
        start_msg = 0

    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmarkIso(stop_bkmk)
    else:
        # BUGFIX: previously assigned stop_bkmk = -1 here, which left
        # stop_msg unbound and raised NameError whenever stop_bkmk was None
        stop_msg = None

    if start_baseline_bkmk != None:
        start_baseline_msg = self.getMsgIndexFromBookmarkIso(start_baseline_bkmk)
    else:
        start_baseline_msg = None

    if stop_baseline_bkmk != None:
        stop_baseline_msg = self.getMsgIndexFromBookmarkIso(stop_baseline_bkmk)
    else:
        stop_baseline_msg = None

    return self.reprCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def printCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    # py2 print of the isolation-side message representation
    print self.reprCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def reprCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    String representation of a set of CAN Messages.
    These can be filtered by start and stop message indexes, as well as
    use a baseline (defined by start/stop message indexes),
    by a list of "desired" arbids as well as a list of
    ignored arbids

    Many functions wrap this one.

    BUGFIX: the last parameter was misspelled "adfilters" while the body
    referenced "advfilters", raising NameError on every call; it is now
    spelled "advfilters", matching the sibling methods.
    '''
    out = []

    # note when the start/stop positions coincide with a bookmark
    if start_msg in self.bookmarks_iso:
        bkmk = self.bookmarks_iso.index(start_msg)
        out.append("starting from bookmark %d: '%s'" %
                   (bkmk,
                    self.bookmark_info_iso[bkmk].get('name'))
                   )

    if stop_msg in self.bookmarks_iso:
        bkmk = self.bookmarks_iso.index(stop_msg)
        out.append("stopping at bookmark %d: '%s'" %   # BUGFIX: typo "stoppng"
                   (bkmk,
                    self.bookmark_info_iso[bkmk].get('name'))
                   )

    last_msg = None
    next_bkmk = 0
    next_bkmk_idx = 0

    msg_count = 0
    last_ts = None
    tot_delta_ts = 0
    counted_msgs = 0    # used for calculating averages, excluding outliers
    data_delta = None

    data_repeat = 0
    data_similar = 0

    for idx, ts, arbid, msg in self.filterCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids=arbids, ignore=ignore, advfilters=advfilters):
        diff = []

        # insert bookmark names/comments in appropriate places
        while next_bkmk_idx < len(self.bookmarks_iso) and idx >= self.bookmarks_iso[next_bkmk_idx]:
            out.append(self.reprBookmarkIso(next_bkmk_idx))
            next_bkmk_idx += 1

        msg_count += 1

        # check data: count how many payload bytes changed vs. the previous message
        byte_cnt_diff = 0
        if last_msg != None:
            if len(last_msg) == len(msg):
                for bidx in range(len(msg)):
                    if last_msg[bidx] != msg[bidx]:
                        byte_cnt_diff += 1

            if byte_cnt_diff == 0:
                diff.append("REPEAT")
                data_repeat += 1
            elif byte_cnt_diff <= 4:
                diff.append("Similar")
                data_similar += 1
            # FIXME: make some better heuristic to identify "out of norm"

        # look for ASCII data (4+ consecutive bytes)
        if hasAscii(msg):
            diff.append("ASCII: %s" % repr(msg))

        # calculate timestamp delta and comment if out of whack
        if last_ts == None:
            last_ts = ts

        delta_ts = ts - last_ts
        if counted_msgs:
            avg_delta_ts = tot_delta_ts / counted_msgs
        else:
            avg_delta_ts = delta_ts

        # only fold "normal" deltas into the running average; flag outliers
        if abs(delta_ts - avg_delta_ts) <= delta_ts:
            tot_delta_ts += delta_ts
            counted_msgs += 1
        else:
            diff.append("TS_delta: %.3f" % delta_ts)

        out.append(reprCanMsg(idx, ts, arbid, msg, comment='\t'.join(diff)))
        last_ts = ts
        last_msg = msg

    out.append("Total Messages: %d (repeat: %d / similar: %d)" % (msg_count, data_repeat, data_similar))

    return "\n".join(out)
def printCanSessionsIso(self, arbid_list=None, advfilters=[]):
    '''
    Split CAN messages into Arbitration ID's and prints entire
    sessions for each CAN id.
    Defaults to printing by least number of messages, including all IDs
    Or... provide your own list of ArbIDs in whatever order you like
    '''
    if arbid_list == None:
        arbids = self.getArbitrationIdsIso()
    else:
        # getArbitrationIdsIso() yields (count, arbid, msgs) tuples; keep
        # only the arbids the caller asked for
        arbids = [arbdata for arbdata in self.getArbitrationIdsIso() if arbdata[1] in arbid_list]
    for datalen,arbid,msgs in arbids:
        print self.reprCanMsgsIso(arbids=[arbid], advfilters=advfilters)
        # pause between sessions (Python 2 raw_input / print statement)
        raw_input("\nPress Enter to review the next Session...")
        print
def printBookmarksIso(self):
    '''
    Print out the list of current Bookmarks and where they sit
    (one line per bookmark, formatted by reprBookmarksIso())
    '''
    print(self.reprBookmarksIso())
def printAsciiStringsIso(self, minbytes=4, strict=True):
    '''
    Search through messages looking for ASCII strings

    minbytes: minimum run of printable bytes for a match
    strict: forwarded to hasAscii()
    '''
    for idx, ts, arbid, msg in self.genCanMsgsIso():
        if hasAscii(msg, minbytes=minbytes, strict=strict):
            # Python 2 print statement; reprCanMsgIso formats one message line
            print reprCanMsgIso(idx, ts, arbid, msg, repr(msg))
def reprBookmarksIso(self):
    '''
    Build one string listing every bookmark, one formatted line each.
    '''
    return '\n'.join(self.reprBookmarkIso(bid)
                     for bid in range(len(self.bookmarks_iso)))
def reprBookmarkIso(self, bid):
    '''
    get a string representation of one bookmark

    BUGFIX: the function previously fetched the bookmark's comment but
    fell off the end (returning None) whenever a comment was present;
    the comment is now appended to the returned line.
    '''
    msgidx = self.bookmarks_iso[bid]
    info = self.bookmark_info_iso.get(bid)
    comment = info.get('comment')
    if comment == None:
        return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s" % (bid, msgidx, info.get('name'))
    return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s \tcomment: %s" % (bid, msgidx, info.get('name'), comment)
def restoreSession(self, me, force=False):
    '''
    Load a previous analysis session from a python dictionary object
    see: saveSession()
    '''
    # Refuse to clobber live state while a serial session is attached,
    # unless the caller explicitly forces the reload.
    if isinstance(self._io, serial.Serial) and force == False:
        print("Refusing to reload a session while active session! use 'force=True' option")
        return

    # Mirror of the mapping written by saveSession(); missing keys load as None.
    for attr, key in (('_messages', 'messages'),
                      ('bookmarks', 'bookmarks'),
                      ('bookmark_info', 'bookmark_info'),
                      ('comments', 'comments'),
                      ('bookmarks_iso', 'bookmarks_iso'),
                      ('bookmark_info_iso', 'bookmark_info_iso')):
        setattr(self, attr, me.get(key))
def saveSession(self):
    '''
    Snapshot the current analysis session as a plain dict.

    Inverse of restoreSession(); saveSessionToFile() calls this to get
    the data it serializes.  What you do with it from there is your own
    business.
    '''
    return {
        'messages': self._messages,
        'bookmarks': self.bookmarks,
        'bookmark_info': self.bookmark_info,
        'bookmarks_iso': self.bookmarks_iso,
        'bookmark_info_iso': self.bookmark_info_iso,
        'comments': self.comments,
    }
######### administrative, supporting code ##########
# interfaces registered for best-effort teardown at interpreter exit
cs = []


def cleanupInteractiveAtExit():
    """atexit hook: explicitly finalize every registered interface.

    Failures are ignored on purpose — at exit there is nothing useful
    left to do with them.
    """
    global cs
    for c in cs:
        try:
            c.__del__()
        except Exception:
            # BUGFIX: was a bare "except:", which would also swallow
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            pass
# candidate device nodes, checked in order (note: '/dev/ttyACM0' appears
# twice in the original list; order and contents preserved)
devlocs = [
    '/dev/ttyACM0',
    '/dev/ttyACM1',
    '/dev/ttyACM2',
    '/dev/tty.usbmodem1411',
    '/dev/tty.usbmodem1421',
    '/dev/tty.usbmodem1431',
    '/dev/ttyACM0',
]


def getDeviceFile():
    """Return the first candidate device node that exists, or None."""
    return next((loc for loc in devlocs if os.path.exists(loc)), None)
def interactive(port=None, InterfaceClass=CanInterface, intro='', load_filename=None, can_baud=None):
    '''
    Start an interactive IPython session with a CAN interface bound to the
    global name "c".  Tries three generations of the IPython embedding API
    (oldest first) and falls back to the stdlib code.InteractiveConsole if
    IPython is unavailable.  (Python 2 syntax: print statements and
    "except ImportError, e".)
    '''
    global c
    import atexit

    c = InterfaceClass(port=port, load_filename=load_filename)
    atexit.register(cleanupInteractiveAtExit)
    if load_filename is None:
        # only touch the hardware when not replaying a saved capture
        if can_baud != None:
            c.setCanBaud(can_baud)
        else:
            c.setCanBaud(CAN_500KBPS)

    gbls = globals()
    lcls = locals()
    try:
        # pre-0.11 IPython embedding API
        import IPython.Shell
        ipsh = IPython.Shell.IPShell(argv=[''], user_ns=lcls, user_global_ns=gbls)
        print intro
        ipsh.mainloop(intro)
    except ImportError, e:
        try:
            # modern IPython layout (1.0+)
            from IPython.terminal.interactiveshell import TerminalInteractiveShell
            from IPython.terminal.ipapp import load_default_config
            ipsh = TerminalInteractiveShell(config=load_default_config())
            ipsh.user_global_ns.update(gbls)
            ipsh.user_global_ns.update(lcls)
            ipsh.autocall = 2       # don't require parenthesis around *everything*.  be smart!
            ipsh.mainloop(intro)
        except ImportError, e:
            try:
                # intermediate IPython layout (0.11-0.13)
                from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
                ipsh = TerminalInteractiveShell()
                ipsh.user_global_ns.update(gbls)
                ipsh.user_global_ns.update(lcls)
                ipsh.autocall = 2       # don't require parenthesis around *everything*.  be smart!
                ipsh.mainloop(intro)
            except ImportError, e:
                # no IPython at all: plain stdlib REPL
                print e
                shell = code.InteractiveConsole(gbls)
                shell.interact(intro)
|
eval_real_robot.py | import argparse
import json
import logging
import os
import threading
import time
from copy import copy
# from concurrent.futures import ThreadPoolExecutor
# from queue import Queue
import numpy as np
import rospy
from matplotlib import pyplot as plt
from sensor_msgs.msg import JointState
from std_msgs.msg import Float32MultiArray
from alg.sac import SAC
from model.mujoco_agent import MujocoAgent
from model.mujoco_model import MujocoModel
from rlschool.quadrupedal.envs.aliengo_robot_env import AliengoRealEnv
from rlschool.quadrupedal.envs.aliengo_robot_env import SENSOR_MODE
from rlschool.quadrupedal.envs.env_wrappers.MonitorEnv import Param_Dict, Random_Param_Dict
from rlschool.quadrupedal.envs.utilities.ETG_model import ETG_layer
from rlschool.quadrupedal.robots import robot_config
# SAC hyperparameters
GAMMA = 0.99   # discount factor
TAU = 0.005    # soft target-network update rate
ALPHA = 0.2  # determines the relative importance of entropy term against the reward
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
ENV_NUMS = 8

# mutable copies so command-line flags / stored configs can override defaults
reward_param = copy(Param_Dict)
sensor_param = copy(SENSOR_MODE)

# map the --act_mode flag onto the robot's motor control mode
# ("traj" also uses position control; trajectory shaping happens upstream)
mode_map = {"pose" : robot_config.MotorControlMode.POSITION,
            "torque": robot_config.MotorControlMode.TORQUE,
            "traj" : robot_config.MotorControlMode.POSITION, }

plt.rcParams['figure.dpi'] = 300

logger = logging.getLogger(__name__)
def plot_gait(w, b, ETG, points, save_path=None, show_fig=True):
    """Visualize one ETG gait cycle: the foot trajectory in the x-z plane
    against the fitted key points, plus x(t) and z(t) traces.

    w, b: linear readout from Opt_with_points; only the first and last
    rows/entries (the x and z channels) are used.
    points: the key points the gait was fitted to (plotted in red).
    save_path: when given, figures are saved as <save_path>_1.png / _2.png.
    """
    # keep only the x and z output channels
    w = np.vstack((w[0,], w[-1,]))
    b = np.hstack((b[0], b[-1]))
    # sample 100 phase times across one ETG period
    t_t = np.arange(0.0, ETG.T, ETG.T / 100)
    p = []
    for t in np.nditer(t_t):
        p.append(w.dot(ETG.update(t)) + b)
    p = np.array(p)
    colors = t_t * 200  # color-code samples by phase

    # figure 0: trajectory in the x-z plane
    fig0 = plt.figure()
    plt.scatter(points[:, 0], points[:, 1], c='red', alpha=0.5)
    plt.scatter(p[50, 0], p[50, 1], c='blue', alpha=1)  # mid-cycle marker
    plt.scatter(p[0, 0], p[0, 1], c='blue', alpha=1)    # cycle-start marker
    plt.scatter(p[:, 0], p[:, 1], s=10, c=colors, cmap='viridis')
    plt.xlim(-0.15, 0.15)
    plt.ylim(-0.10, 0.15)
    plt.tight_layout()
    # plt.colorbar()

    # figure 1: x and z vs. time over one period
    fig1, axs = plt.subplots(2, 1, sharex=True)
    plt.xlim(0.0, ETG.T)
    plt.xlabel("time(s)")
    axs[0].set_ylim(-0.10, 0.10)
    axs[0].set_ylabel("x(m)")
    axs[0].scatter(t_t, p[:, 0], s=10, c='blue')
    axs[1].set_ylim(-0.03, 0.13)
    axs[1].set_ylabel("z(m)")
    axs[1].scatter(t_t, p[:, 1], s=10, c='blue')
    plt.tight_layout()

    if save_path is not None:
        plot_path = save_path + '_1.png'
        fig0.savefig(plot_path)
        plot_path = save_path + '_2.png'
        fig1.savefig(plot_path)
        if show_fig is True:
            plt.show()
        plt.close(fig0)
        plt.close(fig1)
    else:
        # NOTE(review): with save_path=None the figures are always shown,
        # ignoring show_fig — confirm this asymmetry is intended.
        plt.show()
# solve "x" of "Ax = b"
def LS_sol(A, b, precision=1e-4, alpha=0.05, lamb=1, w0=None):
n, m = A.shape # 6x20
if w0 is not None:
x = copy(w0)
else:
x = np.zeros((m, 1)) # 20x1
err = A.dot(x) - b # 6x1
err = err.transpose().dot(err)
i = 0
diff = 1
while err > precision and i < 1000:
A1 = A.transpose().dot(A) # 20x20
dx = A1.dot(x) - A.transpose().dot(b)
if w0 is not None:
dx += lamb * (x - w0)
x = x - alpha * dx
diff = np.linalg.norm(dx)
err = A.dot(x) - b
err = err.transpose().dot(err)
i += 1
return x
def Opt_with_points(ETG, ETG_T=0.8, ETG_H=20, points=None, b0=None, w0=None, precision=1e-4, lamb=0.5, **kwargs):
    """Fit the ETG linear readout (w, b) so the gait passes through key points.

    Samples the ETG state vector at six phase times, then least-squares
    fits the x and z output channels via LS_sol.  Returns (w_, b_, points)
    with w_ of shape (3, ETG_H) and b_ of shape (3,) — the y channel is zero.
    """
    # sample times: one mid-stance time plus five swing-phase times
    # ts = [0.5 * ETG_T + 0.1, 0, 0.05, 0.1, 0.15, 0.2]
    ts = [0.5 * ETG_T + 0.2, 0, 0.1, 0.2, 0.3, 0.4]

    if points is None:
        # default foot-path key points from the gait shape kwargs
        step_len = kwargs.get("Steplength", 0.05)
        foot_h = kwargs.get("Footheight", 0.08)
        pen = kwargs.get("Penetration", 0.01)
        # [[0.0, -0.01], [-0.05, -0.005], [-0.075, 0.06], [0.0, 0.1], [0.075, 0.06], [0.05, -0.005]]
        points = np.array([[0, -pen],
                           [-step_len, -pen * 0.5], [-step_len * 1.0, 0.6 * foot_h],
                           [0, foot_h],
                           [step_len * 1.0, 0.6 * foot_h], [step_len, -pen * 0.5]])

    # stack the sampled ETG state vectors into the design matrix: 6 x ETG_H
    obs = np.array([ETG.update(t) for t in ts]).reshape(-1, ETG_H)

    # bias: centroid of the key points unless an explicit prior is given
    b = np.mean(points, axis=0) if b0 is None else np.array([b0[0], b0[-1]])
    points_t = points - b  # residual targets: W * V(t) = P(t) - b

    if w0 is None:
        x1 = LS_sol(A=obs, b=points_t[:, 0].reshape(-1, 1), precision=precision, alpha=0.05)  # x channel
        x2 = LS_sol(A=obs, b=points_t[:, 1].reshape(-1, 1), precision=precision, alpha=0.05)  # z channel
    else:
        # regularized fit pulling toward the previous weights
        x1 = LS_sol(A=obs, b=points_t[:, 0].reshape(-1, 1), precision=precision, alpha=0.05, lamb=lamb,
                    w0=w0[0, :].reshape(-1, 1))
        x2 = LS_sol(A=obs, b=points_t[:, 1].reshape(-1, 1), precision=precision, alpha=0.05, lamb=lamb,
                    w0=w0[-1, :].reshape(-1, 1))

    # assemble (x, y=0, z) rows into the 3 x ETG_H readout matrix
    w_ = np.stack((x1, np.zeros((ETG_H, 1)), x2), axis=0).reshape(3, -1)
    b_ = np.array([b[0], 0, b[1]])
    return w_, b_, points
def param2dynamic_dict(params):
    """Map a normalized parameter vector onto physical dynamics parameters.

    The input is clipped to [-1, 1], then each slice is scaled/offset into
    its physical range (latency, friction, masses, inertias, PD gains) for
    domain randomization.  A 48-entry vector additionally randomizes gravity.
    """
    p = np.clip(copy(params), -1, 1)
    out = {
        'control_latency': np.clip(40 + 10 * p[0], 0, 80),
        'footfriction': np.clip(0.2 + 10 * p[1], 0, 20),
        'basemass': np.clip(1.5 + 1 * p[2], 0.5, 3),
        'baseinertia': np.clip(np.ones(3) + 1 * p[3:6],
                               np.array([0.1] * 3), np.array([3] * 3)).tolist(),
        'legmass': np.clip(np.ones(3) + 1 * p[6:9],
                           np.array([0.1] * 3), np.array([3] * 3)).tolist(),
        'leginertia': np.clip(np.ones(12) + 1 * p[9:21],
                              np.array([0.1] * 12), np.array([3] * 12)).tolist(),
        'motor_kp': np.clip(80 * np.ones(12) + 40 * p[21:33],
                            np.array([20] * 12), np.array([200] * 12)).tolist(),
        'motor_kd': np.clip(np.array([1., 2., 2.] * 4) + p[33:45] * np.array([1, 2, 2] * 4),
                            np.array([0] * 12), np.array([5] * 12)).tolist(),
    }
    if p.shape[0] > 45:
        # optional gravity randomization when the vector carries 48 entries
        out['gravity'] = np.clip(np.array([0, 0, -10]) + p[45:48] * np.array([2, 2, 10]),
                                 np.array([-5, -5, -20]), np.array([5, 5, -4])).tolist()
    return out
# Runs policy for 5 episodes by default and returns average reward
# A fixed seed is used for the eval environment
def run_evaluate_episodes(agent, env, max_step, action_bound, w=None, b=None, pub_msgs=False):
    """Drive the real robot while republishing joint states to ROS.

    Loops until rospy shutdown.  The NN policy call is commented out, so
    the action is all-zeros (pure ETG/default control).  Returns
    (avg_reward, steps_all, infos); avg_reward stays 0.0 because reward
    accumulation below is commented out.

    NOTE(review): reads the module-global `args` (x_noise, hft) — only
    callable after argument parsing in __main__.
    """
    avg_reward = 0.
    infos = {}
    steps_all = 0
    # obs, info = env.reset(ETG_w=w, ETG_b=b, x_noise=args.x_noise)
    obs = env.reset(ETG_w=w, ETG_b=b, x_noise=args.x_noise, heightfield_terrain=args.hft)
    done = False
    steps = 0
    step_time_all = 0.0

    # interactive plot window (the per-step plotting below is commented out)
    plt.ion()
    plt.figure(1)
    # plt.ylim(-0.3, -0.1)
    plt.xlim(0.05, 0.3)

    # publish commanded + measured joint positions for visualization tools
    pub_joint = rospy.Publisher("joint_states", JointState, queue_size=100)
    joint_msg = JointState()
    joint_msg.header.frame_id = "robot"
    joint_msg.name.append("torso_to_abduct_fr_j")
    joint_msg.name.append("abduct_fr_to_thigh_fr_j")
    joint_msg.name.append("thigh_fr_to_knee_fr_j")
    joint_msg.name.append("torso_to_abduct_fl_j")
    joint_msg.name.append("abduct_fl_to_thigh_fl_j")
    joint_msg.name.append("thigh_fl_to_knee_fl_j")
    joint_msg.name.append("torso_to_abduct_hr_j")
    joint_msg.name.append("abduct_hr_to_thigh_hr_j")
    joint_msg.name.append("thigh_hr_to_knee_hr_j")
    joint_msg.name.append("torso_to_abduct_hl_j")
    joint_msg.name.append("abduct_hl_to_thigh_hl_j")
    joint_msg.name.append("thigh_hl_to_knee_hl_j")

    # service ROS callbacks in the background
    thread = threading.Thread(target=lambda: rospy.spin())
    thread.start()

    rate = rospy.Rate(100)  # 100 Hz control loop
    while not rospy.is_shutdown():
        # while not done:
        steps += 1
        t0 = time.time()
        # action = agent.predict(obs) # NN output: residual control signal
        t1 = time.time()
        pred_time = t1 - t0
        # print("pred time:", pred_time)
        # new_action = copy(action)
        new_action = np.zeros(12)  # policy disabled: zero residual action
        obs, reward, done, info = env.step(new_action * action_bound)
        t2 = time.time()
        step_time = t2 - t1
        step_time_all += step_time
        # avg_reward += reward
        # print("step time:", step_time)

        # first 12 entries: commanded joint targets; next 12: measured angles
        joint_msg.position.clear()
        for i in range(0, 12):
            joint_msg.position.append(info['real_action'][i]) # target joint pos (action)
        for i in range(0, 12):
            # joint_msg.position.append((info['ETG_act'] + info['init_act'])[i]) #
            joint_msg.position.append(info['joint_angle'][i]) # current joint pos
        joint_msg.header.stamp = rospy.Time.now()
        pub_joint.publish(joint_msg)

        # plt.ylim(0.0, 0.1)
        # if info["real_contact"][0] == True:
        #     plt.scatter(info["foot_position_world"][0][0], info["foot_position_world"][0][2], s=5, c='red')
        # else:
        #     plt.scatter(info["foot_position_world"][0][0], info["foot_position_world"][0][2], s=5, c='blue')
        # plot foot trajectory
        # plt.scatter(info["ETG_trj"][0], info["ETG_trj"][2], s=5, c='red')
        # plt.scatter(info["foot_position"][0][0], info["foot_position"][0][2], s=5, c='blue')
        # plt.pause(0.0001)
        # for key in Param_Dict.keys():
        #     if key in info.keys():
        #         if key not in infos.keys():
        #             infos[key] = info[key]
        #         else:
        #             infos[key] += info[key]
        # if steps > max_step:
        #     break
        rate.sleep()

    steps_all += steps
    plt.ioff()
    plt.close(1)
    # print("\033[1;32m[Evaluation] Average step time: {} step/second.\033[0m".format(steps_all/step_time_all))
    logger.debug("[Evaluation] Average step time: {} step/second.".format(steps_all/step_time_all))
    return avg_reward, steps_all, infos
def main():
    """Build the ETG gait and the real-robot env, load the SAC agent, run
    the ROS evaluation loop, and dump per-term rewards to disk.

    NOTE(review): the evaluation tail is nested under `if args.load != ""`
    (reconstructed from the use of load_dir/w/b); with an empty --load no
    evaluation runs.
    """
    rospy.init_node("eval_real_node")

    ## ETG init
    phase = np.array([-np.pi / 2, 0])
    dt = args.action_repeat_num * 0.002 # default: 13 * 0.002 = 0.026s
    args.ETG_T, args.ETG_H = 0.8, 20
    ETG_agent = ETG_layer(args.ETG_T, dt, args.ETG_H, 0.04, phase, 0.2, args.ETG_T2)
    # prior_points: 6x2 [[0.0, -0.01], [-0.05, -0.005], [-0.075, 0.06], [0.0, 0.1], [0.075, 0.06], [0.05, -0.005]]
    w0, b0, prior_points = Opt_with_points(ETG=ETG_agent, ETG_T=args.ETG_T, ETG_H=args.ETG_H,
                                           Footheight=args.footheight, Steplength=args.steplen)
    # plot_gait(w0, b0, ETG_agent, prior_points)
    # if not os.path.exists(ETG_path):
    #     np.savez(ETG_path, w=w0, b=b0, param=prior_points)

    ## create gym envs of the quadruped robot
    env = AliengoRealEnv(motor_control_mode=mode_map[args.act_mode],sensor_mode=sensor_param,
                         normal=args.normal, reward_param=reward_param,
                         ETG=args.ETG, ETG_T=args.ETG_T, ETG_H=args.ETG_H,
                         vel_d=args.vel_d, foot_dx=args.foot_dx, step_y=args.step_y, reward_p=args.reward_p,
                         action_repeat=args.action_repeat_num)
    obs_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]

    # per-mode action scaling applied to the policy output
    if args.act_mode == "pose":
        act_bound = np.array([0.1, 0.7, 0.7] * 4)
    elif args.act_mode == "torque":
        act_bound = np.array([10] * 12)
    elif args.act_mode == "traj":
        act_bound_now = args.act_bound
        act_bound = np.array([act_bound_now, act_bound_now, act_bound_now] * 4) # 0.3*12

    # Initialize RL model, algorithm, agent, replay_memory
    model = MujocoModel(obs_dim, action_dim)
    algorithm = SAC(
        model,
        gamma=GAMMA,
        tau=TAU,
        alpha=ALPHA,
        actor_lr=ACTOR_LR,
        critic_lr=CRITIC_LR)
    RL_agent = MujocoAgent(algorithm)

    if args.load != "":
        RL_agent.restore(args.load)
        load_dir = args.load[:-3]  # strip the ".pt"-style suffix to get a directory name
        # ETG_info = np.load(load_dir + ".npz")
        # w = ETG_info["w"]
        # b = ETG_info["b"]
        # prior_points = ETG_info["param"].reshape(-1, 2)
        w = w0
        b = b0
        if not os.path.exists(load_dir):
            os.makedirs(load_dir)
        # plot_gait(w, b, ETG_agent, prior_points,
        #           save_path=os.path.join(load_dir, 'eval_etg'))
        avg_reward, avg_step, info = run_evaluate_episodes(RL_agent, env, 600, act_bound, w, b)
        print("\033[1;32m[Evaluation] Reward: {} Steps: {}\033[0m".format(avg_reward, avg_step))
        print("total reward: ", info)
        # convert weighted reward terms back to their unweighted values
        origin_rew = {}
        for key in info.keys():
            if Param_Dict[key] != 0.0:
                origin_rew[key] = info[key] / reward_param[key]
            else:
                origin_rew[key] = 0.0
        print("origin reward: ", origin_rew)
        with open(os.path.join(load_dir, 'origin_reward'), 'w') as f:
            json.dump(origin_rew, f, indent=2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--resume", type=str, default="")
parser.add_argument("--load", type=str, default="", help="Directory to load agent from.")
parser.add_argument("--eval", type=int, default=1, help="Evaluate or not")
parser.add_argument("--render", type=int, default=0, help="render or not")
parser.add_argument("--outdir", type=str, default="train_log")
parser.add_argument("--suffix", type=str, default="exp0")
parser.add_argument("--task_mode", type=str, default="heightfield")
parser.add_argument("--max_steps", type=int, default=3e6)
parser.add_argument("--env_nums", type=int, default=16)
parser.add_argument("--learn", type=int, default=8)
parser.add_argument("--epsilon", type=float, default=0.4)
parser.add_argument("--gamma", type=float, default=0.95)
parser.add_argument("--sigma", type=float, default=0.02)
parser.add_argument("--sigma_decay", type=float, default=0.99)
parser.add_argument("--popsize", type=float, default=40)
parser.add_argument("--random", type=int, default=0)
parser.add_argument("--normal", type=int, default=1)
parser.add_argument("--footheight", type=float, default=0.06)
parser.add_argument("--steplen", type=float, default=0.05)
parser.add_argument("--act_mode", type=str, default="traj")
parser.add_argument("--act_bound", type=float, default=0.3)
parser.add_argument("--x_noise", type=int, default=0)
parser.add_argument("--hft", type=str, default="slope")
parser.add_argument("--ETG", type=int, default=1)
parser.add_argument("--ETG_T", type=float, default=0.4)
parser.add_argument("--ETG_H", type=int, default=20)
parser.add_argument("--ETG_T2", type=float, default=0.5)
parser.add_argument("--ETG_path", type=str, default="None")
parser.add_argument("--vel_d", type=float, default=0.5)
parser.add_argument("--foot_dx", type=float, default=0.2)
parser.add_argument("--step_y", type=float, default=0.05)
parser.add_argument("--reward_p", type=float, default=5)
parser.add_argument("--enable_action_filter", type=int, default=0)
parser.add_argument("--action_repeat_num", type=int, default=5)
parser.add_argument("--ES", type=int, default=1)
parser.add_argument("--es_rpm", type=int, default=1, help='ES training store into RPM for SAC')
parser.add_argument("--e_step", type=int, default=400)
parser.add_argument("--stand", type=float, default=0)
parser.add_argument("--random_dynamic", type=int, default=0)
parser.add_argument("--random_force", type=int, default=0)
parser.add_argument("--rew_torso", type=float, default=1.5)
parser.add_argument("--rew_up", type=float, default=0.6)
parser.add_argument("--rew_tau", type=float, default=0.07)
parser.add_argument("--rew_feet_vel", type=float, default=0.3)
parser.add_argument("--rew_badfoot", type=float, default=0.1)
parser.add_argument("--rew_footcontact", type=float, default=0.1)
parser.add_argument("--sensor_dis", type=int, default=0)
parser.add_argument("--sensor_motor", type=int, default=1)
parser.add_argument("--sensor_imu", type=int, default=1)
parser.add_argument("--sensor_contact", type=int, default=1)
parser.add_argument("--sensor_ETG", type=int, default=1)
parser.add_argument("--sensor_ETG_obs", type=int, default=0)
parser.add_argument("--sensor_footpose", type=int, default=1)
parser.add_argument("--sensor_dynamic", type=int, default=0)
parser.add_argument("--sensor_exforce", type=int, default=0)
parser.add_argument("--sensor_noise", type=int, default=0)
parser.add_argument("--timesteps", type=int, default=5)
parser.add_argument("--timeinterval", type=int, default=1)
parser.add_argument("--RNN_mode", type=str, default="None")
args = parser.parse_args()
# resume args from stored files
if args.eval != 0:
load_args = args.load
task_mode = args.task_mode
hft = args.hft
load_path = os.path.split(args.load)[0]
args_file = os.path.join(load_path, 'parse_args')
with open(args_file, 'r') as f:
args.__dict__ = json.load(f)
args.load = load_args
args.task_mode = task_mode
args.hft = hft
args.eval = True
args.render = True
param_file = os.path.join(load_path, 'reward_param')
with open(param_file, 'r') as f:
reward_param = json.load(f)
# set params
# reward_param['rew_torso'] = args.torso
# reward_param['rew_up'] = args.up
# reward_param['rew_tau'] = args.tau
# reward_param['rew_feet_vel'] = args.feet
# reward_param['rew_stand'] = args.stand
# reward_param['rew_badfoot'] = args.badfoot
# reward_param['rew_footcontact'] = args.footcontact
sensor_param['dis'] = args.sensor_dis
sensor_param['motor'] = args.sensor_motor
sensor_param["imu"] = args.sensor_imu
sensor_param["contact"] = args.sensor_contact
sensor_param["ETG"] = args.sensor_ETG
sensor_param["ETG_obs"] = args.sensor_ETG_obs
sensor_param["footpose"] = args.sensor_footpose
sensor_param["dynamic_vec"] = args.sensor_dynamic
sensor_param["force_vec"] = args.sensor_exforce
sensor_param["noise"] = args.sensor_noise
rnn_config = {}
rnn_config["time_steps"] = args.timesteps
rnn_config["time_interval"] = args.timeinterval
rnn_config["mode"] = args.RNN_mode
sensor_param["RNN"] = rnn_config
main()
|
controlsd.py | #!/usr/bin/env python3
import os
import math
import requests
import threading
from numbers import Number
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
# from common.travis_checker import gh_actions
# import selfdrive.crash as crash
# from selfdrive.version import is_fork_remote
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.drive_helpers import get_lag_adjusted_curvature
from selfdrive.controls.lib.longcontrol import LongControl
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_model import LatControlModel
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager, set_offroad_alert
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI, EON
from selfdrive.manager.process_config import managed_processes
from selfdrive.controls.lib.dynamic_follow.df_manager import dfManager
from common.op_params import opParams
SOFT_DISABLE_TIME = 3  # seconds
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS  # lane-departure warnings only above this speed
LANE_DEPARTURE_THRESHOLD = 0.1

# environment switches used by tests / simulation
REPLAY = "REPLAY" in os.environ
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ

# processes whose absence should not raise a process-not-running event
IGNORE_PROCESSES = {"rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned",
                    "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad",
                    "statsd", "shutdownd"} | \
                   {k for k, v in managed_processes.items() if not v.enabled}

ACTUATOR_FIELDS = set(car.CarControl.Actuators.schema.fields.keys())

# capnp enum shorthands
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
ButtonEvent = car.CarState.ButtonEvent
SafetyModel = car.CarParams.SafetyModel

IGNORED_SAFETY_MODES = [SafetyModel.silent, SafetyModel.noOutput]
# camera-state index -> camera error event
CSID_MAP = {"0": EventName.roadCameraError, "1": EventName.wideRoadCameraError, "2": EventName.driverCameraError}
# def log_fingerprint(candidate, timeout=15):
# if not gh_actions and is_fork_remote:
# try:
# requests.get('https://sentry.io', timeout=timeout)
# crash.init()
# crash.capture_message("fingerprinted {}".format(candidate), level='info')
# return
# except:
# pass
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
    """Wire up messaging, fingerprint the car, and build controller state.

    sm / pm / can_sock may be injected (process replay, tests); when None
    the real pub/sub sockets are created.
    """
    config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)

    self.op_params = opParams()

    # Setup sockets
    self.pm = pm
    if self.pm is None:
        self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
                                       'carControl', 'carEvents', 'carParams'])

    self.camera_packets = ["roadCameraState", "driverCameraState"]
    if TICI:
        self.camera_packets.append("wideRoadCameraState")

    params = Params()
    self.joystick_mode = params.get_bool("JoystickDebugMode")
    joystick_packet = ['testJoystick'] if self.joystick_mode else []

    self.sm = sm
    if self.sm is None:
        ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
        self.sm = messaging.SubMaster(['deviceState', 'pandaStates', 'peripheralState', 'modelV2', 'liveCalibration',
                                       'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
                                       'managerState', 'liveParameters', 'radarState'] + self.camera_packets + joystick_packet,
                                      ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])

    # fork-specific subscriptions (dynamic follow, lane speed, ...)
    self.sm_smiskol = messaging.SubMaster(['radarState', 'dynamicFollowData', 'liveTracks', 'dynamicFollowButton',
                                           'laneSpeed', 'dynamicCameraOffset', 'modelLongButton'])
    # NOTE(review): opParams() was already instantiated above — duplicate assignment.
    self.op_params = opParams()
    self.df_manager = dfManager()
    self.last_model_long = False

    self.can_sock = can_sock
    if can_sock is None:
        can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
        self.can_sock = messaging.sub_sock('can', timeout=can_timeout)

    if TICI:
        self.log_sock = messaging.sub_sock('androidLog')

    # wait for one pandaState and one CAN packet
    print("Waiting for CAN messages...")
    get_one_can(self.can_sock)

    # fingerprint the car and build the car interface
    self.CI, self.CP, candidate = get_car(self.can_sock, self.pm.sock['sendcan'])
    # threading.Thread(target=log_fingerprint, args=[candidate]).start()

    # read params
    self.is_metric = params.get_bool("IsMetric")
    self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
    openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
    passive = params.get_bool("Passive") or not openpilot_enabled_toggle

    # detect sound card presence and ensure successful init
    sounds_available = HARDWARE.get_sound_card_online()

    car_recognized = self.CP.carName != 'mock'

    controller_available = self.CI.CC is not None and not passive and not self.CP.dashcamOnly
    self.read_only = not car_recognized or not controller_available or self.CP.dashcamOnly
    if self.read_only:
        # dashcam mode: force the no-output safety model so nothing is actuated
        safety_config = car.CarParams.SafetyConfig.new_message()
        safety_config.safetyModel = car.CarParams.SafetyModel.noOutput
        self.CP.safetyConfigs = [safety_config]

    # Write CarParams for radard
    cp_bytes = self.CP.to_bytes()
    params.put("CarParams", cp_bytes)
    put_nonblocking("CarParamsCache", cp_bytes)

    self.CC = car.CarControl.new_message()
    self.AM = AlertManager()
    self.events = Events()

    self.LoC = LongControl(self.CP)
    self.VM = VehicleModel(self.CP)

    # pick the lateral controller matching the car's tune
    if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
        self.LaC = LatControlAngle(self.CP, self.CI)
    elif self.CP.lateralTuning.which() == 'pid':
        self.LaC = LatControlPID(self.CP, self.CI)
    elif self.CP.lateralTuning.which() == 'indi':
        self.LaC = LatControlINDI(self.CP, self.CI)
    elif self.CP.lateralTuning.which() == 'lqr':
        self.LaC = LatControlLQR(self.CP, self.CI)
    elif self.CP.lateralTuning.which() == 'model':
        self.LaC = LatControlModel(self.CP, self.CI)

    # state machine / bookkeeping, all reset to the disengaged defaults
    self.initialized = False
    self.state = State.disabled
    self.enabled = False
    self.active = False
    self.can_rcv_error = False
    self.soft_disable_timer = 0
    self.v_cruise_kph = 255
    self.v_cruise_kph_last = 0
    self.mismatch_counter = 0
    self.cruise_mismatch_counter = 0
    self.can_rcv_error_counter = 0
    self.last_blinker_frame = 0
    self.distance_traveled = 0
    self.last_functional_fan_frame = 0
    self.events_prev = []
    self.current_alert_types = [ET.PERMANENT]
    self.logged_comm_issue = False
    self.button_timers = {ButtonEvent.Type.decelCruise: 0, ButtonEvent.Type.accelCruise: 0}
    self.last_actuators = car.CarControl.Actuators.new_message()
    self.lat_delay_offset = 0.0

    # TODO: no longer necessary, aside from process replay
    self.sm['liveParameters'].valid = True

    self.startup_event = get_startup_event(car_recognized, controller_available, len(self.CP.carFw) > 0, self.CP)
    if not sounds_available:
        self.events.add(EventName.soundsUnavailable, static=True)
    if not car_recognized:
        self.events.add(EventName.carUnrecognized, static=True)
        if len(self.CP.carFw) > 0:
            set_offroad_alert("Offroad_CarUnrecognized", True)
        else:
            set_offroad_alert("Offroad_NoFirmware", True)
    elif self.read_only:
        self.events.add(EventName.dashcamMode, static=True)
    elif self.joystick_mode:
        self.events.add(EventName.joystickDebug, static=True)
        self.startup_event = None

    # controlsd is driven by can recv, expected at 100Hz
    self.rk = Ratekeeper(100, print_delay_threshold=None)
    self.prof = Profiler(False)  # off by default
def update_events(self, CS):
    """Compute carEvents from carState.

    Rebuilds self.events every cycle from the latest carState, driver
    monitoring, device/panda health, calibration, lane-change state and
    planner messages. Ends by adding the fork's stock-additions alerts.
    """
    self.events.clear()

    # Add startup event (one-shot: consumed the first cycle it is shown)
    if self.startup_event is not None:
        self.events.add(self.startup_event)
        self.startup_event = None

    # Don't add any more events if not initialized
    if not self.initialized:
        self.events.add(EventName.controlsInitializing)
        return

    self.events.add_from_msg(CS.events)
    self.events.add_from_msg(self.sm['driverMonitoringState'].events)

    # Create events for battery, temperature, disk space, and memory
    if EON and (self.sm['peripheralState'].pandaType != PandaType.uno) and \
       self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
        # at zero percent battery, while discharging, OP should not allowed
        self.events.add(EventName.lowBattery)
    if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
        self.events.add(EventName.overheat)
    if self.sm['deviceState'].freeSpacePercent < 7 and not SIMULATION:
        # under 7% of space free no enable allowed
        self.events.add(EventName.outOfSpace)
    # TODO: make tici threshold the same
    if self.sm['deviceState'].memoryUsagePercent > (90 if TICI else 65) and not SIMULATION:
        self.events.add(EventName.lowMemory)

    # TODO: enable this once loggerd CPU usage is more reasonable
    #cpus = list(self.sm['deviceState'].cpuUsagePercent)[:(-1 if EON else None)]
    #if max(cpus, default=0) > 95 and not SIMULATION:
    #  self.events.add(EventName.highCpuUsage)

    # Alert if fan isn't spinning for 5 seconds
    if self.sm['peripheralState'].pandaType in (PandaType.uno, PandaType.dos):
        if self.sm['peripheralState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
            if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
                self.events.add(EventName.fanMalfunction)
        else:
            # fan is behaving; remember this frame as the last healthy one
            self.last_functional_fan_frame = self.sm.frame

    # Handle calibration status
    cal_status = self.sm['liveCalibration'].calStatus
    if cal_status != Calibration.CALIBRATED:
        if cal_status == Calibration.UNCALIBRATED:
            self.events.add(EventName.calibrationIncomplete)
        else:
            self.events.add(EventName.calibrationInvalid)

    # Handle lane change
    if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
        direction = self.sm['lateralPlan'].laneChangeDirection
        # block the change if something is in the blind spot on that side
        if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
           (CS.rightBlindspot and direction == LaneChangeDirection.right):
            self.events.add(EventName.laneChangeBlocked)
        else:
            if direction == LaneChangeDirection.left:
                self.events.add(EventName.preLaneChangeLeft)
            else:
                self.events.add(EventName.preLaneChangeRight)
    elif self.sm['lateralPlan'].laneChangeState in (LaneChangeState.laneChangeStarting,
                                                    LaneChangeState.laneChangeFinishing):
        self.events.add(EventName.laneChange)

    if not CS.canValid:
        self.events.add(EventName.canError)

    for i, pandaState in enumerate(self.sm['pandaStates']):
        # All pandas must match the list of safetyConfigs, and if outside this list, must be silent or noOutput
        if i < len(self.CP.safetyConfigs):
            safety_mismatch = pandaState.safetyModel != self.CP.safetyConfigs[i].safetyModel or \
                              pandaState.safetyParam != self.CP.safetyConfigs[i].safetyParam or \
                              pandaState.unsafeMode != self.CP.unsafeMode
        else:
            safety_mismatch = pandaState.safetyModel not in IGNORED_SAFETY_MODES

        # mismatch_counter is incremented in data_sample() while enabled
        if safety_mismatch or self.mismatch_counter >= 200:
            self.events.add(EventName.controlsMismatch)

        if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
            self.events.add(EventName.relayMalfunction)

    # Check for HW or system issues
    if len(self.sm['radarState'].radarErrors):
        self.events.add(EventName.radarFault)
    elif not self.sm.valid["pandaStates"]:
        self.events.add(EventName.usbError)
    elif not self.sm.all_alive_and_valid() or self.can_rcv_error:
        self.events.add(EventName.commIssue)
        if not self.logged_comm_issue:
            # log which services dropped out, but only once per outage
            invalid = [s for s, valid in self.sm.valid.items() if not valid]
            not_alive = [s for s, alive in self.sm.alive.items() if not alive]
            cloudlog.event("commIssue", invalid=invalid, not_alive=not_alive, can_error=self.can_rcv_error, error=True)
            self.logged_comm_issue = True
    else:
        self.logged_comm_issue = False

    if not self.sm['liveParameters'].valid:
        self.events.add(EventName.vehicleModelInvalid)
    if not self.sm['lateralPlan'].mpcSolutionValid:
        self.events.add(EventName.plannerError)
    if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
        if self.sm.frame > 5 / DT_CTRL:  # Give locationd some time to receive all the inputs
            self.events.add(EventName.sensorDataInvalid)
    if not self.sm['liveLocationKalman'].posenetOK:
        self.events.add(EventName.posenetInvalid)
    if not self.sm['liveLocationKalman'].deviceStable:
        self.events.add(EventName.deviceFalling)

    if not REPLAY:
        # Check for mismatch between openpilot and car's PCM
        cruise_mismatch = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
        self.cruise_mismatch_counter = self.cruise_mismatch_counter + 1 if cruise_mismatch else 0
        if self.cruise_mismatch_counter > int(3. / DT_CTRL):
            self.events.add(EventName.cruiseMismatch)

    # Check for FCW
    stock_long_is_braking = self.enabled and not self.CP.openpilotLongitudinalControl and CS.aEgo < -1.5
    model_fcw = self.sm['modelV2'].meta.hardBrakePredicted and not CS.brakePressed and not stock_long_is_braking
    planner_fcw = self.sm['longitudinalPlan'].fcw and self.enabled
    if planner_fcw or model_fcw:
        self.events.add(EventName.fcw)

    if TICI:
        # scan camera driver logs for known hardware error signatures
        for m in messaging.drain_sock(self.log_sock, wait_for_one=False):
            try:
                msg = m.androidLog.message
                if any(err in msg for err in ("ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED")):
                    csid = msg.split("CSID:")[-1].split(" ")[0]
                    evt = CSID_MAP.get(csid, None)
                    if evt is not None:
                        self.events.add(evt)
            except UnicodeDecodeError:
                pass

    # TODO: fix simulator
    if not SIMULATION:
        # if not NOSENSOR:
        #   if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000):
        #     Not show in first 1 km to allow for driving out of garage. This event shows after 5 minutes
        #     self.events.add(EventName.noGps)
        if not self.sm.all_alive(self.camera_packets):
            self.events.add(EventName.cameraMalfunction)
        if self.sm['modelV2'].frameDropPerc > 20:
            self.events.add(EventName.modeldLagging)
        if self.sm['liveLocationKalman'].excessiveResets:
            self.events.add(EventName.localizerMalfunction)

        # Check if all manager processes are running
        not_running = {p.name for p in self.sm['managerState'].processes if not p.running}
        if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
            self.events.add(EventName.processNotRunning)

    # Only allow engagement with brake pressed when stopped behind another stopped car
    speeds = self.sm['longitudinalPlan'].speeds
    if len(speeds) > 1:
        v_future = speeds[-1]
    else:
        # no usable plan yet: assume a high future speed so noTarget is not raised
        v_future = 100.0
    if CS.brakePressed and v_future >= self.CP.vEgoStarting \
       and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3 and not self.last_model_long:
        self.events.add(EventName.noTarget)

    self.add_stock_additions_alerts(CS)
def add_stock_additions_alerts(self, CS):
    """Raise the fork's stock-additions UI alerts via the AlertManager.

    Each branch returns immediately after adding its alert, so the order
    of the branches in this function defines alert priority.
    """
    self.AM.SA_set_frame(self.sm.frame)
    self.AM.SA_set_enabled(self.enabled)
    # alert priority is defined by code location, keeping is highest, then lane speed alert, then auto-df alert
    if self.sm_smiskol['modelLongButton'].enabled != self.last_model_long:
        # model-longitudinal toggle just changed state
        extra_text_1 = 'disabled!' if self.last_model_long else 'enabled!'
        extra_text_2 = '' if self.last_model_long else ', model may behave unexpectedly'
        self.AM.SA_add('modelLongAlert', extra_text_1=extra_text_1, extra_text_2=extra_text_2)
        return

    if self.sm_smiskol['dynamicCameraOffset'].keepingLeft:
        self.AM.SA_add('laneSpeedKeeping', extra_text_1='LEFT', extra_text_2='Oncoming traffic in right lane')
        return
    elif self.sm_smiskol['dynamicCameraOffset'].keepingRight:
        self.AM.SA_add('laneSpeedKeeping', extra_text_1='RIGHT', extra_text_2='Oncoming traffic in left lane')
        return

    ls_state = self.sm_smiskol['laneSpeed'].state
    if ls_state != '':
        self.AM.SA_add('lsButtonAlert', extra_text_1=ls_state)
        return

    faster_lane = self.sm_smiskol['laneSpeed'].fastestLane
    if faster_lane in ['left', 'right']:
        ls_alert = 'laneSpeedAlert'
        if not self.sm_smiskol['laneSpeed'].new:
            # repeat notification: show without sound
            ls_alert += 'Silent'
        self.AM.SA_add(ls_alert, extra_text_1='{} lane faster'.format(faster_lane).upper(), extra_text_2='Change lanes to faster {} lane'.format(faster_lane))
        return

    df_out = self.df_manager.update()
    if df_out.changed:
        df_alert = 'dfButtonAlert'
        if df_out.is_auto and df_out.last_is_auto:
            # only show auto alert if engaged, not hiding auto, and time since lane speed alert not showing
            if CS.cruiseState.enabled and not self.op_params.get('hide_auto_df_alerts'):
                df_alert += 'Silent'
                self.AM.SA_add(df_alert, extra_text_1=df_out.model_profile_text + ' (auto)')
                return
        elif self.op_params.get('df_button_alerts').strip().lower() == 'off':
            # user disabled dynamic-follow button alerts entirely
            return
        else:
            if self.op_params.get('df_button_alerts').strip().lower() == 'silent':
                df_alert += 'Silent'
            self.AM.SA_add(df_alert, extra_text_1=df_out.user_profile_text, extra_text_2='Dynamic follow: {} profile active'.format(df_out.user_profile_text))
            return
def data_sample(self):
    """Receive data from sockets and update carState.

    Blocks on CAN recv (this paces the 100Hz loop), refreshes both
    SubMasters, performs one-time initialization, and tracks CAN/panda
    health counters. Returns the new carState.
    """
    # Update carState from CAN
    can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
    CS = self.CI.update(self.CC, can_strs)

    self.sm.update(0)
    self.sm_smiskol.update(0)

    if not self.initialized:
        # initialize once all inputs are valid, or after a 3.5s timeout
        all_valid = CS.canValid and self.sm.all_alive_and_valid()
        if all_valid or self.sm.frame * DT_CTRL > 3.5 or SIMULATION:
            if not self.read_only:
                self.CI.init(self.CP, self.can_sock, self.pm.sock['sendcan'])
            self.initialized = True

            if REPLAY and self.sm['pandaStates'][0].controlsAllowed:
                self.state = State.enabled

            Params().put_bool("ControlsReady", True)

    # Check for CAN timeout
    if not can_strs:
        self.can_rcv_error_counter += 1
        self.can_rcv_error = True
    else:
        self.can_rcv_error = False

    # When the panda and controlsd do not agree on controls_allowed
    # we want to disengage openpilot. However the status from the panda goes through
    # another socket other than the CAN messages and one can arrive earlier than the other.
    # Therefore we allow a mismatch for two samples, then we trigger the disengagement.
    if not self.enabled:
        self.mismatch_counter = 0

    # All pandas not in silent mode must have controlsAllowed when openpilot is enabled
    if self.enabled and any(not ps.controlsAllowed for ps in self.sm['pandaStates']
                            if ps.safetyModel not in IGNORED_SAFETY_MODES):
        self.mismatch_counter += 1

    self.distance_traveled += CS.vEgo * DT_CTRL

    return CS
def state_transition(self, CS):
    """Compute conditional state transitions and execute actions on state transitions.

    Drives the disabled/preEnabled/enabled/softDisabling state machine from
    self.events, updates the cruise set speed, and records which alert
    types should be raised this cycle in self.current_alert_types.
    """
    self.v_cruise_kph_last = self.v_cruise_kph

    # if stock cruise is completely disabled, then we can use our own set speed logic
    if not self.CP.pcmCruise:
        self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.button_timers, self.enabled, self.is_metric)
    elif CS.cruiseState.enabled:
        self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH

    # decrement the soft disable timer at every step, as it's reset on
    # entrance in SOFT_DISABLING state
    self.soft_disable_timer = max(0, self.soft_disable_timer - 1)

    self.current_alert_types = [ET.PERMANENT]

    # ENABLED, PRE ENABLING, SOFT DISABLING
    if self.state != State.disabled:
        # user and immediate disable always have priority in a non-disabled state
        if self.events.any(ET.USER_DISABLE):
            self.state = State.disabled
            self.current_alert_types.append(ET.USER_DISABLE)

        elif self.events.any(ET.IMMEDIATE_DISABLE):
            self.state = State.disabled
            self.current_alert_types.append(ET.IMMEDIATE_DISABLE)

        else:
            # ENABLED
            if self.state == State.enabled:
                if self.events.any(ET.SOFT_DISABLE):
                    self.state = State.softDisabling
                    self.soft_disable_timer = int(SOFT_DISABLE_TIME / DT_CTRL)
                    self.current_alert_types.append(ET.SOFT_DISABLE)

            # SOFT DISABLING
            elif self.state == State.softDisabling:
                if not self.events.any(ET.SOFT_DISABLE):
                    # no more soft disabling condition, so go back to ENABLED
                    self.state = State.enabled

                elif self.soft_disable_timer > 0:
                    self.current_alert_types.append(ET.SOFT_DISABLE)

                elif self.soft_disable_timer <= 0:
                    # timer expired while the condition persists: disengage
                    self.state = State.disabled

            # PRE ENABLING
            elif self.state == State.preEnabled:
                if not self.events.any(ET.PRE_ENABLE):
                    self.state = State.enabled
                else:
                    self.current_alert_types.append(ET.PRE_ENABLE)

    # DISABLED
    elif self.state == State.disabled:
        if self.events.any(ET.ENABLE):
            if self.events.any(ET.NO_ENTRY):
                # engagement requested but blocked by a no-entry condition
                self.current_alert_types.append(ET.NO_ENTRY)

            else:
                if self.events.any(ET.PRE_ENABLE):
                    self.state = State.preEnabled
                else:
                    self.state = State.enabled
                self.current_alert_types.append(ET.ENABLE)
                self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)

    # Check if actuators are enabled
    self.active = self.state == State.enabled or self.state == State.softDisabling
    if self.active:
        self.current_alert_types.append(ET.WARNING)

    # Check if openpilot is engaged
    self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
    """Given the state, this function returns an actuators packet.

    Runs the longitudinal PID and lateral controller (or joystick
    passthrough in joystick mode) and sanitizes the result.
    Returns (actuators, lac_log).
    """

    # Update VehicleModel
    params = self.sm['liveParameters']
    x = max(params.stiffnessFactor, 0.1)
    sr = max(params.steerRatio, 0.1)
    self.VM.update_params(x, sr)

    lat_plan = self.sm['lateralPlan']
    long_plan = self.sm['longitudinalPlan']

    actuators = car.CarControl.Actuators.new_message()
    actuators.longControlState = self.LoC.long_control_state

    if CS.leftBlinker or CS.rightBlinker:
        self.last_blinker_frame = self.sm.frame

    # State specific actions

    if not self.active:
        # keep controllers reset while disengaged so they start clean
        self.LaC.reset()
        self.LoC.reset(v_pid=CS.vEgo)

    if not self.joystick_mode:
        extras_loc = {'lead_one': self.sm_smiskol['radarState'].leadOne, 'mpc_TR': self.sm_smiskol['dynamicFollowData'].mpcTR,  # TODO: just pass the services
                      'live_tracks': self.sm_smiskol['liveTracks'], 'has_lead': long_plan.hasLead}
        # accel PID loop
        pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_kph * CV.KPH_TO_MS)
        actuators.accel = self.LoC.update(self.active, CS, self.CP, long_plan, pid_accel_limits, extras_loc)

        # interpolate lat plan to 100hz
        self.lat_delay_offset += DT_CTRL
        if self.sm.updated['lateralPlan']:
            self.lat_delay_offset = 0.

        # Steering PID loop and lateral MPC
        lat_active = self.active and not CS.steerWarning and not CS.steerError and CS.vEgo > self.CP.minSteerSpeed
        desired_curvature, desired_curvature_rate = get_lag_adjusted_curvature(self.CP, CS.vEgo,
                                                                               lat_plan.psis,
                                                                               lat_plan.curvatures,
                                                                               lat_plan.curvatureRates,
                                                                               self.lat_delay_offset)
        actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(lat_active, CS, self.CP, self.VM, params, self.last_actuators,
                                                                               desired_curvature, desired_curvature_rate)
    else:
        lac_log = log.ControlsState.LateralDebugState.new_message()
        if self.sm.rcv_frame['testJoystick'] > 0 and self.active:
            # joystick axes map directly to accel and steer commands
            actuators.accel = 4.0*clip(self.sm['testJoystick'].axes[0], -1, 1)

            steer = clip(self.sm['testJoystick'].axes[1], -1, 1)
            # max angle is 45 for angle-based cars
            actuators.steer, actuators.steeringAngleDeg = steer, steer * 45.

            lac_log.active = True
            lac_log.steeringAngleDeg = CS.steeringAngleDeg
            lac_log.output = steer
            lac_log.saturated = abs(steer) >= 0.9

    # Send a "steering required alert" if saturation count has reached the limit
    if lac_log.active and lac_log.saturated and not CS.steeringPressed:
        dpath_points = lat_plan.dPathPoints
        if len(dpath_points):
            # Check if we deviated from the path
            # TODO use desired vs actual curvature
            left_deviation = actuators.steer > 0 and dpath_points[0] < -0.20
            right_deviation = actuators.steer < 0 and dpath_points[0] > 0.20

            if left_deviation or right_deviation:
                self.events.add(EventName.steerSaturated)

    # Ensure no NaNs/Infs
    for p in ACTUATOR_FIELDS:
        attr = getattr(actuators, p)
        if not isinstance(attr, Number):
            continue

        if not math.isfinite(attr):
            cloudlog.error(f"actuators.{p} not finite {actuators.to_dict()}")
            # zero out a non-finite command rather than sending it to the car
            setattr(actuators, p, 0.0)

    return actuators, lac_log
def update_button_timers(self, buttonEvents):
    """Advance hold-timers for cruise buttons and (re)arm them on press/release events."""
    # a non-zero timer means the button is currently held down; count the hold
    for timer_key, ticks in self.button_timers.items():
        if ticks > 0:
            self.button_timers[timer_key] = ticks + 1

    # a press starts its timer at 1; a release zeroes it
    for event in buttonEvents:
        if event.type.raw in self.button_timers:
            self.button_timers[event.type.raw] = 1 if event.pressed else 0
def publish_logs(self, CS, start_time, actuators, lac_log):
    """Send actuators and hud commands to the car, send controlsstate and MPC logging.

    Builds the CarControl message (actuators + HUD), applies it through the
    car interface when not read-only, and publishes controlsState, carState,
    carEvents, carParams and carControl.
    """
    CC = car.CarControl.new_message()
    CC.enabled = self.enabled
    CC.active = self.active
    CC.actuators = actuators

    orientation_value = self.sm['liveLocationKalman'].orientationNED.value
    if len(orientation_value) > 2:
        CC.roll = orientation_value[0]
        CC.pitch = orientation_value[1]

    # cancel stock cruise when it disagrees with openpilot's engagement
    CC.cruiseControl.cancel = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
    if self.joystick_mode and self.sm.rcv_frame['testJoystick'] > 0 and self.sm['testJoystick'].buttons[0]:
        CC.cruiseControl.cancel = True

    hudControl = CC.hudControl
    hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
    hudControl.speedVisible = self.enabled
    hudControl.lanesVisible = self.enabled
    hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead

    hudControl.rightLaneVisible = True
    hudControl.leftLaneVisible = True

    recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0  # 5s blinker cooldown
    ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
                  and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED

    model_v2 = self.sm['modelV2']
    desire_prediction = model_v2.meta.desirePrediction
    if len(desire_prediction) and ldw_allowed:
        right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
        left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
        l_lane_change_prob = desire_prediction[Desire.laneChangeLeft - 1]
        r_lane_change_prob = desire_prediction[Desire.laneChangeRight - 1]

        lane_lines = model_v2.laneLines
        CAMERA_OFFSET = self.sm['lateralPlan'].cameraOffset
        l_lane_close = left_lane_visible and (lane_lines[1].y[0] > -(1.08 + CAMERA_OFFSET))
        r_lane_close = right_lane_visible and (lane_lines[2].y[0] < (1.08 - CAMERA_OFFSET))

        hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
        hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)

    if hudControl.rightLaneDepart or hudControl.leftLaneDepart:
        self.events.add(EventName.ldw)

    clear_event_types = set()
    if ET.WARNING not in self.current_alert_types:
        clear_event_types.add(ET.WARNING)
    if self.enabled:
        clear_event_types.add(ET.NO_ENTRY)

    alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric, self.soft_disable_timer])
    self.AM.add_many(self.sm.frame, alerts)
    current_alert = self.AM.process_alerts(self.sm.frame, clear_event_types)
    if current_alert:
        hudControl.visualAlert = current_alert.visual_alert

    # remember model-long toggle state for next cycle's change detection
    self.last_model_long = self.sm_smiskol['modelLongButton'].enabled

    if not self.read_only and self.initialized:
        # send car controls over can
        self.last_actuators, can_sends = self.CI.apply(CC)
        self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
        CC.actuatorsOutput = self.last_actuators

    force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
                  (self.state == State.softDisabling)

    # Curvature & Steering angle
    params = self.sm['liveParameters']
    steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetDeg)
    curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo, params.roll)

    # controlsState
    dat = messaging.new_message('controlsState')
    dat.valid = CS.canValid
    controlsState = dat.controlsState
    if current_alert:
        controlsState.alertText1 = current_alert.alert_text_1
        controlsState.alertText2 = current_alert.alert_text_2
        controlsState.alertSize = current_alert.alert_size
        controlsState.alertStatus = current_alert.alert_status
        controlsState.alertBlinkingRate = current_alert.alert_rate
        controlsState.alertType = current_alert.alert_type
        controlsState.alertSound = current_alert.audible_alert

    controlsState.canMonoTimes = list(CS.canMonoTimes)
    controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
    controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
    controlsState.enabled = self.enabled
    controlsState.active = self.active
    controlsState.curvature = curvature
    controlsState.state = self.state
    controlsState.engageable = not self.events.any(ET.NO_ENTRY)
    controlsState.longControlState = self.LoC.long_control_state
    controlsState.vPid = float(self.LoC.v_pid)
    controlsState.vCruise = float(self.v_cruise_kph)
    controlsState.upAccelCmd = float(self.LoC.pid.p)
    controlsState.uiAccelCmd = float(self.LoC.pid.i)
    controlsState.ufAccelCmd = float(self.LoC.pid.f)
    controlsState.cumLagMs = -self.rk.remaining * 1000.
    controlsState.startMonoTime = int(start_time * 1e9)
    controlsState.forceDecel = bool(force_decel)
    controlsState.canErrorCounter = self.can_rcv_error_counter

    # pick the debug-state union field matching the active lateral tuning
    lat_tuning = self.CP.lateralTuning.which()
    if self.joystick_mode:
        controlsState.lateralControlState.debugState = lac_log
    elif self.CP.steerControlType == car.CarParams.SteerControlType.angle:
        controlsState.lateralControlState.angleState = lac_log
    elif lat_tuning == 'pid':
        controlsState.lateralControlState.pidState = lac_log
    elif lat_tuning == 'lqr':
        controlsState.lateralControlState.lqrState = lac_log
    elif lat_tuning == 'indi':
        controlsState.lateralControlState.indiState = lac_log
    elif lat_tuning == 'model':
        controlsState.lateralControlState.modelState = lac_log

    self.pm.send('controlsState', dat)

    # carState
    car_events = self.events.to_msg()
    cs_send = messaging.new_message('carState')
    cs_send.valid = CS.canValid
    cs_send.carState = CS
    cs_send.carState.events = car_events
    self.pm.send('carState', cs_send)

    # carEvents - logged every second or on change
    if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
        ce_send = messaging.new_message('carEvents', len(self.events))
        ce_send.carEvents = car_events
        self.pm.send('carEvents', ce_send)
    self.events_prev = self.events.names.copy()

    # carParams - logged every 50 seconds (> 1 per segment)
    if (self.sm.frame % int(50. / DT_CTRL) == 0):
        cp_send = messaging.new_message('carParams')
        cp_send.carParams = self.CP
        self.pm.send('carParams', cp_send)

    # carControl
    cc_send = messaging.new_message('carControl')
    cc_send.valid = CS.canValid
    cc_send.carControl = CC
    self.pm.send('carControl', cc_send)

    # copy CarControl to pass to CarInterface on the next iteration
    self.CC = CC
def step(self):
    """Run one control cycle: sample -> events -> state machine -> control -> publish."""
    start_time = sec_since_boot()
    self.prof.checkpoint("Ratekeeper", ignore=True)

    # Sample data from sockets and get a carState
    CS = self.data_sample()
    self.prof.checkpoint("Sample")

    self.update_events(CS)

    if not self.read_only and self.initialized:
        # Update control state
        self.state_transition(CS)
        self.prof.checkpoint("State transition")

    # Compute actuators (runs PID loops and lateral MPC)
    actuators, lac_log = self.state_control(CS)
    self.prof.checkpoint("State Control")

    # Publish data
    self.publish_logs(CS, start_time, actuators, lac_log)
    self.prof.checkpoint("Sent")

    self.update_button_timers(CS.buttonEvents)
def controlsd_thread(self):
    """Main loop; pacing comes from the blocking CAN recv inside step()."""
    while True:
        self.step()
        self.rk.monitor_time()
        self.prof.display()
def main(sm=None, pm=None, logcan=None):
    """Construct the Controls object and run its loop forever."""
    Controls(sm, pm, logcan).controlsd_thread()


if __name__ == "__main__":
    main()
|
email.py | # coding=utf-8
from threading import Thread
from flask import render_template
from flask_mail import Mail, Message
from flask import current_app
from . import mail
def send_async_email(msg):
    """Deliver *msg*; intended to run in a worker thread, so it pushes its own app context."""
    app_ctx = current_app.app_context()
    with app_ctx:
        mail.send(msg)
def send_email(to, subject, template, **kwargs):
    """Send an email rendered from a template (synchronously).

    :param to: recipient address
    :param subject: subject line; NECTAR_MAIL_SUBJECT_PREFIX is prepended
    :param template: template name without extension; '<template>.txt' is rendered as the body
    :param kwargs: context variables passed to the template
    """
    msg = Message(current_app.config['NECTAR_MAIL_SUBJECT_PREFIX'] + subject,
                  sender=current_app.config['NECTAR_MAIL_SENDER'],
                  recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    # NOTE: delivery is synchronous and blocks the request; for async delivery
    # run send_async_email in a background Thread instead.
    mail.send(msg)
|
build_update_part.py | # Copyright 2010-2012 Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Created on 2. okt. 2010
@author: Yngve
'''
import sys,os,subprocess,time,os.path
sys.path.insert(1, os.path.join(".."))
import libinit
from optparse import OptionParser
import probedb.standalone
import probedb.probedata2.models as ProbeData
import probedb.resultdb2.models as Results
from django.db import transaction
from django.db import DatabaseError
from django.db import IntegrityError
from django.db.models import Count, F
import probedb.batch.models as Batch
import datetime
# Bail out immediately if the batch-update system has been switched off.
if not Batch.UpdateBatchStatus.IsActive():
    sys.exit()

# Shared flag: tells the worker/progress threads whether to keep running.
__threads_active = False
def __ProgressCounter(queue, text):
    """Report progress about the ongoing task.

    Drains the tick queue and prints a counter every 100 completed items,
    optionally prefixed with *text*. Exits when __threads_active is cleared.
    """
    import Queue  # Python 2 stdlib queue module
    global __threads_active
    i=0
    while __threads_active:
        try:
            result = queue.get(timeout=1)
        except Queue.Empty:
            # timed out; loop to re-check the active flag
            continue
        queue.task_done()
        i += 1
        if i%100 == 0:
            if text:
                print text, i
            else:
                print i
def checkactive():
    """Are we still active?

    Polled periodically so a long-running update can be aborted externally.
    """
    return Batch.UpdateBatchStatus.IsActive()
def __do_action(self, action_func, n, queue, report_queue, lock):
    """
    Get an item from the queue, have action_func process it,
    then report the action to the report_queue

    Runs until the module-level __threads_active flag is cleared. Failures
    in individual action functions are ignored (best-effort) so one bad
    item cannot kill the worker thread.
    """
    import Queue  # Python 2 stdlib queue module
    global __threads_active
    if not isinstance(action_func, list):
        action_func = [action_func]
    while __threads_active:
        try:
            item = queue.get(timeout=1)
        except Queue.Empty:
            # timed out; loop to re-check the active flag
            continue
        for f in action_func:
            try:
                f(self, item, lock)
            except Exception:
                # best-effort: ignore per-item failures, but no longer swallow
                # SystemExit/KeyboardInterrupt like the old bare "except" did
                pass
        report_queue.put(True)
        queue.task_done()
def update_action(self, queue_query, action_func, options, text=None):
    """
    Perform an action by queuing the items from queue_query, then
    performing action_func as a number of threads, which will perform
    actions on the queued items

    Aborts (sys.exit) whenever the external batch-active flag is cleared.
    """
    import threading
    import Queue  # Python 2 stdlib queue module
    global __threads_active
    if not checkactive():
        sys.exit()
    __threads_active = True
    # bounded queue so the DB iterator cannot run far ahead of the workers
    probe_results = Queue.Queue(100000)
    result_tick = Queue.Queue()
    lock = threading.Lock()
    num_probers = options.threads if options else 100
    threads = []
    for i in range(num_probers):
        new_thread = threading.Thread(target=__do_action, args=(self, action_func, i,probe_results,result_tick, lock))
        new_thread.daemon = True
        new_thread.start()
        threads.append(new_thread)
    # one extra thread prints progress ticks
    new_thread = threading.Thread(target=__ProgressCounter, args=(result_tick,text))
    new_thread.daemon = True
    new_thread.start()
    threads.append(new_thread)
    print "Items (%d) %s"% (options.run_id, (text if text else "")), queue_query.count()
    last_check = datetime.datetime.now()
    for result in queue_query.iterator():
        # re-check the abort flag at most every 10 seconds
        if (datetime.datetime.now() - last_check).seconds >= 10.0:
            last_check = datetime.datetime.now()
            if not checkactive():
                sys.exit()
        probe_results.put(result)
    while not probe_results.empty():
        if not checkactive():
            sys.exit()
        time.sleep(10)
    probe_results.join()  # wait for the last items to be completed
    result_tick.join()
    # signal all threads to stop, then wait for them
    __threads_active = False
    for t in threads:
        t.join()
    print "Completed"
# Command-line configuration for this update run.
options_config = OptionParser()
options_config.add_option("--testbase2", action="store_true", dest="use_testbase2")
options_config.add_option("--threads", action="store", type="int", dest="threads", default=20)
options_config.add_option("--id", action="store", type="int", dest="run_id", default=0)
options_config.add_option("--verbose", action="store_true", dest="verbose")

(options, args) = options_config.parse_args()

# NOTE(review): run/summary are only bound when --id is given, but the footer
# below uses `summary` unconditionally -- confirm --id is in fact mandatory.
if options.run_id:
    run = ProbeData.ProbeRun.objects.get(id = options.run_id)
    summary = Results.ResultSummaryList.objects.get(part_of_run=run)

######################################
# Add code and functions below
#
# @transaction.commit_on_success
# def user_foo(master, item, lock):
# bar()
#
# update_action(summary, summary.query(), user_foo, options)
#####################################
class update_master:
    """Put common data needed here"""
    def __init__(self):
        # intentionally empty; update scripts attach attributes as needed
        pass

# Shared instance handed to user-provided update functions.
common_update_master = update_master()

###### ALL CODE ABOVE THIS LINE. DO NOT REMOVE CODE BELOW ######

# Mark this batch item as finished so it is not picked up again.
summary.updatebatchitem.enabled = False
summary.updatebatchitem.save()
http_json_poster.py | import sys
import threading
import requests
class HttpJsonPoster:
    """POSTs JSON payloads to a URL from a background daemon thread.

    Only the most recent payload is kept: if new data arrives while a
    request is in flight, any not-yet-sent payload is overwritten and only
    the newest one is posted. Errors are reported on stderr, never raised.
    """

    # Headers sent with every request.
    __headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
    }

    def __init__(self, url, timeout=3):
        self.__cond = threading.Condition()
        self.__url = url
        self.timeout = timeout
        self.__data = None       # payload currently being (re)sent
        self.__new_data = None   # freshly submitted payload, not yet picked up
        self.__thread = threading.Thread(target=lambda: self.__loop(), daemon=True)
        self.__thread.start()

    @property
    def condition(self):
        """Condition guarding url/data; notify it to wake the sender thread."""
        return self.__cond

    @property
    def url(self):
        return self.__url

    @url.setter
    def url(self, url):
        with self.__cond:
            self.__url = url
            # fix: notifyAll() is a deprecated alias; use notify_all()
            self.__cond.notify_all()

    def request_post(self, data):
        """Schedule *data* (a str) to be POSTed; replaces any unsent payload."""
        data_utf8 = data.encode("utf-8")
        with self.__cond:
            self.__new_data = data_utf8
            self.__cond.notify_all()

    def __loop(self):
        while True:
            with self.__cond:
                if self.__new_data is None:
                    self.__cond.wait()
                # if there is new data -> overwrite the old data
                if self.__new_data is not None:
                    self.__data = self.__new_data
                    self.__new_data = None
            # send the most recent data (outside the lock so submitters don't block)
            if self.__data is not None:
                self.__post()

    def __post(self):
        # snapshot the shared state under the lock, then post without holding it
        with self.__cond:
            url = self.url
            timeout = self.timeout
            data = self.__data
        error_msg = "Error sending request:"
        try:
            with requests.post(
                url,
                data=data,
                headers=HttpJsonPoster.__headers,
                timeout=timeout,
            ) as r:
                if r.status_code != 200:
                    print(
                        error_msg, r.status_code, r.reason, url, data, file=sys.stderr
                    )
        except requests.RequestException as e:
            print(error_msg, e, url, data, file=sys.stderr)
|
tiger-calculator.py | #!/usr/bin/python3
import sys
import argparse
import formats
import multiprocessing
PARSER_DESC = "Simple TIGER rates calculator."
# Error shown when the requested input format is unknown or missing.
FORMAT_ERROR_MSG = "Please specify one of the available formats: " + formats.getFormatsAsString()
# Default worker count: one process per logical CPU.
N_PROCESSES = int(multiprocessing.cpu_count())
class ActivePool(object):
    """Shared bookkeeping for the worker processes.

    `active` tracks the names of currently-running workers, `result`
    collects per-character TIGER rates and `data` holds the character
    dict; all live in a multiprocessing.Manager so every process sees them.
    """

    def __init__(self):
        super(ActivePool, self).__init__()
        self.mgr = multiprocessing.Manager()
        self.active = self.mgr.list()
        self.result = self.mgr.dict()
        self.data = self.mgr.dict()
        self.lock = multiprocessing.Lock()

    def makeActive(self, name):
        """Record *name* as an active worker."""
        self.lock.acquire()
        try:
            self.active.append(name)
        finally:
            self.lock.release()

    def makeInactive(self, name):
        """Remove *name* from the active workers."""
        self.lock.acquire()
        try:
            self.active.remove(name)
        finally:
            self.lock.release()

    def __str__(self):
        self.lock.acquire()
        try:
            return str(self.active)
        finally:
            self.lock.release()
def split_list(alist, wanted_parts=1):
    '''Split a list equally into a specified number of parts'''
    total = len(alist)
    # integer cut points; earlier parts get the shorter slices
    bounds = [(part * total) // wanted_parts for part in range(wanted_parts + 1)]
    return [alist[bounds[part]:bounds[part + 1]] for part in range(wanted_parts)]
def calculate_tiger_rates(analyzed_keys,pool):
    '''Calculate partition agreements and TIGER rates for the characters specified by the array keys

    Runs in a worker process, gated by the module-level semaphore `s`.
    Relies on the globals `s`, `char_dict` and `set_parts` created in the
    __main__ block; writes rates into pool.result keyed by character index.
    '''
    name = multiprocessing.current_process().name
    with s:
        pool.makeActive(name)
        data = pool.data
        # Calculate partition agreement scores
        for x in analyzed_keys:
            agr_array = []
            for y in char_dict.keys():
                if x == y:
                    continue
                agreements = 0  # numerator of pa(i,j)
                total = 0  # denominator of pa(i,j). Equal to len(char_dict)-1.
                valid_taxa = set()
                for sp_x in set_parts[x]:
                    valid_taxa = valid_taxa|set_parts[x][sp_x]  # set of taxa without missing data for site x
                for sp_y in set_parts[y]:
                    match = False
                    for sp_x in set_parts[x]:
                        current_x = set_parts[x][sp_x]
                        current_y = set_parts[y][sp_y]
                        if current_y.intersection(valid_taxa).issubset(current_x):  # Compare taxa in y minus missing taxa in x to taxa in x
                            match = True
                            break  # Found a match; don't compare the remaining ones
                    total += 1
                    if match:
                        agreements += 1
                try:
                    agr_array.append(float(agreements)/total)
                except ZeroDivisionError:
                    print('Zero division error')
            # Calculate TIGER rates
            # NOTE(review): if agr_array is empty (only one character in the data),
            # the division below raises ZeroDivisionError -- confirm inputs always have >= 2 characters
            pool.result[x] = sum(agr_array) / len(agr_array)  # TIGER rate for the current character
if __name__ == '__main__':
    # ---- argument parsing ----
    parser = argparse.ArgumentParser(description=PARSER_DESC)
    parser.add_argument(dest="in_file",
                        help="Input file to analyze.",
                        metavar='IN_FILE',
                        default=None,
                        type=str)
    parser.add_argument("-f", "--format",
                        dest="format",
                        help="Specify input format. Available formats: " + formats.getFormatsAsString(),
                        default="",
                        type=str)
    parser.add_argument("-i","--ignored-characters",
                        dest="ignored_chars",
                        help="A comma-separated list of ignored characters. Missing characters should be included here.",
                        default="",
                        type=str)
    parser.add_argument("-x","--excluded-taxa",
                        dest="excluded_taxa",
                        help="A comma-separated list of taxa excluded from the calculations.",
                        default="",
                        type=str)
    parser.add_argument("-p","--processes",
                        dest="n_processes",
                        help="Number of processes (threads) to use. Default: %i (the detected number of logical CPUs)." % N_PROCESSES,
                        default=N_PROCESSES,
                        type=int)
    parser.add_argument("-n","--named-characters",
                        dest="named_characters",
                        help="Include a column identifying which TIGER rate belongs to which aligned character.",
                        default=False,
                        action='store_true')
    parser.add_argument("-s","--synonym-strategy",
                        dest="synonym_strategy",
                        help="Strategy for resolving synonyms. Available strategies: random, minimum, maximum.",
                        default="minimum",
                        type=str)

    # with no arguments at all, show usage instead of failing on IN_FILE
    if len(sys.argv) == 1:
        parser.print_help()
        exit(0)

    args = parser.parse_args()
    # NOTE(review): args.format defaults to "" so this None check can never fire;
    # unknown formats are actually caught by the getReader() check below.
    if args.format == None:
        print(FORMAT_ERROR_MSG, file=sys.stderr)
        exit(1)
    reader = formats.getReader(args.format)
    if args.format == "cldf":
        reader.synonym_strategy = args.synonym_strategy
    if reader == None:
        print(FORMAT_ERROR_MSG, file=sys.stderr)
        exit(1)

    # ---- load data: taxa, their character rows and optional character names ----
    content = reader.getContents(args.in_file)
    taxa = content[0]
    chars = content[1]
    try:
        names = content[2]
    except:
        # reader provided no character names; fall back to 1-based indices
        names = range(1, len(chars[0]) + 1)

    # ---- drop excluded taxa (and their character rows) ----
    excluded_taxa = args.excluded_taxa.split(",")
    if excluded_taxa != [""]:
        excluded_taxa = set(excluded_taxa)
        for taxon in excluded_taxa:
            if taxon not in taxa:
                print("Taxon %s not found in data." % taxon, file=sys.stderr)
                exit(1)
        while len(excluded_taxa) > 0:
            for i in range(len(taxa)):
                if taxa[i] in excluded_taxa:
                    current_taxon = taxa[i]
                    excluded_taxa.remove(current_taxon)
                    del taxa[i]
                    del chars[i]
                    break

    ignored_chars = args.ignored_chars.split(",")
    if len(taxa) == 0 or len(chars) == 0:
        print("Error: Empty characters or taxa in input file.", file=sys.stderr)
        exit(1)

    # set up a dict with [char_num][taxon] format. Each terminal node contains an aligned character
    char_dict = {}
    for i in range(len(chars[0])):
        char_dict[i] = {}
        for j in range(len(taxa)):
            char_dict[i][taxa[j]] = chars[j][i]

    # Step 1: collect set partitions
    # set up a dict with [site][char] format, where each terminal node contains a set of taxa
    set_parts = {}
    for i in range(len(chars[0])):
        set_parts[i] = {}
    for site in char_dict.keys():
        # Collect a set of what chars this site contains. Set up the necessary arrays in set_parts.
        chars_at_site = set()
        for taxon in char_dict[site].keys():
            chars_at_site.add(char_dict[site][taxon])
        for c in chars_at_site:
            set_parts[site][c] = set()
        # Determine set partitions: collect a list corresponding to each multistate character
        for taxon in char_dict[site].keys():
            content = char_dict[site][taxon]
            set_parts[site][content].add(taxon)
        # Remove ignored characters from set_parts
        for c in ignored_chars:
            removed = set_parts[site].pop(c,None)

    # Steps 2 and 3: calculate partition agreements and TIGER rates concurrently
    pool = ActivePool()
    pool.data.update(char_dict)
    # semaphore caps how many worker processes compute at once
    s = multiprocessing.Semaphore(args.n_processes)
    jobs = [ multiprocessing.Process(target=calculate_tiger_rates, name=str(k), args=(k, pool))
             for k in split_list(list(char_dict.keys()),args.n_processes)]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()

    # print one rate per character, optionally prefixed with the character name
    for k in sorted(pool.result.keys()):
        line = ""
        if args.named_characters:
            line += str(names[k]) + "\t"
        line += str(pool.result[k])
        print(line)
|
test.py | import gzip
import json
import logging
import os
import io
import random
import threading
import time
import helpers.client
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance, get_instances_dir
MINIO_INTERNAL_PORT = 9001
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/dummy/configs/config.d/defaultS3.xml'.format(get_instances_dir()))
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(started_cluster):
    """Open the default Minio bucket for anonymous access and (re)create a
    companion bucket that still requires authentication."""
    # One policy statement per (action, resource) pair, anonymous principal.
    statements = [
        {
            "Sid": "",
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": action,
            "Resource": resource,
        }
        for action, resource in (
            ("s3:GetBucketLocation", "arn:aws:s3:::root"),
            ("s3:ListBucket", "arn:aws:s3:::root"),
            ("s3:GetObject", "arn:aws:s3:::root/*"),
            ("s3:PutObject", "arn:aws:s3:::root/*"),
        )
    ]
    bucket_read_write_policy = {"Version": "2012-10-17", "Statement": statements}

    minio_client = started_cluster.minio_client
    minio_client.set_bucket_policy(started_cluster.minio_bucket,
                                   json.dumps(bucket_read_write_policy))

    # Auth-protected bucket: recreate it from scratch for a clean slate.
    started_cluster.minio_restricted_bucket = "{}-with-auth".format(started_cluster.minio_bucket)
    if minio_client.bucket_exists(started_cluster.minio_restricted_bucket):
        minio_client.remove_bucket(started_cluster.minio_restricted_bucket)
    minio_client.make_bucket(started_cluster.minio_restricted_bucket)
def put_s3_file_content(started_cluster, bucket, filename, data):
    """Upload raw bytes *data* to Minio as ``bucket/filename``."""
    stream = io.BytesIO(data)
    started_cluster.minio_client.put_object(bucket, filename, stream, len(data))
# Returns content of given S3 file as string.
def get_s3_file_content(started_cluster, bucket, filename, decode=True):
    # type: (ClickHouseCluster, str, str, bool) -> str
    """Download ``bucket/filename`` from Minio.

    Returns a str when *decode* is True (the default), raw bytes otherwise.
    """
    data = started_cluster.minio_client.get_object(bucket, filename)
    # join() instead of repeated `+=` avoids quadratic bytes concatenation.
    data_str = b"".join(data.stream())
    if decode:
        return data_str.decode()
    return data_str
@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped cluster with Minio and the S3 mock servers running."""
    cluster = None
    try:
        cluster = ClickHouseCluster(__file__)
        cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"],
                             with_minio=True)
        cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml", "configs/named_collections.xml"])
        cluster.add_instance("s3_max_redirects", with_minio=True, main_configs=["configs/defaultS3.xml"],
                             user_configs=["configs/s3_max_redirects.xml"])
        logging.info("Starting cluster...")
        cluster.start()
        logging.info("Cluster started")

        prepare_s3_bucket(cluster)
        logging.info("S3 bucket created")
        run_s3_mocks(cluster)

        yield cluster
    finally:
        # FIX: if ClickHouseCluster() itself raised, `cluster` was unbound and
        # the finally clause masked the original error with a NameError.
        if cluster is not None:
            cluster.shutdown()
def run_query(instance, query, stdin=None, settings=None):
    # type: (ClickHouseInstance, str, object, dict) -> str
    """Execute *query* on *instance*, logging start and completion."""
    logging.info("Running query '{}'...".format(query))
    output = instance.query(query, stdin=stdin, settings=settings)
    logging.info("Query finished")
    return output
# Test simple put. Also checks that wrong credentials produce an error with every compression method.
@pytest.mark.parametrize("maybe_auth,positive,compression", [
    pytest.param("", True, 'auto', id="positive"),
    pytest.param("'minio','minio123',", True, 'auto', id="auth_positive"),
    pytest.param("'wrongid','wrongkey',", False, 'auto', id="auto"),
    pytest.param("'wrongid','wrongkey',", False, 'gzip', id="gzip"),
    pytest.param("'wrongid','wrongkey',", False, 'deflate', id="deflate"),
    pytest.param("'wrongid','wrongkey',", False, 'brotli', id="brotli"),
    pytest.param("'wrongid','wrongkey',", False, 'xz', id="xz"),
    pytest.param("'wrongid','wrongkey',", False, 'zstd', id="zstd")
])
def test_put(started_cluster, maybe_auth, positive, compression):
    # type: (ClickHouseCluster) -> None
    """INSERT through the s3() table function; positive cases must leave the
    CSV in the bucket, wrong-credential cases must fail for every codec."""
    bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
    values_csv = "1,2,3\n3,2,1\n78,43,45\n"
    filename = "test.csv"
    # FIX: interpolate the object name instead of the literal "(unknown)", so
    # the write goes to the same key the final read checks.
    put_query = f"""insert into table function s3('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{filename}',
                    {maybe_auth}'CSV', '{table_format}', {compression}) values {values}"""

    try:
        run_query(instance, put_query)
    except helpers.client.QueryRuntimeException:
        if positive:
            raise
    else:
        assert positive
        assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
def test_partition_by(started_cluster):
    """PARTITION BY on INSERT must write one object per partition value, for
    both the s3() table function and the S3 table engine."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    partition_by = "column3"
    values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
    filename = "test_{_partition_id}.csv"
    # FIX: interpolate the {_partition_id} filename template instead of the
    # literal "(unknown)"; otherwise the test_*.csv reads below cannot match.
    put_query = f"""INSERT INTO TABLE FUNCTION
        s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
        PARTITION BY {partition_by} VALUES {values}"""

    run_query(instance, put_query)

    assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test_3.csv")
    assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test_1.csv")
    assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test_45.csv")

    filename = "test2_{_partition_id}.csv"
    # Same check through a partitioned S3 table engine (FIX: use {filename}).
    instance.query(f"create table p ({table_format}) engine=S3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV') partition by column3")
    instance.query(f"insert into p values {values}")
    assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test2_3.csv")
    assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test2_1.csv")
    assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test2_45.csv")
def test_partition_by_string_column(started_cluster):
    """PARTITION BY a String column, including '/' and non-ASCII values that
    become part of the object key."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "col_num UInt32, col_str String"
    partition_by = "col_str"
    values = "(1, 'foo/bar'), (3, 'йцук'), (78, '你好')"
    filename = "test_{_partition_id}.csv"
    # FIX: interpolate the filename template instead of the literal "(unknown)".
    put_query = f"""INSERT INTO TABLE FUNCTION
        s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
        PARTITION BY {partition_by} VALUES {values}"""

    run_query(instance, put_query)

    assert '1,"foo/bar"\n' == get_s3_file_content(started_cluster, bucket, "test_foo/bar.csv")
    assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, "test_йцук.csv")
    assert '78,"你好"\n' == get_s3_file_content(started_cluster, bucket, "test_你好.csv")
def test_partition_by_const_column(started_cluster):
    """PARTITION BY a constant: every row must land in the single object
    named after that constant."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
    partition_by = "'88'"
    values_csv = "1,2,3\n3,2,1\n78,43,45\n"
    filename = "test_{_partition_id}.csv"
    # FIX: interpolate the filename template instead of the literal "(unknown)".
    put_query = f"""INSERT INTO TABLE FUNCTION
        s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
        PARTITION BY {partition_by} VALUES {values}"""

    run_query(instance, put_query)

    assert values_csv == get_s3_file_content(started_cluster, bucket, "test_88.csv")
@pytest.mark.parametrize("special", [
    "space",
    "plus"
])
def test_get_file_with_special(started_cluster, special):
    """An object whose key contains a space or '+' must be readable both by
    its URL-escaped exact name and through glob patterns."""
    symbol = {"space": " ", "plus": "+"}[special]
    urlsafe_symbol = {"space": "%20", "plus": "%2B"}[special]
    auth = "'minio','minio123',"
    bucket = started_cluster.minio_restricted_bucket
    instance = started_cluster.instances["dummy"]
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = [[12549, 2463, 19893], [64021, 38652, 66703], [81611, 39650, 83516], [11079, 59507, 61546], [51764, 69952, 6876], [41165, 90293, 29095], [40167, 78432, 48309], [81629, 81327, 11855], [55852, 21643, 98507], [6738, 54643, 41155]]
    values_csv = ('\n'.join((','.join(map(str, row)) for row in values)) + '\n').encode()

    # Store under the raw (unescaped) key.
    put_s3_file_content(started_cluster, bucket, f"get_file_with_{special}_{symbol}two.csv", values_csv)

    endpoint = f"http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}"
    # Exact escaped name, then two glob variants — all must return the data.
    for object_path in (
        f"get_file_with_{special}_{urlsafe_symbol}two.csv",
        f"get_file_with_{special}*.csv",
        f"get_file_with_{special}_{urlsafe_symbol}*.csv",
    ):
        get_query = f"SELECT * FROM s3('{endpoint}/{object_path}', {auth}'CSV', '{table_format}') FORMAT TSV"
        rows = [list(map(int, line.split())) for line in run_query(instance, get_query).splitlines()]
        assert rows == values
@pytest.mark.parametrize("special", [
    "space",
    "plus",
    "plus2"
])
def test_get_path_with_special(started_cluster, special):
    """URL-escaped characters in the request path must be decoded exactly
    once by the server (checked against the echo mock on port 8082)."""
    echoed_symbol = {"space": "%20", "plus": "%2B", "plus2": "%2B"}[special]
    request_symbol = {"space": "%20", "plus": "+", "plus2": "%2B"}[special]
    auth = "'minio','minio123',"
    table_format = "column1 String"
    instance = started_cluster.instances["dummy"]
    get_query = f"SELECT * FROM s3('http://resolver:8082/get-my-path/{request_symbol}.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
    assert run_query(instance, get_query).splitlines() == [f"/{echoed_symbol}.csv"]
# Test put no data to S3.
@pytest.mark.parametrize("auth", [
    pytest.param("'minio','minio123',", id="minio")
])
def test_empty_put(started_cluster, auth):
    # type: (ClickHouseCluster, str) -> None
    """Inserting zero rows must not create the object: a subsequent read of
    the key has to fail with "key does not exist"."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"

    drop_empty_table_query = "DROP TABLE IF EXISTS empty_table"
    create_empty_table_query = """
        CREATE TABLE empty_table (
        {}
        ) ENGINE = Null()
    """.format(table_format)

    run_query(instance, drop_empty_table_query)
    run_query(instance, create_empty_table_query)

    filename = "empty_put_test.csv"
    put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format(
        started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format)

    run_query(instance, put_query)

    try:
        run_query(instance, "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format(
            started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format))
        assert False, "Query should be failed."
    except helpers.client.QueryRuntimeException as e:
        # FIX: str.find() returns -1 when absent, so "!= 0" passed for almost
        # any message and never verified the error. Check containment instead.
        assert "The specified key does not exist" in str(e)
# Test put values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
    pytest.param("", True, id="positive"),
    pytest.param("'minio','minio123',", True, id="auth_positive"),
    pytest.param("'wrongid','wrongkey',", False, id="negative"),
])
def test_put_csv(started_cluster, maybe_auth, positive):
    # type: (ClickHouseCluster, bool, str) -> None
    """INSERT ... FORMAT CSV fed through stdin; wrong credentials must fail,
    valid ones must leave exactly the piped CSV in the bucket."""
    bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    filename = "test.csv"
    put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
        started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, maybe_auth, table_format)
    csv_data = "8,9,16\n11,18,13\n22,14,2\n"

    try:
        run_query(instance, put_query, stdin=csv_data)
    except helpers.client.QueryRuntimeException:
        # Expected only for the negative (wrong credentials) case.
        if positive:
            raise
    else:
        assert positive
        assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(started_cluster):
    # type: (ClickHouseCluster) -> None
    """Write and read through the redirecting proxy; the data and a computed
    column must come back intact."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
    values_csv = "1,1,1\n1,1,1\n11,11,11\n"
    filename = "test.csv"

    query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
    run_query(instance, query)
    assert values_csv == get_s3_file_content(started_cluster, bucket, filename)

    query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format)
    stdout = run_query(instance, query)
    rows = [line.split() for line in stdout.splitlines()]
    assert rows == [
        ["1", "1", "1", "1"],
        ["1", "1", "1", "1"],
        ["11", "11", "11", "1331"],
    ]
# Test put with restricted S3 server redirect.
def test_put_with_zero_redirect(started_cluster):
    # type: (ClickHouseCluster) -> None
    """With s3_max_redirects=0 a direct INSERT works, but an INSERT that
    goes through the redirecting proxy must be rejected."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["s3_max_redirects"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
    filename = "test.csv"

    # Should work without redirect
    query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
        started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, table_format, values)
    run_query(instance, query)

    # Should not work with redirect
    query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
    with pytest.raises(Exception) as excinfo:
        run_query(instance, query)
    assert str(excinfo.value).find("Too many redirects while trying to access") != -1
def test_put_get_with_globs(started_cluster):
    # type: (ClickHouseCluster) -> None
    """Write a 10x10 grid of one-row CSV objects under a random prefix, read
    them all back through a glob pattern, and check the aggregates plus the
    _file/_path virtual columns. Cleans up its objects afterwards."""
    unique_prefix = random.randint(1, 10000)
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    max_path = ""
    for i in range(10):
        for j in range(10):
            path = "{}/{}_{}/{}.csv".format(unique_prefix, i, random.choice(['a', 'b', 'c', 'd']), j)
            # Track the lexicographically largest path for the max(_path) check.
            max_path = max(path, max_path)
            values = "({},{},{})".format(i, j, i + j)
            query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
                started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
            run_query(instance, query)

    # %3f is a URL-escaped '?' — a single-character glob wildcard.
    query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, unique_prefix, table_format)
    assert run_query(instance, query).splitlines() == [
        "450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)]

    # Remove the created objects so later tests see a pristine bucket.
    minio = started_cluster.minio_client
    for obj in list(minio.list_objects(started_cluster.minio_bucket, prefix='{}/'.format(unique_prefix), recursive=True)):
        minio.remove_object(started_cluster.minio_bucket, obj.object_name)
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
    pytest.param("", True, id="positive"),
    pytest.param("'wrongid','wrongkey'", False, id="negative"),
    # ("'minio','minio123',",True), Redirect with credentials not working with nginx.
])
def test_multipart_put(started_cluster, maybe_auth, positive):
    # type: (ClickHouseCluster) -> None
    """INSERT data large enough to force a multipart upload through the
    redirecting proxy, and verify via the proxy's access log that at least
    two parts were PUT."""
    bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"

    # Minimum size of part is 5 Mb for Minio.
    # See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
    min_part_size_bytes = 5 * 1024 * 1024
    csv_size_bytes = int(min_part_size_bytes * 1.5)  # To have 2 parts.

    one_line_length = 6  # 3 digits, 2 commas, 1 line separator.

    # Generate data having size more than one part
    int_data = [[1, 2, 3] for i in range(csv_size_bytes // one_line_length)]
    csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])

    assert len(csv_data) > min_part_size_bytes

    filename = "test_multipart.csv"
    put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format)

    try:
        # Cap the part size and disable single-part uploads to force multipart.
        run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes,
                                                                 's3_max_single_part_upload_size': 0})
    except helpers.client.QueryRuntimeException:
        if positive:
            raise
    else:
        assert positive

        # Use proxy access logs to count number of parts uploaded to Minio.
        proxy_logs = started_cluster.get_container_logs("proxy1")  # type: str
        assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2

        assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
def test_remote_host_filter(started_cluster):
    """Both SELECT and INSERT against a host outside remote_host_filter must
    be rejected with a configuration error."""
    instance = started_cluster.instances["restricted_dummy"]
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"

    query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
        "invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, table_format)
    assert "not allowed in configuration file" in instance.query_and_get_error(query)

    other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
    query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
        "invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, table_format, other_values)
    assert "not allowed in configuration file" in instance.query_and_get_error(query)
@pytest.mark.parametrize("s3_storage_args", [
    pytest.param("''", id="1_argument"),
    pytest.param("'','','','','',''", id="6_arguments"),
])
def test_wrong_s3_syntax(started_cluster, s3_storage_args):
    """Creating an S3 table with a wrong number of engine arguments must
    fail with NUMBER_OF_ARGUMENTS_DOESNT_MATCH."""
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    expected_err_msg = "Code: 42"  # NUMBER_OF_ARGUMENTS_DOESNT_MATCH

    query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3({})".format(s3_storage_args)
    error = instance.query_and_get_error(query)
    assert expected_err_msg in error
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(started_cluster):
    """Write 1001 single-row objects from ~30 concurrent threads, then read
    them all back with a night_*/ glob and check the totals."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = "(1, 1, 1)"
    nights_per_job = 1001 // 30

    # Hoisted out of the spawn loop: one worker writes a contiguous range.
    def add_tales(start, end):
        for night in range(start, end):
            path = "night_{}/tale.csv".format(night)
            query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
                started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
            run_query(instance, query)

    jobs = []
    for start in range(0, 1001, nights_per_job):
        job = threading.Thread(target=add_tales, args=(start, min(start + nights_per_job, 1001)))
        job.start()
        jobs.append(job)
    for job in jobs:
        job.join()

    query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, table_format)
    assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
def run_s3_mocks(started_cluster):
    """Copy the mock S3 server scripts into their containers, launch them
    detached, then poll each one (up to ~100 s) until it answers 'OK'."""
    logging.info("Starting s3 mocks")
    # (script filename, container name, listening port)
    mocks = (
        ("mock_s3.py", "resolver", "8080"),
        ("unstable_server.py", "resolver", "8081"),
        ("echo.py", "resolver", "8082"),
    )
    for mock_filename, container, port in mocks:
        container_id = started_cluster.get_container_id(container)
        current_dir = os.path.dirname(__file__)
        started_cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mocks", mock_filename), mock_filename)
        started_cluster.exec_in_container(container_id, ["python", mock_filename, port], detach=True)

    # Wait for S3 mocks to start
    for mock_filename, container, port in mocks:
        num_attempts = 100
        for attempt in range(num_attempts):
            ping_response = started_cluster.exec_in_container(started_cluster.get_container_id(container),
                                                              ["curl", "-s", f"http://localhost:{port}/"], nothrow=True)
            if ping_response != 'OK':
                if attempt == num_attempts - 1:
                    # Final attempt: fail loudly with whatever we received.
                    assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
                else:
                    time.sleep(1)
            else:
                logging.debug(f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}")
                break

    logging.info("S3 mocks started")
def replace_config(old, new):
    """Replace every occurrence of *old* with *new* in the default S3 config
    file (CONFIG_PATH), rewriting it in place."""
    # FIX: `with` guarantees the handles are closed even if I/O raises;
    # the original left both file objects open on error.
    with open(CONFIG_PATH, 'r') as config:
        config_lines = config.readlines()
    config_lines = [line.replace(old, new) for line in config_lines]
    with open(CONFIG_PATH, 'w') as config:
        config.writelines(config_lines)
def test_custom_auth_headers(started_cluster):
    """Reads through the 8080 mock rely on the Authorization header injected
    from the server config; corrupting the token in the config (followed by
    SYSTEM RELOAD CONFIG) must break queries, restoring it must fix them."""
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    filename = "test.csv"
    get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
        bucket=started_cluster.minio_restricted_bucket,
        file=filename,
        table_format=table_format)

    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    result = run_query(instance, get_query)
    assert result == '1\t2\t3\n'

    instance.query("DROP TABLE IF EXISTS test")
    instance.query(
        "CREATE TABLE test ({table_format}) ENGINE = S3('http://resolver:8080/{bucket}/{file}', 'CSV')".format(
            bucket=started_cluster.minio_restricted_bucket,
            file=filename,
            table_format=table_format
        ))
    assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'

    # Invalidate the token in the config: the SELECT must now fail...
    replace_config("<header>Authorization: Bearer TOKEN", "<header>Authorization: Bearer INVALID_TOKEN")
    instance.query("SYSTEM RELOAD CONFIG")
    ret, err = instance.query_and_get_answer_with_error("SELECT * FROM test")
    assert ret == "" and err != ""
    # ...and restoring it must make the table readable again.
    replace_config("<header>Authorization: Bearer INVALID_TOKEN", "<header>Authorization: Bearer TOKEN")
    instance.query("SYSTEM RELOAD CONFIG")
    assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
    instance.query("DROP TABLE test")
def test_custom_auth_headers_exclusion(started_cluster):
    """For paths matching the config's exclusion rule the custom auth header
    must NOT be sent, so the request is rejected with 403/Forbidden."""
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    filename = "test.csv"
    # FIX: interpolate the object name instead of the literal "(unknown)";
    # `filename` was assigned but never used.
    get_query = f"SELECT * FROM s3('http://resolver:8080/{started_cluster.minio_restricted_bucket}/restricteddirectory/{filename}', 'CSV', '{table_format}')"

    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    with pytest.raises(helpers.client.QueryRuntimeException) as ei:
        result = run_query(instance, get_query)
        print(result)

    assert ei.value.returncode == 243
    assert 'Forbidden Error' in ei.value.stderr
def test_infinite_redirect(started_cluster):
    """Reading from the mock bucket that always redirects must abort with a
    'Too many redirects' error rather than loop forever."""
    bucket = "redirected"
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    filename = "test.csv"
    # FIX: interpolate the object name instead of the literal "(unknown)";
    # `filename` was assigned but never used.
    get_query = f"select * from s3('http://resolver:{started_cluster.minio_redirect_port}/{bucket}/{filename}', 'CSV', '{table_format}')"
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    exception_raised = False
    try:
        run_query(instance, get_query)
    except Exception as e:
        assert str(e).find("Too many redirects while trying to access") != -1
        exception_raised = True
    finally:
        assert exception_raised
@pytest.mark.parametrize("extension,method", [
    pytest.param("bin", "gzip", id="bin"),
    pytest.param("gz", "auto", id="gz"),
])
def test_storage_s3_get_gzip(started_cluster, extension, method):
    """Read a gzip-compressed CSV object through the S3 engine, both with an
    explicit 'gzip' method and with 'auto' detection by .gz extension."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]
    filename = f"test_get_gzip.{extension}"
    name = f"test_get_gzip_{extension}"
    data = [
        "Sophia Intrieri,55",
        "Jack Taylor,71",
        "Christopher Silva,66",
        "Clifton Purser,35",
        "Richard Aceuedo,43",
        "Lisa Hensley,31",
        "Alice Wehrley,1",
        "Mary Farmer,47",
        "Samara Ramirez,19",
        "Shirley Lloyd,51",
        "Santos Cowger,0",
        "Richard Mundt,88",
        "Jerry Gonzalez,15",
        "Angela James,10",
        "Norman Ortega,33",
        ""
    ]
    run_query(instance, f"DROP TABLE IF EXISTS {name}")

    # Compress the CSV in memory and upload it.
    buf = io.BytesIO()
    compressed = gzip.GzipFile(fileobj=buf, mode="wb")
    compressed.write(("\n".join(data)).encode())
    compressed.close()
    put_s3_file_content(started_cluster, bucket, filename, buf.getvalue())

    # FIX: interpolate the object name instead of the literal "(unknown)" so
    # the table points at the object uploaded above.
    run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
                                'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
                                'CSV',
                                '{method}')""")

    # FIX: the comparison result was discarded — assert it.
    assert run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["565"]
    run_query(instance, f"DROP TABLE {name}")
def test_storage_s3_get_unstable(started_cluster):
    """Reading through the flaky mock server (port 8081) must still produce
    the complete, correct aggregate despite injected connection failures."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]
    table_format = "column1 Int64, column2 Int64, column3 Int64, column4 Int64"
    get_query = f"SELECT count(), sum(column3), sum(column4) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/test.csv', 'CSV', '{table_format}') FORMAT CSV"
    assert run_query(instance, get_query).splitlines() == ["500001,500000,0"]
def test_storage_s3_put_uncompressed(started_cluster):
    """INSERT into an uncompressed S3-backed table and verify the stored CSV
    both via SQL and by reading the raw object."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]
    filename = "test_put_uncompressed.bin"
    name = "test_put_uncompressed"
    data = [
        "'Gloria Thompson',99",
        "'Matthew Tang',98",
        "'Patsy Anderson',23",
        "'Nancy Badillo',93",
        "'Roy Hunt',5",
        "'Adam Kirk',51",
        "'Joshua Douds',28",
        "'Jolene Ryan',0",
        "'Roxanne Padilla',50",
        "'Howard Roberts',41",
        "'Ricardo Broughton',13",
        "'Roland Speer',83",
        "'Cathy Cohan',58",
        "'Kathie Dawson',100",
        "'Gregg Mcquistion',11",
    ]
    run_query(instance, "CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
        name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename))

    run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))

    # FIX: the comparison result was discarded — assert it.
    assert run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"]

    uncompressed_content = get_s3_file_content(started_cluster, bucket, filename)
    assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 753
@pytest.mark.parametrize("extension,method", [
    pytest.param("bin", "gzip", id="bin"),
    pytest.param("gz", "auto", id="gz")
])
def test_storage_s3_put_gzip(started_cluster, extension, method):
    """INSERT into a gzip-compressed S3-backed table; verify via SQL and by
    gunzipping the raw object."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]
    filename = f"test_put_gzip.{extension}"
    name = f"test_put_gzip_{extension}"
    data = [
        "'Joseph Tomlinson',5",
        "'Earnest Essary',44",
        "'Matha Pannell',24",
        "'Michael Shavers',46",
        "'Elias Groce',38",
        "'Pamela Bramlet',50",
        "'Lewis Harrell',49",
        "'Tamara Fyall',58",
        "'George Dixon',38",
        "'Alice Walls',49",
        "'Paula Mais',24",
        "'Myrtle Pelt',93",
        "'Sylvia Naffziger',18",
        "'Amanda Cave',83",
        "'Yolanda Joseph',89"
    ]
    # FIX: interpolate the object name instead of the literal "(unknown)" so
    # the raw-object check below reads what the INSERT wrote.
    run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
                                'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
                                'CSV',
                                '{method}')""")

    run_query(instance, f"INSERT INTO {name} VALUES ({'),('.join(data)})")

    # FIX: the comparison result was discarded — assert it.
    assert run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["708"]

    buf = io.BytesIO(get_s3_file_content(started_cluster, bucket, filename, decode=False))
    f = gzip.GzipFile(fileobj=buf, mode="rb")
    uncompressed_content = f.read().decode()
    assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 708
def test_truncate_table(started_cluster):
    """TRUNCATE on an S3-backed table must remove the backing objects and
    leave the table empty."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    name = "truncate"

    instance.query("CREATE TABLE {} (id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
        name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, name))

    instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))
    result = instance.query("SELECT * FROM {}".format(name))
    assert result == instance.query("SELECT number FROM numbers(10)")
    instance.query("TRUNCATE TABLE {}".format(name))

    # Poll up to ~30s for the objects to disappear from Minio.
    minio = started_cluster.minio_client
    timeout = 30
    while timeout > 0:
        if len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0:
            # FIX: `return` here skipped the final SELECT-emptiness assertion
            # on the success path; `break` keeps both checks running.
            break
        timeout -= 1
        time.sleep(1)
    assert(len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0)
    assert instance.query("SELECT * FROM {}".format(name)) == ""
def test_predefined_connection_configuration(started_cluster):
    """The s3_conf1 named collection must supply endpoint/credentials to both
    the S3 engine and the s3() table function."""
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    name = "test_table"

    instance.query("drop table if exists {}".format(name))
    instance.query("CREATE TABLE {} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')".format(name))
    instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))

    expected = instance.query("SELECT number FROM numbers(10)")
    assert instance.query("SELECT * FROM {}".format(name)) == expected
    assert instance.query("SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')") == expected
|
test_base.py | import datetime
import json
import os
import pytest
import signal
import sys
import tempfile
import time
from multiprocessing import Pool, Process
from freezefrog import FreezeTime
from tasktiger import (
JobTimeoutException,
StopRetry,
Task,
TaskNotFound,
Worker,
exponential,
fixed,
linear,
)
from tasktiger._internal import serialize_func_name
from .config import DELAY
from .tasks import (
batch_task,
decorated_task,
decorated_task_simple_func,
exception_task,
file_args_task,
locked_task,
long_task_killed,
long_task_ok,
MyErrorRunnerClass,
MyRunnerClass,
non_batch_task,
retry_task,
retry_task_2,
simple_task,
sleep_task,
StaticTask,
task_on_other_queue,
unique_task,
unique_exception_task,
unique_key_task,
verify_current_task,
verify_current_tasks,
verify_tasktiger_instance,
)
from .utils import Patch, external_worker, get_tiger
class BaseTestCase:
    """Shared fixture and assertion helpers for the TaskTiger test cases."""

    def setup_method(self, method):
        # Fresh TaskTiger instance and a clean Redis DB before every test.
        self.tiger = get_tiger()
        self.conn = self.tiger.connection
        self.conn.flushdb()

    def teardown_method(self, method):
        self.conn.flushdb()
        self.conn.close()
        # Force disconnect so we don't get Too many open files
        self.conn.connection_pool.disconnect()

    def _ensure_queues(
        self, queued=None, active=None, error=None, scheduled=None
    ):
        """Assert the exact queue layout in Redis.

        Each argument maps queue name -> expected task count; omitted or
        zero-count queues must not exist. Returns, per state, a dict of
        queue name -> list of decoded task payloads.
        """
        # Queue names with a non-zero expected count, per state.
        expected_queues = {
            'queued': {name for name, n in (queued or {}).items() if n},
            'active': {name for name, n in (active or {}).items() if n},
            'error': {name for name, n in (error or {}).items() if n},
            'scheduled': {name for name, n in (scheduled or {}).items() if n},
        }
        actual_queues = {
            i: self.conn.smembers('t:{}'.format(i))
            for i in ('queued', 'active', 'error', 'scheduled')
        }
        assert expected_queues == actual_queues

        def _ensure_queue(typ, data):
            # Check per-queue task counts and fetch the stored task payloads.
            data = data or {}
            ret = {}
            for name, n in data.items():
                task_ids = self.conn.zrange('t:%s:%s' % (typ, name), 0, -1)
                assert len(task_ids) == n
                ret[name] = [
                    json.loads(self.conn.get('t:task:%s' % task_id))
                    for task_id in task_ids
                ]
                # Stored payload ids must line up with the queue entries.
                assert [task['id'] for task in ret[name]] == task_ids
            return ret

        return {
            'queued': _ensure_queue('queued', queued),
            'active': _ensure_queue('active', active),
            'error': _ensure_queue('error', error),
            'scheduled': _ensure_queue('scheduled', scheduled),
        }
class TestCase(BaseTestCase):
"""
TaskTiger main test cases.
Run a single test like this:
pytest tests/test_base.py::TestCase::test_unique_task
"""
def test_task_decorated_simple_func(self):
    """A decorated plain function queues with its args/kwargs preserved."""
    decorated_task_simple_func.delay(1, 2, a=3, b=4)
    queues = self._ensure_queues(queued={'default': 1})
    task = queues['queued']['default'][0]
    assert task['func'] == 'tests.tasks:decorated_task_simple_func'
    assert task['args'] == [1, 2]
    assert task['kwargs'] == {'a': 3, 'b': 4}
def test_simple_task(self):
    """delay() queues the task; one worker pass executes it and removes
    both the queue entry and the stored task payload."""
    self.tiger.delay(simple_task)
    queues = self._ensure_queues(queued={'default': 1})
    task = queues['queued']['default'][0]
    assert task['func'] == 'tests.tasks:simple_task'

    Worker(self.tiger).run(once=True)
    self._ensure_queues(queued={'default': 0})
    # Successful non-unique tasks have their payload deleted from Redis.
    assert not self.conn.exists('t:task:%s' % task['id'])
@pytest.mark.skipif(
sys.version_info < (3, 3), reason='__qualname__ unavailable'
)
def test_staticmethod_task(self):
self.tiger.delay(StaticTask.task)
queues = self._ensure_queues(queued={'default': 1})
task = queues['queued']['default'][0]
assert task['func'] == 'tests.tasks:StaticTask.task'
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'default': 0})
assert not self.conn.exists('t:task:%s' % task['id'])
def test_task_delay(self):
decorated_task.delay(1, 2, a=3, b=4)
queues = self._ensure_queues(queued={'default': 1})
task = queues['queued']['default'][0]
assert task['func'] == 'tests.tasks:decorated_task'
assert task['args'] == [1, 2]
assert task['kwargs'] == {'a': 3, 'b': 4}
def test_file_args_task(self):
# Use a temp file to communicate since we're forking.
tmpfile = tempfile.NamedTemporaryFile()
worker = Worker(self.tiger)
self.tiger.delay(file_args_task, args=(tmpfile.name,))
queues = self._ensure_queues(queued={'default': 1})
task = queues['queued']['default'][0]
assert task['func'] == 'tests.tasks:file_args_task'
worker.run(once=True)
self._ensure_queues(queued={'default': 0})
json_data = tmpfile.read().decode('utf8')
assert json.loads(json_data) == {'args': [], 'kwargs': {}}
tmpfile.seek(0)
self.tiger.delay(
file_args_task,
args=(tmpfile.name, 123, 'args'),
kwargs={'more': [1, 2, 3]},
)
self._ensure_queues(queued={'default': 1})
worker.run(once=True)
self._ensure_queues(queued={'default': 0})
json_data = tmpfile.read().decode('utf8')
assert json.loads(json_data) == {
'args': [123, 'args'],
'kwargs': {'more': [1, 2, 3]},
}
def test_queue(self):
self.tiger.delay(simple_task, queue='a')
self._ensure_queues(queued={'a': 1, 'b': 0, 'c': 0})
self.tiger.delay(simple_task, queue='b')
self._ensure_queues(queued={'a': 1, 'b': 1, 'c': 0})
self.tiger.delay(simple_task, queue='c')
self._ensure_queues(queued={'a': 1, 'b': 1, 'c': 1})
Worker(self.tiger, queues=['a', 'b']).run(once=True)
self._ensure_queues(queued={'a': 0, 'b': 0, 'c': 1})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'a': 0, 'b': 0, 'c': 0})
def test_nested_queue(self):
self.tiger.delay(simple_task, queue='x')
self.tiger.delay(simple_task, queue='a')
self.tiger.delay(simple_task, queue='a.b')
self.tiger.delay(simple_task, queue='a.b.c')
self._ensure_queues(queued={'a': 1, 'a.b': 1, 'a.b.c': 1, 'x': 1})
Worker(self.tiger, queues=['a', 'b']).run(once=True)
self._ensure_queues(queued={'a': 0, 'a.b': 0, 'a.b.c': 0, 'x': 1})
def test_nested_queue_2(self):
self.tiger.delay(simple_task, queue='x')
self.tiger.delay(simple_task, queue='a')
self.tiger.delay(simple_task, queue='a.b')
self.tiger.delay(simple_task, queue='a.b.c')
self._ensure_queues(queued={'a': 1, 'a.b': 1, 'a.b.c': 1, 'x': 1})
Worker(self.tiger, queues=['a.b', 'b']).run(once=True)
self._ensure_queues(queued={'a': 1, 'a.b': 0, 'a.b.c': 0, 'x': 1})
def test_task_on_other_queue(self):
self.tiger.delay(task_on_other_queue)
self._ensure_queues(queued={'other': 1})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'other': 0})
def test_when(self):
self.tiger.delay(simple_task, when=datetime.timedelta(seconds=DELAY))
self._ensure_queues(queued={'default': 0}, scheduled={'default': 1})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'default': 0}, scheduled={'default': 1})
time.sleep(DELAY)
# Two runs: The first one picks the task up from the "scheduled" queue,
# the second one processes it.
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'default': 1}, scheduled={'default': 0})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'default': 0}, scheduled={'default': 0})
@pytest.mark.parametrize('store_tracebacks', [False, True])
def test_exception_task(self, store_tracebacks):
self.tiger.config['STORE_TRACEBACKS'] = store_tracebacks
self.tiger.delay(exception_task)
Worker(self.tiger).run(once=True)
queues = self._ensure_queues(
queued={'default': 0}, error={'default': 1}
)
task = queues['error']['default'][0]
assert task['func'] == 'tests.tasks:exception_task'
executions = self.conn.lrange(
't:task:%s:executions' % task['id'], 0, -1
)
assert len(executions) == 1
execution = json.loads(executions[0])
assert execution['exception_name'] == serialize_func_name(Exception)
assert not execution['success']
if store_tracebacks:
assert execution['traceback'].startswith(
'Traceback (most recent call last):'
)
else:
assert 'traceback' not in execution
def test_long_task_ok(self):
self.tiger.delay(long_task_ok)
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'default': 0}, error={'default': 0})
def test_long_task_killed(self):
self.tiger.delay(long_task_killed)
Worker(self.tiger).run(once=True)
queues = self._ensure_queues(
queued={'default': 0}, error={'default': 1}
)
task = queues['error']['default'][0]
assert task['func'] == 'tests.tasks:long_task_killed'
executions = self.conn.lrange(
't:task:%s:executions' % task['id'], 0, -1
)
assert len(executions) == 1
execution = json.loads(executions[0])
exception_name = execution['exception_name']
assert exception_name == 'tasktiger.exceptions:JobTimeoutException'
assert not execution['success']
def test_unique_task_1(self):
self.tiger.delay(unique_task, kwargs={'value': 1})
self.tiger.delay(unique_task, kwargs={'value': 2})
self.tiger.delay(unique_task, kwargs={'value': 2})
queues = self._ensure_queues(
queued={'default': 2}, error={'default': 0}
)
task_1, task_2 = queues['queued']['default']
assert task_1['func'] == 'tests.tasks:unique_task'
assert task_1['kwargs'] == {'value': 1}
assert task_2['func'] == 'tests.tasks:unique_task'
assert task_2['kwargs'] == {'value': 2}
Pool(3).map(external_worker, range(3))
results = self.conn.lrange('unique_task', 0, -1)
assert len(results) == 2
assert set(results) == {'1', '2'}
def test_unique_task_2(self):
self.tiger.delay(unique_task)
self.tiger.delay(unique_task)
self.tiger.delay(unique_task)
self._ensure_queues(queued={'default': 1}, error={'default': 0})
def test_unique_key_task(self):
self.tiger.delay(unique_key_task, kwargs={'a': 1, 'b': 1})
self.tiger.delay(unique_key_task, kwargs={'a': 1, 'b': 2})
self.tiger.delay(unique_key_task, kwargs={'a': 2, 'b': 1})
self.tiger.delay(unique_key_task, kwargs={'a': 2, 'b': 2})
self.tiger.delay(unique_key_task)
self.tiger.delay(unique_key_task, kwargs={'b': 1})
queues = self._ensure_queues(
queued={'default': 3}, error={'default': 0}
)
task_1, task_2, task_3 = queues['queued']['default']
# Note the last queued value is used.
assert task_1['func'] == 'tests.tasks:unique_key_task'
assert task_1['kwargs'] == {'a': 1, 'b': 2}
assert task_2['func'] == 'tests.tasks:unique_key_task'
assert task_2['kwargs'] == {'a': 2, 'b': 2}
assert task_3['func'] == 'tests.tasks:unique_key_task'
assert task_3['kwargs'] == {'b': 1}
def test_locked_task(self):
self.tiger.delay(locked_task, kwargs={'key': '1'})
self.tiger.delay(locked_task, kwargs={'key': '2'})
self.tiger.delay(locked_task, kwargs={'key': '2'})
self._ensure_queues(
queued={'default': 3},
scheduled={'default': 0},
error={'default': 0},
)
Pool(3).map(external_worker, range(3))
# One task with keys 1 and 2 executed, but one is scheduled because
# it hit a lock.
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 1},
error={'default': 0},
)
# Wait for task to exit, .1 extra for task startup time
time.sleep(DELAY + 0.1)
# Two runs: The first one picks the task up from the "scheduled" queue,
# the second one processes it.
Worker(self.tiger).run(once=True)
self._ensure_queues(
queued={'default': 1},
scheduled={'default': 0},
error={'default': 0},
)
Worker(self.tiger).run(once=True)
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 0},
error={'default': 0},
)
def test_lock_key(self):
self.tiger.delay(
locked_task, kwargs={'key': '1', 'other': 1}, lock_key=('key',)
)
self.tiger.delay(
locked_task, kwargs={'key': '2', 'other': 2}, lock_key=('key',)
)
self.tiger.delay(
locked_task, kwargs={'key': '2', 'other': 3}, lock_key=('key',)
)
self._ensure_queues(
queued={'default': 3},
scheduled={'default': 0},
error={'default': 0},
)
Pool(3).map(external_worker, range(3))
# One task with keys 1 and 2 executed, but one is scheduled because
# it hit a lock.
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 1},
error={'default': 0},
)
time.sleep(DELAY)
# Two runs: The first one picks the task up from the "scheduled" queue,
# the second one processes it.
Worker(self.tiger).run(once=True)
self._ensure_queues(
queued={'default': 1},
scheduled={'default': 0},
error={'default': 0},
)
Worker(self.tiger).run(once=True)
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 0},
error={'default': 0},
)
def test_retry(self):
# Use the default retry method we configured.
task = self.tiger.delay(exception_task, retry=True)
self._ensure_queues(
queued={'default': 1},
scheduled={'default': 0},
error={'default': 0},
)
# First run
Worker(self.tiger).run(once=True)
assert task.n_executions() == 1
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 1},
error={'default': 0},
)
# The task is scheduled, so nothing happens here.
Worker(self.tiger).run(once=True)
assert task.n_executions() == 1
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 1},
error={'default': 0},
)
time.sleep(DELAY)
# Second run (run twice to move from scheduled to queued)
Worker(self.tiger).run(once=True)
Worker(self.tiger).run(once=True)
assert task.n_executions() == 2
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 1},
error={'default': 0},
)
time.sleep(DELAY)
# Third run will fail permanently.
Worker(self.tiger).run(once=True)
Worker(self.tiger).run(once=True)
assert task.n_executions() == 3
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 0},
error={'default': 1},
)
def test_retry_on_1(self):
# Fails immediately
self.tiger.delay(exception_task, retry_on=[ValueError, IndexError])
Worker(self.tiger).run(once=True)
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 0},
error={'default': 1},
)
def test_retry_on_2(self):
# Will be retried
self.tiger.delay(exception_task, retry_on=[ValueError, Exception])
Worker(self.tiger).run(once=True)
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 1},
error={'default': 0},
)
def test_retry_on_3(self):
# Make sure we catch superclasses.
self.tiger.delay(exception_task, retry_on=[Exception])
Worker(self.tiger).run(once=True)
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 1},
error={'default': 0},
)
def test_retry_on_invalid(self):
"""
Ensure we handle exceptions that can't be imported.
"""
class CustomException(Exception):
"""
Since this is an inline exception, it's not possible to import it
via dotted path.
"""
self.tiger.delay(exception_task, retry_on=[CustomException])
Worker(self.tiger).run(once=True)
self._ensure_queues(
queued={'default': 0},
scheduled={'default': 0},
error={'default': 1},
)
def test_retry_method(self):
task = self.tiger.delay(
exception_task, retry_method=linear(DELAY, DELAY, 3)
)
def _run(n_executions):
Worker(self.tiger).run(once=True)
Worker(self.tiger).run(once=True)
assert task.n_executions() == n_executions
_run(1)
# Retry in 1*DELAY
time.sleep(DELAY)
_run(2)
# Retry in 2*DELAY
time.sleep(DELAY)
_run(2)
time.sleep(DELAY)
_run(3)
# Retry in 3*DELAY
time.sleep(DELAY)
_run(3)
time.sleep(DELAY)
_run(3)
time.sleep(DELAY)
_run(4)
self._ensure_queues(error={'default': 1})
def test_retry_method_fixed(self):
f = fixed(2, 3)
assert f[0](1, *f[1]) == 2
assert f[0](2, *f[1]) == 2
assert f[0](3, *f[1]) == 2
pytest.raises(StopRetry, f[0], 4, *f[1])
def test_retry_method_linear(self):
f = linear(1, 2, 3)
assert f[0](1, *f[1]) == 1
assert f[0](2, *f[1]) == 3
assert f[0](3, *f[1]) == 5
pytest.raises(StopRetry, f[0], 4, *f[1])
def test_retry_method_exponential(self):
f = exponential(1, 2, 4)
assert f[0](1, *f[1]) == 1
assert f[0](2, *f[1]) == 2
assert f[0](3, *f[1]) == 4
assert f[0](4, *f[1]) == 8
pytest.raises(StopRetry, f[0], 5, *f[1])
def test_retry_exception_1(self):
self.tiger.delay(retry_task)
self._ensure_queues(queued={'default': 1})
Worker(self.tiger).run(once=True)
self._ensure_queues(scheduled={'default': 1})
time.sleep(DELAY)
Worker(self.tiger).run(once=True)
Worker(self.tiger).run(once=True)
self._ensure_queues(scheduled={'default': 1})
time.sleep(DELAY)
Worker(self.tiger).run(once=True)
Worker(self.tiger).run(once=True)
self._ensure_queues(error={'default': 1})
def test_retry_exception_2(self):
task = self.tiger.delay(retry_task_2)
self._ensure_queues(queued={'default': 1})
assert task.n_executions() == 0
Worker(self.tiger).run(once=True)
self._ensure_queues(scheduled={'default': 1})
assert task.n_executions() == 1
time.sleep(DELAY)
Worker(self.tiger).run(once=True)
Worker(self.tiger).run(once=True)
self._ensure_queues()
pytest.raises(TaskNotFound, task.n_executions)
def test_batch_1(self):
self.tiger.delay(batch_task, args=[1])
self.tiger.delay(batch_task, args=[2])
self.tiger.delay(batch_task, args=[3])
self.tiger.delay(batch_task, args=[4])
self._ensure_queues(queued={'batch': 4})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'batch': 0})
data = [json.loads(d) for d in self.conn.lrange('batch_task', 0, -1)]
assert data == [
[
{'args': [1], 'kwargs': {}},
{'args': [2], 'kwargs': {}},
{'args': [3], 'kwargs': {}},
],
[{'args': [4], 'kwargs': {}}],
]
def test_batch_2(self):
self.tiger.delay(batch_task, args=[1])
self.tiger.delay(non_batch_task, args=[5])
self.tiger.delay(batch_task, args=[2])
self.tiger.delay(batch_task, args=[3])
self.tiger.delay(batch_task, args=[4])
self.tiger.delay(non_batch_task, args=[6])
self.tiger.delay(non_batch_task, args=[7])
self._ensure_queues(queued={'batch': 7})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'batch': 0})
data = [json.loads(d) for d in self.conn.lrange('batch_task', 0, -1)]
assert data == [
[{'args': [1], 'kwargs': {}}, {'args': [2], 'kwargs': {}}],
5,
[{'args': [3], 'kwargs': {}}, {'args': [4], 'kwargs': {}}],
6,
7,
]
def test_batch_3(self):
self.tiger.delay(batch_task, queue='default', args=[1])
self.tiger.delay(batch_task, queue='default', args=[2])
self.tiger.delay(batch_task, queue='default', args=[3])
self.tiger.delay(batch_task, queue='default', args=[4])
self._ensure_queues(queued={'default': 4})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'default': 0})
data = [json.loads(d) for d in self.conn.lrange('batch_task', 0, -1)]
assert data == [
[{'args': [1], 'kwargs': {}}],
[{'args': [2], 'kwargs': {}}],
[{'args': [3], 'kwargs': {}}],
[{'args': [4], 'kwargs': {}}],
]
def test_batch_4(self):
self.tiger.delay(batch_task, queue='batch.sub', args=[1])
self.tiger.delay(batch_task, queue='batch.sub', args=[2])
self.tiger.delay(batch_task, queue='batch.sub', args=[3])
self.tiger.delay(batch_task, queue='batch.sub', args=[4])
self._ensure_queues(queued={'batch.sub': 4})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'batch.sub': 0})
data = [json.loads(d) for d in self.conn.lrange('batch_task', 0, -1)]
assert data == [
[
{'args': [1], 'kwargs': {}},
{'args': [2], 'kwargs': {}},
{'args': [3], 'kwargs': {}},
],
[{'args': [4], 'kwargs': {}}],
]
def test_batch_exception_1(self):
self.tiger.delay(batch_task, args=[1])
self.tiger.delay(batch_task, args=[10])
self.tiger.delay(batch_task, args=[2])
self.tiger.delay(batch_task, args=[3])
self._ensure_queues(queued={'batch': 4})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'batch': 0}, error={'batch': 3})
def test_batch_exception_2(self):
# If we queue non-batch tasks into a batch queue, we currently fail
# the entire batch for a specific task.
self.tiger.delay(non_batch_task, args=[1])
self.tiger.delay(non_batch_task, args=[10])
self.tiger.delay(non_batch_task, args=[2])
self.tiger.delay(non_batch_task, args=[3])
self._ensure_queues(queued={'batch': 4})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'batch': 0}, error={'batch': 3})
def test_batch_exception_3(self):
self.tiger.delay(batch_task, args=[1])
self.tiger.delay(non_batch_task, args=[2])
self.tiger.delay(batch_task, args=[10])
self._ensure_queues(queued={'batch': 3})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'batch': 0}, error={'batch': 2})
def test_batch_lock_key(self):
self.tiger.delay(
batch_task, kwargs={'key': '1', 'other': 1}, lock_key=('key,')
)
self.tiger.delay(
batch_task, kwargs={'key': '2', 'other': 2}, lock_key=('key,')
)
self.tiger.delay(
batch_task, kwargs={'key': '2', 'other': 3}, lock_key=('key,')
)
self._ensure_queues(queued={'batch': 3})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'batch': 0})
def test_only_queues(self):
self.tiger.delay(simple_task, queue='a')
self.tiger.delay(simple_task, queue='a.a')
self.tiger.delay(simple_task, queue='b')
self.tiger.delay(simple_task, queue='b.a')
self._ensure_queues(queued={'a': 1, 'a.a': 1, 'b': 1, 'b.a': 1})
self.tiger.config['ONLY_QUEUES'] = ['a']
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'b': 1, 'b.a': 1})
def test_exclude_queues(self):
"""
Test combining ONLY_QUEUES and EXCLUDE_QUEUES, and precedence in case
of subqueues or overlaps.
"""
self.tiger.config['ONLY_QUEUES'] = ['a', 'a.b.c', 'b', 'c']
self.tiger.config['EXCLUDE_QUEUES'] = ['a.b', 'b']
# Queues that should be processed
process_queues = ['a', 'a.a', 'a.b.c', 'c', 'c.a']
# Queues that should be excluded
ignore_queues = ['a.b', 'a.b.d', 'b', 'b.a', 'd', 'd.a']
all_queues = process_queues + ignore_queues
for queue in all_queues:
self.tiger.delay(simple_task, queue=queue)
self._ensure_queues(queued={q: 1 for q in all_queues})
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={q: 1 for q in ignore_queues})
def test_purge_errored_tasks_basic(self):
self.tiger.delay(exception_task)
Worker(self.tiger).run(once=True)
queues = self._ensure_queues(
queued={'default': 0}, error={'default': 1}
)
task = queues['error']['default'][0]
assert task['func'] == 'tests.tasks:exception_task'
# purge errored tasks
assert 1 == self.tiger.purge_errored_tasks()
self._ensure_queues(queued={'default': 0}, error={'default': 0})
def test_purge_errored_tasks_no_errored_tasks(self):
self._ensure_queues(queued={'default': 0}, error={'default': 0})
assert 0 == self.tiger.purge_errored_tasks()
self._ensure_queues(queued={'default': 0}, error={'default': 0})
def test_purge_errored_tasks_both_errored_and_queued(self):
self.tiger.delay(exception_task)
Worker(self.tiger).run(once=True)
self.tiger.delay(simple_task)
self._ensure_queues(queued={'default': 1}, error={'default': 1})
assert 1 == self.tiger.purge_errored_tasks()
self._ensure_queues(queued={'default': 1}, error={'default': 0})
def test_purge_errored_tasks_specific_queues(self):
self.tiger.delay(exception_task, queue='a.b.c')
self.tiger.delay(exception_task, queue='a.b.d')
self.tiger.delay(exception_task, queue='a')
self.tiger.delay(exception_task, queue='e')
self.tiger.delay(exception_task)
Worker(self.tiger).run(once=True)
self._ensure_queues(
queued={'a.b.c': 0, 'a.b.d': 0, 'a': 0, 'e': 0, 'default': 0},
error={'a.b.c': 1, 'a.b.d': 1, 'a': 1, 'e': 1, 'default': 1},
)
# create iterator, don't iterate over it
assert 1 == self.tiger.purge_errored_tasks(queues=['a.b.c'])
self._ensure_queues(
queued={'a.b.c': 0, 'a.b.d': 0, 'a': 0, 'e': 0, 'default': 0},
error={'a.b.c': 0, 'a.b.d': 1, 'a': 1, 'e': 1, 'default': 1},
)
assert 1 == self.tiger.purge_errored_tasks(
queues=['a'], exclude_queues=['a.b.d']
)
self._ensure_queues(
queued={'a.b.c': 0, 'a.b.d': 0, 'a': 0, 'e': 0, 'default': 0},
error={'a.b.c': 0, 'a.b.d': 1, 'a': 0, 'e': 1, 'default': 1},
)
assert 2 == self.tiger.purge_errored_tasks(exclude_queues=['e'])
self._ensure_queues(
queued={'a.b.c': 0, 'a.b.d': 0, 'a': 0, 'e': 0, 'default': 0},
error={'a.b.c': 0, 'a.b.d': 0, 'a': 0, 'e': 1, 'default': 0},
)
assert 1 == self.tiger.purge_errored_tasks()
self._ensure_queues(
queued={'a.b.c': 0, 'a.b.d': 0, 'a': 0, 'e': 0, 'default': 0},
error={'a.b.c': 0, 'a.b.d': 0, 'a': 0, 'e': 0, 'default': 0},
)
def test_purge_errored_tasks_older_than(self):
task_timestamps = [
datetime.datetime(2015, 1, 1),
datetime.datetime(2016, 1, 1),
datetime.datetime(2017, 1, 1),
datetime.datetime(2018, 1, 1),
]
for task_timestamp in task_timestamps:
with FreezeTime(task_timestamp):
self.tiger.delay(exception_task)
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'default': 0}, error={'default': 4})
_, tasks = Task.tasks_from_queue(self.tiger, 'default', 'error')
actual_timestamps = [task.ts for task in tasks]
assert task_timestamps == actual_timestamps
assert 2 == self.tiger.purge_errored_tasks(
last_execution_before=datetime.datetime(2016, 6, 1)
)
self._ensure_queues(queued={'default': 0}, error={'default': 2})
def test_purge_errored_tasks_limit(self):
for _ in range(10):
self.tiger.delay(exception_task)
Worker(self.tiger).run(once=True)
self._ensure_queues(queued={'default': 0}, error={'default': 10})
# purge 1
assert 1 == self.tiger.purge_errored_tasks(limit=1)
self._ensure_queues(queued={'default': 0}, error={'default': 9})
# purge 4
assert 4 == self.tiger.purge_errored_tasks(limit=4)
self._ensure_queues(queued={'default': 0}, error={'default': 5})
# purge the rest
assert 5 == self.tiger.purge_errored_tasks(limit=None)
self._ensure_queues(queued={'default': 0}, error={'default': 0})
def test_purge_errored_tasks_only_errored_unique_task(self):
# only one of these should actually schedule (since it's unique)
self.tiger.delay(unique_exception_task)
self.tiger.delay(unique_exception_task)
self._ensure_queues(queued={'default': 1})
Worker(self.tiger).run(once=True)
self._ensure_queues(error={'default': 1})
self.tiger.delay(unique_exception_task)
self._ensure_queues(queued={'default': 1}, error={'default': 1})
assert 1 == self.tiger.purge_errored_tasks()
self._ensure_queues(queued={'default': 1}, error={'default': 0})
class TestTasks(BaseTestCase):
    """
    Task class test cases.
    """

    def test_delay(self):
        # A freshly constructed Task is not in Redis until delay() is called.
        task = Task(self.tiger, simple_task)
        self._ensure_queues()
        task.delay()
        self._ensure_queues(queued={'default': 1})
        # Canceling only works for scheduled tasks.
        pytest.raises(TaskNotFound, task.cancel)

    def test_delay_scheduled(self):
        # A scheduled task can be canceled before its ETA.
        task = Task(self.tiger, simple_task, queue='a')
        task.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'a': 1})
        # Test canceling a scheduled task.
        task.cancel()
        self._ensure_queues()
        # Canceling again raises an error
        pytest.raises(TaskNotFound, task.cancel)

    def test_delay_scheduled_2(self):
        task = Task(self.tiger, simple_task, queue='a')
        task.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'a': 1})
        task_id = task.id
        # We can't look up a non-unique task by recreating it.
        task = Task(self.tiger, simple_task, queue='a')
        pytest.raises(TaskNotFound, task.cancel)
        # We can look up a task by its ID.
        fetch_task = lambda: Task.from_id(
            self.tiger, 'a', 'scheduled', task_id
        )
        task = fetch_task()
        task.cancel()
        self._ensure_queues()
        # Task.from_id raises if it doesn't exist.
        pytest.raises(TaskNotFound, fetch_task)

    def test_delay_scheduled_3(self):
        task = Task(self.tiger, simple_task, unique=True)
        task.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'default': 1})
        # We can look up a unique task by recreating it.
        task = Task(self.tiger, simple_task, unique=True)
        task.cancel()
        self._ensure_queues()

    def test_delete_failed_task(self):
        """
        Ensure we can delete a task that's in the error queue.
        """
        task = self.tiger.delay(exception_task)
        Worker(self.tiger).run(once=True)
        self._ensure_queues(error={'default': 1})
        task.delete()
        self._ensure_queues()

    def test_cancel_failed_task_2(self):
        """
        Ensure we can't cancel a task that's in the error queue.
        """
        task = self.tiger.delay(exception_task)
        Worker(self.tiger).run(once=True)
        self._ensure_queues(error={'default': 1})
        pytest.raises(TaskNotFound, task.cancel)
        # The errored task remains untouched.
        self._ensure_queues(error={'default': 1})

    def test_update_scheduled_time(self):
        # Moving a scheduled task's ETA updates its score in the zset.
        task = Task(self.tiger, simple_task, unique=True)
        task.delay(when=datetime.timedelta(minutes=5))
        self._ensure_queues(scheduled={'default': 1})
        old_score = self.conn.zscore('t:scheduled:default', task.id)
        task.update_scheduled_time(when=datetime.timedelta(minutes=6))
        self._ensure_queues(scheduled={'default': 1})
        new_score = self.conn.zscore('t:scheduled:default', task.id)
        # The difference can be slightly over 60 due to processing time, but
        # shouldn't be much higher.
        assert 60 <= new_score - old_score < 61

    def test_execute(self):
        # execute() runs the function inline and propagates its exception.
        task = Task(self.tiger, exception_task)
        pytest.raises(Exception, task.execute)

    def test_tasks_from_queue(self):
        task0 = Task(self.tiger, simple_task)
        task1 = Task(self.tiger, exception_task)
        task2 = Task(self.tiger, simple_task, queue='other')
        task0.delay()
        task1.delay()
        task2.delay()
        # Only the two tasks on the "default" queue are returned.
        n, tasks = Task.tasks_from_queue(self.tiger, 'default', 'queued')
        assert n == 2
        assert task0.id == tasks[0].id
        assert task0.func == simple_task
        assert task0.func == tasks[0].func
        assert task0.serialized_func == 'tests.tasks:simple_task'
        assert task0.serialized_func == tasks[0].serialized_func
        assert task0.state == tasks[0].state
        assert task0.state == 'queued'
        assert task0.queue == tasks[0].queue
        assert task0.queue == 'default'

    def test_tasks_from_queue_with_executions(self):
        # load_executions caps how many execution entries are loaded per task.
        task = self.tiger.delay(exception_task, retry=True)
        # Get two executions in task
        Worker(self.tiger).run(once=True)
        time.sleep(DELAY)
        # Second run (run twice to move from scheduled to queued)
        Worker(self.tiger).run(once=True)
        Worker(self.tiger).run(once=True)
        n, tasks = Task.tasks_from_queue(
            self.tiger, 'default', 'scheduled', load_executions=1
        )
        assert n == 1
        assert len(tasks[0].executions) == 1
        n, tasks = Task.tasks_from_queue(
            self.tiger, 'default', 'scheduled', load_executions=10
        )
        assert n == 1
        assert len(tasks[0].executions) == 2

    def test_eager(self):
        # ALWAYS_EAGER runs tasks inline at delay() time instead of queueing.
        self.tiger.config['ALWAYS_EAGER'] = True
        # Ensure task is immediately executed.
        task = Task(self.tiger, simple_task)
        task.delay()
        self._ensure_queues()
        # Ensure task is immediately executed.
        task = Task(self.tiger, exception_task)
        pytest.raises(Exception, task.delay)
        self._ensure_queues()
        # Even when we specify "when" in the past
        task = Task(self.tiger, simple_task)
        task.delay(when=datetime.timedelta(seconds=-5))
        self._ensure_queues()
        # or with a zero timedelta.
        task = Task(self.tiger, simple_task)
        task.delay(when=datetime.timedelta(seconds=0))
        self._ensure_queues()
        # Ensure there is an exception if we can't serialize the task.
        task = Task(self.tiger, decorated_task, args=[object()])
        pytest.raises(TypeError, task.delay)
        self._ensure_queues()
        # Ensure task is not executed if it's scheduled in the future.
        task = Task(self.tiger, simple_task)
        task.delay(when=datetime.timedelta(seconds=5))
        self._ensure_queues(scheduled={'default': 1})
class TestCurrentTask(BaseTestCase):
    """
    Ensure current_task/current_tasks are set.
    """

    def test_current_task(self):
        """A worker-run task sees itself as the current task."""
        job = Task(self.tiger, verify_current_task)
        job.delay()
        Worker(self.tiger).run(once=True)
        # The task flags "runtime_error" on failure and stores the task id
        # it observed under "task_id".
        assert not self.conn.exists('runtime_error')
        assert self.conn.get('task_id') == job.id

    def test_current_tasks(self):
        """A batch run exposes all tasks of the batch, in order."""
        first = Task(self.tiger, verify_current_tasks)
        second = Task(self.tiger, verify_current_tasks)
        first.delay()
        second.delay()
        Worker(self.tiger).run(once=True)
        assert self.conn.lrange('task_ids', 0, -1) == [first.id, second.id]

    def test_current_task_eager(self):
        """Eager (inline) execution also sets the current task."""
        self.tiger.config['ALWAYS_EAGER'] = True
        job = Task(self.tiger, verify_current_task)
        job.delay()
        assert not self.conn.exists('runtime_error')
        assert self.conn.get('task_id') == job.id

    def test_current_tasks_eager(self):
        """Eager (inline) execution also sets the current task list."""
        self.tiger.config['ALWAYS_EAGER'] = True
        job = Task(self.tiger, verify_current_tasks)
        job.delay()
        assert not self.conn.exists('runtime_error')
        assert self.conn.lrange('task_ids', 0, -1) == [job.id]
class TestTaskTigerGlobal(BaseTestCase):
    """
    Ensure TaskTiger.current_instance is set.
    """

    def test_task(self):
        """The instance check passes when run through a worker."""
        Task(self.tiger, verify_tasktiger_instance).delay()
        Worker(self.tiger).run(once=True)
        # A failing check would leave the task in the error queue.
        self._ensure_queues()

    def test_eager(self):
        """The instance check passes when executed eagerly (inline)."""
        self.tiger.config['ALWAYS_EAGER'] = True
        Task(self.tiger, verify_tasktiger_instance).delay()
class TestReliability(BaseTestCase):
"""
Test behavior if things go wrong.
"""
def _test_expired_task(self, task, expected_state):
"""
Ensure the given task ends up in the expected state if the worker is
killed prematurely. The task needs to run for longer than DELAY for
this test to work.
"""
task.delay()
self._ensure_queues(queued={'default': 1})
# Start a worker and wait until it starts processing.
worker = Process(target=external_worker)
worker.start()
time.sleep(DELAY)
# Kill the worker while it's still processing the task.
os.kill(worker.pid, signal.SIGKILL)
self._ensure_queues(active={'default': 1})
# Wait for (at least) ACTIVE_TASK_UPDATE_TIMEOUT
time.sleep(2 * DELAY)
Worker(self.tiger).run(once=True)
self._ensure_queues(**{expected_state: {'default': 1}})
def test_discard_expired_task(self):
"""
Ensure a non-retriable task ends up in "error" state if the worker is
killed prematurely.
"""
task = Task(self.tiger, sleep_task)
self._test_expired_task(task, 'error')
def test_requeue_expired_task(self):
"""
Ensure a retriable task ends up in "queued" state if the worker is
killed prematurely.
"""
task = Task(self.tiger, sleep_task, retry_on=[JobTimeoutException])
self._test_expired_task(task, 'queued')
def test_killed_child_process(self):
"""
Ensure that TaskTiger completes gracefully if the child process
disappears and there is no execution object.
"""
import psutil
sleep_task.delay()
self._ensure_queues(queued={'default': 1})
# Start a worker and wait until it starts processing.
worker = Process(target=external_worker)
worker.start()
time.sleep(DELAY)
# Get the PID of the worker subprocess actually executing the task
current_process = psutil.Process(pid=worker.pid)
current_children = current_process.children()
assert len(current_children) == 1
# Kill the worker subprocess that is executing the task.
current_children[0].kill()
# Make sure the worker still terminates gracefully.
worker.join()
assert worker.exitcode == 0
# Make sure the task is in the error queue.
self._ensure_queues(error={'default': 1})
def test_task_disappears(self):
"""
Ensure that a task object that disappears while the task is processing
is handled properly. This could happen when a worker processes a task,
then hangs for a long time, causing another worker to pick up and finish
the task. Then, when the original worker resumes, the task object will
be gone. Make sure we log a "not found" error and move on.
"""
task = Task(self.tiger, sleep_task, kwargs={'delay': 2 * DELAY})
task.delay()
self._ensure_queues(queued={'default': 1})
# Start a worker and wait until it starts processing.
worker = Process(target=external_worker)
worker.start()
time.sleep(DELAY)
# Remove the task object while the task is processing.
assert self.conn.delete('t:task:{}'.format(task.id)) == 1
# Kill the worker while it's still processing the task.
os.kill(worker.pid, signal.SIGKILL)
# _ensure_queues() breaks here because it can't find the task
assert self.conn.scard('t:queued') == 0
assert self.conn.scard('t:active') == 1
assert self.conn.scard('t:error') == 0
assert self.conn.scard('t:scheduled') == 0
# Capture logger
errors = []
def fake_error(msg):
errors.append(msg)
with Patch(self.tiger.log._logger, 'error', fake_error):
# Since ACTIVE_TASK_UPDATE_TIMEOUT hasn't elapsed yet, re-running
# the worker at this time won't change anything. (run twice to move
# from scheduled to queued)
Worker(self.tiger).run(once=True)
Worker(self.tiger).run(once=True)
assert len(errors) == 0
assert self.conn.scard('t:queued') == 0
assert self.conn.scard('t:active') == 1
assert self.conn.scard('t:error') == 0
assert self.conn.scard('t:scheduled') == 0
# After waiting and re-running the worker, queues will clear.
time.sleep(2 * DELAY)
Worker(self.tiger).run(once=True)
Worker(self.tiger).run(once=True)
self._ensure_queues()
assert len(errors) == 1
assert "not found" in errors[0]
def test_child_hanging_forever(self):
    """
    Ensure the parent kills the child if it hangs forever.
    """
    import psutil

    # hard_timeout=1 gives the parent worker a deadline after which it
    # must terminate the (suspended) child.
    task = Task(self.tiger, sleep_task, hard_timeout=1)
    task.delay()
    self._ensure_queues(queued={'default': 1})

    # Start a worker and wait until it starts processing.
    # ACTIVE_TASK_UPDATE_TIMER=1 makes the parent check on the child
    # frequently enough for this test to finish quickly.
    worker = Process(
        target=external_worker,
        kwargs={"patch_config": {"ACTIVE_TASK_UPDATE_TIMER": 1}},
    )
    worker.start()
    time.sleep(DELAY)

    # Get the PID of the worker subprocess actually executing the task
    current_process = psutil.Process(pid=worker.pid)
    current_children = current_process.children()
    assert len(current_children) == 1

    # Pause the child while it's still processing the task. SIGSTOP via
    # psutil.suspend() simulates a child hung forever.
    current_children[0].suspend()

    # The parent will eventually kill the child.
    worker.join()
    assert worker.exitcode == 0
    assert not current_children[0].is_running()

    # Ensure we have an errored task and execution.
    queues = self._ensure_queues(error={'default': 1})
    task = queues['error']['default'][0]
    assert task['func'] == 'tests.tasks:sleep_task'

    # Exactly one execution record, marked failed with JobTimeoutException.
    executions = self.conn.lrange(
        't:task:%s:executions' % task['id'], 0, -1
    )
    assert len(executions) == 1
    execution = json.loads(executions[0])
    assert execution['exception_name'] == serialize_func_name(
        JobTimeoutException
    )
    assert not execution['success']
class TestRunnerClass(BaseTestCase):
    """Tests for per-task custom runner classes (runner_class=...)."""

    def test_custom_runner_class_single_task(self):
        # MyRunnerClass records the executed task's id under 'task_id'.
        task = self.tiger.delay(simple_task, runner_class=MyRunnerClass)
        Worker(self.tiger).run(once=True)
        assert self.conn.get('task_id') == task.id
        self.conn.delete('task_id')
        self._ensure_queues()

    def test_custom_runner_class_batch_task(self):
        # Both tasks share the runner class, so they batch together and
        # the runner records the combined args as "1,2".
        self.tiger.delay(batch_task, args=[1], runner_class=MyRunnerClass)
        self.tiger.delay(batch_task, args=[2], runner_class=MyRunnerClass)
        Worker(self.tiger).run(once=True)
        assert self.conn.get('task_args') == "1,2"
        self.conn.delete('task_args')
        self._ensure_queues()

    def test_mixed_runner_class_batch_task(self):
        """Ensure all tasks in a batch task must have the same runner class."""
        self.tiger.delay(batch_task, args=[1], runner_class=MyRunnerClass)
        self.tiger.delay(batch_task, args=[2])
        Worker(self.tiger).run(once=True)
        # Nothing executed; both tasks end up in the error queue.
        assert self.conn.get('task_args') is None
        self._ensure_queues(error={'batch': 2})

    def test_permanent_error(self):
        # MyErrorRunnerClass records the failing task's id and the task is
        # moved straight to the error queue (no retry).
        task = self.tiger.delay(
            exception_task, runner_class=MyErrorRunnerClass
        )
        Worker(self.tiger).run(once=True)
        assert self.conn.get('task_id') == task.id
        self.conn.delete('task_id')
        self._ensure_queues(error={'default': 1})

    def test_eager_task(self):
        # With ALWAYS_EAGER the runner executes inline at delay() time and
        # delay() returns the task's result (123 from MyRunnerClass).
        self.tiger.config['ALWAYS_EAGER'] = True
        task = Task(self.tiger, simple_task, runner_class=MyRunnerClass)
        assert task.delay() == 123
        self._ensure_queues()
|
ipygpulogger.py | import time, psutil, gc, tracemalloc
from collections import namedtuple
import threading
from IPython import get_ipython
# 1 when a usable CUDA device is present; gates all GPU measurement code.
have_cuda = 0
import torch
if torch.cuda.is_available():
    have_cuda = 1
    # NVML reports device-wide memory use (not just this process's
    # torch allocations), so it is used for all GPU readings below.
    import pynvml
    pynvml.nvmlInit()

# Handle to the current process, used for RSS (resident memory) readings.
process = psutil.Process()
def preload_pytorch():
    """Run a tiny CUDA op so torch creates its context before measuring."""
    if have_cuda:
        torch.ones((1, 1)).cuda()
def cpu_mem_used_get():
    """Return this process's resident memory in whole MBs (rounded down)."""
    rss_bytes = process.memory_info().rss
    return int(rss_bytes / 2**20)
def gpu_mem_used_get():
    """Return used memory in MBs (rounded down) for the currently selected
    torch CUDA device, as reported by NVML. Clears the pytorch cache first so
    the reading reflects real usage rather than cached allocator blocks."""
    torch.cuda.empty_cache()  # clear cache to report the correct data
    dev = torch.cuda.current_device()
    handle = pynvml.nvmlDeviceGetHandleByIndex(dev)
    mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
    return int(mem.used / 2**20)
# Lightweight variant of gpu_mem_used_get: no checks, no cache clearing, no
# gc.collect — cheap enough to call from the peak-monitor thread's tight loop.
def gpu_mem_used_get_fast(gpu_handle):
    mem = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle)
    return int(mem.used / 2**20)
# Per-cell memory report, all values in MBs: (used_delta, peaked_delta, used_total).
IPyGPULoggerMemory = namedtuple('IPyGPULoggerMemory', ['used_delta', 'peaked_delta', 'used_total'])
# Per-cell wall-clock execution time, in seconds.
IPyGPULoggerTime = namedtuple('IPyGPULoggerTime', ['time_delta'])
class IPyGPULogger():
    """Per-cell CPU/GPU memory and execution-time logger for IPython.

    Hooks IPython's pre_run_cell/post_run_cell events. CPU deltas/peaks come
    from tracemalloc; GPU usage is read via NVML, with peak GPU usage sampled
    by a short-lived background thread while each cell runs.
    """

    def __init__(self, compact=False, gc_collect=True):
        self.compact = compact          # one line printouts
        self.gc_collect = gc_collect    # don't use when tracking mem leaks
        self.peak_monitoring = False    # True only while a cell is executing
        self.running = False            # True between start() and stop()
        self.time_start = 0
        self.time_delta = 0
        # CPU stats (MBs); -1 means "not yet measured"
        self.cpu_mem_used_peak   = -1
        self.cpu_mem_used_delta  =  0
        self.cpu_mem_used_prev   = -1
        self.cpu_mem_peaked_delta = -1
        # GPU stats (MBs); -1 means "not yet measured"
        self.gpu_mem_used_peak   = -1
        self.gpu_mem_used_delta  =  0
        self.gpu_mem_used_prev   = -1
        self.gpu_mem_peaked_delta = -1
        self.ipython = get_ipython()
        self.input_cells = self.ipython.user_ns['In']

    @property
    def data(self):
        """Last cell's (cpu_mem, gpu_mem, time) report as namedtuples."""
        return (IPyGPULoggerMemory(self.cpu_mem_used_delta, self.cpu_mem_peaked_delta, self.cpu_mem_used_prev),
                IPyGPULoggerMemory(self.gpu_mem_used_delta, self.gpu_mem_peaked_delta, self.gpu_mem_used_prev),
                IPyGPULoggerTime(self.time_delta)
        )

    def start(self):
        """Register memory profiling tools to IPython instance."""
        self.running = True
        preload_pytorch()

        # initial measurements
        if self.gc_collect: gc.collect()
        self.cpu_mem_used_prev = cpu_mem_used_get()
        self.gpu_mem_used_prev = gpu_mem_used_get()

        self.ipython.events.register("pre_run_cell",  self.pre_run_cell)
        self.ipython.events.register("post_run_cell", self.post_run_cell)

        # run pre_run_cell() manually, since we are past that event in this cell
        self.pre_run_cell()
        return self

    def stop(self):
        """Unregister memory profiling tools from IPython instance."""
        if not self.running: return
        # unregister may raise ValueError if the handler is already gone
        try: self.ipython.events.unregister("pre_run_cell",  self.pre_run_cell)
        except ValueError: pass
        try: self.ipython.events.unregister("post_run_cell", self.post_run_cell)
        except ValueError: pass
        self.running = False
        self.peak_monitoring = False  # signals the monitor thread to exit

    def pre_run_cell(self):
        """Arm measurement state just before a cell executes."""
        if not self.running: return
        self.peak_monitoring = True

        # start RAM tracing
        tracemalloc.start()

        # this thread samples RAM usage as long as the current cell is running
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()

        # time before we execute the current cell
        self.time_start = time.time()

    def post_run_cell(self):
        """Collect deltas/peaks after a cell finishes and print the report."""
        if not self.running: return
        self.time_delta = time.time() - self.time_start

        self.peak_monitoring = False  # stop the sampler thread

        if self.gc_collect: gc.collect()

        # instead of needing a peak memory monitoring thread, tracemalloc does
        # the job of getting newly used and peaked memory automatically, since
        # it tracks all malloc/free calls.
        cpu_mem_used_delta, cpu_mem_used_peak = list(map(lambda x: x/2**20, tracemalloc.get_traced_memory()))
        tracemalloc.stop() # reset accounting

        self.cpu_mem_used_new     = cpu_mem_used_get()
        self.cpu_mem_used_delta   = cpu_mem_used_delta
        self.cpu_mem_peaked_delta = max(0, cpu_mem_used_peak - cpu_mem_used_delta)

        # gpu_mem_used_peak is written by the sampler thread while the cell ran
        self.gpu_mem_used_new     = gpu_mem_used_get()
        self.gpu_mem_used_delta   = self.gpu_mem_used_new - self.gpu_mem_used_prev
        self.gpu_mem_peaked_delta = max(0, self.gpu_mem_used_peak - self.gpu_mem_used_new)

        # not really useful, as the report is right next to the cell, the cell
        # counts aren't fixed, if re-run
        # cell_num = len(self.input_cells) - 1

        if (self.compact):
            print(f"CPU: {self.cpu_mem_used_delta:0.0f}/{self.cpu_mem_peaked_delta:0.0f}/{self.cpu_mem_used_new:0.0f} MB | GPU: {self.gpu_mem_used_delta:0.0f}/{self.gpu_mem_peaked_delta:0.0f}/{self.gpu_mem_used_new:0.0f} MB | Time {self.time_delta:0.3f}s | (Consumed/Peaked/Used Total)")
        else:
            print(f"RAM: Consumed Peaked  Used Total | Exec time {self.time_delta:0.3f}s")
            print(f"CPU: {self.cpu_mem_used_delta:5.0f} {self.cpu_mem_peaked_delta:5.0f} {self.cpu_mem_used_new:5.0f} MB |")
            print(f"GPU: {self.gpu_mem_used_delta:5.0f} {self.gpu_mem_peaked_delta:5.0f} {self.gpu_mem_used_new:5.0f} MB |")

        # for self.data accessor
        self.cpu_mem_used_prev = self.cpu_mem_used_new
        self.gpu_mem_used_prev = self.gpu_mem_used_new

    def peak_monitor_func(self):
        """Sampler thread body: track peak GPU memory while a cell runs."""
        self.cpu_mem_used_peak = -1
        self.gpu_mem_used_peak = -1

        gpu_id     = torch.cuda.current_device()
        gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)

        while True:
            # using tracemalloc for tracing peak cpu RAM instead
            #cpu_mem_used = cpu_mem_used_get()
            #self.cpu_mem_used_peak = max(cpu_mem_used, self.cpu_mem_used_peak)

            # no gc.collect, empty_cache here, since it has to be fast and we
            # want to measure only the peak memory usage
            gpu_mem_used = gpu_mem_used_get_fast(gpu_handle)
            self.gpu_mem_used_peak = max(gpu_mem_used, self.gpu_mem_used_peak)

            time.sleep(0.001) # 1msec

            if not self.peak_monitoring: break
|
lisp-core.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-core.py
#
# This is the core process that is used to demux to the specific LISP
# functional components. The 4342 listen socket is centralized here.
#
#
# +------------- data encapsulation via network --------------+
# | |
# | IPC when mr & ms colocated |
# | +--------------------------------+ |
# | | | |
# | | IPC when mr & ddt colo | |
# | | +------------+ | |
# | | | | | |
# | | | v v v 4341
# +-------------+ +----------+ +----------+ +----------+ +----------+
# | lisp-[ir]tr | | lisp-mr | | lisp-ddt | | lisp-ms | | lisp-etr |
# +-------------+ +----------+ +----------+ +----------+ +----------+
# ^ IPC ^ IPC ^ IPC ^ IPC ^ IPC
# | | | | |
# | | | | |
# | | | | |
# +--------------+--------------+--------------+--------------+
# |
# | for dispatching control messages
# +-----------+
# | lisp-core |
# +-----------+
# | 4342
# |
# via network
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import multiprocessing
import threading
import commands
import time
import os
import bottle
import json
import sys
import socket
import thread
#
# Newer versions of CherryPy does not include WSGIServer. Has moved to cheroot.
#
try:
from cherrypy.wsgiserver import CherryPyWSGIServer as wsgi_server
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter as ssl_adaptor
except:
from cheroot.wsgi import Server as wsgi_server
from cheroot.ssl.builtin import BuiltinSSLAdapter as ssl_adaptor
#endtry
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_build_date = ""               # build timestamp, reported on status page
lisp_control_listen_socket = None  # UDP 4342 control listen socket (see header)
lisp_ipc_socket = None             # IPC socket to sibling LISP processes
lisp_ipc_control_socket = None     # IPC socket for control-message dispatch
lisp_sockets = [None, None, None]  # socket triple passed to lisp helpers — exact roles defined elsewhere, TODO confirm
lisp_encap_socket = None           # data-encapsulation socket (port 4341 path)
#------------------------------------------------------------------------------
#
# lisp_api_get
#
# Ask the LISP subsystem for configuration information.
#
@bottle.route('/lisp/api', method="get")
@bottle.route('/lisp/api/<command>', method="get")
@bottle.route('/lisp/api/<command>/<data_structure>', method="get")
def lisp_api_get(command = "", data_structure=""):
    """
    REST GET handler for the LISP API. Authenticates the caller, then either
    fetches dynamic data from the owning process (command == "data") or
    returns the configuration clause for 'command' as JSON.
    """
    data = [{ "?" : [{"?" : "not-auth"}] }]

    #
    # Authenticate.
    #
    if (bottle.request.auth != None):
        username, pw = bottle.request.auth
        if (lispconfig.lisp_find_user_account(username, pw) == False):
            return(json.dumps(data))
        #endif
    else:
        # No basic-auth credentials: refuse programmatic (python) clients,
        # otherwise fall back to browser cookie validation.
        if (bottle.request.headers["User-Agent"].find("python") != -1):
            return(json.dumps(data))
        #endif
        if (lispconfig.lisp_validate_user() == False):
            return(json.dumps(data))
        #endif
    #endif

    #
    # First check for dynamic data. That is go get data from appropriate
    # process. Return from process in JSON format.
    #
    if (command == "data" and data_structure != ""):
        jdata = bottle.request.body.readline()
        data = json.loads(jdata) if jdata != "" else ""
        # Python2 dict .values()[0]: unwrap the single-entry request wrapper,
        # and one more level when the value itself is a dict.
        if (data != ""): data = data.values()[0]
        if (data == []): data = ""
        if (type(data) == dict and type(data.values()[0]) == dict):
            data = data.values()[0]
        #endif
        data = lisp_get_api_data(data_structure, data)
        return(data)
    #endif

    #
    # A valid user can access data now.
    #
    if (command != ""):
        command = "lisp " + command
    else:
        # No command in the URL: take it from the request body's first key.
        jdata = bottle.request.body.readline()
        if (jdata == ""):
            data = [{ "?" : [{"?" : "no-body"}] }]
            return(json.dumps(data))
        #endif
        data = json.loads(jdata)
        command = data.keys()[0]
    #endif

    data = lispconfig.lisp_get_clause_for_api(command)
    return(json.dumps(data))
#enddef
#
# lisp_get_api_system
#
# Return system information in dictionary array (JSON format).
#
def lisp_get_api_system():
    """Return general system status (hostname, uptimes, version, RLOCs) as a
    JSON-encoded dictionary. Handled locally by the lisp-core process."""
    rloc4 = lisp.lisp_myrlocs[0]
    rloc6 = lisp.lisp_myrlocs[1]

    info = {}
    info["hostname"]      = socket.gethostname()
    info["system-uptime"] = commands.getoutput("uptime")
    info["lisp-uptime"]   = lisp.lisp_print_elapsed(lisp.lisp_uptime)
    info["lisp-version"]  = lisp.lisp_version
    info["traceback-log"] = \
        "yes" if os.path.exists("./logs/lisp-traceback.log") else "no"
    info["lisp-rlocs"] = [
        "none" if (rloc4 == None) else rloc4.print_address_no_iid(),
        "none" if (rloc6 == None) else rloc6.print_address_no_iid()
    ]
    return(json.dumps(info))
#enddef
#
# lisp_get_api_data
#
# Send IPC message to process that owns the dynamic data strucutre we
# are retrieving via the API. Variable data for the 'map-cache' and
# 'site-cache' API contains:
#
# { "eid-prefix" : <eid>, "group-prefix" : <group>, "instance-id" : <iid> }
#
# For 'map-resolver' and 'map-server" API contains:
#
# { "address" : <address>" } or { "dns-name" : <dns-name> }
#
# For 'site-cache-summary', there is no data required.
#
def lisp_get_api_data(data_structure, data):
    """
    Forward an API get-data request over IPC to the LISP process that owns
    'data_structure' and block until it replies. Returns the reply (JSON
    string) or json.dumps([]) when the structure is invalid or no owning
    process is running.
    """
    valid_apis = ["site-cache", "map-cache", "system", "map-resolver",
        "map-server", "database-mapping", "site-cache-summary"]
    if (data_structure not in valid_apis): return(json.dumps([]))

    #
    # lisp-core process handles the system lispapi.get_system() API.
    #
    if (data_structure == "system"): return(lisp_get_api_system())

    #
    # Build IPC, acquire lock, and send IPC message. Then wait.
    #
    # NOTE(review): the lock is acquired only on branches that actually send
    # an IPC, and released once after lisp_receive() below — every path that
    # skips the send returns early, so acquire/release stay paired.
    #
    if (data != ""): data = json.dumps(data)
    ipc = lisp.lisp_api_ipc("lisp-core", data_structure + "%" + data)

    # map-cache / map-resolver live in the RTR (preferred) or ITR process.
    if (data_structure in ["map-cache", "map-resolver"]):
        if (lisp.lisp_is_running("lisp-rtr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-rtr")
        elif (lisp.lisp_is_running("lisp-itr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-itr")
        else:
            return(json.dumps([]))
        #endif
    #endif

    # map-server / database-mapping live in the ETR (preferred) or ITR.
    if (data_structure in ["map-server", "database-mapping"]):
        if (lisp.lisp_is_running("lisp-etr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
        elif (lisp.lisp_is_running("lisp-itr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-itr")
        else:
            return(json.dumps([]))
        #endif
    #endif

    # site-cache data lives in the map-server process.
    if (data_structure in ["site-cache", "site-cache-summary"]):
        if (lisp.lisp_is_running("lisp-ms")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-ms")
        else:
            return(json.dumps([]))
        #endif
    #endif

    lisp.lprint("Waiting for api get-data '{}', parmameters: '{}'".format( \
        data_structure, data))

    # Block until the owning process replies on the IPC socket.
    opcode, source, port, output = lisp.lisp_receive(lisp_ipc_socket, True)
    lisp.lisp_ipc_lock.release()
    return(output)
#enddef
#
# lisp_api_put_delete
#
# Tell the LISP subsystem to add/replace or remove a command clause.
#
@bottle.route('/lisp/api', method="put")
@bottle.route('/lisp/api/<command>', method="put")
@bottle.route('/lisp/api/<command>', method="delete")
def lisp_api_put_delete(command = ""):
    """
    REST PUT/DELETE handler: add/replace (PUT) or remove (DELETE) a
    configuration command clause. Requires authentication; 'user-account'
    changes additionally require superuser. Returns JSON status.
    """
    data = [{ "?" : [{"?" : "not-auth"}] }]
    if (bottle.request.auth == None): return(data)

    #
    # Authenticate.
    #
    if (bottle.request.auth != None):
        username, pw = bottle.request.auth
        if (lispconfig.lisp_find_user_account(username, pw) == False):
            return(json.dumps(data))
        #endif
    else:
        # NOTE(review): unreachable — the early return above already handled
        # the auth == None case.
        if (bottle.request.headers["User-Agent"].find("python") != -1):
            return(json.dumps(data))
        #endif
        if (lispconfig.lisp_validate_user() == False):
            return(json.dumps(data))
        #endif
    #endif

    #
    # If the request is to add, change, or remove a "user-account" command,
    # the validated user must be configured as a superuser.
    #
    if (command == "user-account"):
        if (lispconfig.lisp_is_user_superuser(username) == False):
            data = [{ "user-account" : [{"?" : "not-auth"}] }]
            return(json.dumps(data))
        #endif
    #endif

    #
    # A valid user can access data now.
    #
    jdata = bottle.request.body.readline()
    if (jdata == ""):
        data = [{ "?" : [{"?" : "no-body"}] }]
        return(json.dumps(data))
    #endif

    data = json.loads(jdata)
    if (command != ""):
        command = "lisp " + command
    else:
        # No command in the URL: take it from the body's first clause key
        # (python2 .keys()[0]).
        command = data[0].keys()[0]
    #endif

    #
    # Add, replace, or remove lines from configuration file. Grab config
    # file lock.
    #
    lisp.lisp_ipc_lock.acquire()
    if (bottle.request.method == "DELETE"):
        data = lispconfig.lisp_remove_clause_for_api(data)
    else:
        data = lispconfig.lisp_put_clause_for_api(data)
    #endif
    lisp.lisp_ipc_lock.release()

    return(json.dumps(data))
#enddef
#
# lisp_show_api_doc
#
@bottle.route('/lisp/show/api-doc', method="get")
def lisp_show_api_doc():
    """Regenerate lispapi.txt with pydoc (when lispapi.py is present) and
    serve it; report an error when no doc file could be produced."""
    if (os.path.exists("lispapi.py")):
        os.system("pydoc lispapi > lispapi.txt")
    #endif
    if (os.path.exists("lispapi.txt")):
        return(bottle.static_file("lispapi.txt", root="./"))
    #endif
    return("lispapi.txt file not found")
#enddef
#
# lisp_show_command_doc
#
@bottle.route('/lisp/show/command-doc', method="get")
def lisp_show_comamnd_doc():
    """Serve the example configuration file as plain text."""
    doc_file = "lisp.config.example"
    return(bottle.static_file(doc_file, root="./", mimetype="text/plain"))
#enddef
#
# lisp_show_lisp_xtr
#
# Display the show-xtr file that the go data-plane lisp-xtr writes to.
#
@bottle.route('/lisp/show/lisp-xtr', method="get")
def lisp_show_lisp_xtr():
    """
    Display the show-xtr file that the go data-plane lisp-xtr writes to,
    converted to HTML (indentation and font markup preserved).
    """
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # Special case to look for a other data-planes. If it does not exist, check
    # the lispers.net go data-plane.
    #
    if (os.path.exists("./show-ztr")):
        f = open("./show-ztr", "r"); lines = f.read(); f.close()
    else:
        f = open("./show-xtr", "r"); lines = f.read(); f.close()
    #endif

    new = ""
    lines = lines.split("\n")
    for line in lines:
        # NOTE(review): a line starting with 4 spaces matches both tests and
        # gets lisp_space(4) AND lisp_space(2) prepended — confirm intended.
        if (line[0:4] == "    "): new += lisp.lisp_space(4)
        if (line[0:2] == "  "): new += lisp.lisp_space(2)
        new += line + "<br>"
    #endfor
    new = lisp.convert_font(new)
    return(lisp.lisp_print_sans(new))
#enddef
#
# lisp_show_keys
#
# Display LISP crypto-key-list to ITR, ETR, RTR.
#
@bottle.route('/lisp/show/<xtr>/keys', method="get")
def lisp_show_keys(xtr):
    """Display the crypto-key-list of an ITR, ETR, or RTR. Superuser only."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    # Collect the failure message, if any, then render it once.
    failure = None
    if (lispconfig.lisp_is_user_superuser(None) == False):
        failure = "Permission denied"
    elif (xtr not in ["itr", "etr", "rtr"]):
        failure = "Invalid URL"
    #endif
    if (failure != None):
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(failure)))
    #endif

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show {}-keys".format(xtr)))
#enddef
#
# lisp_show_geo_map
#
# Use Google Maps API to draw a circle on a geographical map. The html file
# ./lispers.net-geo.html is javascript to call the Google API.
#
@bottle.route('/lisp/geo-map/<geo_prefix>')
def lisp_show_geo_map(geo_prefix):
    """
    Use Google Maps API to draw a circle on a geographical map. Substitutes
    $LAT/$LON/$RADIUS into the javascript in ./lispers.net-geo.html.
    """
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    # URL encodes the prefix with "-" separators; rebuild "<geo>/<radius>".
    geo_prefix = geo_prefix.split("-")
    geo_prefix = "-".join(geo_prefix[0:-1]) + "/" + geo_prefix[-1]

    geo = lisp.lisp_geo("")
    geo.parse_geo_string(geo_prefix)
    lat, lon = geo.dms_to_decimal()
    radius = geo.radius * 1000  # presumably km -> meters — TODO confirm

    r = open("./lispers.net-geo.html", "r"); html = r.read(); r.close()
    html = html.replace("$LAT", str(lat))
    html = html.replace("$LON", str(lon))
    html = html.replace("$RADIUS", str(radius))
    return(html)
#enddef
#
# lisp_core_login_page
#
# Print to browser landing page.
#
@bottle.route('/lisp/login', method="get")
def lisp_core_login_page():
    """Render the browser login page."""
    return(lispconfig.lisp_login_page())
#enddef
#
# lisp_core_do_login
#
# Get login info entered in forms data. Validate and add to cookie database.
# If valid, take user to landing page. Othereise, go back to login page.
#
@bottle.route('/lisp/login', method="post")
def lisp_core_do_login():
    """Validate posted login credentials: on success render the landing
    page, otherwise fall back to the login page."""
    if (lispconfig.lisp_validate_user()):
        return(lispconfig.lisp_landing_page())
    #endif
    return(lisp_core_login_page())
#enddef
#
# lisp_core_landing_page
#
# Print to browser landing page.
#
@bottle.route('/lisp')
def lisp_core_landing_page():
    """Render the landing page, or the login page for unvalidated users."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    return(lispconfig.lisp_landing_page())
#enddef
#
# lisp_core_traceback_page
#
# Look in log files for Traceback messages.
#
@bottle.route('/lisp/traceback')
def lisp_core_traceback_page():
    """
    Look in log files for Traceback messages and render a summary page.
    """
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    clean = True

    #
    # Check explicit lisp-traceback.log.
    #
    if (os.path.exists("./logs/lisp-traceback.log")):
        output = commands.getoutput("cat ./logs/lisp-traceback.log")
        if (output):
            output = output.replace("----------", "<b>----------</b>")
            output = output.replace("\n", "<br>")
            clean = False
        #endif
    #endif

    #
    # Look for Traceback messages in log files.
    #
    if (clean):
        output = ""
        cmd = "egrep --with-filename Traceback ./logs/*.log"
        log_files = commands.getoutput(cmd)
        log_files = log_files.split("\n")
        for lf in log_files:
            # egrep output is "<file>:<match>"; skip non-matching lines.
            if (lf.find(":") == -1): continue
            line = lf.split(":")
            if (line[1] == "0"): continue
            output += "Found Tracebacks in log file {}<br>".format(line[0])
            clean = False
        #endfor
        output = output[0:-4]  # strip the trailing "<br>"
    #endif

    if (clean):
        output = "No Tracebacks found - a stable system is a happy system"
    #endif

    output = lisp.lisp_print_cour(output)
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_core_not_supported
#
# Print to browser landing page.
#
@bottle.route('/lisp/show/not-supported')
def lisp_core_not_supported():
    """Render the "feature not supported" page."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    return(lispconfig.lisp_not_supported())
#enddef
#
# lisp_show_status_command
#
# Show some version and system info.
#
@bottle.route('/lisp/show/status')
def lisp_show_status_command():
    """
    Show version and system info: uptimes, architecture, process status,
    resource utilization (top), and release notes, as an HTML table.
    """
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # Do not print out "show configuration" button or the debug drop-down menu.
    #
    output = ""
    superuser = lispconfig.lisp_is_user_superuser(None)
    if (superuser):
        sc = lisp.lisp_button("show configuration", "/lisp/show/conf")
        dc = lisp.lisp_button("show configuration diff", "/lisp/show/diff")
        ac = lisp.lisp_button("archive configuration", "/lisp/archive/conf")
        cc = lisp.lisp_button("clear configuration", "/lisp/clear/conf/verify")
        lf = lisp.lisp_button("log flows", "/lisp/log/flows")
        ils = lisp.lisp_button("install LISP software", "/lisp/install/image")
        rs = lisp.lisp_button("restart LISP subsystem", "/lisp/restart/verify")
        output = "<center>{}{}{}{}{}{}{}</center><hr>".format(sc, dc, ac, cc,
            lf, ils, rs)
    #endif

    sys_uptime = commands.getoutput("uptime")
    uname = commands.getoutput("uname -pv")
    main_version = lisp.lisp_version.replace("+", "")

    #
    # This is really broken. It returns twice as many CPUs than really on the
    # machine (on MacOS).
    #
    cpu_count = multiprocessing.cpu_count()

    # Trim the load-average suffix off the uptime line.
    i = sys_uptime.find(", load")
    sys_uptime = sys_uptime[0:i]
    elapsed = lisp.lisp_print_elapsed(lisp.lisp_uptime)
    top = "Not available"

    #
    # Get LISP process status.
    #
    command = "ps auww" if lisp.lisp_is_macos() else "ps aux"
    status = commands.getoutput( \
        "{} | egrep 'PID|python lisp|python -O lisp' | egrep -v grep". \
        format(command))
    status = status.replace(" ", lisp.space(1))
    status = status.replace("\n", "<br>")

    #
    # top on MacOS.
    #
    if (uname.find("Darwin") != -1):
        cpu_count = cpu_count / 2  # compensate for the doubled count above
        top = commands.getoutput("top -l 1 | head -50")
        top = top.split("PID")
        top = top[0]

        #
        # Massage the 'top' output so we can have one line per information
        # line. Each find() locates a section header and inserts a <br>
        # before it.
        #
        i = top.find("Load Avg")
        j = top[0:i].find("threads")
        processes = top[0:j+7]
        top = processes + "<br>" + top[i::]
        i = top.find("CPU usage")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("SharedLibs:")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("MemRegions")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("PhysMem")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("VM:")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("Networks")
        top = top[0:i] + "<br>" + top[i::]
        i = top.find("Disks")
        top = top[0:i] + "<br>" + top[i::]
    else:
        #
        # top on Fedora Linux.
        #
        lines = commands.getoutput("top -b -n 1 | head -50")
        lines = lines.split("PID")
        lines[1] = lines[1].replace(" ", lisp.space(1))
        lines = lines[0] + lines[1]
        top = lines.replace("\n", "<br>")
    #endif

    release_notes = commands.getoutput("cat release-notes.txt")
    release_notes = release_notes.replace("\n", "<br>")

    output += '''
    <br><table align="center" border="1" cellspacing="3x" cellpadding="5x">
    <tr>
    <td width="20%"><i>LISP Subsystem Version:<br>
    LISP Release {} Build Date:</i></td>
    <td width="80%"><font face="Courier New">{}<br>
    {}</font></td>
    </tr>
    <tr>
    <td width="20%"><i>LISP Subsystem Uptime:<br>System Uptime:</i></td>
    <td width="80%"><font face="Courier New">{}<br>
    {}</font></td>
    </tr>
    <tr>
    <td width="20%"><i>System Architecture:<br>
    Number of CPUs:<font face="Courier New">{}{}</font></td>
    <td width="80%"><font face="Courier New">{}</font></td>
    </tr>
    <tr>
    <td width="20%" valign="top"><i>LISP Process Status:</i></td>
    <td width="80%">
    <div style="height: 100px; overflow: auto">
    <font size="2" face="Courier New">{}</font></div></td>
    </tr>
    <tr>
    <td width="20%" valign="top"><i>System Resource Utilization:</i></td>
    <td width="80%">
    <div style="height: 200px; overflow: auto">
    <font face="Courier New">{}</font></td>
    </tr>
    <tr>
    <td width="20%" valign="top"><i>Release Notes:</i></td>
    <td width="80%">
    <div style="height: 300px; overflow: auto">
    <font size="2" face="Courier New">{}</font></div></td>
    </tr>
    </table>
    '''.format(main_version, lisp.lisp_version, lisp_build_date, elapsed,
        sys_uptime, lisp.lisp_space(1), cpu_count, uname, status, top,
        release_notes)

    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_show_conf_command
#
# Show configuration file.
#
@bottle.route('/lisp/show/conf')
def lisp_show_conf_command():
    """Serve the running lisp.config file as plain text."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    return(bottle.static_file("lisp.config", root="./",
        mimetype="text/plain"))
#enddef
#
# lisp_show_diff_command
#
# Show configuration diff file.
#
@bottle.route('/lisp/show/diff')
def lisp_show_diff_command():
    """Serve the configuration diff file as plain text."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif
    diff_file = "lisp.config.diff"
    return(bottle.static_file(diff_file, root="./", mimetype="text/plain"))
#enddef
#
# lisp_archive_conf_command
#
# Save a copy of lisp.config in lisp.config.archive.
#
@bottle.route('/lisp/archive/conf')
def lisp_archive_conf_command():
    """Save a copy of lisp.config in lisp.config.archive, holding the
    config lock around the copy."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    lisp.lisp_ipc_lock.acquire()
    os.system("cp ./lisp.config ./lisp.config.archive")
    lisp.lisp_ipc_lock.release()

    message = lisp.lisp_print_sans("Configuration file saved to ") + \
        lisp.lisp_print_cour("./lisp.config.archive")
    return(lispconfig.lisp_show_wrapper(message))
#enddef
#
# lisp_clear_conf_command
#
# Clear contents of the lisp.config file.
#
@bottle.route('/lisp/clear/conf')
def lisp_clear_conf_command():
    """Back up lisp.config to lisp.config.before-clear, then reset the
    configuration under the config lock."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    os.system("cp ./lisp.config ./lisp.config.before-clear")
    lisp.lisp_ipc_lock.acquire()
    lisp_core_cp_lisp_config()
    lisp.lisp_ipc_lock.release()

    message = lisp.lisp_print_sans(
        "Configuration cleared, a backup copy is stored in ")
    message += lisp.lisp_print_cour("./lisp.config.before-clear")
    return(lispconfig.lisp_show_wrapper(message))
#enddef
#
# lisp_clear_conf_verify_command
#
# Ask user if they really want to clear the config file.
#
@bottle.route('/lisp/clear/conf/verify')
def lisp_clear_conf_verify_command():
    """Ask the user to confirm clearing the configuration file."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    prompt = lisp.lisp_print_sans(
        "<br>Are you sure you want to clear the configuration?")
    buttons = lisp.lisp_button("yes", "/lisp/clear/conf") + \
        lisp.lisp_button("cancel", "/lisp")
    return(lispconfig.lisp_show_wrapper(prompt + buttons + "<br>"))
#enddef
#
# lisp_get_port_on_command_line
#
# Figure out if the lisp-core.pyo process was started with a parameter. If so,
# it is the port number we use for bottle. We want to restart using the same
# parameters.
#
def lisp_get_port_on_command_line():
    """Figure out if the lisp-core.pyo process was started with a port
    parameter (443, -8080, or 8080) so a restart can reuse it. Returns the
    port string, or "" when none was found."""
    for candidate in ["443", "-8080", "8080"]:
        c = 'ps auxww | egrep "lisp-core.pyo {}" | egrep -v grep'.format(
            candidate)
        ps_out = commands.getoutput(c)
        if (ps_out == ""): continue

        # Inspect the last two words of the first matching ps line.
        words = ps_out.split("\n")[0].split(" ")
        if (words[-2] == "lisp-core.pyo" and words[-1] == candidate):
            return(candidate)
        #endif
        return("")
    #endfor
    return("")
#enddef
#
# lisp_restart_command
#
# Restart the LISP subsystem.
#
@bottle.route('/lisp/restart')
def lisp_restart_command():
    """
    Restart the LISP subsystem by launching ./RESTART-LISP in a background
    thread, preserving the port the lisp-core process was started with.
    """
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # Check to see if requiretty is in effect. If so, we can't sudo, so tell
    # user.
    #
    line = commands.getoutput("egrep requiretty /etc/sudoers").split(" ")
    if (line[-1] == "requiretty" and line[0] == "Defaults"):
        output = "Need to remove 'requiretty' from /etc/sudoers"
        output = lisp.lisp_print_sans(output)
        return(lispconfig.lisp_show_wrapper(output))
    #endif

    lisp.lprint(lisp.bold("LISP subsystem restart request received", False))

    #
    # Check if we should start the process with 443 (or -8080) as the port
    # number for the lisp-core should run on.
    #
    port = lisp_get_port_on_command_line()

    #
    # Build command and launch it in another process. The sleep lets this
    # HTTP response go out before the subsystem goes down.
    #
    c = "sleep 1; sudo ./RESTART-LISP {}".format(port)
    thread.start_new_thread(os.system, (c, ))

    output = lisp.lisp_print_sans("Restarting LISP subsystem ...")
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_restart_verify_command
#
# Ask user if they really want to restart the LISP subsystem.
#
@bottle.route('/lisp/restart/verify')
def lisp_restart_verify_command():
    """Ask the user to confirm restarting the LISP subsystem."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    prompt = lisp.lisp_print_sans(
        "<br>Are you sure you want to restart the LISP subsystem?")
    buttons = lisp.lisp_button("yes", "/lisp/restart") + \
        lisp.lisp_button("cancel", "/lisp")
    return(lispconfig.lisp_show_wrapper(prompt + buttons + "<br>"))
#enddef
#
# lisp_install_command
#
# Install tgz file user supplied in html form.
#
@bottle.route('/lisp/install', method="post")
def lisp_install_command():
    """
    Install the lispers.net tgz file whose URL the user supplied in the
    html form, by running lisp-get-bits.pyo. Renders success/failure HTML.
    """
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    # Basic sanity check on the submitted URL.
    image = bottle.request.forms.get("image_url")
    if (image.find("lispers.net") == -1 or image.find(".tgz") == -1):
        string = "Invalid install request for file {}".format(image)
        lisp.lprint(lisp.bold(string, False))
        output = lisp.lisp_print_sans("Invalid lispers.net tarball file name")
        return(lispconfig.lisp_show_wrapper(output))
    #endif

    # Shell redirection syntax differs between Ubuntu's shell and others.
    if (lisp.lisp_is_ubuntu()):
        c = "python lisp-get-bits.pyo {} force 2>&1 > /dev/null".format(image)
    else:
        c = "python lisp-get-bits.pyo {} force >& /dev/null".format(image)
    #endif
    status = os.system(c)

    # Success is judged by the downloaded file existing locally.
    image_file = image.split("/")[-1]
    if (os.path.exists(image_file)):
        release = image.split("release-")[1]
        release = release.split(".tgz")[0]
        output = "Install completed for release {}".format(release)
        output = lisp.lisp_print_sans(output)
        output += "<br><br>" + lisp.lisp_button("restart LISP subsystem",
            "/lisp/restart/verify") + "<br>"
    else:
        string = lisp.lisp_print_cour(image)
        output = "Install failed for file {}".format(string)
        output = lisp.lisp_print_sans(output)
    #endif

    string = "Install request for file {} {}".format(image,
        "succeeded" if (status == 0) else "failed")
    lisp.lprint(lisp.bold(string, False))
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_install_get_image
#
# Ask user for tgz image to install.
#
@bottle.route('/lisp/install/image')
def lisp_install_get_image():
    """Render the form asking the user for a lispers.net tarball URL to
    install (posts to /lisp/install)."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    prompt = lisp.lisp_print_sans("<br>Enter lispers.net tarball URL:")
    form = '''
    <form action="/lisp/install" method="post" style="display: inline;">
    {}
    <input type="text" name="image_url" size="75" required/>
    <input type="submit" style="background-color:transparent;border-radius:10px;" value="Submit" />
    </form><br>'''.format(prompt)
    return(lispconfig.lisp_show_wrapper(form))
#enddef
#
# lisp_log_flows_command
#
# Touch file ./log-flows so we can have the user request a dump of the memory
# based flow log.
#
@bottle.route('/lisp/log/flows')
def lisp_log_flows_command():
    """Create ./log-flows to request a dump of the in-memory flow log,
    then point the user at the log file the dump is appended to."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    os.system("touch ./log-flows")

    link = "<a href='/lisp/show/log/lisp-flow/100'>logs/lisp-flows.log</a>"
    page = lisp.lisp_print_sans("Flow data appended to file ")
    page += lisp.lisp_print_cour(link)
    return(lispconfig.lisp_show_wrapper(page))
#enddef
#
# lisp_search_log_command
#
# Search the <num> tail lines of <name> and display in <hr> separated format
# with search keyword in red.
#
@bottle.route('/lisp/search/log/<name>/<num>/<keyword>')
def lisp_search_log_command(name = "", num = "", keyword = ""):
    """Search the last <num> lines of logs/<name>.log for <keyword> and
    display the matches, <hr> separated, with the keyword highlighted.

    name    - log file basename (logs/<name>.log)
    num     - number of tail lines to search
    keyword - egrep pattern to search for
    """

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # NOTE(review): name/num/keyword come straight from the URL and are
    # interpolated into a shell pipeline, so this is exposed to shell
    # injection. Prefer subprocess with an argument list.
    #
    command = "tail -n {} logs/{}.log | egrep -B10 -A10 {}".format(num, name,
        keyword)
    output = commands.getoutput(command)

    if (output):
        occurences = output.count(keyword)
        output = lisp.convert_font(output)
        output = output.replace("--\n--\n", "--\n")
        output = output.replace("\n", "<br>")
        output = output.replace("--<br>", "<hr>")
        output = "Found <b>{}</b> occurences<hr>".format(occurences) + output
    else:
        output = "Keyword {} not found".format(keyword)
    #endif

    #
    # Highlight keyword in blue. Fixed: wrap in a single pass so the tags
    # nest properly as <font><b>keyword</b></font>; the old two-pass
    # replace produced the malformed nesting <font><b>keyword</font></b>.
    #
    blue = "<font color='blue'><b>{}</b></font>".format(keyword)
    output = output.replace(keyword, blue)

    output = lisp.lisp_print_cour(output)
    return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_search_log_command_input
#
# Get input form data for keyword to search on.
#
@bottle.post('/lisp/search/log/<name>/<num>')
def lisp_search_log_command_input(name = "", num=""):
    """Accept the keyword form post and delegate to the log search page."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    search_for = bottle.request.forms.get("keyword")
    return(lisp_search_log_command(name, num, search_for))
#enddef
#
# lisp_show_log_name_command
#
# Show trace log file.
#
@bottle.route('/lisp/show/log/<name>/<num>')
def lisp_show_log_name_command(name = "", num=""):
    """Display the tail of logs/<name>.log with a keyword-search form on
    top. <num> defaults to the last 100 lines."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    if (num == ""): num = 100

    header = '''
        <form action="/lisp/search/log/{}/{}" method="post">
        <i>Keyword search:</i>
        <input type="text" name="keyword" />
        <input style="background-color:transparent;border-radius:10px;" type="submit" value="Submit" />
        </form><hr>
    '''.format(name, num)

    logfile = "logs/{}.log".format(name)
    if (os.path.exists(logfile)):
        body = commands.getoutput("tail -n {} {}".format(num, logfile))
        body = lisp.convert_font(body)
        body = body.replace("\n", "<br>")
        page = header + lisp.lisp_print_cour(body)
    else:
        page = "{} {} {}".format(lisp.lisp_print_sans("File"),
            lisp.lisp_print_cour(logfile),
            lisp.lisp_print_sans("does not exist"))
    #endif
    return(lispconfig.lisp_show_wrapper(page))
#enddef
#
# lisp_debug_menu_command
#
# Turn on or off debug.
#
@bottle.route('/lisp/debug/<name>')
def lisp_debug_menu_command(name = ""):
    """Toggle debug logging from the landing-page menu. <name> is either
    "disable%all" or "<component>%<yes|no>", where component is a
    "lisp debug" sub-command or one of the xtr-parameters
    data-plane-logging / flow-logging. Rewrites the matching config
    clause through the lispconfig API and re-renders the landing page.

    NOTE: uses Python-2-only dict idioms (has_key, keys()[0])."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # Process "disable all" separately.
    #
    if (name == "disable%all"):
        # Turn off every sub-command in the "lisp debug" clause.
        data = lispconfig.lisp_get_clause_for_api("lisp debug")
        if (data[0].has_key("lisp debug")):
            new = []
            for entry in data[0]["lisp debug"]:
                key = entry.keys()[0]
                new.append({ key : "no" })
            #endfor
            new = { "lisp debug" : new }
            lispconfig.lisp_put_clause_for_api(new)
        #endif

        # In "lisp xtr-parameters", only the two logging knobs are
        # cleared; every other parameter keeps its current value.
        data = lispconfig.lisp_get_clause_for_api("lisp xtr-parameters")
        if (data[0].has_key("lisp xtr-parameters")):
            new = []
            for entry in data[0]["lisp xtr-parameters"]:
                key = entry.keys()[0]
                if (key in ["data-plane-logging", "flow-logging"]):
                    new.append({ key : "no" })
                else:
                    new.append({ key : entry[key] })
                #endif
            #endfor
            new = { "lisp xtr-parameters" : new }
            lispconfig.lisp_put_clause_for_api(new)
        #endif
        return(lispconfig.lisp_landing_page())
    #endif

    #
    # Process enabling or disable debug logging for a single item.
    # URL form is "<component>%<yes|no>".
    #
    name = name.split("%")
    component = name[0]
    yesno = name[1]

    # The two data-plane knobs live in "lisp xtr-parameters"; everything
    # else is a "lisp debug" sub-command.
    xtr_parms = ["data-plane-logging", "flow-logging"]
    clause_name = "lisp xtr-parameters" if (component in xtr_parms) else \
        "lisp debug"

    data = lispconfig.lisp_get_clause_for_api(clause_name)
    if (data[0].has_key(clause_name)):
        # Rebuild the clause as a dict, overriding only the selected
        # component's value when it is present.
        new = {}
        for entry in data[0][clause_name]:
            new[entry.keys()[0]] = entry.values()[0]
            if (new.has_key(component)): new[component] = yesno
        #endfor
        new = { clause_name: new }
        lispconfig.lisp_put_clause_for_api(new)
    #endif
    return(lispconfig.lisp_landing_page())
#enddef
#
# lisp_clear_referral_command
#
# Send a clear command to a LISP component.
#
@bottle.route('/lisp/clear/<name>')
@bottle.route('/lisp/clear/etr/<etr_name>/<stats_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>/<stats_name>')
@bottle.route('/lisp/clear/itr/<itr_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>')
def lisp_clear_command(name = "", itr_name = '', rtr_name = "", etr_name = "",
    stats_name = ""):
    """Send a "clear" IPC to the LISP component selected by the URL
    (referral-cache, ITR/RTR map-cache, or ETR/RTR decap stats).
    Superuser only."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    #
    # Clearing state is restricted to superusers.
    #
    if (lispconfig.lisp_is_user_superuser(None) == False):
        return(lispconfig.lisp_show_wrapper(
            lisp.lisp_print_sans("Not authorized")))
    #endif

    #
    # Map the URL variables onto a target process and a display string.
    #
    ipc_msg = "clear"
    if (name == "referral"):
        target = "lisp-mr"
        print_name = "Referral"
    elif (itr_name == "map-cache"):
        target = "lisp-itr"
        print_name = "ITR <a href='/lisp/show/itr/map-cache'>map-cache</a>"
    elif (rtr_name == "map-cache"):
        target = "lisp-rtr"
        print_name = "RTR <a href='/lisp/show/rtr/map-cache'>map-cache</a>"
    elif (etr_name == "stats"):
        target = "lisp-etr"
        print_name = ("ETR '{}' decapsulation <a href='/lisp/show/" + \
            "database'>stats</a>").format(stats_name)
        ipc_msg += "%" + stats_name
    elif (rtr_name == "stats"):
        target = "lisp-rtr"
        print_name = ("RTR '{}' decapsulation <a href='/lisp/show/" + \
            "rtr/map-cache'>stats</a>").format(stats_name)
        ipc_msg += "%" + stats_name
    else:
        return(lispconfig.lisp_show_wrapper(
            lisp.lisp_print_sans("Invalid command")))
    #endif

    #
    # Fire-and-forget IPC to the chosen component; no reply expected.
    #
    ipc_msg = lisp.lisp_command_ipc(ipc_msg, "lisp-core")
    lisp.lisp_ipc(ipc_msg, lisp_ipc_socket, target)

    #
    # Touch lisp.config only when static map-cache entries exist so they
    # are reloaded after the clear.
    #
    if (commands.getoutput("egrep 'lisp map-cache' ./lisp.config") != ""):
        os.system("touch ./lisp.config")
    #endif

    return(lispconfig.lisp_show_wrapper(
        lisp.lisp_print_sans("{} cleared".format(print_name))))
#enddef
#
# lisp_show_map_server_command
#
# Have the lisp-etr process show the map-server configuration.
#
@bottle.route('/lisp/show/map-server')
def lisp_show_map_server_command():
    """Ask the lisp-etr process for its map-server configuration page."""

    user_ok = lispconfig.lisp_validate_user()
    if (user_ok == False): return(lisp_core_login_page())

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show map-server"))
#enddef
#
# lisp_show_database_command
#
# Have the lisp-etr process show the database-mapping configuration.
#
@bottle.route('/lisp/show/database')
def lisp_show_database_command():
    """Ask the lisp-etr process for its database-mapping configuration."""

    user_ok = lispconfig.lisp_validate_user()
    if (user_ok == False): return(lisp_core_login_page())

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show database-mapping"))
#enddef
#
# lisp_show_itr_map_cache_command
#
# Have the lisp-itr process show the map-cache.
#
@bottle.route('/lisp/show/itr/map-cache')
def lisp_show_itr_map_cache_command():
    """Ask the lisp-itr process for its map-cache contents."""

    user_ok = lispconfig.lisp_validate_user()
    if (user_ok == False): return(lisp_core_login_page())

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show itr-map-cache"))
#enddef
#
# lisp_show_itr_rloc_probing_command
#
# Have the lisp-itr process show the RLOC-probe list.
#
@bottle.route('/lisp/show/itr/rloc-probing')
def lisp_show_itr_rloc_probing_command():
    """Ask the lisp-itr process for its RLOC-probe list."""

    user_ok = lispconfig.lisp_validate_user()
    if (user_ok == False): return(lisp_core_login_page())

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show itr-rloc-probing"))
#enddef
#
# lisp_show_itr_map_cache_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/itr/map-cache/lookup')
def lisp_show_itr_map_cache_lookup():
    """Longest-match lookup of a form-supplied EID in the ITR map-cache."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        msg = lisp.lisp_print_sans(
            "Address '{}' has invalid format".format(eid))
        return(lispconfig.lisp_show_wrapper(msg))
    #endif

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show itr-map-cache" + "%" + eid))
#enddef
#
# lisp_show_rtr_map_cache_command
#
# Have the lisp-rtr process show the map-cache.
#
@bottle.route('/lisp/show/rtr/map-cache')
@bottle.route('/lisp/show/rtr/map-cache/<dns>')
def lisp_show_rtr_map_cache_command(dns = ""):
    """Ask the lisp-rtr process for its map-cache; the /dns variant shows
    the DNS-name view."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    show = "show rtr-map-cache-dns" if (dns == "dns") else \
        "show rtr-map-cache"
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, show))
#enddef
#
# lisp_show_rtr_rloc_probing_command
#
# Have the lisp-rtr process show the RLOC-probe list.
#
@bottle.route('/lisp/show/rtr/rloc-probing')
def lisp_show_rtr_rloc_probing_command():
    """Ask the lisp-rtr process for its RLOC-probe list."""

    user_ok = lispconfig.lisp_validate_user()
    if (user_ok == False): return(lisp_core_login_page())

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show rtr-rloc-probing"))
#enddef
#
# lisp_show_rtr_map_cache_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/rtr/map-cache/lookup')
def lisp_show_rtr_map_cache_lookup():
    """Longest-match lookup of a form-supplied EID in the RTR map-cache."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        msg = lisp.lisp_print_sans(
            "Address '{}' has invalid format".format(eid))
        return(lispconfig.lisp_show_wrapper(msg))
    #endif

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show rtr-map-cache" + "%" + eid))
#enddef
#
# lisp_show_referral_command
#
# Have the lisp-mr show the DDT referral-cache.
#
@bottle.route('/lisp/show/referral')
def lisp_show_referral_command():
    """Ask the lisp-mr process for its DDT referral-cache."""

    user_ok = lispconfig.lisp_validate_user()
    if (user_ok == False): return(lisp_core_login_page())

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show referral-cache"))
#enddef
#
# lisp_show_referral_cache_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/referral/lookup')
def lisp_show_referral_cache_lookup():
    """Longest-match lookup of a form-supplied EID in the referral-cache."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        msg = lisp.lisp_print_sans(
            "Address '{}' has invalid format".format(eid))
        return(lispconfig.lisp_show_wrapper(msg))
    #endif

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show referral-cache" + "%" + eid))
#enddef
#
# lisp_show_delegations_command
#
# Have the lisp-mr show the DDT configured delegation information.
#
@bottle.route('/lisp/show/delegations')
def lisp_show_delegations_command():
    """Ask the lisp-mr process for its configured DDT delegations."""

    user_ok = lispconfig.lisp_validate_user()
    if (user_ok == False): return(lisp_core_login_page())

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show delegations"))
#enddef
#
# lisp_show_delegations_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/delegations/lookup')
def lisp_show_delegations_lookup():
    """Longest-match lookup of a form-supplied EID in the delegations."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        msg = lisp.lisp_print_sans(
            "Address '{}' has invalid format".format(eid))
        return(lispconfig.lisp_show_wrapper(msg))
    #endif

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show delegations" + "%" + eid))
#enddef
#
# lisp_show_site_command
#
# Have the lisp-ms process show the site registration information. Convert
# eid-prefix from format "<iid>-<eid>-<ml>" to "[<iid>]<eid>/<ml>" internal
# format. We need to do this because URLs should avoid square brackets.
#
@bottle.route('/lisp/show/site')
@bottle.route('/lisp/show/site/<eid_prefix>')
def lisp_show_site_command(eid_prefix = ""):
    """Show lisp-ms site registrations, optionally filtered by an
    EID-prefix given in the URL-safe form "<iid>-<eid>-<ml>" (converted
    to the internal "[<iid>]<eid>/<ml>" format)."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    show = "show site"
    if (eid_prefix != ""):
        show = lispconfig.lisp_parse_eid_in_url(show, eid_prefix)
    #endif
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, show))
#enddef
#
# lisp_show_itr_dyn_eid_command
#
# Show dynamic-EIDs from the ITR's point of view.
#
@bottle.route('/lisp/show/itr/dynamic-eid/<eid_prefix>')
def lisp_show_itr_dyn_eid_command(eid_prefix = ""):
    """Show dynamic-EIDs from the ITR's point of view, optionally
    filtered by the URL-encoded EID-prefix."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    show = "show itr-dynamic-eid"
    if (eid_prefix != ""):
        show = lispconfig.lisp_parse_eid_in_url(show, eid_prefix)
    #endif
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, show))
#enddef
#
# lisp_show_dyn_eid_command
#
# Show dynamic-EIDs from the ETR's point of view.
#
@bottle.route('/lisp/show/etr/dynamic-eid/<eid_prefix>')
def lisp_show_dyn_eid_command(eid_prefix = ""):
    """Show dynamic-EIDs from the ETR's point of view, optionally
    filtered by the URL-encoded EID-prefix."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    show = "show etr-dynamic-eid"
    if (eid_prefix != ""):
        show = lispconfig.lisp_parse_eid_in_url(show, eid_prefix)
    #endif
    return(lispconfig.lisp_process_show_command(lisp_ipc_socket, show))
#enddef
#
# lisp_show_site_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/site/lookup')
def lisp_show_site_lookup():
    """Longest-match lookup of a form-supplied EID in the site cache."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        msg = lisp.lisp_print_sans(
            "Address '{}' has invalid format".format(eid))
        return(lispconfig.lisp_show_wrapper(msg))
    #endif

    return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
        "show site" + "%" + eid + "@lookup"))
#enddef
#
# lisp_lig_command
#
# Do interactive lig.
#
@bottle.post('/lisp/lig')
def lisp_lig_command():
    """Run the lisp-lig tool with form-supplied EID/map-resolver values
    and render its output as an html page."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    eid = bottle.request.forms.get("eid")
    mr = bottle.request.forms.get("mr")
    count = bottle.request.forms.get("count")
    no_nat = "no-info" if bottle.request.forms.get("no-nat") == "yes" else ""

    #
    # Map-resolver defaults to the local system.
    #
    if (mr == ""): mr = "localhost"

    #
    # An EID is mandatory input.
    #
    if (eid == ""):
        output = "Need to supply EID address"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
    #endif

    #
    # Prefer the source file when present, else the optimized bytecode.
    #
    lig = ""
    if os.path.exists("lisp-lig.pyo"): lig = "-O lisp-lig.pyo"
    if os.path.exists("lisp-lig.py"): lig = "lisp-lig.py"

    #
    # Neither form of the tool is installed - a broken install.
    #
    if (lig == ""):
        output = "Cannot find lisp-lig.py or lisp-lig.pyo"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
    #endif

    if (count != ""): count = "count {}".format(count)
    command = 'python {} "{}" to {} {} {}'.format(lig, eid, mr, count, no_nat)
    output = commands.getoutput(command)
    output = output.replace("\n", "<br>")
    output = lisp.convert_font(output)

    #
    # Indent well-known field labels so the output lines up readably.
    #
    for label, pad in [("RLOC:", 2), ("Empty,", 2), ("geo:", 4),
        ("elp:", 4), ("rle:", 4)]:
        output = output.replace(label, lisp.space(pad) + label)
    #endfor

    return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#enddef
#
# lisp_rig_command
#
# Do interactive rig.
#
@bottle.post('/lisp/rig')
def lisp_rig_command():
    """Run the lisp-rig tool with form-supplied EID/DDT-node values and
    render its output as an html page."""

    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    eid = bottle.request.forms.get("eid")
    ddt = bottle.request.forms.get("ddt")
    follow_all = "follow-all-referrals" if \
        bottle.request.forms.get("follow") == "yes" else ""

    #
    # DDT-node defaults to the local system.
    #
    if (ddt == ""): ddt = "localhost"

    #
    # An EID is mandatory input.
    #
    if (eid == ""):
        output = "Need to supply EID address"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
    #endif

    #
    # Prefer the source file when present, else the optimized bytecode.
    #
    rig = ""
    if os.path.exists("lisp-rig.pyo"): rig = "-O lisp-rig.pyo"
    if os.path.exists("lisp-rig.py"): rig = "lisp-rig.py"

    #
    # Neither form of the tool is installed - a broken install.
    #
    if (rig == ""):
        output = "Cannot find lisp-rig.py or lisp-rig.pyo"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
    #endif

    command = 'python {} "{}" to {} {}'.format(rig, eid, ddt, follow_all)
    output = commands.getoutput(command)
    output = output.replace("\n", "<br>")
    output = lisp.convert_font(output)
    output = output.replace("Referrals:", lisp.space(2) + "Referrals:")
    return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#enddef
#
# lisp_run_geo_lig
#
# Do lookup on both supplied EIDs passed as input parameters and return
# a geo-point and geo-prefix if they are found in RLOC records.
#
def lisp_run_geo_lig(eid1, eid2):
    """Look up both supplied EIDs with lisp-lig and return a two-element
    list of the geo-strings found in their RLOC records. An element is
    None when no geo-coordinate could be obtained; [None, None] when the
    lig tool or a map-resolver address cannot be found. EIDs already in
    geo-coordinate format are passed through unchanged."""

    lig = None
    if os.path.exists("lisp-lig.pyo"): lig = "-O lisp-lig.pyo"
    if os.path.exists("lisp-lig.py"): lig = "lisp-lig.py"
    if (lig == None): return([None, None])

    #
    # First get a map-resolver address from the local lisp.config. Accept
    # either an "address = " or "dns-name = " line from the first
    # "lisp map-resolver" clause.
    #
    o = commands.getoutput("egrep -A 2 'lisp map-resolver {' ./lisp.config")
    mr = None
    for keyword in ["address = ", "dns-name = "]:
        mr = None
        index = o.find(keyword)
        if (index == -1): continue
        mr = o[index+len(keyword)::]
        index = mr.find("\n")
        if (index == -1): continue
        mr = mr[0:index]
        break
    #endfor
    if (mr == None): return([None, None])

    #
    # Lookup EIDs in loop.
    #
    addr = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    geos = []
    for eid in [eid1, eid2]:

        #
        # Don't do lookups for Geo-Coordinates. Only for EIDs that are not
        # in Geo-Coordinate format.
        #
        if (addr.is_geo_string(eid)):
            geos.append(eid)
            continue
        #endif

        #
        # Try the plain lookup first, then retry with "no-info" appended.
        # Fixed: run each loop variant 'cmd' - the old code ran 'command'
        # both times, so the no-info retry was never actually executed.
        #
        command = 'python {} "{}" to {} count 1'.format(lig, eid, mr)
        for cmd in [command, command + " no-info"]:
            output = commands.getoutput(cmd)
            index = output.find("geo: ")
            if (index == -1):
                # Only record a miss after the final (no-info) attempt.
                if (cmd != command): geos.append(None)
                continue
            #endif
            output = output[index+len("geo: ")::]
            index = output.find("\n")
            if (index == -1):
                if (cmd != command): geos.append(None)
                continue
            #endif
            geos.append(output[0:index])
            break
        #endfor
    #endfor
    return(geos)
#enddef
#
# lisp_geo_command
#
# Do geo lookups from lisp.lisp_geo() functions.
#
@bottle.post('/lisp/geo')
def lisp_geo_command():
    """Handle the /lisp/geo form: take a geo-point and a geo-prefix (each
    either a literal geo-coordinate string or an EID to resolve via lig),
    then report the distance between them and whether the point falls
    inside the prefix's radius. Any parse/lookup failure is returned as
    an error message instead."""
    if (lispconfig.lisp_validate_user() == False):
        return(lisp_core_login_page())
    #endif

    eid = bottle.request.forms.get("geo-point")
    eid_prefix = bottle.request.forms.get("geo-prefix")
    # 'output' stays "" while no error has occurred; every later stage is
    # gated on that.
    output = ""

    #
    # If an EID in the form of an IP address or distinguish-name, run a
    # lig to get record from mapping database to obtain the geo data.
    #
    gs = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    geo_point = lisp.lisp_geo("")
    geo_prefix = lisp.lisp_geo("")
    point, prefix = lisp_run_geo_lig(eid, eid_prefix)

    #
    # Check EID format if geo-coordinate or return geo-point from database
    # lookup.
    #
    if (gs.is_geo_string(eid)):
        if (geo_point.parse_geo_string(eid) == False):
            output = "Could not parse geo-point format"
        #endif
    elif (point == None):
        output = "EID {} lookup could not find geo-point".format(
            lisp.bold(eid, True))
    elif (geo_point.parse_geo_string(point) == False):
        output = "Could not parse geo-point format returned from lookup"
    #endif

    #
    # Geo-point is good, now check EID-prefix or geo-prefix format returned
    # from database lookup.
    #
    if (output == ""):
        if (gs.is_geo_string(eid_prefix)):
            if (geo_prefix.parse_geo_string(eid_prefix) == False):
                output = "Could not parse geo-prefix format"
            #endif
        elif (prefix == None):
            output = "EID-prefix {} lookup could not find geo-prefix".format( \
                lisp.bold(eid_prefix, True))
        elif (geo_prefix.parse_geo_string(prefix) == False):
            output = "Could not parse geo-prefix format returned from lookup"
        #endif
    #endif

    #
    # No input errors. Return good results. Otherwise, error response in
    # variable 'output'.
    #
    if (output == ""):
        # Only echo the original inputs when they differ from the values
        # the lookup resolved to.
        eid = "" if (eid == point) else ", EID {}".format(eid)
        eid_prefix = "" if (eid_prefix == prefix) else \
            ", EID-prefix {}".format(eid_prefix)

        point_str = geo_point.print_geo_url()
        prefix_str = geo_prefix.print_geo_url()
        km = geo_prefix.radius

        # Convert degrees-minutes-seconds to decimal degrees, rounded for
        # display.
        dd_point = geo_point.dms_to_decimal()
        dd_point = (round(dd_point[0], 6), round(dd_point[1], 6))
        dd_prefix = geo_prefix.dms_to_decimal()
        dd_prefix = (round(dd_prefix[0], 6), round(dd_prefix[1], 6))

        distance = round(geo_prefix.get_distance(geo_point), 2)
        inside = "inside" if geo_prefix.point_in_circle(geo_point) else \
            "outside"

        # Fixed-width spacing so the three report lines align.
        spo = lisp.space(2)
        spe = lisp.space(1)
        sd = lisp.space(3)

        output = ("Geo-Point:{}{} {}{}<br>Geo-Prefix:{}{} {}, {} " + \
            "kilometer radius{}<br>").format(spo, point_str, dd_point, eid,
            spe, prefix_str, dd_prefix, km, eid_prefix)
        output += "Distance:{}{} kilometers, point is {} of circle".format(sd,
            distance, lisp.bold(inside, True))
    #endif
    return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#enddef
#
# lisp_get_info_source
#
# See if this source has sent an Info-Request and we are caching it so we
# can proxy Map-Request for it. Either address OR nonce can be supplied to
# determine if we are doing a lookup based on address or nonce.
#
def lisp_get_info_source(addr_str, port, nonce):
    """Find a cached Info-Request source so Map-Requests can be proxied
    for it. Supply addr_str+port to search by translated address, or
    nonce to search by Map-Request nonce (pass None for the unused key).
    Returns the matching info-source entry or None."""

    if (addr_str != None):
        for candidate in lisp.lisp_info_sources_by_address.values():
            if (candidate.address.print_address_no_iid() != addr_str):
                continue
            if (candidate.port == port): return(candidate)
        #endfor
        return(None)
    #endif

    if (nonce != None):
        return(lisp.lisp_info_sources_by_nonce.get(nonce, None))
    #endif
    return(None)
#enddef
#
# lisp_nat_proxy_map_request
#
# Grab the nonce from the Map-Request, store it in the info-source data
# structure and modify the ITR-RLOCs field so the Map-Reply comes back to us.
#
def lisp_nat_proxy_map_request(lisp_sockets, info_source, packet):
    """Proxy a Map-Request received from a NAT'd info-source: record its
    nonce, rewrite the ITR-RLOCs to this system, and forward the request
    so the Map-Reply comes back here for relaying. Returns True when the
    packet was consumed (proxied, or dropped as undecodable), False when
    the caller should process the Map-Request normally (we are already in
    the ITR-RLOCs, i.e. a possible loop)."""

    #
    # Parse and move packet pointer to beginning of Map-Request.
    #
    ecm = lisp.lisp_ecm(0)
    packet = ecm.decode(packet)
    if (packet == None):
        lisp.lprint("Could not decode ECM packet")
        return(True)
    #endif

    header = lisp.lisp_control_header()
    if (header.decode(packet) == None):
        lisp.lprint("Could not decode control header")
        return(True)
    #endif
    if (header.type != lisp.LISP_MAP_REQUEST):
        lisp.lprint("Received ECM without Map-Request inside")
        return(True)
    #endif

    #
    # We are at the Map-Request header.
    #
    map_request = lisp.lisp_map_request()
    packet = map_request.decode(packet, None, 0)
    nonce = map_request.nonce
    addr_str = info_source.address.print_address_no_iid()

    #
    # Print Map-Request again to show what has changed.
    #
    map_request.print_map_request()

    lisp.lprint("Process {} from info-source {}, port {}, nonce 0x{}". \
        format(lisp.bold("nat-proxy Map-Request", False),
        lisp.red(addr_str, False), info_source.port,
        lisp.lisp_hex_string(nonce)))

    #
    # Store nonce in info-source and cache in dictionary array. We will need
    # to find it based on nonce when the Map-Reply is returned to us.
    #
    info_source.cache_nonce_for_info_source(nonce)

    #
    # Do not timeout Map-Requests that are subscription-requests. Because a
    # Map-Notify can be triggered any time back to the requester.
    #
    info_source.no_timeout = map_request.subscribe_bit

    #
    # Check if we are already in ITR-RLOCs list. If so, this could be looping.
    # Return so the Map-Request can be processed in the regular fashion (that
    # is, send on DDT or to a Map-Resolver.
    #
    for itr_rloc in map_request.itr_rlocs:
        if (itr_rloc.is_local()): return(False)
    #endfor

    #
    # Store new ITR-RLOCs list.
    #
    myself = lisp.lisp_myrlocs[0]
    map_request.itr_rloc_count = 0
    map_request.itr_rlocs = []
    map_request.itr_rlocs.append(myself)
    packet = map_request.encode(None, 0)
    map_request.print_map_request()

    # Use our IPv6 RLOC as the ECM source when the target EID is IPv6 and
    # an IPv6 RLOC is configured.
    deid = map_request.target_eid
    if (deid.is_ipv6()):
        myself_v6 = lisp.lisp_myrlocs[1]
        if (myself_v6 != None): myself = myself_v6
    #endif

    #
    # Send ECM based Map-Request to Map-Resolver.
    #
    ms = lisp.lisp_is_running("lisp-ms")
    lisp.lisp_send_ecm(lisp_sockets, packet, deid, lisp.LISP_CTRL_PORT,
        deid, myself, to_ms=ms, ddt=False)
    return(True)
#enddef
#
# lisp_nat_proxy_reply
#
# Grab the nonce from the Map-Request, store it in the info-source data
# structure and modify the ITR-RLOCs field so the Map-Reply/Notify comes
# back to us.
#
def lisp_nat_proxy_reply(lisp_sockets, info_source, packet, mr_or_mn):
    """Forward a Map-Reply (mr_or_mn True) or Map-Notify (False) to the
    NAT'd info-source it was proxied for, using the source's translated
    address and port."""

    addr_str = info_source.address.print_address_no_iid()
    port = info_source.port
    nonce = info_source.nonce

    what = "Reply" if mr_or_mn else "Notify"
    what = lisp.bold("nat-proxy Map-{}".format(what), False)
    lisp.lprint("Forward {} to info-source {}, port {}, nonce 0x{}".format( \
        what, lisp.red(addr_str, False), port, lisp.lisp_hex_string(nonce)))

    #
    # Send on socket with arguments passed from IPC message.
    #
    dest = lisp.lisp_convert_4to6(addr_str)
    lisp.lisp_send(lisp_sockets, dest, port, packet)
#enddef
#
# lisp_core_dispatch_packet
#
# Look at packet type and decide which process to send it to.
#
def lisp_core_dispatch_packet(lisp_sockets, source, sport, packet):
    """Demultiplex a LISP control packet received on port 4342: decode
    the type field and either handle it here (Info-Requests, NAT-proxied
    replies) or IPC it to the component process (or lig/rig client)
    responsible for that message type."""
    global lisp_ipc_socket

    header = lisp.lisp_control_header()
    if (header.decode(packet) == None):
        lisp.lprint("Could not decode control header")
        return
    #endif

    #
    # In the lispers.net implementation any LISP system can process Info-
    # Requests. We'll have the lisp-core process do this. lig/rig and the
    # lisp-etr process sends Info-Requests messages. Since the lisp-core
    # process processes Info-Requests, it responds with Info-Reply messages.
    # And they are sent to the emphemeral port so go straight back to the lig/
    # rig, or etr-processes.
    #
    if (header.type == lisp.LISP_NAT_INFO):
        if (header.info_reply == False):
            lisp.lisp_process_info_request(lisp_sockets, packet, source, sport,
                lisp.lisp_ms_rtr_list)
        #endif
        return
    #endif

    # Keep the raw packet for local decoding; 'packet' becomes the framed
    # IPC message forwarded to the component processes.
    local_packet = packet
    packet = lisp.lisp_packet_ipc(packet, source, sport)

    #
    # Map-Registers, Echos, and Map-Notify-Acks go to the lisp-ms process.
    #
    if (header.type in (lisp.LISP_MAP_REGISTER, lisp.LISP_MAP_NOTIFY_ACK)):
        lisp.lisp_ipc(packet, lisp_ipc_socket, "lisp-ms")
        return
    #endif

    #
    # Map-Reply messages go to ITRs. A reply whose nonce matches a cached
    # Info-Request source is relayed back through the NAT instead; a
    # running lig client (signalled by /tmp/lisp-lig) takes precedence
    # over the lisp-itr process.
    #
    if (header.type == lisp.LISP_MAP_REPLY):
        map_reply = lisp.lisp_map_reply()
        map_reply.decode(local_packet)

        info_source = lisp_get_info_source(None, 0, map_reply.nonce)
        if (info_source):
            lisp_nat_proxy_reply(lisp_sockets, info_source, local_packet, True)
        else:
            lig = "/tmp/lisp-lig"
            if (os.path.exists(lig)):
                lisp.lisp_ipc(packet, lisp_ipc_socket, lig)
            else:
                lisp.lisp_ipc(packet, lisp_ipc_socket, "lisp-itr")
            #endif
        #endif
        return
    #endif

    #
    # Map-Notify messages go to ITRs. Same NAT-proxy and lig handling as
    # Map-Replies; otherwise delivered to lisp-rtr when it is running,
    # else lisp-etr.
    #
    if (header.type == lisp.LISP_MAP_NOTIFY):
        map_notify = lisp.lisp_map_notify(lisp_sockets)
        map_notify.decode(local_packet)

        info_source = lisp_get_info_source(None, 0, map_notify.nonce)
        if (info_source):
            lisp_nat_proxy_reply(lisp_sockets, info_source, local_packet,
                False)
        else:
            lig = "/tmp/lisp-lig"
            if (os.path.exists(lig)):
                lisp.lisp_ipc(packet, lisp_ipc_socket, lig)
            else:
                process = "lisp-rtr" if lisp.lisp_is_running("lisp-rtr") else \
                    "lisp-etr"
                lisp.lisp_ipc(packet, lisp_ipc_socket, process)
            #endif
        #endif
        return
    #endif

    #
    # Map-Referral messages go to MRs. But if a rig client is running on
    # this machine, IPC it to the client.
    #
    if (header.type == lisp.LISP_MAP_REFERRAL):
        rig = "/tmp/lisp-rig"
        if (os.path.exists(rig)):
            lisp.lisp_ipc(packet, lisp_ipc_socket, rig)
        else:
            lisp.lisp_ipc(packet, lisp_ipc_socket, "lisp-mr")
        #endif
        return
    #endif

    #
    # Map-Requests go to ETRs/RTRs when they RLOC-probes or SMR-invoked
    # requests. And Map-Requests go to ITRs when they are SMRs.
    #
    if (header.type == lisp.LISP_MAP_REQUEST):
        process = "lisp-itr" if (header.is_smr()) else "lisp-etr"

        #
        # RLOC-probes are received specifically by the process by pcaping
        # on port 4342.
        #
        if (header.rloc_probe): return

        lisp.lisp_ipc(packet, lisp_ipc_socket, process)
        return
    #endif

    #
    # ECMs can go to a lot of places. They are sent ITR->MR, LIG->MR, MR->DDT,
    # MR->MS, and MS->ETR. If we find an Info-Request source, this core
    # process will process the Map-Request so it can get the Map-Reply and
    # forward to the translated address and port of a client behind a NAT.
    #
    if (header.type == lisp.LISP_ECM):
        info_source = lisp_get_info_source(source, sport, None)
        if (info_source):
            if (lisp_nat_proxy_map_request(lisp_sockets, info_source,
                local_packet)): return
        #endif

        # Pick the destination process from the ECM's to-etr/to-ms/ddt
        # flags, defaulting to lisp-mr (or lisp-etr when no MR runs).
        process = "lisp-mr"
        if (header.is_to_etr()):
            process = "lisp-etr"
        elif (header.is_to_ms()):
            process = "lisp-ms"
        elif (header.is_ddt()):
            if (lisp.lisp_is_running("lisp-ddt")):
                process = "lisp-ddt"
            elif (lisp.lisp_is_running("lisp-ms")):
                process = "lisp-ms"
            #endif
        elif (lisp.lisp_is_running("lisp-mr") == False):
            process = "lisp-etr"
        #endif
        lisp.lisp_ipc(packet, lisp_ipc_socket, process)
    #endif
    return
#enddef
#
# lisp_ssl_server
#
# Setup cherrypy server that supports SSL connections. This is so we can
# protect passwords that flow over an http connection.
#
# Used the following to create private key and cert:
#
# openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes
#
class lisp_ssl_server(bottle.ServerAdapter):
    """Bottle ServerAdapter that serves https so login passwords are
    protected in transit. Serves with ./lisp-cert.pem as both certificate
    and key, seeding it from lisp-cert.pem.default when absent."""

    def run(self, hand):
        """Start the SSL wsgi server on (self.host, self.port) serving
        bottle handler 'hand'; blocks until the server stops."""
        cert = "./lisp-cert.pem"

        #
        # Use user provided lisp-cert.pem if it exists. Otherwise use the
        # lispers.net default lisp-cert.pem.default file.
        #
        if (os.path.exists(cert) == False):
            os.system("cp ./lisp-cert.pem.default {}".format(cert))
            lisp.lprint(("{} does not exist, creating a copy from lisp-" + \
                "cert.pem.default").format(cert))
        #endif

        # Same file supplies certificate and private key; no cert chain.
        server = wsgi_server((self.host, self.port), hand)
        server.ssl_adapter = ssl_adaptor(cert, cert, None)
        try:
            server.start()
        finally:
            server.stop()
        #endtry
    #enddef
#endclass
#
# lisp_bottle_ipv4_process
#
# Variable bottle_port can take on the following values:
#
# 8080 - run web server on port 8080 using SSL
# 443 - run web server on port 443 using SSL
# -8080 - run web server on port 8080 with no SSL (no secure connection).
#
# Any other port is accepted and used with SSL. If a "-" precedes it, it is
# used with no SSL.
#
def lisp_bottle_ipv4_process(bottle_port):
    """Run the bottle web frontend on IPv4.

    bottle_port > 0: serve https on that port via lisp_ssl_server,
    falling back to plain http when the SSL server fails to start.
    bottle_port < 0: serve plain http on -bottle_port (no SSL).
    """
    lisp.lisp_set_exception()

    #
    # No security. Usually for testing purposes or complexities installing
    # OpenSSL.
    #
    if (bottle_port < 0):
        bottle.run(host="0.0.0.0", port=-bottle_port)
        return
    #endif

    bottle.server_names["lisp-ssl-server"] = lisp_ssl_server

    #
    # Try SSL first; fall back to an insecure listener if it fails.
    # Fixed: catch Exception rather than a bare except so that
    # KeyboardInterrupt/SystemExit terminate the process instead of
    # silently restarting the server without SSL.
    #
    try:
        bottle.run(host="0.0.0.0", port=bottle_port, server="lisp-ssl-server",
            fast=True)
    except Exception:
        bottle.run(host="0.0.0.0", port=bottle_port, fast=True)
    #endtry
    return
#enddef
#
# lisp_bottle_ipv6_process
#
# Start HTTP server on port 8080. But bottle does not support IPv6 yet so
# we comment out the call.
#
def lisp_bottle_ipv6_process():
    """Placeholder for an IPv6 web frontend; bottle does not support
    IPv6 yet, so the run() call stays commented out."""
    lisp.lisp_set_exception()
    # run(host="0::0", port=8080)
    return
#enddef
#
# lisp_check_processes
#
# Check to see if any component has gone down when it should be running. And
# if it comes up when it should be running, download the configuration commands
# it is responsible for.
#
def lisp_check_processes(lisp_socket):
    """Poll each LISP component once a second; push config on restart."""
    lisp.lisp_set_exception()

    # Last known up/down state for every component process.
    status = {"lisp-itr" : False, "lisp-etr" : False, "lisp-rtr" : False,
        "lisp-mr" : False, "lisp-ms" : False, "lisp-ddt" : False}
    while (True):
        time.sleep(1)

        # Snapshot the previous poll so up/down transitions can be detected.
        old_status = status
        status = {}
        for process in old_status:
            status[process] = lisp.lisp_is_running(process)
            if (old_status[process] == status[process]): continue

            lisp.lprint("*** Process '{}' has {} ***".format(process,
                "come up" if status[process] else "gone down"))

            #
            # If process has come up, send configuration commands. The IPC
            # lock serializes this with show/command traffic.
            #
            if (status[process] == True):
                lisp.lisp_ipc_lock.acquire()
                lispconfig.lisp_send_commands(lisp_socket, process)
                lisp.lisp_ipc_lock.release()
            #endif
        #endfor
    #endwhile
    return
#enddef
#
# lisp_timeout_info_sources
#
# Timeout info sources from lisp_info_source_list{}.
#
def lisp_timeout_info_sources():
    """Every 60 seconds remove Info sources not refreshed within 60 seconds."""
    lisp.lisp_set_exception()
    timeout = 60
    while (True):
        time.sleep(timeout)

        delete_list = []
        now = lisp.lisp_get_timestamp()

        #
        # Find entries that are greater than 1 minute old. Fix: entries that
        # are still fresh (uptime + timeout >= now) are the ones to skip; the
        # original "<" skipped the stale entries and deleted the fresh ones,
        # inverting the documented intent.
        #
        for key in lisp.lisp_info_sources_by_address:
            info_source = lisp.lisp_info_sources_by_address[key]
            if (info_source.no_timeout): continue
            if (info_source.uptime + timeout >= now): continue
            delete_list.append(key)

            #
            # Also remove the nonce index for this stale entry, if present.
            #
            nonce = info_source.nonce
            if (nonce == None): continue
            if (nonce in lisp.lisp_info_sources_by_nonce):
                lisp.lisp_info_sources_by_nonce.pop(nonce)
            #endif
        #endfor

        #
        # Go through delete list to remove from dictionary array.
        #
        for key in delete_list:
            lisp.lisp_info_sources_by_address.pop(key)
        #endfor
    #endwhile
    return
#enddef
#
# lisp_core_control_packet_process
#
# Listen for IPC messages from LISP component processes. They want to send
# control packets out on the network from UDP port 4342.
#
def lisp_core_control_packet_process(lisp_ipc_control_socket, lisp_sockets):
    """Forward control packets received over IPC out UDP port 4342.

    Runs forever in its own thread; returns only when the IPC socket
    raises on recvfrom (i.e. it was closed).
    """
    lisp.lisp_set_exception()
    while (True):
        try: packet_data = lisp_ipc_control_socket.recvfrom(9000)
        except: return(["", "", "", ""])

        # IPC message format: "opcode@dest@port@packet" from a component.
        data = packet_data[0].split("@")
        source = packet_data[1]
        opcode = data[0]
        dest = data[1]
        port = int(data[2])

        # If the packet payload itself contained "@" bytes, the split
        # fragmented it; rejoin the pieces via lisp_bit_stuff().
        packet = data[3::]
        if (len(packet) > 1):
            packet = lisp.lisp_bit_stuff(packet)
        else:
            packet = packet[0]
        #endif

        # NOTE(review): the log string below is missing a space between
        # "received" and "unexpected" -- runtime text, left untouched here.
        if (opcode != "control-packet"):
            lisp.lprint(("lisp_core_control_packet_process() received" + \
                "unexpected control-packet, message ignored"))
            continue
        #endif

        lisp.lprint(("{} {} bytes from {}, dest/port: {}/{}, control-" + \
            "packet: {}").format(lisp.bold("Receive", False), len(packet),
            source, dest, port, lisp.lisp_format_packet(packet)))

        #
        # Check if this is a Map-Reply to a ephem port and we have an
        # Info-Source for the nonce in the Map-Reply. If so, call
        # lisp_core_dispatch_packet().
        #
        header = lisp.lisp_control_header()
        header.decode(packet)
        if (header.type == lisp.LISP_MAP_REPLY):
            map_reply = lisp.lisp_map_reply()
            map_reply.decode(packet)
            if (lisp_get_info_source(None, 0, map_reply.nonce)):
                lisp_core_dispatch_packet(lisp_sockets, source, port, packet)
                continue
            #endif
        #endif

        #
        # This is a Map-Notify that the lisp-etr process received and it
        # has determined it is a (S,G) multicast Map-Notify that the lisp-itr
        # process needs to process to update its map-cache. Note this uses
        # the module-global lisp_ipc_socket.
        #
        if (header.type == lisp.LISP_MAP_NOTIFY and source == "lisp-etr"):
            ipc = lisp.lisp_packet_ipc(packet, source, port)
            lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-itr")
            continue
        #endif

        #
        # We are sending on a udp46 socket, so if the destination is IPv6
        # we have an address format we can use. If destination is IPv4 we
        # need to put the address in a IPv6 IPv4-compatible format.
        # NOTE(review): the lisp_convert_4to6() result is immediately
        # overwritten by the lisp_address() below -- looks like a dead
        # assignment; confirm lisp_convert_4to6() has no needed side effect
        # before removing.
        #
        addr = lisp.lisp_convert_4to6(dest)
        addr = lisp.lisp_address(lisp.LISP_AFI_IPV6, "", 128, 0)
        if (addr.is_ipv4_string(dest)): dest = "::ffff:" + dest
        addr.store_address(dest)

        #
        # Send on socket with arguments passed from IPC message.
        #
        lisp.lisp_send(lisp_sockets, addr, port, packet)
    #endwhile
    return
#enddef
#
# lisp_cp_lisp_config
#
# The file ./lisp.config does not exist. Copy all commands from file
# lisp.config.example up to the dashed line.
#
def lisp_core_cp_lisp_config():
    """Seed ./lisp.config from lisp.config.example up to the "#---#" line.

    Copies every line (including the dashed terminator) and stops there.
    """
    with open("./lisp.config.example", "r") as f:
        lines = f.read().split("\n")
    #endwith

    with open("./lisp.config", "w") as f:
        for line in lines:
            f.write(line + "\n")

            #
            # Stop after a comment line made entirely of dashes ("#----#").
            # Fix: test len() first -- the original indexed line[0] before
            # checking the length, raising IndexError on any empty line
            # (split("\n") always yields a trailing "" as well).
            #
            if (len(line) >= 4 and line[0] == "#" and line[-1] == "#"):
                dashes = line[1:-2]
                if (dashes == len(dashes) * "-"): break
            #endif
        #endfor
    #endwith
    return
#enddef
#
# lisp_core_startup
#
# Initialize this LISP core process. This function returns a LISP network
# listen socket.
#
def lisp_core_startup(bottle_port):
    """Bring up the lisp-core process.

    Opens the control (4342) and data (4341) sockets, then starts the
    helper threads (IPC packet forwarder, config watcher, bottle web UI,
    process health check, info-source timeout). Returns False when no
    local RLOC addresses could be found, True otherwise.
    """
    global lisp_build_date
    global lisp_control_listen_socket
    global lisp_ipc_socket
    global lisp_ipc_control_socket
    global lisp_sockets
    global lisp_encap_socket

    lisp.lisp_i_am("core")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("core-process starting up")
    lisp.lisp_uptime = lisp.lisp_get_timestamp()

    # Version and build date are read from flat files written at build time.
    lisp.lisp_version = commands.getoutput("cat lisp-version.txt")
    lisp_build_date = commands.getoutput("cat lisp-build-date.txt")

    #
    # Get local address for source RLOC for encapsulation.
    #
    if (lisp.lisp_get_local_addresses() == False): return(False)

    #
    # Only the core process uses a lock so it can send commands and show
    # output in parallel to the component processes.
    #
    lisp.lisp_ipc_lock = multiprocessing.Lock()

    #
    # If this is a development build, put a plus after the version number.
    # A development build is a build done from a directory that has the
    # lisp.py file. Released builds built from the build directory will build
    # only .pyo files.
    #
    if (os.path.exists("lisp.py")): lisp.lisp_version += "+"

    #
    # Open network socket to listen (and send) on port 4342. We may want
    # a Map-Resolver to respond with a source-address of an anycast address
    # so firewalls and NAT can return responses to ITRs or lig/rig clients.
    #
    address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
    if (os.getenv("LISP_ANYCAST_MR") == None or lisp.lisp_myrlocs[0] == None):
        lisp_control_listen_socket = lisp.lisp_open_listen_socket(address,
            str(lisp.LISP_CTRL_PORT))
    else:
        # Anycast mode: bind specifically to our first RLOC address.
        address = lisp.lisp_myrlocs[0].print_address_no_iid()
        lisp_control_listen_socket = lisp.lisp_open_listen_socket(address,
            str(lisp.LISP_CTRL_PORT))
    #endif
    lisp.lprint("Listen on {}, port 4342".format(address))

    #
    # Open datagram socket for 4341. We will not listen on it. We just don't
    # want the kernel to send port unreachables to ITRs and PITRs. If another
    # data-plane is running, it may listen on the data port 4341. Let it.
    #
    if (lisp.lisp_external_data_plane() == False):
        lisp_encap_socket = lisp.lisp_open_listen_socket(address,
            str(lisp.LISP_DATA_PORT))
        lisp.lprint("Listen on {}, port 4341".format(address))
    #endif

    #
    # Open internal socket to send from to LISP components for configuration
    # events.
    #
    lisp_ipc_socket = lisp.lisp_open_send_socket("lisp-core", "")
    lisp_ipc_socket.settimeout(3)

    #
    # Open internal socket 'lisp-core-pkt' so LISP components can send
    # control packets from UDP port 4342 via this lisp-core process.
    #
    lisp_ipc_control_socket = lisp.lisp_open_listen_socket("", "lisp-core-pkt")

    # NOTE(review): the listen socket appears twice -- presumably the
    # [IPv4, IPv6] slots of the socket array; confirm against lisp_send().
    lisp_sockets = [lisp_control_listen_socket, lisp_control_listen_socket,
        lisp_ipc_socket]

    #
    # Start a thread to listen for control packet from LISP component
    # processes.
    #
    threading.Thread(target=lisp_core_control_packet_process,
        args=[lisp_ipc_control_socket, lisp_sockets]).start()

    #
    # Start a new thread to monitor configuration file changes. Do quick check
    # to see if this is a first-time startup for the system. Check to see if
    # lisp.config was not created by user.
    #
    if (os.path.exists("./lisp.config") == False):
        lisp.lprint(("./lisp.config does not exist, creating a copy " + \
            "from lisp.config.example"))
        lisp_core_cp_lisp_config()
    #endif

    #
    # Check if we are a map-server listening on a multicast group. This
    # is a decentralized-push-xtr with a multicast map-server address.
    #
    lisp_check_decent_xtr_multicast(lisp_control_listen_socket)

    threading.Thread(target=lispconfig.lisp_config_process,
        args=[lisp_ipc_socket]).start()

    #
    # Start a new thread to run bottle for each address-family.
    #
    threading.Thread(target=lisp_bottle_ipv4_process,
        args=[bottle_port]).start()
    threading.Thread(target=lisp_bottle_ipv6_process, args=[]).start()

    #
    # Start a new thread to run LISP component health check.
    #
    threading.Thread(target=lisp_check_processes,
        args=[lisp_ipc_socket]).start()

    #
    # Start a new thread to time out stale Info sources. (The original
    # comment here said "health check" -- a copy/paste of the one above.)
    #
    threading.Thread(target=lisp_timeout_info_sources).start()
    return(True)
#enddef
#
# lisp_core_shutdown
#
# Shutdown process.
#
def lisp_core_shutdown():
    """Close every socket the lisp-core process opened, in open order."""

    #
    # Close sockets.
    #
    for sock, sock_name in ((lisp_ipc_socket, "lisp-core"),
        (lisp_ipc_control_socket, "lisp-core-pkt"),
        (lisp_control_listen_socket, ""), (lisp_encap_socket, "")):
        lisp.lisp_close_socket(sock, sock_name)
    #endfor
    return
#enddef
#
# lisp_check_decent_xtr_multicast
#
# Check to see if "decentralized-push-xtr = yes" and if any map-server clause
# has a multicast address configured. If so, setsockopt so we can receive
# multicast Map-Register messages.
#
# This function is robust enough for when a user copies lisp.config.example
# into lisp.config. We have to ignore text after "#- ... -#".
#
def lisp_check_decent_xtr_multicast(lisp_socket):
    """Join configured map-server multicast groups on lisp_socket when
    decentralized-push-xtr is enabled in ./lisp.config."""
    f = open("./lisp.config", "r"); lines = f.read(); f.close()
    lines = lines.split("\n")

    #
    # Check if "decentralized-push-xtr = yes" is in the "lisp xtr-parameters"
    # command clause. Stop at the "#- ... -#" trailer line. Fix: the trailer
    # test must use 2-character slices ([0:2] and [-2:]); the original
    # 1-character slices could never compare equal to "#-"/"-#", so the
    # trailer was never honored.
    #
    decent_xtr = False
    for line in lines:
        if (line[0:2] == "#-" and line[-2:] == "-#"): break
        if (line == "" or line[0] == "#"): continue
        if (line.find("decentralized-push-xtr = yes") == -1): continue
        decent_xtr = True
        break
    #endfor
    if (decent_xtr == False): return

    #
    # Check if "lisp map-server" command clauses have multicast addresses
    # configured.
    #
    groups = []
    in_clause = False
    for line in lines:
        if (line[0:2] == "#-" and line[-2:] == "-#"): break
        if (line == "" or line[0] == "#"): continue
        if (line.find("lisp map-server") != -1):
            in_clause = True
            continue
        #endif
        if (line[0] == "}"):
            in_clause = False
            continue
        #endif

        #
        # Parse address. Look at high-order byte for the IPv4 multicast
        # range 224.0.0.0 - 239.255.255.255.
        #
        if (in_clause and line.find("address = ") != -1):
            group = line.split("address = ")[1]
            ho_byte = int(group.split(".")[0])
            if (ho_byte >= 224 and ho_byte < 240): groups.append(group)
        #endif
    #endfor

    #
    # Fix: test the accumulated list "groups" -- the original tested the
    # loop variable "group", which is a string when set and raises
    # NameError when no address line was ever parsed.
    #
    if (groups == []): return

    #
    # Find eth0 IP address.
    #
    out = commands.getoutput('ifconfig eth0 | egrep "inet "')
    if (out == ""): return
    intf_addr = out.split()[1]

    #
    # Set socket options so we receive Map-Registers sent to each group.
    #
    i = socket.inet_aton(intf_addr)
    for group in groups:
        lisp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        lisp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, i)
        g = socket.inet_aton(group) + i
        lisp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, g)
        lisp.lprint("Setting multicast listen socket for group {}".format( \
            group))
    #endfor
    return
#enddef
#------------------------------------------------------------------------------

# Web-interface port: optional first command-line argument (default 8080).
# A negative value means "no SSL" (see lisp_bottle_ipv4_process()).
bottle_port = int(sys.argv[1]) if (len(sys.argv) > 1) else 8080

#
# Main entry point for process.
#
if (lisp_core_startup(bottle_port) == False):
    lisp.lprint("lisp_core_startup() failed")
    lisp.lisp_print_banner("lisp-core abnormal exit")
    exit(1)
#endif

while (True):

    #
    # Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket..
    #
    opcode, source, port, packet = \
        lisp.lisp_receive(lisp_control_listen_socket, False)

    # An empty source means the listen socket was closed -- leave the loop.
    if (source == ""): break

    #
    # Process received network packet.
    #
    source = lisp.lisp_convert_6to4(source)
    lisp_core_dispatch_packet(lisp_sockets, source, port, packet)
#endwhile

lisp_core_shutdown()
lisp.lisp_print_banner("lisp-core normal exit")
exit(0)
#------------------------------------------------------------------------------
|
HslCommunication.py | '''
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2017 - 2018 Richard.Hu <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
'''
import string
import uuid
import socket
import struct
import threading
import gzip
import datetime
import random
from time import sleep
from enum import Enum
class StringResources:
    '''String resource table for the library; user-facing text is Chinese.'''

    # -- Generic messages --

    @staticmethod
    def ConnectedFailed():
        # "Connection failed"
        return "连接失败"
    @staticmethod
    def UnknownError():
        # "Unknown error"
        return "未知错误"
    @staticmethod
    def ErrorCode():
        # "Error code"
        return "错误代号"
    @staticmethod
    def TextDescription():
        # "Text description"
        return "文本描述"
    @staticmethod
    def ExceptionMessage():
        # "Error message:"
        return "错误信息:"
    @staticmethod
    def ExceptionStackTrace():
        # "Error stack trace:"
        return "错误堆栈:"
    @staticmethod
    def ExceptopnTargetSite():
        # "Failing method:" -- NOTE: misspelled name kept for API compatibility
        return "错误方法:"
    @staticmethod
    def ExceprionCustomer():
        # "User-defined method failed:" -- NOTE: misspelled name kept for API
        # compatibility
        return "用户自定义方法出错:"
    @staticmethod
    def TokenCheckFailed():
        # "Token check failed."
        return "令牌检查错误。"
    @staticmethod
    def SuccessText():
        return "Success"
    @staticmethod
    def NotSupportedDataType():
        # "Input type not supported, please re-enter"
        return "输入的类型不支持,请重新输入"

    # Modbus related messages
    @staticmethod
    def ModbusTcpFunctionCodeNotSupport():
        # "Function code not supported"
        return "不支持的功能码"
    @staticmethod
    def ModbusTcpFunctionCodeOverBound():
        # "Read request out of bounds"
        return "读取的数据越界"
    @staticmethod
    def ModbusTcpFunctionCodeQuantityOver():
        # "Read length exceeds the maximum"
        return "读取长度超过最大值"
    @staticmethod
    def ModbusTcpFunctionCodeReadWriteException():
        # "Read/write exception"
        return "读写异常"
    @staticmethod
    def ModbusTcpReadCoilException():
        # "Read coil exception"
        return "读取线圈异常"
    @staticmethod
    def ModbusTcpWriteCoilException():
        # "Write coil exception"
        return "写入线圈异常"
    @staticmethod
    def ModbusTcpReadRegisterException():
        # "Read register exception"
        return "读取寄存器异常"
    @staticmethod
    def ModbusTcpWriteRegisterException():
        # "Write register exception"
        return "写入寄存器异常"
    @staticmethod
    def ModbusAddressMustMoreThanOne():
        # "Address must be greater than 1 when the start address is 1"
        return "地址值在起始地址为1的情况下,必须大于1"

    # -- Mitsubishi (Melsec) messages --
    @staticmethod
    def MelsecPleaseReferToManulDocument():
        # "See the Mitsubishi communication manual for alarm details"
        return "请查看三菱的通讯手册来查看报警的具体信息"
    @staticmethod
    def MelsecReadBitInfo():
        # "Bit-array reads only apply to bit devices; call Read for word
        # devices"
        return "读取位变量数组只能针对位软元件,如果读取字软元件,请调用Read方法"

    # -- Omron FINS status codes --
    @staticmethod
    def OmronStatus0():
        # "Communication normal"
        return "通讯正常"
    @staticmethod
    def OmronStatus1():
        # "Message header is not FINS"
        return "消息头不是FINS"
    @staticmethod
    def OmronStatus2():
        # "Data length too long"
        return "数据长度太长"
    @staticmethod
    def OmronStatus3():
        # "Command not supported"
        return "该命令不支持"
    @staticmethod
    def OmronStatus20():
        # "Connection limit exceeded"
        return "超过连接上限"
    @staticmethod
    def OmronStatus21():
        # "Specified node is already connected"
        return "指定的节点已经处于连接中"
    @staticmethod
    def OmronStatus22():
        # "Attempt to connect to a protected node not configured in the PLC"
        return "尝试去连接一个受保护的网络节点,该节点还未配置到PLC中"
    @staticmethod
    def OmronStatus23():
        # "Client node number out of range"
        return "当前客户端的网络节点超过正常范围"
    @staticmethod
    def OmronStatus24():
        # "Client node number already in use"
        return "当前客户端的网络节点已经被使用"
    @staticmethod
    def OmronStatus25():
        # "All node numbers are in use"
        return "所有的网络节点已经被使用"
class OperateResult:
    '''Result object that can also carry extra payload data.'''
    # Whether the operation succeeded
    IsSuccess = False
    # Message describing the outcome
    Message = StringResources.SuccessText()
    # Numeric error code
    ErrorCode = 0

    def __init__(self, err = 0, msg = ""):
        self.ErrorCode = err
        self.Message = msg

    def ToMessageShowString( self ):
        '''Return the error code together with its text description.'''
        pieces = [StringResources.ErrorCode(), ":", str(self.ErrorCode),
                  "\r\n", StringResources.TextDescription(), ":", self.Message]
        return "".join(pieces)

    def CopyErrorFromOther(self, result):
        '''Copy the error information over from another result object.'''
        if result == None: return
        self.ErrorCode = result.ErrorCode
        self.Message = result.Message

    @staticmethod
    def CreateFailedResult( result ):
        '''Build a failed result carrying result's error code and message.'''
        return OperateResult(result.ErrorCode, result.Message)

    @staticmethod
    def CreateSuccessResult(Content1=None,Content2=None,Content3=None,Content4=None,Content5=None,Content6=None,Content7=None,Content8=None,Content9=None,Content10=None):
        '''Build a successful result, packing up to ten content payloads.

        A single payload is exposed as .Content; several payloads are
        exposed as .Content1 .. .Content10.
        '''
        success = OperateResult()
        success.IsSuccess = True
        success.Message = StringResources.SuccessText()
        extras = (Content2, Content3, Content4, Content5, Content6,
                  Content7, Content8, Content9, Content10)
        if all(item == None for item in extras):
            success.Content = Content1
        else:
            for slot, item in enumerate((Content1,) + extras):
                setattr(success, "Content" + str(slot + 1), item)
        return success
class SoftIncrementCount:
    '''Thread-safe, non-persistent auto-increment counter; wraps back to
    zero once the configured maximum is exceeded.'''
    # Configured starting number (stored for reference)
    start = 0
    # Next value to hand out (class default; shadowed per instance on use)
    current = 0
    # Largest value handed out before wrapping back to zero
    maxValue = 100000000000000000000000000
    # Lock shared by all instances, making increments atomic
    hybirdLock = threading.Lock()

    def __init__(self, maxValue, start):
        '''Create the counter with its maximum value and start number.'''
        self.maxValue = maxValue
        self.start = start

    def GetCurrentValue( self ):
        '''Atomically return the current value and advance the counter.'''
        with self.hybirdLock:
            value = self.current
            self.current += 1
            if self.current > self.maxValue:
                self.current = 0
        return value
class INetMessage:
    '''Base class for protocol message framing rules.'''
    # First-stage receive buffer (the protocol header)
    HeadBytes = bytes(0)
    # Second-stage receive buffer (the payload)
    ContentBytes = bytes(0)
    # Bytes that were sent for this transaction
    SendBytes = bytes(0)

    def ProtocolHeadBytesLength(self):
        '''Length of the protocol header, i.e. the size of the first receive.'''
        return 0

    def GetContentLengthByHeadBytes(self):
        '''Length of the second receive, derived from the header bytes.'''
        return 0

    def CheckHeadBytesLegal(self, token):
        '''Return True when the header (token) check succeeds.

        Fix: parameter renamed from the typo "toke" to "token" for
        consistency with every subclass override of this method.
        '''
        return False

    def GetHeadBytesIdentity(self):
        '''Message identity carried in the header bytes.'''
        return 0
class S7Message (INetMessage):
    '''Message framing rule for the Siemens S7 protocol.'''

    def ProtocolHeadBytesLength(self):
        '''The first receive reads the 4-byte header.'''
        return 4

    def GetContentLengthByHeadBytes(self):
        '''Remaining length: big-endian 16-bit at bytes 2..3, less the header.'''
        if self.HeadBytes == None:
            return 0
        return self.HeadBytes[2] * 256 + self.HeadBytes[3] - 4

    def CheckHeadBytesLegal(self,token):
        '''Header is legal when it begins with the bytes 0x03 0x00.'''
        if self.HeadBytes == None:
            return False
        return self.HeadBytes[0] == 0x03 and self.HeadBytes[1] == 0x00
class MelsecA1EBinaryMessage(INetMessage):
    '''Parsing rule for the Mitsubishi A-compatible 1E frame protocol.'''
    def ProtocolHeadBytesLength(self):
        '''Length of the protocol header, i.e. the size of the first receive.'''
        return 2
    def GetContentLengthByHeadBytes(self):
        '''Length of the second receive, derived from header and request.'''
        contentLength = 0
        # 0x5B in the completion byte: two more bytes (error detail) follow.
        # NOTE(review): confirm this code point against the 1E frame manual.
        if self.HeadBytes[1] == 0x5B:
            contentLength = 2
        else:
            # Round SendBytes[10] (presumably the requested point count --
            # confirm) up to an even number.
            length = 0
            if self.SendBytes[10] % 2 == 0:
                length = self.SendBytes[10]
            else:
                length = self.SendBytes[10] + 1
            # Dispatch on the reply subheader byte.
            if self.HeadBytes[0] == 0x80:
                contentLength = int(length / 2)
            elif self.HeadBytes[0] == 0x81:
                contentLength = self.SendBytes[10] * 2
            elif self.HeadBytes[0] == 0x82:
                contentLength = 0
            elif self.HeadBytes[0] == 0x83:
                contentLength = 0
            # In the A-compatible 1E protocol a successful write returns only
            # the subheader plus the completion code (0x00); those were read
            # with the header already, so nothing remains (contentLength = 0).
        return contentLength
    def CheckHeadBytesLegal(self,token):
        '''Reply is legal when its subheader equals the request's plus 0x80.'''
        if self.HeadBytes != None:
            if self.HeadBytes[0] - self.SendBytes[0] == 0x80:
                return True
            else:
                return False
        else:
            return False
class MelsecQnA3EBinaryMessage(INetMessage):
    '''Parsing rule for the Mitsubishi QnA-compatible 3E binary frame.'''

    def ProtocolHeadBytesLength(self):
        '''The first receive reads the 9-byte frame header.'''
        return 9

    def GetContentLengthByHeadBytes(self):
        '''Payload length: little-endian 16-bit value at bytes 7..8.'''
        if self.HeadBytes == None:
            return 0
        return self.HeadBytes[8] * 256 + self.HeadBytes[7]

    def CheckHeadBytesLegal(self,token):
        '''Header is legal when it begins with the bytes 0xD0 0x00.'''
        if self.HeadBytes == None:
            return False
        return self.HeadBytes[0] == 0xD0 and self.HeadBytes[1] == 0x00
class MelsecQnA3EAsciiMessage(INetMessage):
    '''Parsing rule for the Mitsubishi QnA-compatible 3E ASCII frame.'''

    def ProtocolHeadBytesLength(self):
        '''The first receive reads the 18-character ASCII header.'''
        return 18

    def GetContentLengthByHeadBytes(self):
        '''Payload length: four ASCII hex digits at header offsets 14..17.'''
        if self.HeadBytes == None:
            return 0
        return int(self.HeadBytes[14:18].decode('ascii'), 16)

    def CheckHeadBytesLegal(self,token):
        '''The reply header must begin with the characters "D000".'''
        if self.HeadBytes == None:
            return False
        head = self.HeadBytes
        return (head[0] == ord('D') and head[1] == ord('0') and
                head[2] == ord('0') and head[3] == ord('0'))
class ModbusTcpMessage (INetMessage):
    '''Parsing rule for the Modbus-TCP protocol.'''

    def ProtocolHeadBytesLength(self):
        '''The first receive reads the 6-byte MBAP header.'''
        return 6

    def GetContentLengthByHeadBytes(self):
        '''Remaining length: big-endian 16-bit value at bytes 4..5.'''
        if self.HeadBytes == None:
            return 0
        return self.HeadBytes[4] * 256 + self.HeadBytes[5]

    def CheckHeadBytesLegal(self,token):
        '''Modbus-TCP performs no token check; every header is legal.'''
        return True

    def GetHeadBytesIdentity(self):
        '''Transaction identifier: big-endian 16-bit value at bytes 0..1.'''
        return self.HeadBytes[0] * 256 + self.HeadBytes[1]
class HslMessage (INetMessage):
    '''Default framing rule used by this component's own protocol.'''

    def ProtocolHeadBytesLength(self):
        '''The header of an HSL message is 32 bytes.'''
        return 32

    def GetContentLengthByHeadBytes(self):
        '''Payload length: little-endian int32 at header bytes 28..31.'''
        if self.HeadBytes == None:
            return 0
        return struct.unpack('<i', bytes(self.HeadBytes[28:32]))[0]

    def GetHeadBytesIdentity(self):
        '''Message identity: little-endian int32 at header bytes 4..7.'''
        if self.HeadBytes == None:
            return 0
        return struct.unpack('<i', bytes(self.HeadBytes[4:8]))[0]

    def CheckHeadBytesLegal(self,token):
        '''Compare the 16 token bytes at header offset 12 with token.'''
        if self.HeadBytes == None:
            return False
        return SoftBasic.IsTwoBytesEquel(self.HeadBytes, 12, token, 0, 16)
class DataFormat(Enum):
    '''Byte ordering applied when parsing or generating multi-byte values.
    NOTE(review): per-member semantics inferred from the names; confirm
    against ByteTransDataFormat4().'''
    # Bytes kept in order A,B,C,D
    ABCD = 0
    # Bytes swapped within each 16-bit word
    BADC = 1
    # 16-bit words swapped
    CDAB = 2
    # Full byte reversal
    DCBA = 3
class ByteTransform:
'''数据转换类的基础,提供了一些基础的方法实现.'''
DataFormat = DataFormat.DCBA
def TransBool(self, buffer, index ):
'''将buffer数组转化成bool对象'''
return ((buffer[index] & 0x01) == 0x01)
def TransBoolArray(self, buffer, index, length ):
'''将buffer数组转化成bool数组对象,需要转入索引,长度'''
data = bytearray(length)
for i in range(length):
data[i]=buffer[i+index]
return SoftBasic.ByteToBoolArray( data, length * 8 )
def TransByte( self, buffer, index ):
'''将buffer中的字节转化成byte对象,需要传入索引'''
return buffer[index]
def TransByteArray( self, buffer, index, length ):
'''将buffer中的字节转化成byte数组对象,需要传入索引'''
data = bytearray(length)
for i in range(length):
data[i]=buffer[i+index]
return data
def TransInt16( self, buffer, index ):
'''从缓存中提取short结果'''
data = self.TransByteArray(buffer,index,2)
return struct.unpack('<h',data)[0]
def TransInt16Array( self, buffer, index, length ):
'''从缓存中提取short数组结果'''
tmp = []
for i in range(length):
tmp.append( self.TransInt16( buffer, index + 2 * i ))
return tmp
def TransUInt16(self, buffer, index ):
'''从缓存中提取ushort结果'''
data = self.TransByteArray(buffer,index,2)
return struct.unpack('<H',data)[0]
def TransUInt16Array(self, buffer, index, length ):
'''从缓存中提取ushort数组结果'''
tmp = []
for i in range(length):
tmp.append( self.TransUInt16( buffer, index + 2 * i ))
return tmp
def TransInt32(self, buffer, index ):
'''从缓存中提取int结果'''
data = self.ByteTransDataFormat4(self.TransByteArray(buffer,index,4))
return struct.unpack('<i',data)[0]
def TransInt32Array(self, buffer, index, length ):
'''从缓存中提取int数组结果'''
tmp = []
for i in range(length):
tmp.append( self.TransInt32( buffer, index + 4 * i ))
return tmp
def TransUInt32(self, buffer, index ):
'''从缓存中提取uint结果'''
data = self.ByteTransDataFormat4(self.TransByteArray(buffer,index,4))
return struct.unpack('<I',data)[0]
def TransUInt32Array(self, buffer, index, length ):
'''从缓存中提取uint数组结果'''
tmp = []
for i in range(length):
tmp.append( self.TransUInt32( buffer, index + 4 * i ))
return tmp
def TransInt64(self, buffer, index ):
'''从缓存中提取long结果'''
data = self.ByteTransDataFormat8(self.TransByteArray(buffer,index,8))
return struct.unpack('<q',data)[0]
def TransInt64Array(self, buffer, index, length):
'''从缓存中提取long数组结果'''
tmp = []
for i in range(length):
tmp.append( self.TransInt64( buffer, index + 8 * i ))
return tmp
def TransUInt64(self, buffer, index ):
'''从缓存中提取ulong结果'''
data = self.ByteTransDataFormat8(self.TransByteArray(buffer,index,8))
return struct.unpack('<Q',data)[0]
def TransUInt64Array(self, buffer, index, length):
'''从缓存中提取ulong数组结果'''
tmp = []
for i in range(length):
tmp.append( self.TransUInt64( buffer, index + 8 * i ))
return tmp
def TransSingle(self, buffer, index ):
'''从缓存中提取float结果'''
data = self.ByteTransDataFormat4(self.TransByteArray(buffer,index,4))
return struct.unpack('<f',data)[0]
def TransSingleArray(self, buffer, index, length):
'''从缓存中提取float数组结果'''
tmp = []
for i in range(length):
tmp.append( self.TransSingle( buffer, index + 4 * i ))
return tmp
def TransDouble(self, buffer, index ):
'''从缓存中提取double结果'''
data = self.ByteTransDataFormat8(self.TransByteArray(buffer,index,8))
return struct.unpack('<d',data)[0]
def TransDoubleArray(self, buffer, index, length):
'''从缓存中提取double数组结果'''
tmp = []
for i in range(length):
tmp.append( self.TransDouble( buffer, index + 8 * i ))
return tmp
def TransString( self, buffer, index, length, encoding ):
'''从缓存中提取string结果,使用指定的编码'''
data = self.TransByteArray(buffer,index,length)
return data.decode(encoding)
def BoolArrayTransByte(self, values):
'''bool数组变量转化缓存数据,需要传入bool数组'''
if (values == None): return None
return SoftBasic.BoolArrayToByte( values )
def BoolTransByte(self, value):
'''bool变量转化缓存数据,需要传入bool值'''
return self.BoolArrayTransByte([value])
def ByteTransByte(self, value ):
'''byte变量转化缓存数据,需要传入byte值'''
buffer = bytearray(1)
buffer[0] = value
return buffer
def Int16ArrayTransByte(self, values ):
'''short数组变量转化缓存数据,需要传入short数组'''
if (values == None) : return None
buffer = bytearray(len(values) * 2)
for i in range(len(values)):
buffer[(i*2): (i*2+2)] = struct.pack('<h',values[i])
return buffer
def Int16TransByte(self, value ):
'''short数组变量转化缓存数据,需要传入short值'''
return self.Int16ArrayTransByte([value])
def UInt16ArrayTransByte(self, values ):
'''ushort数组变量转化缓存数据,需要传入ushort数组'''
if (values == None) : return None
buffer = bytearray(len(values) * 2)
for i in range(len(values)):
buffer[(i*2): (i*2+2)] = struct.pack('<H',values[i])
return buffer
def UInt16TransByte(self, value ):
'''ushort变量转化缓存数据,需要传入ushort值'''
return self.UInt16ArrayTransByte([value])
def Int32ArrayTransByte(self, values ):
'''int数组变量转化缓存数据,需要传入int数组'''
if (values == None) : return None
buffer = bytearray(len(values) * 4)
for i in range(len(values)):
buffer[(i*4): (i*4+4)] = self.ByteTransDataFormat4(struct.pack('<i',values[i]))
return buffer
def Int32TransByte(self, value ):
'''int变量转化缓存数据,需要传入int值'''
return self.Int32ArrayTransByte([value])
    def UInt32ArrayTransByte(self, values ):
        '''Pack a uint array into buffer bytes, reordering each 4-byte group per self.DataFormat; None passes through.'''
        if (values == None) : return None
        buffer = bytearray(len(values) * 4)
        for i in range(len(values)):
            buffer[(i*4): (i*4+4)] = self.ByteTransDataFormat4(struct.pack('<I',values[i]))
        return buffer
    def UInt32TransByte(self, value ):
        '''Pack a single uint value into buffer bytes.'''
        return self.UInt32ArrayTransByte([value])
    def Int64ArrayTransByte(self, values ):
        '''Pack a long array into buffer bytes, reordering each 8-byte group per self.DataFormat; None passes through.'''
        if (values == None) : return None
        buffer = bytearray(len(values) * 8)
        for i in range(len(values)):
            buffer[(i*8): (i*8+8)] = self.ByteTransDataFormat8(struct.pack('<q',values[i]))
        return buffer
    def Int64TransByte(self, value ):
        '''Pack a single long value into buffer bytes.'''
        return self.Int64ArrayTransByte([value])
    def UInt64ArrayTransByte(self, values ):
        '''Pack a ulong array into buffer bytes, reordering each 8-byte group per self.DataFormat; None passes through.'''
        if (values == None) : return None
        buffer = bytearray(len(values) * 8)
        for i in range(len(values)):
            buffer[(i*8): (i*8+8)] = self.ByteTransDataFormat8(struct.pack('<Q',values[i]))
        return buffer
    def UInt64TransByte(self, value ):
        '''Pack a single ulong value into buffer bytes.'''
        return self.UInt64ArrayTransByte([value])
    def FloatArrayTransByte(self, values ):
        '''Pack a float array into buffer bytes, reordering each 4-byte group per self.DataFormat; None passes through.'''
        if (values == None) : return None
        buffer = bytearray(len(values) * 4)
        for i in range(len(values)):
            buffer[(i*4): (i*4+4)] = self.ByteTransDataFormat4(struct.pack('<f',values[i]))
        return buffer
    def FloatTransByte(self, value ):
        '''Pack a single float value into buffer bytes.'''
        return self.FloatArrayTransByte([value])
    def DoubleArrayTransByte(self, values ):
        '''Pack a double array into buffer bytes, reordering each 8-byte group per self.DataFormat; None passes through.'''
        if (values == None) : return None
        buffer = bytearray(len(values) * 8)
        for i in range(len(values)):
            buffer[(i*8): (i*8+8)] = self.ByteTransDataFormat8(struct.pack('<d',values[i]))
        return buffer
    def DoubleTransByte(self, value ):
        '''Pack a single double value into buffer bytes.'''
        return self.DoubleArrayTransByte([value])
    def StringTransByte(self, value:str, encoding:str ) -> bytes:
        '''Encode a string into buffer bytes using the given encoding.'''
        return value.encode(encoding)
def ByteTransDataFormat4(self, value, index = 0 ):
'''反转多字节的数据信息'''
buffer = bytearray(4)
if self.DataFormat == DataFormat.ABCD:
buffer[0] = value[index + 3]
buffer[1] = value[index + 2]
buffer[2] = value[index + 1]
buffer[3] = value[index + 0]
elif self.DataFormat == DataFormat.BADC:
buffer[0] = value[index + 2]
buffer[1] = value[index + 3]
buffer[2] = value[index + 0]
buffer[3] = value[index + 1]
elif self.DataFormat == DataFormat.CDAB:
buffer[0] = value[index + 1]
buffer[1] = value[index + 0]
buffer[2] = value[index + 3]
buffer[3] = value[index + 2]
elif self.DataFormat == DataFormat.DCBA:
buffer[0] = value[index + 0]
buffer[1] = value[index + 1]
buffer[2] = value[index + 2]
buffer[3] = value[index + 3]
return buffer
def ByteTransDataFormat8(self, value, index = 0 ):
'''反转多字节的数据信息'''
buffer = bytearray(8)
if self.DataFormat == DataFormat.ABCD:
buffer[0] = value[index + 7]
buffer[1] = value[index + 6]
buffer[2] = value[index + 5]
buffer[3] = value[index + 4]
buffer[4] = value[index + 3]
buffer[5] = value[index + 2]
buffer[6] = value[index + 1]
buffer[7] = value[index + 0]
elif self.DataFormat == DataFormat.BADC:
buffer[0] = value[index + 6]
buffer[1] = value[index + 7]
buffer[2] = value[index + 4]
buffer[3] = value[index + 5]
buffer[4] = value[index + 2]
buffer[5] = value[index + 3]
buffer[6] = value[index + 0]
buffer[7] = value[index + 1]
elif self.DataFormat == DataFormat.CDAB:
buffer[0] = value[index + 1]
buffer[1] = value[index + 0]
buffer[2] = value[index + 3]
buffer[3] = value[index + 2]
buffer[4] = value[index + 5]
buffer[5] = value[index + 4]
buffer[6] = value[index + 7]
buffer[7] = value[index + 6]
elif self.DataFormat == DataFormat.DCBA:
buffer[0] = value[index + 0]
buffer[1] = value[index + 1]
buffer[2] = value[index + 2]
buffer[3] = value[index + 3]
buffer[4] = value[index + 4]
buffer[5] = value[index + 5]
buffer[6] = value[index + 6]
buffer[7] = value[index + 7]
return buffer
class RegularByteTransform(ByteTransform):
    '''Regular byte transform: inherits every conversion from ByteTransform unchanged.'''
    def __init__(self):
        pass
class ReverseBytesTransform(ByteTransform):
    '''Byte transform that treats all multi-byte values as big-endian.'''
    def TransInt16(self, buffer, index ):
        '''Extract a big-endian short from the buffer.'''
        return struct.unpack('>h', self.TransByteArray(buffer, index, 2))[0]
    def TransUInt16(self, buffer, index ):
        '''Extract a big-endian ushort from the buffer.'''
        return struct.unpack('>H', self.TransByteArray(buffer, index, 2))[0]
    def TransInt32(self, buffer, index ):
        '''Extract a big-endian int from the buffer.'''
        return struct.unpack('>i', self.TransByteArray(buffer, index, 4))[0]
    def TransUInt32(self, buffer, index ):
        '''Extract a big-endian uint from the buffer.'''
        return struct.unpack('>I', self.TransByteArray(buffer, index, 4))[0]
    def TransInt64(self, buffer, index ):
        '''Extract a big-endian long from the buffer.'''
        return struct.unpack('>q', self.TransByteArray(buffer, index, 8))[0]
    def TransUInt64(self, buffer, index ):
        '''Extract a big-endian ulong from the buffer.'''
        return struct.unpack('>Q', self.TransByteArray(buffer, index, 8))[0]
    def TransSingle(self, buffer, index ):
        '''Extract a big-endian float from the buffer.'''
        return struct.unpack('>f', self.TransByteArray(buffer, index, 4))[0]
    def TransDouble(self, buffer, index ):
        '''Extract a big-endian double from the buffer.'''
        return struct.unpack('>d', self.TransByteArray(buffer, index, 8))[0]
    def Int16ArrayTransByte(self, values ):
        '''Pack a short array into big-endian bytes; None passes through.'''
        if values == None : return None
        return bytearray(struct.pack('>%dh' % len(values), *values))
    def UInt16ArrayTransByte(self, values ):
        '''Pack a ushort array into big-endian bytes; None passes through.'''
        if values == None : return None
        return bytearray(struct.pack('>%dH' % len(values), *values))
    def Int32ArrayTransByte(self, values ):
        '''Pack an int array into big-endian bytes; None passes through.'''
        if values == None : return None
        return bytearray(struct.pack('>%di' % len(values), *values))
    def UInt32ArrayTransByte(self, values ):
        '''Pack a uint array into big-endian bytes; None passes through.'''
        if values == None : return None
        return bytearray(struct.pack('>%dI' % len(values), *values))
    def Int64ArrayTransByte(self, values ):
        '''Pack a long array into big-endian bytes; None passes through.'''
        if values == None : return None
        return bytearray(struct.pack('>%dq' % len(values), *values))
    def UInt64ArrayTransByte(self, values ):
        '''Pack a ulong array into big-endian bytes; None passes through.'''
        if values == None : return None
        return bytearray(struct.pack('>%dQ' % len(values), *values))
    def FloatArrayTransByte(self, values ):
        '''Pack a float array into big-endian bytes; None passes through.'''
        if values == None : return None
        return bytearray(struct.pack('>%df' % len(values), *values))
    def DoubleArrayTransByte(self, values ):
        '''Pack a double array into big-endian bytes; None passes through.'''
        if values == None : return None
        return bytearray(struct.pack('>%dd' % len(values), *values))
class ReverseWordTransform(ByteTransform):
    '''Byte transform that swaps the two bytes inside every 16-bit word.'''
    def __init__(self):
        '''Initializer: force DataFormat back to ABCD for this transform.'''
        self.DataFormat = DataFormat.ABCD
    # Class-level flag: when True, string payloads are also byte-swapped per word.
    IsStringReverse = False
    def ReverseBytesByWord( self, buffer, index, length ):
        '''Copy buffer[index:index+length] and swap the bytes of every 16-bit word -> bytearray.'''
        if buffer == None: return None
        data = self.TransByteArray(buffer,index,length)
        for i in range(len(data)//2):
            data[i*2+0],data[i*2+1]= data[i*2+1],data[i*2+0]
        return data
    def ReverseAllBytesByWord( self, buffer ):
        '''Swap bytes pairwise (per word) across the whole buffer -> bytearray.'''
        return self.ReverseBytesByWord(buffer,0,len(buffer))
    def TransInt16( self, buffer, index ):
        '''Extract a short after per-word byte swapping.'''
        data = self.ReverseBytesByWord(buffer,index,2)
        return struct.unpack('<h',data)[0]
    def TransUInt16(self, buffer, index ):
        '''Extract a ushort after per-word byte swapping.'''
        data = self.ReverseBytesByWord(buffer,index,2)
        return struct.unpack('<H',data)[0]
    def TransString( self, buffer, index, length, encoding ):
        '''Extract a string with the given encoding, word-swapping first when IsStringReverse is set.'''
        data = self.TransByteArray(buffer,index,length)
        if self.IsStringReverse:
            return self.ReverseAllBytesByWord(data).decode(encoding)
        else:
            return data.decode(encoding)
    def Int16ArrayTransByte(self, values ):
        '''Pack a short array to bytes, then swap bytes per word.'''
        buffer = super().Int16ArrayTransByte(values)
        return self.ReverseAllBytesByWord(buffer)
    def UInt16ArrayTransByte(self, values ):
        '''Pack a ushort array to bytes, then swap bytes per word.'''
        buffer = super().UInt16ArrayTransByte(values)
        return self.ReverseAllBytesByWord(buffer)
    def StringTransByte(self, value, encoding ):
        '''Encode a string, pad to an even length, then optionally swap bytes per word.'''
        buffer = value.encode(encoding)
        buffer = SoftBasic.BytesArrayExpandToLengthEven(buffer)
        if self.IsStringReverse:
            return self.ReverseAllBytesByWord( buffer )
        else:
            return buffer
class ByteTransformHelper:
    '''Static helpers that convert an OperateResult of raw bytes into a typed OperateResult.

    Refactor: the twelve original methods were identical except for the
    conversion call; they now share one private core (_TransResultFromBytes).
    All public method names and signatures are unchanged.'''
    @staticmethod
    def _TransResultFromBytes( result, trans ):
        '''Shared core: on success apply `trans` to result.Content; on failure
        propagate it; any conversion exception becomes a failed OperateResult
        (message text identical to the original per-method handlers).'''
        try:
            if result.IsSuccess:
                return OperateResult.CreateSuccessResult(trans(result.Content))
            else:
                return OperateResult.CreateFailedResult(result)
        except Exception as ex:
            return OperateResult( msg = "数据转化失败,源数据:" + SoftBasic.ByteToHexString( result.Content ) + " 消息:" + str(ex))
    @staticmethod
    def GetBoolResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into a bool OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransBool( c, 0 ) )
    @staticmethod
    def GetByteResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into a byte OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransByte( c, 0 ) )
    @staticmethod
    def GetInt16ResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into a short OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransInt16( c, 0 ) )
    @staticmethod
    def GetUInt16ResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into a ushort OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransUInt16( c, 0 ) )
    @staticmethod
    def GetInt32ResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into an int OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransInt32( c, 0 ) )
    @staticmethod
    def GetUInt32ResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into a uint OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransUInt32( c, 0 ) )
    @staticmethod
    def GetInt64ResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into a long OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransInt64( c, 0 ) )
    @staticmethod
    def GetUInt64ResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into a ulong OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransUInt64( c, 0 ) )
    @staticmethod
    def GetSingleResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into a float OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransSingle( c, 0 ) )
    @staticmethod
    def GetDoubleResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into a double OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransDouble( c, 0 ) )
    @staticmethod
    def GetStringResultFromBytes( result, byteTransform ):
        '''Convert a raw-bytes OperateResult into an ascii-string OperateResult.'''
        return ByteTransformHelper._TransResultFromBytes( result, lambda c: byteTransform.TransString( c, 0, len(c), 'ascii' ) )
class DeviceAddressBase:
    '''Base address holder for all device-communication classes.'''

    # Numeric address parsed from the string form; defaults to 0.
    Address = 0

    def AnalysisAddress( self, address ):
        '''Parse the string form of the address into an integer.'''
        self.Address = int(address)
class SoftBasic:
    '''Basic runtime helpers: size formatting, hex conversion, bool/byte packing, array padding, token serialization.'''
    @staticmethod
    def GetSizeDescription(size):
        '''Return a human-readable text for a byte count.

        NOTE(review): thresholds use 1000 while divisors use 1024 — kept
        exactly as before for backward-compatible output.'''
        if size < 1000:
            return str(size) + " B"
        elif size < (1000 * 1000):
            data = float(size) / 1024
            return '{:.2f}'.format(data) + " Kb"
        elif size < (1000 * 1000 * 1000):
            data = float(size) / 1024 / 1024
            return '{:.2f}'.format(data) + " Mb"
        else:
            data = float(size) / 1024 / 1024 / 1024
            return '{:.2f}'.format(data) + " Gb"
    @staticmethod
    def ByteToHexString(inBytes, segment=' '):
        '''Convert bytes to uppercase hex, joined by `segment` (pass None for no separator).'''
        str_list = ['{:02X}'.format(byte) for byte in inBytes]
        if segment != None:
            return segment.join(str_list)
        return ''.join(str_list)
    @staticmethod
    def ByteToBoolArray( InBytes, length ):
        '''Extract up to `length` bools from a byte array, LSB-first within each byte; None passes through.'''
        if InBytes == None:
            return None
        if length > len(InBytes) * 8:
            length = len(InBytes) * 8
        buffer = []
        for i in range(length):
            # Bit-shift replaces the original 8-way if/elif mask table.
            mask = 1 << (i % 8)
            buffer.append((InBytes[i // 8] & mask) == mask)
        return buffer
    @staticmethod
    def BoolArrayToByte( array ):
        '''Pack a bool array into a bytearray, LSB-first within each byte; None passes through.'''
        if (array == None) : return None
        # Ceiling division replaces the original modulo branch.
        buffer = bytearray((len(array) + 7) // 8)
        for i in range(len(array)):
            if array[i]:
                buffer[i // 8] += 1 << (i % 8)
        return buffer
    @staticmethod
    def HexStringToBytes( hex ):
        '''Convert a hex string into bytes (bytes.fromhex tolerates whitespace between pairs).'''
        return bytes.fromhex(hex)
    @staticmethod
    def BytesArrayExpandToLengthEven(array):
        '''Pad a byte sequence with a single 0x00 so its length is even.

        Fix: the original unconditionally called array.append(0), which raises
        AttributeError for immutable bytes input; bytes are now padded by
        concatenation, mutable bytearrays are still padded in place.'''
        if len(array) % 2 == 1:
            if isinstance(array, bytes):
                return array + b'\x00'
            array.append(0)
        return array
    @staticmethod
    def IsTwoBytesEquel( b1, start1, b2, start2, length ):
        '''Return True when b1[start1:start1+length] equals b2[start2:start2+length]; False if either is None.'''
        if b1 == None or b2 == None: return False
        for ii in range(length):
            if b1[ii+start1] != b2[ii+start2]: return False
        return True
    @staticmethod
    def TokenToBytes( token ):
        '''Serialize a uuid token to bytes in the .NET Guid layout (first three fields byte-swapped) for Java/C# interop.'''
        buffer = bytearray(token.bytes)
        buffer[0],buffer[1],buffer[2],buffer[3] = buffer[3],buffer[2],buffer[1],buffer[0]
        buffer[4],buffer[5] = buffer[5],buffer[4]
        buffer[6],buffer[7] = buffer[7],buffer[6]
        return buffer
    @staticmethod
    def ArrayExpandToLength( value, length ):
        '''Return a bytearray of exactly `length` bytes: `value` zero-padded or truncated.

        Fix: the original copied all of `value` when it was longer than
        `length`, returning an over-long buffer; it is now truncated.'''
        buffer = bytearray(length)
        count = min(len(value), length)
        buffer[0:count] = value[0:count]
        return buffer
    @staticmethod
    def ArrayExpandToLengthEven( value ):
        '''Return `value` expanded to an even length (zero-padded copy when odd).

        Fix: the odd-length branch built the padded buffer but returned the
        original `value`; it now returns the padded buffer.'''
        if len(value) % 2 == 0:
            return value
        buffer = bytearray(len(value)+1)
        buffer[0:len(value)] = value
        return buffer
    @staticmethod
    def StringToUnicodeBytes( value ):
        '''Encode a string as UTF-16 bytes with any leading BOM (FF FE) stripped; None -> empty bytearray.'''
        if value == None: return bytearray(0)
        buffer = value.encode('utf-16')
        if len(buffer) > 1 and buffer[0] == 255 and buffer[1] == 254:
            buffer = buffer[2:len(buffer)]
        return buffer
    @staticmethod
    def GetUniqueStringByGuidAndRandom():
        '''Return a unique random string: 32 hex chars from a uuid1 token plus a random integer in [12, 20] (docstring corrected to match actual behavior).'''
        return SoftBasic.ByteToHexString(SoftBasic.TokenToBytes(uuid.uuid1()), None) + str(random.randint(12, 20))
class HslSecurity:
    '''XOR-based obfuscation used only inside this assembly.'''
    @staticmethod
    def ByteEncrypt( enBytes ):
        '''Obfuscate data by XOR-ing each byte with 0xB5; None passes through.'''
        if enBytes == None: return None
        return bytearray(b ^ 0xB5 for b in enBytes)
    @staticmethod
    def ByteDecrypt( deBytes ):
        '''Reverse ByteEncrypt (XOR with a constant is its own inverse).'''
        return HslSecurity.ByteEncrypt(deBytes)
class SoftZipped:
    '''Gzip compression helpers for byte payloads.'''
    @staticmethod
    def CompressBytes( inBytes ):
        '''Gzip-compress a byte payload; None passes through.'''
        return None if inBytes == None else gzip.compress( inBytes )
    @staticmethod
    def Decompress( inBytes ):
        '''Gzip-decompress a byte payload; None passes through.'''
        return None if inBytes == None else gzip.decompress( inBytes )
class HslProtocol:
    '''Protocol command codes and frame packing for this library's own network traffic.'''
    @staticmethod
    def HeadByteLength():
        '''Every network instruction head is fixed at 32 bytes.'''
        return 32
    @staticmethod
    def ProtocolBufferSize():
        '''Buffer-pool size used in all network communication.'''
        return 1024
    @staticmethod
    def ProtocolCheckSecends():
        '''Command code for the heartbeat check.'''
        return 1
    @staticmethod
    def ProtocolClientQuit():
        '''Command code: client quit message.'''
        return 2
    @staticmethod
    def ProtocolClientRefuseLogin():
        '''Command code: login refused because the client limit was reached.'''
        return 3
    @staticmethod
    def ProtocolClientAllowLogin():
        '''Command code: login allowed.'''
        return 4
    @staticmethod
    def ProtocolUserString():
        '''Payload type: plain text.'''
        return 1001
    @staticmethod
    def ProtocolUserBytes():
        '''Payload type: plain byte array.'''
        return 1002
    @staticmethod
    def ProtocolUserBitmap():
        '''Payload type: bitmap/image data.'''
        return 1003
    @staticmethod
    def ProtocolUserException():
        '''Payload type: exception; the string carries the exception message.'''
        return 1004
    @staticmethod
    def ProtocolFileDownload():
        '''Command code: request a file download.'''
        return 2001
    @staticmethod
    def ProtocolFileUpload():
        '''Command code: request a file upload.'''
        return 2002
    @staticmethod
    def ProtocolFileDelete():
        '''Command code: request a file deletion.'''
        return 2003
    @staticmethod
    def ProtocolFileCheckRight():
        '''File verification succeeded.'''
        return 2004
    @staticmethod
    def ProtocolFileCheckError():
        '''File verification failed.'''
        return 2005
    @staticmethod
    def ProtocolFileSaveError():
        '''File save failed.'''
        return 2006
    @staticmethod
    def ProtocolFileDirectoryFiles():
        '''Command code: request the list of files in a directory.'''
        return 2007
    @staticmethod
    def ProtocolFileDirectories():
        '''Command code: request the list of sub-directories.'''
        return 2008
    @staticmethod
    def ProtocolProgressReport():
        '''Command code: progress report.'''
        return 2009
    @staticmethod
    def ProtocolNoZipped():
        '''Flag: payload bytes are not compressed.'''
        return 3001
    @staticmethod
    def ProtocolZipped():
        '''Flag: payload bytes are compressed.'''
        return 3002
    @staticmethod
    def CommandBytesBase( command, customer, token, data ):
        '''Build the final wire frame: a 32-byte head (command, customer, zip
        flag, 16-byte token, payload length) followed by the XOR-obfuscated
        (and, above 100 KB, gzip-compressed) payload. All frames are built here.'''
        _zipped = HslProtocol.ProtocolNoZipped()
        buffer = None
        _sendLength = 0
        if data == None:
            buffer = bytearray(HslProtocol.HeadByteLength())
        else:
            # Obfuscate first; compress only large payloads (> 100 KB).
            data = HslSecurity.ByteEncrypt( data )
            if len(data) > 102400:
                data = SoftZipped.CompressBytes( data )
                _zipped = HslProtocol.ProtocolZipped()
            buffer = bytearray( HslProtocol.HeadByteLength() + len(data) )
            _sendLength = len(data)
        buffer[0:4] = struct.pack( '<i', command )
        buffer[4:8] = struct.pack( '<i', customer )
        buffer[8:12] = struct.pack( '<i', _zipped)
        buffer[12:28] = SoftBasic.TokenToBytes(token)
        buffer[28:32] = struct.pack( '<i', _sendLength)
        if _sendLength>0:
            buffer[32:_sendLength+32]=data
        return buffer
    @staticmethod
    def CommandAnalysis( head, content ):
        '''Parse received data: decompress first (when the head's zip flag at
        bytes 8-12 says so), then de-obfuscate; None content -> empty bytearray.'''
        if content != None:
            _zipped = struct.unpack('<i', head[8:12])[0]
            if _zipped == HslProtocol.ProtocolZipped():
                content = SoftZipped.Decompress( content )
            return HslSecurity.ByteEncrypt(content)
        return bytearray(0)
    @staticmethod
    def CommandBytes( customer, token, data ):
        '''Build a wire frame carrying a raw-bytes user payload.'''
        return HslProtocol.CommandBytesBase( HslProtocol.ProtocolUserBytes(), customer, token, data )
    @staticmethod
    def CommandString( customer, token, data ):
        '''Build a wire frame carrying a text user payload (UTF-16, BOM stripped).'''
        if data == None:
            return HslProtocol.CommandBytesBase( HslProtocol.ProtocolUserString(), customer, token, None )
        else:
            buffer = SoftBasic.StringToUnicodeBytes(data)
            return HslProtocol.CommandBytesBase( HslProtocol.ProtocolUserString(), customer, token, buffer )
class NetworkBase:
    '''Core of the networking classes: raw receive/send and framed-message reception.'''
    # Communication token used to validate the remote peer.
    Token = uuid.UUID('{00000000-0000-0000-0000-000000000000}')
    # Socket shared by the long-connection mode (class-level default).
    CoreSocket = socket.socket()
    def Receive(self,socket,length):
        '''Receive exactly `length` bytes from the socket into an OperateResult.

        Fixes: the running total previously added the cumulative buffer size
        (totle += len(data)) instead of the chunk size, over-counting after the
        first recv; and a peer close (recv() -> b"") spun forever. A closed
        connection now surfaces as a failed OperateResult.'''
        totle = 0
        data = bytearray()
        try:
            while totle < length:
                chunk = socket.recv(length - totle)
                if not chunk:
                    raise RuntimeError('remote socket closed before all data was received')
                data.extend(chunk)
                totle += len(chunk)
            return OperateResult.CreateSuccessResult(data)
        except Exception as e:
            result = OperateResult()
            result.Message = str(e)
            return result
    def Send(self,socket,data):
        '''Send data to the socket, returning a success/failure OperateResult.

        NOTE(review): socket.send may transmit fewer bytes than requested;
        sendall would be stricter — kept as send to avoid changing behavior. TODO confirm.'''
        try:
            socket.send(data)
            return OperateResult.CreateSuccessResult()
        except Exception as e:
            return OperateResult( msg = str(e))
    def CreateSocketAndConnect(self,ipAddress,port,timeout = 10000):
        '''Create a socket and connect to (ipAddress, port); default timeout 10 s.

        NOTE(review): the `timeout` parameter is currently never applied (no
        settimeout call) — kept as-is; confirm whether it should be honored.'''
        try:
            socketTmp = socket.socket()
            socketTmp.connect((ipAddress,port))
            return OperateResult.CreateSuccessResult(socketTmp)
        except Exception as e:
            return OperateResult( msg = str(e))
    def ReceiveMessage( self, socket, timeOut, netMsg ):
        '''Receive one complete framed message (head + content) described by netMsg.'''
        result = OperateResult()
        headResult = self.Receive( socket, netMsg.ProtocolHeadBytesLength() )
        if headResult.IsSuccess == False:
            result.CopyErrorFromOther(headResult)
            return result
        netMsg.HeadBytes = headResult.Content
        if netMsg.CheckHeadBytesLegal( SoftBasic.TokenToBytes(self.Token) ) == False:
            # Token check failed: close the socket and report the failure.
            if socket != None: socket.close()
            result.Message = StringResources.TokenCheckFailed()
            return result
        contentLength = netMsg.GetContentLengthByHeadBytes( )
        if contentLength == 0:
            netMsg.ContentBytes = bytearray(0)
        else:
            contentResult = self.Receive( socket, contentLength )
            if contentResult.IsSuccess == False:
                result.CopyErrorFromOther( contentResult )
                return result
            netMsg.ContentBytes = contentResult.Content
        if netMsg.ContentBytes == None: netMsg.ContentBytes = bytearray(0)
        result.Content = netMsg
        result.IsSuccess = True
        return result
class NetworkDoubleBase(NetworkBase):
    '''Common client base class supporting both long-connection and short-connection modes.'''
    # NOTE(review): these are class-level attributes, shared by all instances
    # until shadowed by instance assignment; interactiveLock in particular is
    # one lock for the whole class -- confirm that is intended.
    byteTransform = ByteTransform()
    ipAddress = "127.0.0.1"
    port = 10000
    isPersistentConn = False
    isSocketError = False
    receiveTimeOut = 10000
    isUseSpecifiedSocket = False
    interactiveLock = threading.Lock()
    iNetMessage = INetMessage()
    def SetPersistentConnection( self ):
        '''Switch to long-connection mode before reading; equivalent to skipping ConnectServer's result check. No effect for the special-socket mode.'''
        self.isPersistentConn = True
    def ConnectServer( self ):
        '''Switch from short- to long-connection mode; subsequent requests share one channel.'''
        self.isPersistentConn = True
        result = OperateResult( )
        # Before reconnecting, discard the previous socket.
        if self.CoreSocket != None:
            self.CoreSocket.close()
        rSocket = self.CreateSocketAndInitialication( )
        if rSocket.IsSuccess == False:
            self.isSocketError = True
            rSocket.Content = None
            result.Message = rSocket.Message
        else:
            self.CoreSocket = rSocket.Content
            result.IsSuccess = True
        return result
    def ConnectClose( self ):
        '''In long-connection mode, disconnect from the server and fall back to short-connection mode.'''
        result = OperateResult( )
        self.isPersistentConn = False
        self.interactiveLock.acquire()
        # Protocol-specific disconnect hook.
        result = self.ExtraOnDisconnect( self.CoreSocket )
        # Close the channel.
        if self.CoreSocket != None : self.CoreSocket.close()
        self.CoreSocket = None
        self.interactiveLock.release( )
        return result
    # Connection-setup and disconnect hooks; subclasses override these as needed.
    def InitializationOnConnect( self, socket ):
        '''Initialization performed right after connecting to the server.'''
        return OperateResult.CreateSuccessResult()
    def ExtraOnDisconnect( self, socket ):
        '''Extra work performed just before disconnecting; override per protocol.'''
        return OperateResult.CreateSuccessResult()
    def GetAvailableSocket( self ):
        '''Return the socket to use for this operation (shared, long-connection, or a fresh short-connection one).'''
        if self.isPersistentConn :
            # Special (externally supplied) socket mode.
            if self.isUseSpecifiedSocket :
                if self.isSocketError:
                    return OperateResult( msg = '连接不可用' )
                else:
                    return OperateResult.CreateSuccessResult( self.CoreSocket )
            else:
                # Long-connection mode: reconnect on error or first use.
                if self.isSocketError or self.CoreSocket == None :
                    connect = self.ConnectServer( )
                    if connect.IsSuccess == False:
                        self.isSocketError = True
                        return OperateResult( msg = connect.Message )
                    else:
                        self.isSocketError = False
                        return OperateResult.CreateSuccessResult( self.CoreSocket )
                else:
                    return OperateResult.CreateSuccessResult( self.CoreSocket )
        else:
            # Short-connection mode: a fresh socket per operation.
            return self.CreateSocketAndInitialication( )
    def CreateSocketAndInitialication( self ):
        '''Connect and run the protocol initialization on a new socket.'''
        result = self.CreateSocketAndConnect( self.ipAddress, self.port, 10000 )
        if result.IsSuccess:
            # Run the protocol handshake; on failure close the socket.
            initi = self.InitializationOnConnect( result.Content )
            if initi.IsSuccess == False:
                if result.Content !=None : result.Content.close( )
                result.IsSuccess = initi.IsSuccess
                result.CopyErrorFromOther( initi )
        return result
    def ReadFromCoreSocketServer( self, socket, send ):
        '''Exchange one message on the given socket and return the full reply (head + content concatenated).'''
        read = self.ReadFromCoreServerBase( socket, send )
        if read.IsSuccess == False: return OperateResult.CreateFailedResult( read )
        # Concatenate head bytes and content bytes into one buffer.
        Content = bytearray(len(read.Content1) + len(read.Content2))
        if len(read.Content1) > 0 :
            Content[0:len(read.Content1)] = read.Content1
        if len(read.Content2) > 0 :
            Content[len(read.Content1):len(Content)] = read.Content2
        return OperateResult.CreateSuccessResult( Content )
    def ReadFromCoreServer( self, send ):
        '''Send a request and receive a complete reply, serialized under the interactive lock.'''
        result = OperateResult( )
        self.interactiveLock.acquire()
        # Obtain a usable channel; establish a new connection when there is none.
        resultSocket = self.GetAvailableSocket( )
        if resultSocket.IsSuccess == False:
            self.isSocketError = True
            self.interactiveLock.release()
            result.CopyErrorFromOther( resultSocket )
            return result
        read = self.ReadFromCoreSocketServer( resultSocket.Content, send )
        if read.IsSuccess :
            self.isSocketError = False
            result.IsSuccess = read.IsSuccess
            result.Content = read.Content
            result.Message = StringResources.SuccessText
        # string tmp2 = BasicFramework.SoftBasic.ByteToHexString( result.Content, '-' )
        else:
            self.isSocketError = True
            result.CopyErrorFromOther( read )
        self.interactiveLock.release()
        # Short-connection mode: the socket is one-shot, close it now.
        if self.isPersistentConn==False:
            if resultSocket.Content != None:
                resultSocket.Content.close()
        return result
    def ReadFromCoreServerBase( self, socket, send ):
        '''Low-level exchange: send the request and return the reply split into head bytes and content bytes.'''
        self.iNetMessage.SendBytes = send
        sendResult = self.Send( socket, send )
        if sendResult.IsSuccess == False:
            if socket!= None : socket.close( )
            return OperateResult.CreateFailedResult( sendResult )
        # Only receive a reply when the receive timeout is non-negative.
        if (self.receiveTimeOut >= 0):
            # Receive the framed reply.
            resultReceive = self.ReceiveMessage(socket, 10000, self.iNetMessage)
            if resultReceive.IsSuccess == False:
                socket.close( )
                return OperateResult( msg = "Receive data timeout: " + str(self.receiveTimeOut ) + " Msg:"+ resultReceive.Message)
            return OperateResult.CreateSuccessResult( resultReceive.Content.HeadBytes, resultReceive.Content.ContentBytes )
        else:
            return OperateResult.CreateSuccessResult( bytearray(0), bytearray(0) )
    def GetBoolResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to a bool result using this client's byteTransform.'''
        return ByteTransformHelper.GetBoolResultFromBytes( result, self.byteTransform)
    def GetByteResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to a byte result.'''
        return ByteTransformHelper.GetByteResultFromBytes( result, self.byteTransform)
    def GetInt16ResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to a short result.'''
        return ByteTransformHelper.GetInt16ResultFromBytes( result, self.byteTransform)
    def GetUInt16ResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to a ushort result.'''
        return ByteTransformHelper.GetUInt16ResultFromBytes( result, self.byteTransform)
    def GetInt32ResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to an int result.'''
        return ByteTransformHelper.GetInt32ResultFromBytes( result, self.byteTransform )
    def GetUInt32ResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to a uint result.'''
        return ByteTransformHelper.GetUInt32ResultFromBytes( result, self.byteTransform )
    def GetInt64ResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to a long result.'''
        return ByteTransformHelper.GetInt64ResultFromBytes( result, self.byteTransform )
    def GetUInt64ResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to a ulong result.'''
        return ByteTransformHelper.GetUInt64ResultFromBytes( result, self.byteTransform )
    def GetSingleResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to a float result.'''
        return ByteTransformHelper.GetSingleResultFromBytes( result, self.byteTransform )
    def GetDoubleResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to a double result.'''
        return ByteTransformHelper.GetDoubleResultFromBytes( result, self.byteTransform )
    def GetStringResultFromBytes( self, result ):
        '''Convert a raw-bytes OperateResult to an ascii-string result.'''
        return ByteTransformHelper.GetStringResultFromBytes( result, self.byteTransform )
class NetworkDeviceBase(NetworkDoubleBase):
'''设备类的基类,提供了基础的字节读写方法'''
# 单个数据字节的长度,西门子为2,三菱,欧姆龙,modbusTcp就为1
WordLength = 1
    def Read( self, address, length ):
        '''Read raw data from the device; stub for subclasses to override (returns a bare OperateResult).'''
        return OperateResult( )
    def Write( self, address, value ):
        '''Write raw data to the device; stub for subclasses to override (returns a bare OperateResult).'''
        return OperateResult()
    def ReadInt16( self, address, length = None ):
        '''Read short data from the device: one value when length is None, otherwise a list of `length` values.'''
        if(length == None):
            return self.GetInt16ResultFromBytes( self.Read( address, self.WordLength ) )
        else:
            read = self.Read(address,length*self.WordLength)
            if read.IsSuccess == False:
                return OperateResult.CreateFailedResult(read)
            return OperateResult.CreateSuccessResult(self.byteTransform.TransInt16Array(read.Content,0,length))
    def ReadUInt16( self, address, length = None ):
        '''Read ushort data from the device: one value when length is None, otherwise a list of `length` values.'''
        if length == None:
            return self.GetUInt16ResultFromBytes(self.Read(address,self.WordLength))
        else:
            read = self.Read(address,length*self.WordLength)
            if read.IsSuccess == False:
                return OperateResult.CreateFailedResult(read)
            return OperateResult.CreateSuccessResult(self.byteTransform.TransUInt16Array(read.Content,0,length))
    def ReadInt32( self, address, length = None ):
        '''Read int data (2 words each) from the device: one value when length is None, otherwise a list.'''
        if length == None:
            return self.GetInt32ResultFromBytes( self.Read( address, 2 * self.WordLength ) )
        else:
            read = self.Read(address,2*length*self.WordLength)
            if read.IsSuccess == False:
                return OperateResult.CreateFailedResult(read)
            return OperateResult.CreateSuccessResult(self.byteTransform.TransInt32Array(read.Content,0,length))
    def ReadUInt32( self, address, length = None ):
        '''Read uint data (2 words each) from the device: one value when length is None, otherwise a list.'''
        if length == None:
            return self.GetUInt32ResultFromBytes(self.Read(address,2 * self.WordLength))
        else:
            read = self.Read(address,2*length*self.WordLength)
            if read.IsSuccess == False:
                return OperateResult.CreateFailedResult(read)
            return OperateResult.CreateSuccessResult(self.byteTransform.TransUInt32Array(read.Content,0,length))
    def ReadFloat( self, address, length = None ):
        '''Read float data (2 words each) from the device: one value when length is None, otherwise a list.'''
        if length == None:
            return self.GetSingleResultFromBytes( self.Read( address, 2 * self.WordLength ) )
        else:
            read = self.Read(address,2*length*self.WordLength)
            if read.IsSuccess == False:
                return OperateResult.CreateFailedResult(read)
            return OperateResult.CreateSuccessResult(self.byteTransform.TransSingleArray(read.Content,0,length))
    def ReadInt64( self, address, length = None ):
        '''Read long data (4 words each) from the device: one value when length is None, otherwise a list.'''
        if length == None:
            return self.GetInt64ResultFromBytes( self.Read( address, 4 * self.WordLength) )
        else:
            read = self.Read(address,4*length*self.WordLength)
            if read.IsSuccess == False:
                return OperateResult.CreateFailedResult(read)
            return OperateResult.CreateSuccessResult(self.byteTransform.TransInt64Array(read.Content,0,length))
    def ReadUInt64( self, address, length = None ):
        '''Read ulong data (4 words each) from the device: one value when length is None, otherwise a list.'''
        if length == None:
            return self.GetUInt64ResultFromBytes( self.Read( address, 4 * self.WordLength) )
        else:
            read = self.Read(address,4*length*self.WordLength)
            if read.IsSuccess == False:
                return OperateResult.CreateFailedResult(read)
            return OperateResult.CreateSuccessResult(self.byteTransform.TransUInt64Array(read.Content,0,length))
    def ReadDouble( self, address, length = None ):
        '''Read double data (4 words each) from the device: one value when length is None, otherwise a list (docstring corrected: was labelled "long").'''
        if length == None:
            return self.GetDoubleResultFromBytes( self.Read( address, 4 * self.WordLength) )
        else:
            read = self.Read(address,4*length*self.WordLength)
            if read.IsSuccess == False:
                return OperateResult.CreateFailedResult(read)
            return OperateResult.CreateSuccessResult(self.byteTransform.TransDoubleArray(read.Content,0,length))
def ReadString( self, address, length ):
return self.GetStringResultFromBytes( self.Read( address, length ) )
def WriteInt16( self, address, value ):
'''向设备中写入short数据或是数组,返回是否写入成功'''
if type(value) == list:
return self.Write( address, self.byteTransform.Int16ArrayTransByte( value ) )
else:
return self.WriteInt16( address, [value] )
def WriteUInt16( self, address, value ):
'''向设备中写入short数据或是数组,返回是否写入成功'''
if type(value) == list:
return self.Write( address, self.byteTransform.UInt16ArrayTransByte( value ) )
else:
return self.WriteUInt16( address, [value] )
def WriteInt32( self, address, value ):
'''向设备中写入int数据,返回是否写入成功'''
if type(value) == list:
return self.Write( address, self.byteTransform.Int32ArrayTransByte(value) )
else:
return self.WriteInt32( address, [value])
def WriteUInt32( self, address, value):
'''向设备中写入uint数据,返回是否写入成功'''
if type(value) == list:
return self.Write( address, self.byteTransform.UInt32ArrayTransByte(value) )
else:
return self.WriteUInt32( address, [value] )
def WriteFloat( self, address, value ):
'''向设备中写入float数据,返回是否写入成功'''
if type(value) == list:
return self.Write( address, self.byteTransform.FloatArrayTransByte(value) )
else:
return self.WriteFloat(address, [value])
def WriteInt64( self, address, value ):
'''向设备中写入long数据,返回是否写入成功'''
if type(value) == list:
return self.Write( address, self.byteTransform.Int64ArrayTransByte(value))
else:
return self.WriteInt64( address, [value] )
def WriteUInt64( self, address, value ):
'''向设备中写入ulong数据,返回是否写入成功'''
if type(value) == list:
return self.Write( address, self.byteTransform.UInt64ArrayTransByte(value))
else:
return self.WriteUInt64( address, [value] )
def WriteDouble( self, address, value ):
'''向设备中写入double数据,返回是否写入成功'''
if type(value) == list:
return self.Write( address, self.byteTransform.DoubleArrayTransByte(value) )
else:
return self.WriteDouble( address, [value] )
def WriteString( self, address, value, length = None ):
'''向设备中写入string数据,编码为ascii,返回是否写入成功'''
if length == None:
return self.Write( address, self.byteTransform.StringTransByte( value, 'ascii' ) )
else:
return self.Write( address, SoftBasic.ArrayExpandToLength(self.byteTransform.StringTransByte( value, 'ascii' ), length))
def WriteUnicodeString( self, address, value, length = None):
'''向设备中写入string数据,编码为unicode,返回是否写入成功'''
if length == None:
temp = SoftBasic.StringToUnicodeBytes(value)
return self.Write( address, temp )
else:
temp = SoftBasic.StringToUnicodeBytes(value)
temp = SoftBasic.ArrayExpandToLength( temp, length * 2 )
return self.Write( address, temp )
class ModbusInfo:
    '''Constants and helpers for the Modbus protocol: function codes,
    exception codes, Modbus-TCP framing and address parsing.'''
    @staticmethod
    def ReadCoil():
        '''Function code: read coils'''
        return 0x01
    @staticmethod
    def ReadDiscrete():
        '''Function code: read discrete inputs'''
        return 0x02
    @staticmethod
    def ReadRegister():
        '''Function code: read holding registers'''
        return 0x03
    @staticmethod
    def ReadInputRegister():
        '''Function code: read input registers'''
        return 0x04
    @staticmethod
    def WriteOneCoil():
        '''Function code: write single coil'''
        return 0x05
    @staticmethod
    def WriteOneRegister():
        '''Function code: write single register'''
        return 0x06
    @staticmethod
    def WriteCoil():
        '''Function code: write multiple coils'''
        return 0x0F
    @staticmethod
    def WriteRegister():
        '''Function code: write multiple registers'''
        return 0x10
    @staticmethod
    def FunctionCodeNotSupport():
        '''Exception code: function code not supported'''
        return 0x01
    @staticmethod
    def FunctionCodeOverBound():
        '''Exception code: address out of range'''
        return 0x02
    @staticmethod
    def FunctionCodeQuantityOver():
        '''Exception code: requested quantity exceeds the maximum'''
        return 0x03
    @staticmethod
    def FunctionCodeReadWriteException():
        '''Exception code: read/write failure'''
        return 0x04
    @staticmethod
    def PackCommandToTcp( value, id ):
        '''Wrap a raw Modbus PDU into a Modbus-TCP frame.

        MBAP header layout: bytes 0-1 transaction id, bytes 2-3 protocol id
        (always 0x0000), bytes 4-5 length of the remaining bytes.'''
        buffer = bytearray( len(value) + 6)
        buffer[0:2] = struct.pack('>H',id)
        buffer[4:6] = struct.pack('>H',len(value))
        buffer[6:len(buffer)] = value
        return buffer
    @staticmethod
    def GetDescriptionByErrorCode( code ):
        '''Map a Modbus exception code to its text message.'''
        if code == 0x01: return StringResources.ModbusTcpFunctionCodeNotSupport()
        elif code == 0x02: return StringResources.ModbusTcpFunctionCodeOverBound()
        elif code == 0x03: return StringResources.ModbusTcpFunctionCodeQuantityOver()
        elif code == 0x04: return StringResources.ModbusTcpFunctionCodeReadWriteException()
        # BUG FIX: the original returned the unbound method object
        # StringResources.UnknownError (missing call parentheses) instead of
        # the message text, unlike every other branch above.
        else: return StringResources.UnknownError()
    @staticmethod
    def AnalysisReadAddress( address, isStartWithZero ):
        '''Parse a Modbus address string; works for both TCP and RTU modes.

        When isStartWithZero is False the wire addresses are 1-based, so the
        parsed offset is decremented (and an offset below 1 is rejected).'''
        try:
            mAddress = ModbusAddress(address)
            if isStartWithZero == False:
                if mAddress.Address < 1:
                    raise RuntimeError(StringResources.ModbusAddressMustMoreThanOne())
                else:
                    mAddress.Address = mAddress.Address - 1
            return OperateResult.CreateSuccessResult(mAddress)
        except Exception as ex:
            return OperateResult( msg = str(ex))
class ModbusAddress(DeviceAddressBase):
    '''Parsed Modbus address: station number, function code and register offset.

    Address strings may carry overrides separated by ';', e.g. "s=2;x=3;100"
    selects station 2, function code 3, offset 100.'''
    # Class-level defaults; __init__ overwrites them per instance.
    Station = 0
    Function = ModbusInfo.ReadRegister()
    def __init__(self, address = "0"):
        # Station -1 means "not specified"; the frame builders below then
        # fall back to the station passed in by the caller.
        self.Station = -1
        self.Function = ModbusInfo.ReadRegister()
        self.Address = 0
        self.AnalysisAddress(address)
    def AnalysisAddress( self, address = "0" ):
        '''Parse the Modbus address string into station/function/offset.'''
        if address.find(';')>=0:
            # Segments like "s=2" set the station, "x=3" the function code;
            # anything else is taken as the register offset.
            listAddress = address.split(";")
            for index in range(len(listAddress)):
                if listAddress[index][0] == 's' or listAddress[index][0] == 'S':
                    self.Station = int(listAddress[index][2:])
                elif listAddress[index][0] == 'x' or listAddress[index][0] == 'X':
                    self.Function = int(listAddress[index][2:])
                else:
                    self.Address = int(listAddress[index])
        else:
            self.Address = int(address)
    def CreateReadCoils( self, station, length ):
        '''Build a read-coils (0x01) request: station, code, address, count.'''
        buffer = bytearray(6)
        if self.Station < 0 :
            buffer[0] = station
        else:
            buffer[0] = self.Station
        buffer[1] = ModbusInfo.ReadCoil()
        buffer[2:4] = struct.pack('>H', self.Address)
        buffer[4:6] = struct.pack('>H', length)
        return buffer
    def CreateReadDiscrete( self, station, length ):
        '''Build a read-discrete-inputs (0x02) request.'''
        buffer = bytearray(6)
        if self.Station < 0 :
            buffer[0] = station
        else:
            buffer[0] = self.Station
        buffer[1] = ModbusInfo.ReadDiscrete()
        buffer[2:4] = struct.pack('>H', self.Address)
        buffer[4:6] = struct.pack('>H', length)
        return buffer
    def CreateReadRegister( self, station, length ):
        '''Build a read-registers request using the parsed function code
        (defaults to 0x03, can be overridden via the "x=" address segment).'''
        buffer = bytearray(6)
        if self.Station < 0 :
            buffer[0] = station
        else:
            buffer[0] = self.Station
        buffer[1] = self.Function
        buffer[2:4] = struct.pack('>H', self.Address)
        buffer[4:6] = struct.pack('>H', length)
        return buffer
    def CreateReadInputRegister( self, station, length ):
        '''Build a read-input-registers (0x04) request.'''
        buffer = bytearray(6)
        if self.Station < 0 :
            buffer[0] = station
        else:
            buffer[0] = self.Station
        buffer[1] = ModbusInfo.ReadInputRegister()
        buffer[2:4] = struct.pack('>H', self.Address)
        buffer[4:6] = struct.pack('>H', length)
        return buffer
    def CreateWriteOneCoil(self, station, value):
        '''Build a write-single-coil (0x05) request.'''
        buffer = bytearray(6)
        if self.Station < 0 :
            buffer[0] = station
        else:
            buffer[0] = self.Station
        buffer[1] = ModbusInfo.WriteOneCoil()
        buffer[2:4] = struct.pack('>H', self.Address)
        # Per the Modbus spec, ON is encoded as 0xFF00 and OFF as 0x0000.
        if value == True:
            buffer[4] = 0xFF
        return buffer
    def CreateWriteOneRegister(self, station, values):
        '''Build a write-single-register (0x06) request; *values* is the
        two-byte register payload.'''
        buffer = bytearray(6)
        if self.Station < 0 :
            buffer[0] = station
        else:
            buffer[0] = self.Station
        buffer[1] = ModbusInfo.WriteOneRegister()
        buffer[2:4] = struct.pack('>H', self.Address)
        buffer[4:6] = values
        return buffer
    def CreateWriteCoil(self, station, values):
        '''Build a write-multiple-coils (0x0F) request from a bool list.'''
        data = SoftBasic.BoolArrayToByte( values )
        buffer = bytearray(7 + len(data))
        if self.Station < 0 :
            buffer[0] = station
        else:
            buffer[0] = self.Station
        buffer[1] = ModbusInfo.WriteCoil()
        buffer[2:4] = struct.pack('>H', self.Address)
        # Coil count, then the byte count of the packed payload.
        buffer[4:6] = struct.pack('>H', len(values))
        buffer[6] = len(data)
        buffer[7:len(buffer)] = data
        return buffer
    def CreateWriteRegister(self, station, values):
        '''Build a write-multiple-registers (0x10) request from raw bytes
        (two bytes per register).'''
        buffer = bytearray(7 + len(values))
        if self.Station < 0 :
            buffer[0] = station
        else:
            buffer[0] = self.Station
        buffer[1] = ModbusInfo.WriteRegister()
        buffer[2:4] = struct.pack('>H', self.Address)
        # Register count, then the byte count of the payload.
        buffer[4:6] = struct.pack('>H', len(values)//2)
        buffer[6] = len(values)
        buffer[7:len(buffer)] = values
        return buffer
    def AddressAdd(self, value):
        '''Return a new ModbusAddress offset by *value*, keeping station
        and function code.'''
        modbusAddress = ModbusAddress()
        modbusAddress.Station = self.Station
        modbusAddress.Function = self.Function
        modbusAddress.Address = self.Address+value
        return modbusAddress
class ModbusTcpNet(NetworkDeviceBase):
    '''Modbus-TCP client for convenient data exchange with a Modbus server.

    Coils / discrete inputs are exposed as bools; register data is exchanged
    as raw bytes and converted by the configured byte transform.'''
    station = 1                    # default station (unit) number
    softIncrementCount = None      # MBAP transaction-id generator
    isAddressStartWithZero = True  # True when wire addresses are 0-based
    def __init__(self, ipAddress = '127.0.0.1', port = 502, station = 1):
        '''Create a Modbus-TCP client bound to the given server and station.'''
        self.WordLength = 1
        self.softIncrementCount = SoftIncrementCount( 65536, 0 )
        self.station = station
        self.ipAddress = ipAddress
        self.port = port
        self.byteTransform = ReverseWordTransform()
        self.iNetMessage = ModbusTcpMessage()
    def SetDataFormat( self, value ):
        '''Set the multi-byte order; affects Int32/UInt32/float/double/Int64/UInt64 reads and writes.'''
        self.byteTransform.DataFormat = value
    def GetDataFormat( self ):
        '''Get the multi-byte order; affects Int32/UInt32/float/double/Int64/UInt64 reads and writes.'''
        return self.byteTransform.DataFormat
    def SetIsStringReverse( self, value ):
        '''Set whether string data is reversed word by word.'''
        self.byteTransform.IsStringReverse = value
    def GetIsStringReverse( self ):
        '''Get whether string data is reversed word by word.'''
        return self.byteTransform.IsStringReverse
    def BuildReadCoilCommand(self, address, length):
        '''Build a read-coils request frame.'''
        # Parse the address string
        analysis = ModbusInfo.AnalysisReadAddress( address, self.isAddressStartWithZero )
        if analysis.IsSuccess == False: return OperateResult.CreateFailedResult(analysis)
        # Allocate a transaction id and wrap the PDU in an MBAP header
        messageId = self.softIncrementCount.GetCurrentValue()
        buffer = ModbusInfo.PackCommandToTcp(analysis.Content.CreateReadCoils( self.station, length ), messageId)
        return OperateResult.CreateSuccessResult(buffer)
    def BuildReadDiscreteCommand(self, address, length):
        '''Build a read-discrete-inputs request frame.'''
        analysis = ModbusInfo.AnalysisReadAddress( address, self.isAddressStartWithZero )
        if analysis.IsSuccess == False: return OperateResult.CreateFailedResult(analysis)
        messageId = self.softIncrementCount.GetCurrentValue()
        buffer = ModbusInfo.PackCommandToTcp(analysis.Content.CreateReadDiscrete(self.station,length), messageId)
        return OperateResult.CreateSuccessResult(buffer)
    def BuildReadRegisterCommand(self, address, length):
        '''Build a read-registers request frame.'''
        analysis = ModbusInfo.AnalysisReadAddress( address, self.isAddressStartWithZero )
        if analysis.IsSuccess == False: return OperateResult.CreateFailedResult(analysis)
        messageId = self.softIncrementCount.GetCurrentValue()
        buffer = ModbusInfo.PackCommandToTcp(analysis.Content.CreateReadRegister(self.station,length), messageId)
        return OperateResult.CreateSuccessResult(buffer)
    def BuildReadInputRegisterCommand(self, address, length):
        '''Build a read-input-registers request frame.'''
        analysis = ModbusInfo.AnalysisReadAddress( address, self.isAddressStartWithZero )
        if analysis.IsSuccess == False: return OperateResult.CreateFailedResult(analysis)
        messageId = self.softIncrementCount.GetCurrentValue()
        buffer = ModbusInfo.PackCommandToTcp(analysis.Content.CreateReadInputRegister(self.station,length), messageId)
        return OperateResult.CreateSuccessResult(buffer)
    def BuildWriteOneCoilCommand(self, address,value):
        '''Build a write-single-coil request frame.'''
        analysis = ModbusInfo.AnalysisReadAddress( address, self.isAddressStartWithZero )
        if analysis.IsSuccess == False: return OperateResult.CreateFailedResult(analysis)
        messageId = self.softIncrementCount.GetCurrentValue()
        buffer = ModbusInfo.PackCommandToTcp(analysis.Content.CreateWriteOneCoil(self.station,value), messageId)
        return OperateResult.CreateSuccessResult(buffer)
    def BuildWriteOneRegisterCommand(self, address, values):
        '''Build a write-single-register request frame; *values* is the
        two-byte register payload.'''
        analysis = ModbusInfo.AnalysisReadAddress( address, self.isAddressStartWithZero )
        if analysis.IsSuccess == False: return OperateResult.CreateFailedResult(analysis)
        messageId = self.softIncrementCount.GetCurrentValue()
        buffer = ModbusInfo.PackCommandToTcp(analysis.Content.CreateWriteOneRegister(self.station,values), messageId)
        return OperateResult.CreateSuccessResult(buffer)
    def BuildWriteCoilCommand(self, address, values):
        '''Build a write-multiple-coils request frame from a bool list.'''
        analysis = ModbusInfo.AnalysisReadAddress( address, self.isAddressStartWithZero )
        if analysis.IsSuccess == False: return OperateResult.CreateFailedResult(analysis)
        messageId = self.softIncrementCount.GetCurrentValue()
        buffer = ModbusInfo.PackCommandToTcp(analysis.Content.CreateWriteCoil(self.station,values), messageId)
        return OperateResult.CreateSuccessResult(buffer)
    def BuildWriteRegisterCommand(self, address, values):
        '''Build a write-multiple-registers request frame from raw bytes.'''
        analysis = ModbusInfo.AnalysisReadAddress( address, self.isAddressStartWithZero )
        if analysis.IsSuccess == False: return OperateResult.CreateFailedResult(analysis)
        messageId = self.softIncrementCount.GetCurrentValue()
        buffer = ModbusInfo.PackCommandToTcp(analysis.Content.CreateWriteRegister(self.station,values), messageId)
        return OperateResult.CreateSuccessResult(buffer)
    def BuildReadModbusAddressCommand( self, address, length ):
        '''Build a read-registers frame from an already-parsed ModbusAddress.'''
        messageId = self.softIncrementCount.GetCurrentValue()
        buffer = ModbusInfo.PackCommandToTcp( address.CreateReadRegister( self.station, length ), messageId )
        return OperateResult.CreateSuccessResult( buffer )
    def CheckModbusTcpResponse( self, send ):
        '''Send the frame and check whether the Modbus-TCP response reports
        an exception (function code | 0x80).'''
        resultBytes = self.ReadFromCoreServer( send )
        if resultBytes.IsSuccess == True:
            if (send[7] + 0x80) == resultBytes.Content[7]:
                # The server answered with an exception response
                resultBytes.IsSuccess = False
                resultBytes.Message = ModbusInfo.GetDescriptionByErrorCode( resultBytes.Content[8] )
                resultBytes.ErrorCode = resultBytes.Content[8]
        return resultBytes
    def ReadModBusBase( self, code, address, length ):
        '''Read from the server using the given function code and return the
        payload with the MBAP/response header stripped.'''
        command = None
        if code == ModbusInfo.ReadCoil():
            command = self.BuildReadCoilCommand( address, length )
        elif code == ModbusInfo.ReadDiscrete():
            command = self.BuildReadDiscreteCommand( address, length )
        elif code == ModbusInfo.ReadRegister():
            command = self.BuildReadRegisterCommand( address, length )
        elif code == ModbusInfo.ReadInputRegister():
            command = self.BuildReadInputRegisterCommand( address, length )
        else:
            command = OperateResult( msg = StringResources.ModbusTcpFunctionCodeNotSupport() )
        if command.IsSuccess == False : return OperateResult.CreateFailedResult( command )
        resultBytes = self.CheckModbusTcpResponse( command.Content )
        if resultBytes.IsSuccess == True:
            # Strip the 9 header bytes (MBAP + unit + function + byte count)
            if len(resultBytes.Content) >= 9:
                buffer = bytearray(len(resultBytes.Content) - 9)
                buffer[0:len(buffer)] = resultBytes.Content[9:]
                resultBytes.Content = buffer
        return resultBytes
    def ReadModBusAddressBase( self, address, length = 1 ):
        '''Read registers from a parsed ModbusAddress, stripping the header.'''
        command = self.BuildReadModbusAddressCommand( address, length )
        if command.IsSuccess == False: return OperateResult.CreateFailedResult(command)
        resultBytes = self.CheckModbusTcpResponse( command.Content )
        if resultBytes.IsSuccess == True:
            # Strip the 9 header bytes (MBAP + unit + function + byte count)
            if len(resultBytes.Content) >= 9:
                buffer = bytearray(len(resultBytes.Content) - 9)
                buffer[0:len(buffer)] = resultBytes.Content[9:]
                resultBytes.Content = buffer
        return resultBytes
    def ReadCoil( self, address, length = None):
        '''Read coils; a single bool when length is omitted, else a bool list.'''
        if length == None:
            read = self.ReadCoil( address, 1 )
            if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
            return OperateResult.CreateSuccessResult( read.Content[0] )
        else:
            read = self.ReadModBusBase( ModbusInfo.ReadCoil(), address, length )
            if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
            return OperateResult.CreateSuccessResult( SoftBasic.ByteToBoolArray( read.Content, length ) )
    def ReadDiscrete( self, address, length = None):
        '''Read discrete inputs; a single bool when length is omitted, else a bool list.'''
        if length == None:
            read = self.ReadDiscrete( address, 1 )
            if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
            return OperateResult.CreateSuccessResult( read.Content[0] )
        else:
            read = self.ReadModBusBase( ModbusInfo.ReadDiscrete(), address, length )
            if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
            return OperateResult.CreateSuccessResult( SoftBasic.ByteToBoolArray( read.Content, length ) )
    def Read( self, address, length ):
        '''Read *length* registers starting at *address*; returns raw bytes.'''
        analysis = ModbusInfo.AnalysisReadAddress( address, self.isAddressStartWithZero )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
        return self.ReadModBusAddressBase( analysis.Content, length )
    def WriteOneRegister( self, address, value ):
        '''Write a single register; *value* may be an int or a two-byte list.'''
        if type(value) == list:
            command = self.BuildWriteOneRegisterCommand( address, value )
            if command.IsSuccess == False : return command
            return self.CheckModbusTcpResponse( command.Content )
        else:
            # BUG FIX: the original recursed with the *bytes* returned by
            # struct.pack, which never matches the list branch above and then
            # crashed inside struct.pack on the second recursion. Convert the
            # packed big-endian word to a list of byte values instead.
            return self.WriteOneRegister(address, list(struct.pack('>H', value)))
    def Write( self, address, value ):
        '''Write raw bytes to registers starting at *address*.'''
        command = self.BuildWriteRegisterCommand( address, value )
        if command.IsSuccess == False:
            return command
        return self.CheckModbusTcpResponse( command.Content )
    def WriteCoil( self, address, value ):
        '''Write coils: a bool list writes multiple coils, a single bool
        writes one coil.'''
        if type(value) == list:
            command = self.BuildWriteCoilCommand( address, value )
            if command.IsSuccess == False : return command
            return self.CheckModbusTcpResponse( command.Content )
        else:
            command = self.BuildWriteOneCoilCommand( address, value )
            if command.IsSuccess == False : return command
            return self.CheckModbusTcpResponse( command.Content )
    def WriteBool( self, address, values ):
        '''Pack a bool list into bytes and write them to registers.'''
        return self.Write( address, SoftBasic.BoolArrayToByte( values ) )
# Mitsubishi (Melsec) PLC communication classes
class MelsecA1EDataType:
    '''Device-type descriptor for the Mitsubishi A-compatible 1E frame
    protocol; the common device types are provided as static factories.'''
    # Class-level defaults (DataCode kept for backward compatibility;
    # every instance now gets its own copy, see __init__).
    DataCode = bytearray(2)
    DataType = 0
    AsciiCode = 0
    FromBase = 0
    def __init__(self, code0, code1, typeCode, asciiCode, fromBase):
        '''If you know the type code, you can extend with your own values.

        code0/code1: binary device code bytes; typeCode: 1 = bit device,
        0 = word device; asciiCode: ASCII device prefix; fromBase: numeric
        base of the device address (8, 10 or 16).'''
        # BUG FIX: the original mutated the *class-level* bytearray in place
        # (self.DataCode[0] = ...), so every instance shared one DataCode and
        # creating a new type corrupted all previously created ones. Each
        # instance now owns its DataCode.
        self.DataCode = bytearray([code0, code1])
        self.AsciiCode = asciiCode
        self.FromBase = fromBase
        if typeCode < 2:
            self.DataType = typeCode
    @staticmethod
    def GetX():
        '''X input relay'''
        return MelsecA1EDataType(0x58,0x20,0x01,'X*',8)
    @staticmethod
    def GetY():
        '''Y output relay'''
        return MelsecA1EDataType(0x59,0x20,0x01,'Y*',8)
    @staticmethod
    def GetM():
        '''M internal relay'''
        return MelsecA1EDataType(0x4D,0x20,0x01,'M*',10)
    @staticmethod
    def GetS():
        '''S state relay'''
        return MelsecA1EDataType(0x53,0x20,0x01,'S*',10)
    @staticmethod
    def GetD():
        '''D data register'''
        return MelsecA1EDataType(0x44,0x20,0x00,'D*',10)
    @staticmethod
    def GetR():
        '''R file register'''
        return MelsecA1EDataType(0x52,0x20,0x00,'R*',10)
class MelsecMcDataType:
    '''Device-type descriptor for the Mitsubishi MC (Qna-compatible 3E frame)
    protocol; the common device types are provided as static factories.'''
    DataCode = 0
    DataType = 0
    AsciiCode = 0
    FromBase = 0
    def __init__(self, code, typeCode, asciiCode, fromBase):
        '''If you know the type code, you can extend with your own values.

        code: binary device code; typeCode: 1 = bit device, 0 = word device;
        asciiCode: ASCII device prefix; fromBase: numeric base (10 or 16).'''
        self.DataCode = code
        self.AsciiCode = asciiCode
        self.FromBase = fromBase
        if typeCode < 2:
            self.DataType = typeCode
    @staticmethod
    def GetX():
        '''X input relay'''
        return MelsecMcDataType(0x9C,0x01,'X*',16)
    @staticmethod
    def GetY():
        '''Y output relay'''
        return MelsecMcDataType(0x9D,0x01,'Y*',16)
    @staticmethod
    def GetM():
        '''M internal relay'''
        return MelsecMcDataType(0x90,0x01,'M*',10)
    @staticmethod
    def GetD():
        '''D data register'''
        return MelsecMcDataType(0xA8,0x00,'D*',10)
    @staticmethod
    def GetW():
        '''W link register'''
        return MelsecMcDataType(0xB4,0x00,'W*',16)
    @staticmethod
    def GetL():
        '''L latch relay'''
        return MelsecMcDataType(0x92,0x01,'L*',10)
    @staticmethod
    def GetF():
        '''F annunciator'''
        return MelsecMcDataType(0x93,0x01,'F*',10)
    @staticmethod
    def GetV():
        '''V edge relay'''
        # BUG FIX: the original used 0x93, which is the F annunciator code;
        # the MC-protocol binary device code for the edge relay V is 0x94.
        return MelsecMcDataType(0x94,0x01,'V*',10)
    @staticmethod
    def GetB():
        '''B link relay'''
        # BUG FIX: the original used 0x0A; the MC-protocol binary device
        # code for the link relay B is 0xA0.
        return MelsecMcDataType(0xA0,0x01,'B*',16)
    @staticmethod
    def GetR():
        '''R file register'''
        return MelsecMcDataType(0xAF,0x00,'R*',10)
    @staticmethod
    def GetS():
        '''S step relay'''
        return MelsecMcDataType(0x98,0x01,'S*',10)
    @staticmethod
    def GetZ():
        '''Z index register'''
        return MelsecMcDataType(0xCC,0x00,'Z*',10)
    @staticmethod
    def GetT():
        '''Timer current value'''
        return MelsecMcDataType(0xC2,0x00,'TN',10)
    @staticmethod
    def GetC():
        '''Counter current value'''
        return MelsecMcDataType(0xC5,0x00,'CN',10)
class MelsecHelper:
    '''Shared helper utilities for the Mitsubishi communication classes:
    address parsing, ASCII frame fragments and Fx checksum handling.'''
    @staticmethod
    def McA1EAnalysisAddress( address = "0" ):
        '''Parse an A-1E frame address string such as "X100" or "D100".

        On success Content1 holds the MelsecA1EDataType and Content2 the
        numeric offset (parsed in the device type's base).'''
        result = OperateResult()
        try:
            if address.startswith("X") or address.startswith("x"):
                result.Content1 = MelsecA1EDataType.GetX()
                result.Content2 = int(address[1:], MelsecA1EDataType.GetX().FromBase)
            elif address.startswith("Y") or address.startswith("y"):
                result.Content1 = MelsecA1EDataType.GetY()
                result.Content2 = int(address[1:], MelsecA1EDataType.GetY().FromBase)
            elif address.startswith("M") or address.startswith("m"):
                result.Content1 = MelsecA1EDataType.GetM()
                result.Content2 = int(address[1:], MelsecA1EDataType.GetM().FromBase)
            elif address.startswith("S") or address.startswith("s"):
                result.Content1 = MelsecA1EDataType.GetS()
                result.Content2 = int(address[1:], MelsecA1EDataType.GetS().FromBase)
            elif address.startswith("D") or address.startswith("d"):
                result.Content1 = MelsecA1EDataType.GetD()
                result.Content2 = int(address[1:], MelsecA1EDataType.GetD().FromBase)
            elif address.startswith("R") or address.startswith("r"):
                result.Content1 = MelsecA1EDataType.GetR()
                result.Content2 = int(address[1:], MelsecA1EDataType.GetR().FromBase)
            else:
                raise Exception("type not supported!")
        except Exception as ex:
            result.Message = str(ex)
            return result
        result.IsSuccess = True
        result.Message = StringResources.SuccessText()
        return result
    @staticmethod
    def McAnalysisAddress( address = "0" ):
        '''Parse an MC (Qna-3E) address string such as "M100" or "W1A0".

        On success Content1 holds the MelsecMcDataType and Content2 the
        numeric offset (parsed in the device type's base).'''
        result = OperateResult()
        try:
            if address.startswith("M") or address.startswith("m"):
                result.Content1 = MelsecMcDataType.GetM()
                result.Content2 = int(address[1:], MelsecMcDataType.GetM().FromBase)
            elif address.startswith("X") or address.startswith("x"):
                result.Content1 = MelsecMcDataType.GetX()
                result.Content2 = int(address[1:], MelsecMcDataType.GetX().FromBase)
            elif address.startswith("Y") or address.startswith("y"):
                result.Content1 = MelsecMcDataType.GetY()
                result.Content2 = int(address[1:], MelsecMcDataType.GetY().FromBase)
            elif address.startswith("D") or address.startswith("d"):
                result.Content1 = MelsecMcDataType.GetD()
                result.Content2 = int(address[1:], MelsecMcDataType.GetD().FromBase)
            elif address.startswith("W") or address.startswith("w"):
                result.Content1 = MelsecMcDataType.GetW()
                result.Content2 = int(address[1:], MelsecMcDataType.GetW().FromBase)
            elif address.startswith("L") or address.startswith("l"):
                result.Content1 = MelsecMcDataType.GetL()
                result.Content2 = int(address[1:], MelsecMcDataType.GetL().FromBase)
            elif address.startswith("F") or address.startswith("f"):
                result.Content1 = MelsecMcDataType.GetF()
                result.Content2 = int(address[1:], MelsecMcDataType.GetF().FromBase)
            elif address.startswith("V") or address.startswith("v"):
                result.Content1 = MelsecMcDataType.GetV()
                result.Content2 = int(address[1:], MelsecMcDataType.GetV().FromBase)
            elif address.startswith("B") or address.startswith("b"):
                result.Content1 = MelsecMcDataType.GetB()
                result.Content2 = int(address[1:], MelsecMcDataType.GetB().FromBase)
            elif address.startswith("R") or address.startswith("r"):
                result.Content1 = MelsecMcDataType.GetR()
                result.Content2 = int(address[1:], MelsecMcDataType.GetR().FromBase)
            elif address.startswith("S") or address.startswith("s"):
                result.Content1 = MelsecMcDataType.GetS()
                result.Content2 = int(address[1:], MelsecMcDataType.GetS().FromBase)
            elif address.startswith("Z") or address.startswith("z"):
                result.Content1 = MelsecMcDataType.GetZ()
                result.Content2 = int(address[1:], MelsecMcDataType.GetZ().FromBase)
            elif address.startswith("T") or address.startswith("t"):
                result.Content1 = MelsecMcDataType.GetT()
                result.Content2 = int(address[1:], MelsecMcDataType.GetT().FromBase)
            elif address.startswith("C") or address.startswith("c"):
                result.Content1 = MelsecMcDataType.GetC()
                result.Content2 = int(address[1:], MelsecMcDataType.GetC().FromBase)
            else:
                raise Exception("type not supported!")
        except Exception as ex:
            result.Message = str(ex)
            return result
        result.IsSuccess = True
        result.Message = StringResources.SuccessText()
        return result
    @staticmethod
    def BuildBytesFromData( value, length = None ):
        '''Format *value* as upper-case hex ASCII bytes, zero-padded to
        2 characters, or to *length* characters when given.'''
        if length == None:
            return ('{:02X}'.format(value)).encode('ascii')
        else:
            return (('{:0'+ str(length) +'X}').format(value)).encode('ascii')
    @staticmethod
    def BuildBytesFromAddress( address, dataType ):
        '''Build the 6-character ASCII address field of an MC-protocol frame,
        rendered in the device type's base (decimal or hex).'''
        if dataType.FromBase == 10:
            return ('{:06d}'.format(address)).encode('ascii')
        else:
            return ('{:06X}'.format(address)).encode('ascii')
    @staticmethod
    def FxCalculateCRC( data ):
        '''Compute the additive checksum of an Fx protocol frame.

        Sums the bytes between the leading control byte and the trailing
        two checksum characters and returns the low byte as two ASCII hex
        characters.'''
        checksum = 0
        index = 1
        while index < (len(data) - 2):
            checksum += data[index]
            index = index + 1
        # BUG FIX: keep only the low byte — without the mask, sums above 0xFF
        # produced a 3+ character string that can never match the 2-byte
        # checksum field of the frame.
        return MelsecHelper.BuildBytesFromData( checksum % 256 )
    @staticmethod
    def CheckCRC( data ):
        '''Return True when the trailing two checksum bytes of *data* match.'''
        crc = MelsecHelper.FxCalculateCRC( data )
        # BUG FIX: the original used the C#-style "data.Length" attribute,
        # which does not exist on Python byte sequences, so this method
        # always raised AttributeError.
        if (crc[0] != data[len(data) - 2]) : return False
        if (crc[1] != data[len(data) - 1]) : return False
        return True
class MelsecA1ENet(NetworkDeviceBase):
    '''Mitsubishi PLC client implementing the A-compatible 1E frame protocol
    in binary mode; choose according to the actual PLC model.'''
    # PLC number placed in every frame; 0xFF addresses the local station.
    PLCNumber = 0xFF
    def __init__(self,ipAddress= "127.0.0.1",port = 0):
        '''Create an A-compatible 1E frame protocol client.'''
        self.iNetMessage = MelsecA1EBinaryMessage()
        self.byteTransform = RegularByteTransform()
        self.ipAddress = ipAddress
        self.port = port
        self.WordLength = 1
    @staticmethod
    def BuildReadCommand(address,length,plcNumber):
        '''Build the read command header from device type, address and length.'''
        analysis = MelsecHelper.McA1EAnalysisAddress( address )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
        subtitle = 0
        # Subheader: 0x00 = bit-unit read, 0x01 = word-unit read.
        if analysis.Content1.DataType == 0x01:
            subtitle = 0x00
        else:
            subtitle = 0x01
        _PLCCommand = bytearray(12)
        _PLCCommand[0] = subtitle # subheader
        _PLCCommand[1] = plcNumber # PLC number
        _PLCCommand[2] = 0x0A # CPU monitoring timer (L); 0x000A = 10 * 250ms = 2.5s wait
        _PLCCommand[3] = 0x00 # CPU monitoring timer (H)
        _PLCCommand[4] = analysis.Content2 % 256 # head device (start address, low byte)
        _PLCCommand[5] = analysis.Content2 // 256
        _PLCCommand[6] = 0x00
        _PLCCommand[7] = 0x00
        _PLCCommand[8] = analysis.Content1.DataCode[1] # device code (L)
        _PLCCommand[9] = analysis.Content1.DataCode[0] # device code (H)
        _PLCCommand[10] = length % 256 # number of device points
        _PLCCommand[11] = 0x00
        return OperateResult.CreateSuccessResult( _PLCCommand )
    @staticmethod
    def BuildWriteCommand( address,value,plcNumber):
        '''Build the write command header from device type, address and payload.'''
        analysis = MelsecHelper.McA1EAnalysisAddress( address )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
        length = -1
        if analysis.Content1.DataType == 1:
            # Bit write: repack one byte per point into two points per byte
            # (high nibble = first point, low nibble = second point).
            length2 = len(value) // 2 + 1
            if len(value) % 2 == 0 :
                length2 = len(value) // 2
            buffer = bytearray(length2)
            for i in range(length2):
                if value[i * 2 + 0] != 0x00 :
                    buffer[i] += 0x10
                if (i * 2 + 1) < len(value) :
                    if value[i * 2 + 1] != 0x00 :
                        buffer[i] += 0x01
            length = len(value)
            value = buffer
        subtitle = 0
        # Subheader: 0x02 = bit-unit write, 0x03 = word-unit write.
        if analysis.Content1.DataType == 0x01:
            subtitle = 0x02
        else:
            subtitle = 0x03
        _PLCCommand = bytearray(12 + len(value))
        _PLCCommand[0] = subtitle # subheader
        _PLCCommand[1] = plcNumber # PLC number
        _PLCCommand[2] = 0x0A # CPU monitoring timer (L); 0x000A = 10 * 250ms = 2.5s wait
        _PLCCommand[3] = 0x00 # CPU monitoring timer (H)
        _PLCCommand[4] = analysis.Content2 % 256 # head device (start address, low byte)
        _PLCCommand[5] = analysis.Content2 // 256
        _PLCCommand[6] = 0x00
        _PLCCommand[7] = 0x00
        _PLCCommand[8] = analysis.Content1.DataCode[1] # device code (L)
        _PLCCommand[9] = analysis.Content1.DataCode[0] # device code (H)
        _PLCCommand[10] = length % 256 # number of device points (overwritten below)
        _PLCCommand[11] = 0x00
        # Fix up the point count depending on bit- or word-unit access
        if analysis.Content1.DataType == 1:
            if length > 0:
                _PLCCommand[10] = length % 256 # number of device points
            else:
                _PLCCommand[10] = len(value) * 2 % 256 # number of device points
        else:
            _PLCCommand[10] = len(value) // 2 % 256 # number of device points
        _PLCCommand[12:] = value
        return OperateResult.CreateSuccessResult( _PLCCommand )
    @staticmethod
    def ExtractActualData( response, isBit ):
        '''Extract the actual payload from a PLC response; *isBit* selects
        bit decoding (one byte per point) versus raw word data.'''
        if isBit == True:
            # Bit read: each response byte packs two points in its nibbles
            Content = bytearray((len(response) - 2) * 2)
            i = 2
            while i < len(response):
                if (response[i] & 0x10) == 0x10:
                    Content[(i - 2) * 2 + 0] = 0x01
                if (response[i] & 0x01) == 0x01:
                    Content[(i - 2) * 2 + 1] = 0x01
                i = i + 1
            return OperateResult.CreateSuccessResult( Content )
        else:
            # Word read: payload follows the 2-byte response header unchanged
            return OperateResult.CreateSuccessResult( response[2:] )
    def Read( self, address, length ):
        '''Read data from the PLC and return the decoded result.'''
        # Build the command
        command = MelsecA1ENet.BuildReadCommand( address, length, self.PLCNumber )
        if command.IsSuccess == False :
            return OperateResult.CreateFailedResult( command )
        # Core exchange
        read = self.ReadFromCoreServer( command.Content )
        if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
        # Completion code check (byte 1 of the response)
        errorCode = read.Content[1]
        if errorCode != 0 : return OperateResult(err=errorCode, msg=StringResources.MelsecPleaseReferToManulDocument())
        # Decode; subheader 0x00 means the request was a bit-unit read
        return MelsecA1ENet.ExtractActualData( read.Content, command.Content[0] == 0x00 )
    def ReadBool( self, address, length = None ):
        '''Read bit devices from the PLC; a single bool when length is
        omitted, otherwise a list of bools.'''
        if length == None:
            read = self.ReadBool(address,1)
            if read.IsSuccess == False:
                return OperateResult.CreateFailedResult(read)
            else:
                return OperateResult.CreateSuccessResult(read.Content[0])
        else:
            # Parse the address
            analysis = MelsecHelper.McA1EAnalysisAddress( address )
            if analysis.IsSuccess == False :
                return OperateResult.CreateFailedResult( analysis )
            # Reject word devices for a bit read
            if analysis.Content1.DataType == 0x00 :
                return OperateResult( msg = StringResources.MelsecReadBitInfo() )
            # Core exchange
            read = self.Read( address, length )
            if read.IsSuccess == False :
                return OperateResult.CreateFailedResult( read )
            # Convert the 0x00/0x01 bytes to a bool list
            content = []
            for i in range(length):
                if read.Content[i] == 0x01:
                    content.append(True)
                else:
                    content.append(False)
            return OperateResult.CreateSuccessResult( content )
    def Write( self, address, value ):
        '''Write raw byte data to the PLC.'''
        # Build the command
        command = MelsecA1ENet.BuildWriteCommand( address, value, self.PLCNumber )
        if command.IsSuccess == False : return command
        # Core exchange
        read = self.ReadFromCoreServer( command.Content )
        if read.IsSuccess == False : return read
        # Completion code check
        errorCode = read.Content[1]
        if errorCode != 0 : return OperateResult(err=errorCode, msg=StringResources.MelsecPleaseReferToManulDocument())
        # Success
        return OperateResult.CreateSuccessResult( )
    def WriteBool( self, address, values ):
        '''Write a bool (or bool list) to bit devices; e.g. writing to M100,
        values[0] maps to M100.'''
        if type(values) == list:
            buffer = bytearray(len(values))
            for i in range(len(values)):
                if values[i] == True:
                    buffer[i] = 0x01
            return self.Write(address, buffer)
        else:
            return self.Write(address,[values])
class MelsecMcNet(NetworkDeviceBase):
'''三菱PLC通讯类,采用Qna兼容3E帧协议实现,需要在PLC侧先的以太网模块先进行配置,必须为二进制通讯'''
NetworkNumber = 0
NetworkStationNumber = 0
def __init__(self,ipAddress= "127.0.0.1",port = 0):
'''实例化一个三菱的Qna兼容3E帧协议的通讯对象'''
self.iNetMessage = MelsecQnA3EBinaryMessage()
self.byteTransform = RegularByteTransform()
self.ipAddress = ipAddress
self.port = port
self.WordLength = 1
    @staticmethod
    def BuildReadCommand(address,length,networkNumber = 0,networkStationNumber = 0):
        '''Build the Qna-3E batch-read command from device type, address
        and point count.'''
        analysis = MelsecHelper.McAnalysisAddress( address )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
        _PLCCommand = bytearray(21)
        _PLCCommand[0] = 0x50 # subheader
        _PLCCommand[1] = 0x00
        _PLCCommand[2] = networkNumber # network number
        _PLCCommand[3] = 0xFF # PLC number
        _PLCCommand[4] = 0xFF # destination module I/O number
        _PLCCommand[5] = 0x03
        _PLCCommand[6] = networkStationNumber # destination module station number
        _PLCCommand[7] = 0x0C # request data length
        _PLCCommand[8] = 0x00
        _PLCCommand[9] = 0x0A # CPU monitoring timer
        _PLCCommand[10] = 0x00
        _PLCCommand[11] = 0x01 # batch-read command (0x0401)
        _PLCCommand[12] = 0x04
        _PLCCommand[13] = analysis.Content1.DataType # subcommand: per-bit or per-word batch read
        _PLCCommand[14] = 0x00
        _PLCCommand[15] = analysis.Content2 % 256 # start address, low byte
        _PLCCommand[16] = analysis.Content2 // 256
        _PLCCommand[17] = 0x00
        _PLCCommand[18] = analysis.Content1.DataCode # device code to read
        _PLCCommand[19] = length % 256 # device point count, low byte
        _PLCCommand[20] = length // 256
        return OperateResult.CreateSuccessResult( _PLCCommand )
    @staticmethod
    def BuildWriteCommand( address, value, networkNumber = 0, networkStationNumber = 0 ):
        '''Build the Qna-3E batch-write command from device type, address
        and the data to write.'''
        analysis = MelsecHelper.McAnalysisAddress( address )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
        length = -1
        if analysis.Content1.DataType == 1:
            # Bit write: repack one byte per point into two points per byte
            # (high nibble = first point, low nibble = second point).
            length2 = len(value) // 2 + 1
            if len(value) % 2 == 0 :
                length2 = len(value) // 2
            buffer = bytearray(length2)
            for i in range(length2):
                if value[i * 2 + 0] != 0x00 :
                    buffer[i] += 0x10
                if (i * 2 + 1) < len(value) :
                    if value[i * 2 + 1] != 0x00 :
                        buffer[i] += 0x01
            length = len(value)
            value = buffer
        _PLCCommand = bytearray(21 + len(value))
        _PLCCommand[0] = 0x50 # subheader
        _PLCCommand[1] = 0x00
        _PLCCommand[2] = networkNumber # network number
        _PLCCommand[3] = 0xFF # PLC number
        _PLCCommand[4] = 0xFF # destination module I/O number
        _PLCCommand[5] = 0x03
        _PLCCommand[6] = networkStationNumber # destination module station number
        _PLCCommand[7] = (len(_PLCCommand) - 9) % 256 # request data length
        _PLCCommand[8] = (len(_PLCCommand) - 9) // 256
        _PLCCommand[9] = 0x0A # CPU monitoring timer
        _PLCCommand[10] = 0x00
        _PLCCommand[11] = 0x01 # batch-write command (0x1401)
        _PLCCommand[12] = 0x14
        _PLCCommand[13] = analysis.Content1.DataType # subcommand: per-bit or per-word batch write
        _PLCCommand[14] = 0x00
        _PLCCommand[15] = analysis.Content2 % 256 # start address, low byte
        _PLCCommand[16] = analysis.Content2 // 256
        _PLCCommand[17] = 0x00
        _PLCCommand[18] = analysis.Content1.DataCode # device code to write
        # Fix up the point count depending on bit- or word-unit access
        if analysis.Content1.DataType == 1:
            if length > 0:
                _PLCCommand[19] = length % 256 # device point count, low byte
                _PLCCommand[20] = length // 256
            else:
                _PLCCommand[19] = len(value) * 2 % 256 # device point count, low byte
                _PLCCommand[20] = len(value) * 2 // 256
        else:
            _PLCCommand[19] = len(value) // 2 % 256 # device point count, low byte
            _PLCCommand[20] = len(value) // 2 // 256
        _PLCCommand[21:] = value
        return OperateResult.CreateSuccessResult( _PLCCommand )
@staticmethod
def ExtractActualData( response, isBit ):
''' 从PLC反馈的数据中提取出实际的数据内容,需要传入反馈数据,是否位读取'''
if isBit == True:
# 位读取
Content = bytearray((len(response) - 11) * 2)
i = 11
while i < len(response):
if (response[i] & 0x10) == 0x10:
Content[(i - 11) * 2 + 0] = 0x01
if (response[i] & 0x01) == 0x01:
Content[(i - 11) * 2 + 1] = 0x01
i = i + 1
return OperateResult.CreateSuccessResult( Content )
else:
# 字读取
Content = bytearray(len(response) - 11)
Content[0:] = response[11:]
return OperateResult.CreateSuccessResult( Content )
def Read( self, address, length ):
'''从三菱PLC中读取想要的数据,返回读取结果'''
# 获取指令
command = MelsecMcNet.BuildReadCommand( address, length, self.NetworkNumber, self.NetworkStationNumber )
if command.IsSuccess == False :
return OperateResult.CreateFailedResult( command )
# 核心交互
read = self.ReadFromCoreServer( command.Content )
if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
# 错误代码验证
errorCode = read.Content[9] * 256 + read.Content[10]
if errorCode != 0 : return OperateResult(err=errorCode, msg=StringResources.MelsecPleaseReferToManulDocument())
# 数据解析,需要传入是否使用位的参数
return MelsecMcNet.ExtractActualData( read.Content, command.Content[13] == 1 )
def ReadBool( self, address, length = None ):
'''从三菱PLC中批量读取位软元件,返回读取结果'''
if length == None:
read = self.ReadBool(address,1)
if read.IsSuccess == False:
return OperateResult.CreateFailedResult(read)
else:
return OperateResult.CreateSuccessResult(read.Content[0])
else:
# 解析地址
analysis = MelsecHelper.McAnalysisAddress( address )
if analysis.IsSuccess == False :
return OperateResult.CreateFailedResult( analysis )
# 位读取校验
if analysis.Content1.DataType == 0x00 :
return OperateResult( msg = StringResources.MelsecReadBitInfo() )
# 核心交互
read = self.Read( address, length )
if read.IsSuccess == False :
return OperateResult.CreateFailedResult( read )
# 转化bool数组
content = []
for i in range(length):
if read.Content[i] == 0x01:
content.append(True)
else:
content.append(False)
return OperateResult.CreateSuccessResult( content )
def Write( self, address, value ):
'''向PLC写入数据,数据格式为原始的字节类型'''
# 解析指令
command = MelsecMcNet.BuildWriteCommand( address, value, self.NetworkNumber, self.NetworkStationNumber )
if command.IsSuccess == False : return command
# 核心交互
read = self.ReadFromCoreServer( command.Content )
if read.IsSuccess == False : return read
# 错误码校验
errorCode = read.Content[9] * 256 + read.Content[10]
if errorCode != 0 : return OperateResult(err=errorCode, msg=StringResources.MelsecPleaseReferToManulDocument())
# 成功
return OperateResult.CreateSuccessResult( )
def WriteBool( self, address, values ):
'''向PLC中位软元件写入bool数组或是值,返回值说明,比如你写入M100,values[0]对应M100'''
if type(values) == list:
buffer = bytearray(len(values))
for i in range(len(values)):
if values[i] == True:
buffer[i] = 0x01
return self.Write(address, buffer)
else:
return self.WriteBool(address,[values])
class MelsecMcAsciiNet(NetworkDeviceBase):
    '''Mitsubishi PLC communication class implemented with the QnA-compatible
    3E-frame protocol in ASCII mode. The Ethernet module on the PLC side must
    be configured for ASCII communication beforehand.'''
    # 3E-frame routing parameters: destination network number and station number.
    NetworkNumber = 0
    NetworkStationNumber = 0
    def __init__(self,ipAddress= "127.0.0.1",port = 0):
        '''Instantiate a Mitsubishi QnA-compatible 3E ASCII-frame communication object.'''
        self.iNetMessage = MelsecQnA3EAsciiMessage()
        self.byteTransform = RegularByteTransform()
        self.ipAddress = ipAddress
        self.port = port
        self.WordLength = 1
    @staticmethod
    def BuildReadCommand( address, length, networkNumber = 0, networkStationNumber = 0 ):
        '''Build the ASCII read telegram from the device address and read length.'''
        analysis = MelsecHelper.McAnalysisAddress( address )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
        # Hoist the repeated helper calls; note the interleaved ASCII digits.
        netNum = MelsecHelper.BuildBytesFromData( networkNumber )
        stationNum = MelsecHelper.BuildBytesFromData( networkStationNumber )
        _PLCCommand = bytearray(42)
        _PLCCommand[ 0] = 0x35                     # subheader: '5000'
        _PLCCommand[ 1] = 0x30
        _PLCCommand[ 2] = 0x30
        _PLCCommand[ 3] = 0x30
        _PLCCommand[ 4] = netNum[0]                # network number
        _PLCCommand[ 5] = netNum[1]
        _PLCCommand[ 6] = 0x46                     # PLC number: 'FF'
        _PLCCommand[ 7] = 0x46
        _PLCCommand[ 8] = 0x30                     # target module I/O number: '03FF'
        _PLCCommand[ 9] = 0x33
        _PLCCommand[10] = 0x46
        _PLCCommand[11] = 0x46
        _PLCCommand[12] = stationNum[0]            # target module station number
        _PLCCommand[13] = stationNum[1]
        _PLCCommand[14] = 0x30                     # request data length: '0018'
        _PLCCommand[15] = 0x30
        _PLCCommand[16] = 0x31
        _PLCCommand[17] = 0x38
        _PLCCommand[18] = 0x30                     # CPU monitoring timer: '0010'
        _PLCCommand[19] = 0x30
        _PLCCommand[20] = 0x31
        _PLCCommand[21] = 0x30
        _PLCCommand[22] = 0x30                     # batch read command: '0401'
        _PLCCommand[23] = 0x34
        _PLCCommand[24] = 0x30
        _PLCCommand[25] = 0x31
        _PLCCommand[26] = 0x30                     # subcommand: bit units or word units
        _PLCCommand[27] = 0x30
        _PLCCommand[28] = 0x30
        _PLCCommand[29] = 0x30 if analysis.Content1.DataType == 0 else 0x31
        _PLCCommand[30] = analysis.Content1.AsciiCode.encode('ascii')[0]      # device type code
        _PLCCommand[31] = analysis.Content1.AsciiCode.encode('ascii')[1]
        _PLCCommand[32:38] = MelsecHelper.BuildBytesFromAddress( analysis.Content2, analysis.Content1 )     # start address
        _PLCCommand[38:42] = MelsecHelper.BuildBytesFromData( length, 4 )     # device point count
        return OperateResult.CreateSuccessResult( _PLCCommand )
    @staticmethod
    def BuildWriteCommand( address, value, networkNumber = 0, networkStationNumber = 0 ):
        '''Build the ASCII write telegram from the device address and the raw data to write.'''
        analysis = MelsecHelper.McAnalysisAddress( address )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
        # Pre-encode the payload into ASCII.
        if analysis.Content1.DataType == 0x01:
            # Bit write: each input byte (0x00/0x01) becomes one ASCII digit '0'/'1'.
            buffer = bytearray(len(value))
            for i in range(len(buffer)):
                buffer[i] = 0x30 if value[i] == 0x00 else 0x31
            value = buffer
        else:
            # Word write: every little-endian 16-bit word becomes 4 ASCII hex digits.
            buffer = bytearray(len(value) * 2)
            for i in range(len(value) // 2):
                tmp = value[i*2]+ value[i*2+1]*256
                buffer[4*i:4*i+4] = MelsecHelper.BuildBytesFromData( tmp, 4 )
            value = buffer
        # Hoist the repeated helper calls; note the interleaved ASCII digits.
        netNum = MelsecHelper.BuildBytesFromData( networkNumber )
        stationNum = MelsecHelper.BuildBytesFromData( networkStationNumber )
        _PLCCommand = bytearray(42 + len(value))
        _PLCCommand[ 0] = 0x35                     # subheader: '5000'
        _PLCCommand[ 1] = 0x30
        _PLCCommand[ 2] = 0x30
        _PLCCommand[ 3] = 0x30
        _PLCCommand[ 4] = netNum[0]                # network number
        _PLCCommand[ 5] = netNum[1]
        _PLCCommand[ 6] = 0x46                     # PLC number: 'FF'
        _PLCCommand[ 7] = 0x46
        _PLCCommand[ 8] = 0x30                     # target module I/O number: '03FF'
        _PLCCommand[ 9] = 0x33
        _PLCCommand[10] = 0x46
        _PLCCommand[11] = 0x46
        _PLCCommand[12] = stationNum[0]            # target module station number
        _PLCCommand[13] = stationNum[1]
        _PLCCommand[14:18] = MelsecHelper.BuildBytesFromData( len(_PLCCommand) - 18, 4 )   # request data length
        _PLCCommand[18] = 0x30                     # CPU monitoring timer: '0010'
        _PLCCommand[19] = 0x30
        _PLCCommand[20] = 0x31
        _PLCCommand[21] = 0x30
        _PLCCommand[22] = 0x31                     # batch write command: '1401'
        _PLCCommand[23] = 0x34
        _PLCCommand[24] = 0x30
        _PLCCommand[25] = 0x31
        _PLCCommand[26] = 0x30                     # subcommand: bit units or word units
        _PLCCommand[27] = 0x30
        _PLCCommand[28] = 0x30
        _PLCCommand[29] = 0x30 if analysis.Content1.DataType == 0 else 0x31
        _PLCCommand[30] = analysis.Content1.AsciiCode.encode('ascii')[0]      # device type code
        _PLCCommand[31] = analysis.Content1.AsciiCode.encode('ascii')[1]
        _PLCCommand[32:38] = MelsecHelper.BuildBytesFromAddress( analysis.Content2, analysis.Content1 )    # start address
        # Device point count: one point per byte for bits, 4 ASCII chars per word.
        if (analysis.Content1.DataType == 1):
            _PLCCommand[38:42] = MelsecHelper.BuildBytesFromData( len(value), 4 )
        else:
            _PLCCommand[38:42] = MelsecHelper.BuildBytesFromData( len(value) // 4, 4 )
        _PLCCommand[42:] = value
        return OperateResult.CreateSuccessResult( _PLCCommand )
    @staticmethod
    def ExtractActualData( response, isBit ):
        '''Extract the actual payload from the ASCII response; pass the raw
        response bytes and whether this was a bit read.'''
        if isBit == True:
            # Bit read: one ASCII digit '0'/'1' per bit after the 22-byte header.
            Content = bytearray(len(response) - 22)
            for i in range(22,len(response)):
                Content[i - 22] = 0x00 if response[i] == 0x30 else 0x01
            return OperateResult.CreateSuccessResult( Content )
        else:
            # Word read: 4 hex digits per word, repacked into little-endian bytes.
            Content = bytearray((len(response) - 22) // 2)
            for i in range(len(Content)//2):
                tmp = int(response[i * 4 + 22:i * 4 + 26].decode('ascii'),16)
                Content[i * 2:i * 2+2] = struct.pack('<H',tmp)
            return OperateResult.CreateSuccessResult( Content )
    def Read( self, address, length ):
        '''Read the requested data from the Mitsubishi PLC and return the result.'''
        # Build the telegram
        command = MelsecMcAsciiNet.BuildReadCommand( address, length, self.NetworkNumber, self.NetworkStationNumber )
        if command.IsSuccess == False : return OperateResult.CreateFailedResult( command )
        # Core exchange with the PLC
        read = self.ReadFromCoreServer( command.Content )
        if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
        # Error code check (ASCII hex at bytes 18-21)
        errorCode = int( read.Content[18:22].decode('ascii'), 16 )
        if errorCode != 0 : return OperateResult( err= errorCode, msg = StringResources.MelsecPleaseReferToManulDocument() )
        # Parse the payload; byte 29 of the command tells whether this was a bit read
        return MelsecMcAsciiNet.ExtractActualData( read.Content, command.Content[29] == 0x31 )
    def ReadBool( self, address, length = None ):
        '''Batch-read bit devices from the Mitsubishi PLC; returns a single
        bool when no length is given, otherwise a list of bools.'''
        if length == None:
            # Scalar form: delegate to the batch form and unwrap the result.
            read = self.ReadBool( address, 1 )
            if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
            return OperateResult.CreateSuccessResult( read.Content[0] )
        else:
            # Parse the address
            analysis = MelsecHelper.McAnalysisAddress( address )
            if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
            # Only bit devices may be read as bools.
            # FIX: call MelsecReadBitInfo() -- the original passed the bound
            # method object itself as the message (cf. MelsecMcNet.ReadBool).
            if analysis.Content1.DataType == 0x00 : return OperateResult( msg = StringResources.MelsecReadBitInfo() )
            # Core exchange
            read = self.Read( address, length )
            if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
            # Convert the 0x00/0x01 bytes to a bool list
            content = []
            for i in range(len(read.Content)):
                if read.Content[i] == 0x01:
                    content.append(True)
                else:
                    content.append(False)
            return OperateResult.CreateSuccessResult( content )
    def Write( self, address, value ):
        '''Write raw byte data to the PLC at the given device address.'''
        # Build the telegram
        command = MelsecMcAsciiNet.BuildWriteCommand( address, value, self.NetworkNumber, self.NetworkStationNumber )
        if command.IsSuccess == False : return command
        # Core exchange with the PLC
        read = self.ReadFromCoreServer( command.Content )
        if read.IsSuccess == False : return read
        # Error code check (ASCII hex at bytes 18-21)
        errorCode = int( read.Content[18:22].decode('ascii'), 16 )
        if errorCode != 0 : return OperateResult( err = errorCode, msg = StringResources.MelsecPleaseReferToManulDocument() )
        # Success
        return OperateResult.CreateSuccessResult( )
    def WriteBool( self, address, values ):
        '''Write a bool list (or a single bool) to bit devices in the PLC;
        e.g. when writing to M100, values[0] maps to M100.'''
        if type(values) == list:
            # One byte per bit: 0x01 for True, 0x00 for False.
            buffer = bytearray(len(values))
            for i in range(len(buffer)):
                buffer[i] = 0x01 if values[i] == True else 0x00
            return self.Write( address, buffer )
        else:
            # Scalar form: delegate to the list branch.
            return self.WriteBool( address, [values] )
# Siemens data classes
class SiemensPLCS(Enum):
    '''Enumeration of the supported Siemens PLC families.'''
    # The chosen member selects which handshake telegrams SiemensS7Net uses.
    S1200 = 0
    S300 = 1
    S1500 = 2
    S200Smart = 3
class SiemensS7Net(NetworkDeviceBase):
    '''A Siemens client class that exchanges data with the PLC via the S7 protocol.

    Addresses use the textual form I100, Q100, M100, T100, C100, DB20.100
    (V-area addresses map onto DB1). The PLC family is chosen via SiemensPLCS.
    '''
    CurrentPlc = SiemensPLCS.S1200
    # Handshake telegram 1 (connection request); byte 21 is patched per PLC model in __init__.
    plcHead1 = bytearray([0x03,0x00,0x00,0x16,0x11,0xE0,0x00,0x00,0x00,0x01,0x00,0xC0,0x01,0x0A,0xC1,0x02,0x01,0x02,0xC2,0x02,0x01,0x00])
    # Handshake telegram 2 (S7 communication setup).
    plcHead2 = bytearray([0x03,0x00,0x00,0x19,0x02,0xF0,0x80,0x32,0x01,0x00,0x00,0x04,0x00,0x00,0x08,0x00,0x00,0xF0,0x00,0x00,0x01,0x00,0x01,0x01,0xE0])
    # Telegram used to query the PLC order number.
    plcOrderNumber = bytearray([0x03,0x00,0x00,0x21,0x02,0xF0,0x80,0x32,0x07,0x00,0x00,0x00,0x01,0x00,0x08,0x00,0x08,0x00,0x01,0x12,0x04,0x11,0x44,0x01,0x00,0xFF,0x09,0x00,0x04,0x00,0x11,0x00,0x00])
    # Handshake telegrams specific to the S7-200 Smart family.
    plcHead1_200smart = bytearray([0x03,0x00,0x00,0x16,0x11,0xE0,0x00,0x00,0x00,0x01,0x00,0xC1,0x02,0x10,0x00,0xC2,0x02,0x03,0x00,0xC0,0x01,0x0A])
    plcHead2_200smart = bytearray([0x03,0x00,0x00,0x19,0x02,0xF0,0x80,0x32,0x01,0x00,0x00,0xCC,0xC1,0x00,0x08,0x00,0x00,0xF0,0x00,0x00,0x01,0x00,0x01,0x03,0xC0])
    def __init__(self, siemens, ipAddress = "127.0.0.1"):
        '''Instantiate an S7-protocol communication object for the given PLC family and IP.

        :param siemens: a SiemensPLCS member selecting the handshake variant
        :param ipAddress: PLC IP address; the S7 port is always 102
        '''
        self.WordLength = 2
        self.ipAddress = ipAddress
        self.port = 102
        self.CurrentPlc = siemens
        self.iNetMessage = S7Message()
        self.byteTransform = ReverseBytesTransform()
        # NOTE(review): plcHead1/plcHead2 are class-level bytearrays, so the
        # in-place patches below are shared by all instances — confirm intended.
        if siemens == SiemensPLCS.S1200:
            self.plcHead1[21] = 0
        elif siemens == SiemensPLCS.S300:
            self.plcHead1[21] = 2
        elif siemens == SiemensPLCS.S1500:
            self.plcHead1[21] = 0
        elif siemens == SiemensPLCS.S200Smart:
            self.plcHead1 = self.plcHead1_200smart
            self.plcHead2 = self.plcHead2_200smart
        else:
            self.plcHead1[18] = 0
    @staticmethod
    def CalculateAddressStarted( address = "M0" ):
        '''Convert a textual byte[.bit] offset into an absolute bit offset.'''
        if address.find('.') >= 0:
            temp = address.split(".")
            return int(temp[0]) * 8 + int(temp[1])
        else:
            return int( address ) * 8
    @staticmethod
    def AnalysisAddress( address = 'M0' ):
        '''Parse a textual address into area code (Content1), bit offset
        (Content2) and DB number (Content3).'''
        result = OperateResult( )
        try:
            result.Content3 = 0
            if address[0] == 'I':
                result.Content1 = 0x81
                result.Content2 = SiemensS7Net.CalculateAddressStarted( address[1:] )
            elif address[0] == 'Q':
                result.Content1 = 0x82
                result.Content2 = SiemensS7Net.CalculateAddressStarted( address[1:] )
            elif address[0] == 'M':
                result.Content1 = 0x83
                result.Content2 = SiemensS7Net.CalculateAddressStarted( address[1:] )
            elif address[0] == 'D' or address[0:2] == "DB":
                result.Content1 = 0x84
                adds = address.split(".")
                if address[1] == 'B':
                    result.Content3 = int( adds[0][2:] )
                else:
                    result.Content3 = int( adds[0][1:] )
                result.Content2 = SiemensS7Net.CalculateAddressStarted( address[ (address.find( '.' ) + 1):])
            elif address[0] == 'T':
                result.Content1 = 0x1D
                result.Content2 = SiemensS7Net.CalculateAddressStarted( address[1:] )
            elif address[0] == 'C':
                result.Content1 = 0x1C
                result.Content2 = SiemensS7Net.CalculateAddressStarted( address[1:] )
            elif address[0] == 'V':
                # V area is addressed as DB1.
                result.Content1 = 0x84
                result.Content3 = 1
                result.Content2 = SiemensS7Net.CalculateAddressStarted( address[1:] )
            else:
                result.Message = StringResources.NotSupportedDataType()
                result.Content1 = 0
                result.Content2 = 0
                result.Content3 = 0
                return result
        except Exception as ex:
            result.Message = str(ex)
            return result
        result.IsSuccess = True
        return result
    @staticmethod
    def BuildReadCommand( address, length ):
        '''Build a multi-item word-read request; `address` is a list of parsed
        address results and `length` the matching list of byte counts.'''
        if address == None : raise Exception( "address" )
        if length == None : raise Exception( "count" )
        if len(address) != len(length) : raise Exception( "两个参数的个数不统一" )
        if len(length) > 19 : raise Exception( "读取的数组数量不允许大于19" )
        readCount = len(length)
        _PLCCommand = bytearray(19 + readCount * 12)
        # ======================================================================================
        _PLCCommand[0] = 0x03                          # TPKT header
        _PLCCommand[1] = 0x00
        _PLCCommand[2] = len(_PLCCommand) // 256       # total telegram length
        _PLCCommand[3] = len(_PLCCommand) % 256
        _PLCCommand[4] = 0x02                          # COTP, fixed
        _PLCCommand[5] = 0xF0
        _PLCCommand[6] = 0x80
        _PLCCommand[7] = 0x32                          # S7 protocol id
        _PLCCommand[8] = 0x01                          # job request
        _PLCCommand[9] = 0x00                          # redundancy identification (reserved): 0x0000
        _PLCCommand[10] = 0x00                         # protocol data unit reference; increased per request event
        _PLCCommand[11] = 0x00
        _PLCCommand[12] = 0x01                         # parameter length
        _PLCCommand[13] = (len(_PLCCommand) - 17) // 256
        _PLCCommand[14] = (len(_PLCCommand) - 17) % 256
        _PLCCommand[15] = 0x00                         # 0x00 for internal data reads; data length when reading the CPU model
        _PLCCommand[16] = 0x00
        # =====================================================================================
        _PLCCommand[17] = 0x04                         # function: 0x04 read, 0x05 write
        _PLCCommand[18] = readCount                    # number of request items
        for ii in range(readCount):
            #===========================================================================================
            # item variable specification
            _PLCCommand[19 + ii * 12] = 0x12
            # length of this address specification
            _PLCCommand[20 + ii * 12] = 0x0A
            # syntax id: ANY
            _PLCCommand[21 + ii * 12] = 0x10
            # transport size: word
            _PLCCommand[22 + ii * 12] = 0x02
            # number of items to access
            _PLCCommand[23 + ii * 12] = length[ii] // 256
            _PLCCommand[24 + ii * 12] = length[ii] % 256
            # DB number when a DB block is addressed
            _PLCCommand[25 + ii * 12] = address[ii].Content3 // 256
            _PLCCommand[26 + ii * 12] = address[ii].Content3 % 256
            # memory area code
            _PLCCommand[27 + ii * 12] = address[ii].Content1
            # bit offset, big-endian over three bytes
            _PLCCommand[28 + ii * 12] = address[ii].Content2 // 256 // 256 % 256
            _PLCCommand[29 + ii * 12] = address[ii].Content2 // 256 % 256
            _PLCCommand[30 + ii * 12] = address[ii].Content2 % 256
        return OperateResult.CreateSuccessResult( _PLCCommand )
    @staticmethod
    def BuildBitReadCommand( address ):
        '''Build a single-bit read request for the given textual address.'''
        analysis = SiemensS7Net.AnalysisAddress( address )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
        _PLCCommand = bytearray(31)
        # TPKT header
        _PLCCommand[0] = 0x03
        _PLCCommand[1] = 0x00
        # total telegram length
        _PLCCommand[2] = len(_PLCCommand) // 256
        _PLCCommand[3] = len(_PLCCommand) % 256
        # COTP, fixed
        _PLCCommand[4] = 0x02
        _PLCCommand[5] = 0xF0
        _PLCCommand[6] = 0x80
        _PLCCommand[7] = 0x32
        # job request
        _PLCCommand[8] = 0x01
        # PDU reference
        _PLCCommand[9] = 0x00
        _PLCCommand[10] = 0x00
        _PLCCommand[11] = 0x00
        _PLCCommand[12] = 0x01
        # parameter length
        _PLCCommand[13] = (len(_PLCCommand) - 17) // 256
        _PLCCommand[14] = (len(_PLCCommand) - 17) % 256
        _PLCCommand[15] = 0x00
        _PLCCommand[16] = 0x00
        # function: read
        _PLCCommand[17] = 0x04
        # number of request items
        _PLCCommand[18] = 0x01
        #===========================================================================================
        # item variable specification
        _PLCCommand[19] = 0x12
        _PLCCommand[20] = 0x0A
        _PLCCommand[21] = 0x10
        # transport size: bit
        _PLCCommand[22] = 0x01
        # number of items to access
        _PLCCommand[23] = 0x00
        _PLCCommand[24] = 0x01
        # DB number when a DB block is addressed
        _PLCCommand[25] = analysis.Content3 // 256
        _PLCCommand[26] = analysis.Content3 % 256
        # memory area code
        _PLCCommand[27] = analysis.Content1
        # bit offset, big-endian over three bytes
        _PLCCommand[28] = analysis.Content2 // 256 // 256 % 256
        _PLCCommand[29] = analysis.Content2 // 256 % 256
        _PLCCommand[30] = analysis.Content2 % 256
        return OperateResult.CreateSuccessResult( _PLCCommand )
    @staticmethod
    def BuildWriteByteCommand( address, data ):
        '''Build a byte-write request for the given textual address and payload.'''
        if data == None : data = bytearray(0)
        analysis = SiemensS7Net.AnalysisAddress( address )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult(analysis)
        _PLCCommand = bytearray(35 + len(data))
        _PLCCommand[0] = 0x03
        _PLCCommand[1] = 0x00
        # total telegram length
        _PLCCommand[2] = (35 + len(data)) // 256
        _PLCCommand[3] = (35 + len(data)) % 256
        # COTP, fixed
        _PLCCommand[4] = 0x02
        _PLCCommand[5] = 0xF0
        _PLCCommand[6] = 0x80
        _PLCCommand[7] = 0x32
        # job request
        _PLCCommand[8] = 0x01
        # PDU reference
        _PLCCommand[9] = 0x00
        _PLCCommand[10] = 0x00
        _PLCCommand[11] = 0x00
        _PLCCommand[12] = 0x01
        # parameter length, fixed
        _PLCCommand[13] = 0x00
        _PLCCommand[14] = 0x0E
        # data length = payload + 4
        _PLCCommand[15] = (4 + len(data)) // 256
        _PLCCommand[16] = (4 + len(data)) % 256
        # function: write
        _PLCCommand[17] = 0x05
        # number of request items
        _PLCCommand[18] = 0x01
        # item variable specification, fixed
        _PLCCommand[19] = 0x12
        _PLCCommand[20] = 0x0A
        _PLCCommand[21] = 0x10
        # transport size: 1 = bit, 2 = word
        _PLCCommand[22] = 0x02
        # number of items to write
        _PLCCommand[23] = len(data) // 256
        _PLCCommand[24] = len(data) % 256
        # DB number when a DB block is addressed
        _PLCCommand[25] = analysis.Content3 // 256
        _PLCCommand[26] = analysis.Content3 % 256
        # memory area code
        _PLCCommand[27] = analysis.Content1
        # bit offset, big-endian over three bytes
        _PLCCommand[28] = analysis.Content2 // 256 // 256 % 256
        _PLCCommand[29] = analysis.Content2 // 256 % 256
        _PLCCommand[30] = analysis.Content2 % 256
        # data item header: transport size byte access
        _PLCCommand[31] = 0x00
        _PLCCommand[32] = 0x04
        # payload length in bits
        _PLCCommand[33] = len(data) * 8 // 256
        _PLCCommand[34] = len(data) * 8 % 256
        _PLCCommand[35:] = data
        return OperateResult.CreateSuccessResult(_PLCCommand)
    @staticmethod
    def BuildWriteBitCommand( address, data ):
        '''Build a single-bit write request; `data` is the bool to write.'''
        analysis = SiemensS7Net.AnalysisAddress( address )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult(analysis)
        buffer = bytearray(1)
        if data == True : buffer[0] = 0x01
        _PLCCommand = bytearray(35 + len(buffer))
        _PLCCommand[0] = 0x03
        _PLCCommand[1] = 0x00
        # total telegram length
        _PLCCommand[2] = (35 + len(buffer)) // 256
        _PLCCommand[3] = (35 + len(buffer)) % 256
        # COTP, fixed
        _PLCCommand[4] = 0x02
        _PLCCommand[5] = 0xF0
        _PLCCommand[6] = 0x80
        _PLCCommand[7] = 0x32
        # job request
        _PLCCommand[8] = 0x01
        # PDU reference
        _PLCCommand[9] = 0x00
        _PLCCommand[10] = 0x00
        _PLCCommand[11] = 0x00
        _PLCCommand[12] = 0x01
        # parameter length, fixed
        _PLCCommand[13] = 0x00
        _PLCCommand[14] = 0x0E
        # data length = payload + 4
        _PLCCommand[15] = (4 + len(buffer)) // 256
        _PLCCommand[16] = (4 + len(buffer)) % 256
        # function: write
        _PLCCommand[17] = 0x05
        # number of request items
        _PLCCommand[18] = 0x01
        _PLCCommand[19] = 0x12
        _PLCCommand[20] = 0x0A
        _PLCCommand[21] = 0x10
        # transport size: 1 = bit, 2 = word
        _PLCCommand[22] = 0x01
        # number of items to write
        _PLCCommand[23] = len(buffer) // 256
        _PLCCommand[24] = len(buffer) % 256
        # DB number when a DB block is addressed
        _PLCCommand[25] = analysis.Content3 // 256
        _PLCCommand[26] = analysis.Content3 % 256
        # memory area code
        _PLCCommand[27] = analysis.Content1
        # bit offset, big-endian over three bytes
        # FIX: mask each byte with % 256 exactly like BuildWriteByteCommand;
        # without the mask, offsets >= 65536 raised ValueError on assignment.
        _PLCCommand[28] = analysis.Content2 // 256 // 256 % 256
        _PLCCommand[29] = analysis.Content2 // 256 % 256
        _PLCCommand[30] = analysis.Content2 % 256
        # data item header: bit access
        _PLCCommand[31] = 0x00
        _PLCCommand[32] = 0x03
        # payload length in bits
        _PLCCommand[33] = len(buffer) // 256
        _PLCCommand[34] = len(buffer) % 256
        _PLCCommand[35:] = buffer
        return OperateResult.CreateSuccessResult(_PLCCommand)
    def InitializationOnConnect( self, socket ):
        '''Perform the two-step S7 handshake after the TCP connection is established.'''
        # First handshake
        read_first = self.ReadFromCoreServerBase( socket, self.plcHead1 )
        if read_first.IsSuccess == False : return read_first
        # Second handshake
        read_second = self.ReadFromCoreServerBase( socket, self.plcHead2 )
        if read_second.IsSuccess == False : return read_second
        # Signal success
        return OperateResult.CreateSuccessResult( )
    def ReadOrderNumber( self ):
        '''Read the order-number (model) string from the PLC.'''
        read = self.ReadFromCoreServer( self.plcOrderNumber )
        if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
        return OperateResult.CreateSuccessResult( read.Content[71:92].decode('ascii') )
    def __ReadBase( self, address, length ):
        '''Low-level multi-item read; not intended to be called from outside.'''
        command = SiemensS7Net.BuildReadCommand( address, length )
        if command.IsSuccess == False : return command
        read = self.ReadFromCoreServer( command.Content )
        if read.IsSuccess == False : return read
        # Analyse the response: collect each 0xFF 0x04 data item into one buffer.
        receiveCount = 0
        for i in range(len(length)):
            receiveCount += length[i]
        if len(read.Content) >= 21 and read.Content[20] == len(length) :
            buffer = bytearray(receiveCount)
            kk = 0
            ll = 0
            ii = 21
            while ii < len(read.Content):
                if ii + 1 < len(read.Content):
                    if read.Content[ii] == 0xFF and read.Content[ii + 1] == 0x04:
                        # data item found
                        buffer[ll : ll + length[kk]] = read.Content[ii+4 : ii+4+length[kk]]
                        ii += length[kk] + 3
                        ll += length[kk]
                        kk += 1
                ii += 1
            return OperateResult.CreateSuccessResult( buffer )
        else :
            result = OperateResult()
            result.ErrorCode = read.ErrorCode
            result.Message = "数据块长度校验失败"
            return result
    def Read( self, address, length ):
        '''Read data from the PLC; addresses like I100, Q100, DB20.100, M100,
        T100, C100, in byte units. Accepts parallel lists for multi-item reads.'''
        if type(address) == list and type(length) == list:
            addressResult = []
            # FIX: iterate over the item count -- `length` is a list here, so
            # the original range(length) raised a TypeError.
            for i in range(len(address)):
                tmp = SiemensS7Net.AnalysisAddress( address[i] )
                # FIX: propagate the failed parse result itself (the original
                # indexed the not-yet-filled addressResult list).
                if tmp.IsSuccess == False : return OperateResult.CreateFailedResult( tmp )
                addressResult.append( tmp )
            return self.__ReadBase( addressResult, length )
        else:
            addressResult = SiemensS7Net.AnalysisAddress( address )
            if addressResult.IsSuccess == False : return OperateResult.CreateFailedResult( addressResult )
            # Large reads are split into chunks of at most 200 bytes.
            bytesContent = bytearray()
            alreadyFinished = 0
            while alreadyFinished < length :
                readLength = min( length - alreadyFinished, 200 )
                read = self.__ReadBase( [ addressResult ], [ readLength ] )
                if read.IsSuccess == True :
                    bytesContent.extend( read.Content )
                else:
                    return read
                alreadyFinished += readLength
                # Advance the bit offset by the bytes just read.
                addressResult.Content2 += readLength * 8
            return OperateResult.CreateSuccessResult( bytesContent )
    def __ReadBitFromPLC( self, address ):
        '''Read a single bit from the PLC; addresses like I100, Q100, DB20.100, M100.'''
        # Build the request
        command = SiemensS7Net.BuildBitReadCommand( address )
        if command.IsSuccess == False : return OperateResult.CreateFailedResult( command )
        # Core exchange
        read = self.ReadFromCoreServer( command.Content )
        if read.IsSuccess == False : return read
        # Analyse the response
        receiveCount = 1
        if len(read.Content) >= 21 and read.Content[20] == 1 :
            buffer = bytearray(receiveCount)
            if 22 < len(read.Content) :
                if read.Content[21] == 0xFF and read.Content[22] == 0x03:
                    # data item found
                    buffer[0] = read.Content[25]
            return OperateResult.CreateSuccessResult( buffer )
        else:
            result = OperateResult()
            result.ErrorCode = read.ErrorCode
            result.Message = "数据块长度校验失败"
            return result
    def ReadBool( self, address ):
        '''Read a bool value from the given bit address.'''
        return self.GetBoolResultFromBytes( self.__ReadBitFromPLC( address ) )
    def ReadByte( self, address ):
        '''Read a single byte from the given address.'''
        return self.GetByteResultFromBytes( self.Read( address, 1 ) )
    def __WriteBase( self, entireValue ):
        '''Low-level write helper: send the prebuilt telegram and check the ack.'''
        write = self.ReadFromCoreServer( entireValue )
        if write.IsSuccess == False : return write
        if write.Content[len(write.Content) - 1] != 0xFF :
            # Write failed; the last response byte carries the item return code.
            # FIX: the original used the C# leftover `write.Content.Length`,
            # which raised AttributeError on this failure path.
            return OperateResult( msg = "写入数据异常", err = write.Content[-1])
        else:
            return OperateResult.CreateSuccessResult( )
    def Write( self, address, value ):
        '''Write byte data to the PLC; addresses like I100, Q100, DB20.100, M100.'''
        command = self.BuildWriteByteCommand( address, value )
        if command.IsSuccess == False : return command
        return self.__WriteBase( command.Content )
    def WriteBool( self, address, value ):
        '''Write a single bit, e.g. "M100.6", "I100.7", "Q100.0", "DB20.100.0";
        a plain "M100" defaults to "M100.0".'''
        # Build the request
        command = SiemensS7Net.BuildWriteBitCommand( address, value )
        if command.IsSuccess == False : return command
        return self.__WriteBase( command.Content )
    def WriteByte( self, address, value ):
        '''Write a single byte value to the PLC.'''
        return self.Write( address, [value] )
class SiemensFetchWriteNet(NetworkDeviceBase):
'''使用了Fetch/Write协议来和西门子进行通讯,该种方法需要在PLC侧进行一些配置'''
def __init__( self, ipAddress = '127.0.0.1', port = 1000 ):
''' 实例化一个西门子的Fetch/Write协议的通讯对象,可以指定ip地址及端口号'''
self.ipAddress = ipAddress
self.port = port
self.WordLength = 2
@staticmethod
def CalculateAddressStarted( address = "M100" ):
'''计算特殊的地址信息'''
if address.find( '.' ) < 0:
return int( address )
else:
temp = address.split( '.' )
return int( temp[0] )
@staticmethod
def AnalysisAddress( address = "M100" ):
'''解析数据地址,解析出地址类型,起始地址,DB块的地址'''
result = OperateResult( )
try:
result.Content3 = 0
if address[0] == 'I':
result.Content1 = 0x03
result.Content2 = SiemensFetchWriteNet.CalculateAddressStarted( address[1:] )
elif address[0] == 'Q':
result.Content1 = 0x04
result.Content2 = SiemensFetchWriteNet.CalculateAddressStarted( address[1:] )
elif address[0] == 'M':
result.Content1 = 0x02
result.Content2 = SiemensFetchWriteNet.CalculateAddressStarted( address[1:] )
elif address[0] == 'D' or address.startswith("DB"):
result.Content1 = 0x01
adds = address.split( '.' )
if address[1] == 'B':
result.Content3 = int( adds[0][2:] )
else:
result.Content3 = int( adds[0][1:] )
if result.Content3 > 255:
result.Message = "DB块数据无法大于255"
return result
result.Content2 = SiemensFetchWriteNet.CalculateAddressStarted( address[ address.find( '.' ) + 1:] )
elif address[0] == 'T':
result.Content1 = 0x07
result.Content2 = SiemensFetchWriteNet.CalculateAddressStarted( address[1:] )
elif address[0] == 'C':
result.Content1 = 0x06
result.Content2 = SiemensFetchWriteNet.CalculateAddressStarted( address[1:])
else:
result.Message = StringResources.NotSupportedDataType()
result.Content1 = 0
result.Content2 = 0
result.Content3 = 0
return result
except Exception as ex:
result.Message = str(ex)
return result
result.IsSuccess = True
return result
@staticmethod
def BuildReadCommand( address, count ):
'''生成一个读取字数据指令头的通用方法'''
result = OperateResult( )
analysis = SiemensFetchWriteNet.AnalysisAddress( address )
if analysis.IsSuccess == False :
result.CopyErrorFromOther( analysis )
return result
_PLCCommand = bytearray(16)
_PLCCommand[0] = 0x53
_PLCCommand[1] = 0x35
_PLCCommand[2] = 0x10
_PLCCommand[3] = 0x01
_PLCCommand[4] = 0x03
_PLCCommand[5] = 0x05
_PLCCommand[6] = 0x03
_PLCCommand[7] = 0x08
# 指定数据区
_PLCCommand[8] = analysis.Content1
_PLCCommand[9] = analysis.Content3
# 指定数据地址
_PLCCommand[10] =analysis.Content2 // 256
_PLCCommand[11] = analysis.Content2 % 256
if analysis.Content1 == 0x01 or analysis.Content1 == 0x06 or analysis.Content1 == 0x07:
if count % 2 != 0:
result.Message = "读取的数据长度必须为偶数"
return result
else:
# 指定数据长度
_PLCCommand[12] = count // 2 // 256
_PLCCommand[13] = count // 2 % 256
else:
# 指定数据长度
_PLCCommand[12] = count // 256
_PLCCommand[13] = count % 256
_PLCCommand[14] = 0xff
_PLCCommand[15] = 0x02
result.Content = _PLCCommand
result.IsSuccess = True
return result
@staticmethod
def BuildWriteCommand( address, data ):
'''生成一个写入字节数据的指令'''
if data == None : data = bytearray(0)
result = OperateResult( )
analysis = SiemensFetchWriteNet.AnalysisAddress( address )
if analysis.IsSuccess == False:
result.CopyErrorFromOther( analysis )
return result
_PLCCommand = bytearray(16 + len(data))
_PLCCommand[0] = 0x53
_PLCCommand[1] = 0x35
_PLCCommand[2] = 0x10
_PLCCommand[3] = 0x01
_PLCCommand[4] = 0x03
_PLCCommand[5] = 0x03
_PLCCommand[6] = 0x03
_PLCCommand[7] = 0x08
# 指定数据区
_PLCCommand[8] = analysis.Content1
_PLCCommand[9] = analysis.Content3
# 指定数据地址
_PLCCommand[10] = analysis.Content2 // 256
_PLCCommand[11] = analysis.Content2 % 256
if analysis.Content1 == 0x01 or analysis.Content1 == 0x06 or analysis.Content1 == 0x07:
if data.Length % 2 != 0:
result.Message = "写入的数据长度必须为偶数"
return result
else:
# 指定数据长度
_PLCCommand[12] = data.Length // 2 // 256
_PLCCommand[13] = data.Length // 2 % 256
else:
# 指定数据长度
_PLCCommand[12] = data.Length // 256
_PLCCommand[13] = data.Length % 256
_PLCCommand[14] = 0xff
_PLCCommand[15] = 0x02
# 放置数据
_PLCCommand[16:16+len(data)] = data
result.Content = _PLCCommand
result.IsSuccess = True
return result
def Read( self, address, length ):
'''从PLC读取数据,地址格式为I100,Q100,DB20.100,M100,T100,C100,以字节为单位'''
# 指令解析 -> Instruction parsing
command = SiemensFetchWriteNet.BuildReadCommand( address, length )
if command.IsSuccess == False : return command
# 核心交互 -> Core Interactions
read = self.ReadFromCoreServer( command.Content )
if read.IsSuccess == False : return read
# 错误码验证 -> Error code Verification
if read.Content[8] != 0x00 : return OperateResult(read.Content[8],"发生了异常,具体信息查找Fetch/Write协议文档")
# 读取正确 -> Read Right
buffer = bytearray(len(read.Content) - 16)
buffer[0:len(buffer)] = read.Content[16:16+len(buffer)]
return OperateResult.CreateSuccessResult( buffer )
def ReadByte( self, address ):
'''读取指定地址的byte数据'''
return self.GetByteResultFromBytes( self.Read( address, 1 ) )
def Write( self, address, value ):
    '''Write *value* (bytes) to the PLC.

    Address formats: I100, Q100, DB20.100, M100 (byte units).
    Returns an OperateResult; failures from command building, transport
    or the PLC status byte are propagated to the caller.
    '''
    # Build the Fetch/Write write frame
    command = SiemensFetchWriteNet.BuildWriteCommand( address, value )
    if command.IsSuccess == False : return command
    # Core transaction with the PLC
    write = self.ReadFromCoreServer( command.Content )
    if write.IsSuccess == False : return write
    # Byte 8 of the reply is the PLC status code.
    # BUG FIX: the failure result was constructed but never returned,
    # so PLC write errors were silently reported as success.
    if (write.Content[8] != 0x00) : return OperateResult(err = write.Content[8], msg = "西门子PLC写入失败!")
    # Write succeeded
    return OperateResult.CreateSuccessResult( )
def WriteBool( self, address, values):
    '''Write one bool or a list of bools to the PLC; returns whether the write succeeded.'''
    # Normalise a scalar bool into a one-element list, then pack and write.
    if type(values) != list:
        return self.WriteBool( address, [ values ] )
    return self.Write( address, SoftBasic.BoolArrayToByte( values ) )
# Omron PLC 通讯类
class OmronFinsDataType:
    '''Data-area codes for the Omron FINS protocol.

    Each instance pairs the bit-level access code with the word-level
    access code of one PLC memory area.
    '''
    BitCode = 0
    WordCode = 0
    def __init__(self, bitCode = 0, wordCode = 0):
        '''Create a FINS data type from its bit and word access codes.'''
        self.BitCode = bitCode
        self.WordCode = wordCode
    @staticmethod
    def _from_bit( bit ):
        '''Build an area type from its bit code (word code is bit | 0x80 for every area).'''
        return OmronFinsDataType( bit, bit | 0x80 )
    @staticmethod
    def DM():
        '''DM Area'''
        return OmronFinsDataType._from_bit( 0x02 )
    @staticmethod
    def CIO():
        '''CIO Area'''
        return OmronFinsDataType._from_bit( 0x30 )
    @staticmethod
    def WR():
        '''Work Area'''
        return OmronFinsDataType._from_bit( 0x31 )
    @staticmethod
    def HR():
        '''Holding Bit Area'''
        return OmronFinsDataType._from_bit( 0x32 )
    @staticmethod
    def AR():
        '''Auxiliary Bit Area'''
        return OmronFinsDataType._from_bit( 0x33 )
class OmronFinsNet(NetworkDoubleBase):
    '''Omron PLC communication class implemented over the FINS-TCP protocol.

    NOTE(review): the FINS header fields (ICF .. SID) and the handshake
    frame ``handSingle`` are class-level attributes shared by every
    instance, and ``SetSA1`` mutates the shared frame in place.  This is
    preserved as-is because existing callers may rely on it — confirm.
    '''
    def __init__(self,ipAddress="127.0.0.1",port = 1000):
        '''Create a communication object speaking the Omron FINS frame protocol.'''
        self.ipAddress = ipAddress
        self.port = port
    # FINS command header fields; DA1 (destination node address) is
    # filled in from the handshake reply in InitializationOnConnect.
    ICF = 0
    RSV = 0
    GCT = 0
    DNA = 0
    DA1 = 0
    DA2 = 0
    SNA = 0
    SA1 = 0
    SA2 = 0
    SID = 0
    def SetSA1(self, value):
        '''Set SA1 (source node address) and patch it into the handshake frame.'''
        self.SA1 = value
        self.handSingle[19] = value
    # Fixed "FINS" TCP handshake frame sent right after the socket connects.
    handSingle = bytearray([0x46, 0x49, 0x4E, 0x53,0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01])
    @staticmethod
    def AnalysisAddress( address, isBit ):
        '''Parse a textual address (e.g. D100, C20.5) into area type plus 3-byte FINS address.

        On success Content1 holds the OmronFinsDataType of the area and
        Content2 a 3-byte array: word address high byte, word address low
        byte, bit number (0 for word access).
        '''
        result = OperateResult( )
        try:
            if address[0] == 'D' or address[0] == 'd':
                # DM area.  BUG FIX: the area factory methods were
                # referenced without being called, leaving a function
                # object in Content1 instead of an OmronFinsDataType
                # instance (``.BitCode`` access then raised).
                result.Content1 = OmronFinsDataType.DM()
            elif address[0] == 'C' or address[0] == 'c':
                # CIO area
                result.Content1 = OmronFinsDataType.CIO()
            elif address[0] == 'W' or address[0] == 'w':
                # WR area
                result.Content1 = OmronFinsDataType.WR()
            elif address[0] == 'H' or address[0] == 'h':
                # HR area
                result.Content1 = OmronFinsDataType.HR()
            elif address[0] == 'A' or address[0] == 'a':
                # AR area
                result.Content1 = OmronFinsDataType.AR()
            else:
                raise RuntimeError( StringResources.NotSupportedDataType() )
            if isBit == True:
                # Bit access: "<word>.<bit>"
                splits = address[1:].split('.')
                addr = int( splits[0] )
                result.Content2 = bytearray(3)
                # struct.pack('<H') is little-endian, so index 1 is the
                # high byte and index 0 the low byte (FINS is big-endian).
                result.Content2[0] = struct.pack('<H', addr )[1]
                result.Content2[1] = struct.pack('<H', addr )[0]
                if len(splits) > 1:
                    result.Content2[2] = int(splits[1])
                    if result.Content2[2] > 15:
                        raise RuntimeError( "欧姆龙位地址必须0-15之间" )
            else:
                # Word access
                addr = int( address[1:] )
                result.Content2 = bytearray(3)
                result.Content2[0] = struct.pack('<H', addr )[1]
                result.Content2[1] = struct.pack('<H', addr )[0]
        except Exception as ex:
            result.Message = str(ex)
            return result
        result.IsSuccess = True
        return result
    @staticmethod
    def ResponseValidAnalysis( response, isRead ):
        '''Validate a FINS reply frame and, for reads, extract the payload bytes.'''
        if len(response) >= 16:
            # Bytes 12-15 carry the handshake/command error code (big-endian).
            buffer = bytearray(4)
            buffer[0] = response[15]
            buffer[1] = response[14]
            buffer[2] = response[13]
            buffer[3] = response[12]
            err = struct.unpack( '<i' , buffer )[0]
            if err > 0 : return OperateResult( err = err, msg = OmronFinsNet.GetStatusDescription( err ) )
            # BUG FIX: ``response.Length`` is C#-style and raises
            # AttributeError on a bytearray; use len(response).
            if len(response) >= 30:
                # Bytes 28/29 hold the end code of the FINS command itself.
                err = response[28] * 256 + response[29]
                if err > 0 : return OperateResult( err = err, msg = "欧姆龙数据接收出错" )
                if isRead == False : return OperateResult.CreateSuccessResult( bytearray(0) )
                # Read reply: everything after the 30-byte header is payload.
                content = bytearray(len(response) - 30)
                if len(content) > 0 :
                    content[0:len(content)] = response[30:]
                return OperateResult.CreateSuccessResult( content )
        return OperateResult( msg = "欧姆龙数据接收出错" )
    @staticmethod
    def GetStatusDescription( err ):
        '''Map a FINS status code to its human-readable description.'''
        if err == 0: return StringResources.OmronStatus0()
        elif err == 1: return StringResources.OmronStatus1()
        elif err == 2: return StringResources.OmronStatus2()
        elif err == 3: return StringResources.OmronStatus3()
        elif err == 20: return StringResources.OmronStatus20()
        elif err == 21: return StringResources.OmronStatus21()
        elif err == 22: return StringResources.OmronStatus22()
        elif err == 23: return StringResources.OmronStatus23()
        elif err == 24: return StringResources.OmronStatus24()
        elif err == 25: return StringResources.OmronStatus25()
        else: return StringResources.UnknownError()
    def PackCommand( self, cmd ):
        '''Wrap a bare FINS command in the full TCP frame (magic, length, header fields).'''
        buffer = bytearray(26 + len(cmd))
        buffer[0:4] = self.handSingle[0:4]
        # Bytes 4-7: big-endian frame length excluding the first 8 bytes.
        tmp = struct.pack('>i', len(buffer) - 8 )
        buffer[4:8] = tmp
        buffer[11] = 0x02
        # FINS command header
        buffer[16] = self.ICF
        buffer[17] = self.RSV
        buffer[18] = self.GCT
        buffer[19] = self.DNA
        buffer[20] = self.DA1
        buffer[21] = self.DA2
        buffer[22] = self.SNA
        buffer[23] = self.SA1
        buffer[24] = self.SA2
        buffer[25] = self.SID
        buffer[26:] = cmd
        return buffer
    def BuildReadCommand( self, address, length , isBit):
        '''Build a complete FINS memory-area-read command for *address*/*length*.'''
        analysis = OmronFinsNet.AnalysisAddress( address, isBit )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
        _PLCCommand = bytearray(8)
        _PLCCommand[0] = 0x01
        _PLCCommand[1] = 0x01
        if isBit == True:
            _PLCCommand[2] = analysis.Content1.BitCode
        else:
            _PLCCommand[2] = analysis.Content1.WordCode
        _PLCCommand[3:6] = analysis.Content2
        # BUG FIX: '/' produces a float in Python 3, which cannot be
        # assigned to a bytearray element; use integer division.
        _PLCCommand[6] = length // 256
        _PLCCommand[7] = length % 256
        return OperateResult.CreateSuccessResult( self.PackCommand( _PLCCommand ) )
    def BuildWriteCommand( self, address, value, isBit ):
        '''Build a complete FINS memory-area-write command carrying *value*.'''
        analysis = self.AnalysisAddress( address, isBit )
        if analysis.IsSuccess == False : return OperateResult.CreateFailedResult( analysis )
        _PLCCommand = bytearray(8 + len(value))
        _PLCCommand[0] = 0x01
        _PLCCommand[1] = 0x02
        if isBit == True:
            _PLCCommand[2] = analysis.Content1.BitCode
        else:
            _PLCCommand[2] = analysis.Content1.WordCode
        _PLCCommand[3:6] = analysis.Content2
        if isBit == True:
            # Length in bits (one payload byte per bit)
            _PLCCommand[6] = len(value) // 256
            _PLCCommand[7] = len(value) % 256
        else:
            # Length in 16-bit words
            _PLCCommand[6] = len(value) // 2 // 256
            _PLCCommand[7] = len(value) // 2 % 256
        _PLCCommand[8:] = value
        return OperateResult.CreateSuccessResult( self.PackCommand( _PLCCommand ) )
    def InitializationOnConnect( self, socket ):
        '''Perform the FINS handshake required right after connecting to the PLC.'''
        # Send the handshake frame and read the reply
        read = self.ReadFromCoreServerBase( socket, self.handSingle )
        if read.IsSuccess == False : return read
        # Bytes 4-7 of the reply (big-endian) carry the handshake status
        buffer = bytearray(4)
        buffer[0] = read.Content2[7]
        buffer[1] = read.Content2[6]
        buffer[2] = read.Content2[5]
        buffer[3] = read.Content2[4]
        status = struct.unpack( '<i',buffer )[0]
        if status != 0 : return OperateResult( err = status, msg = OmronFinsNet.GetStatusDescription( status ) )
        # BUG FIX: ``read.Content2.Length`` raises AttributeError on a
        # bytearray; use len().  Byte 15 is the PLC node address (DA1).
        if len(read.Content2) >= 16 : self.DA1 = read.Content2[15]
        return OperateResult.CreateSuccessResult( )
    def Read( self, address, length ):
        '''Read *length* words from the PLC; returns an OperateResult with the payload bytes.'''
        # Build the read command
        command = self.BuildReadCommand( address, length, False )
        if command.IsSuccess == False : return OperateResult.CreateFailedResult( command )
        # Core transaction
        read = self.ReadFromCoreServer( command.Content )
        if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
        # Validate the reply and strip the header
        valid = OmronFinsNet.ResponseValidAnalysis( read.Content, True )
        if valid.IsSuccess == False : return OperateResult.CreateFailedResult( valid )
        return OperateResult.CreateSuccessResult( valid.Content )
    def ReadBool( self, address, length = None ):
        '''Read one bool (length omitted) or a list of bools from bit devices.'''
        if length == None:
            read = self.ReadBool( address, 1 )
            if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
            return OperateResult.CreateSuccessResult( read.Content[0] )
        else:
            # Build the bit-read command
            command = self.BuildReadCommand( address, length, True )
            if command.IsSuccess == False : return OperateResult.CreateFailedResult( command )
            # Core transaction
            read = self.ReadFromCoreServer( command.Content )
            if read.IsSuccess == False : return OperateResult.CreateFailedResult( read )
            # Validate the reply and strip the header
            valid = OmronFinsNet.ResponseValidAnalysis( read.Content, True )
            if valid.IsSuccess == False : return OperateResult.CreateFailedResult( valid )
            # BUG FIX: booleans were decoded from the raw frame
            # (read.Content, header included) instead of the extracted
            # payload, so indices did not map to the requested bits.
            content = []
            for i in range(len(valid.Content)):
                if valid.Content[i] == 0x01:
                    content.append(True)
                else:
                    content.append(False)
            return OperateResult.CreateSuccessResult( content )
    def Write( self, address, value ):
        '''Write word data (*value* bytes) to the PLC at *address*.'''
        # Build the write command
        command = self.BuildWriteCommand( address, value, False )
        if command.IsSuccess == False : return command
        # Core transaction
        read = self.ReadFromCoreServer( command.Content )
        if read.IsSuccess == False : return read
        # Validate the reply
        valid = OmronFinsNet.ResponseValidAnalysis( read.Content, False )
        if valid.IsSuccess == False : return valid
        return OperateResult.CreateSuccessResult( )
    def WriteBool( self, address, values ):
        '''Write one bool or a list of bools to bit devices (e.g. D100.0 for values[0]).'''
        if type(values) == list:
            # One payload byte per bit: 0x01 for True, 0x00 for False
            content = bytearray(len(values))
            for i in range(len(values)):
                if values[i] == True:
                    content[i] = 0x01
                else:
                    content[i] = 0x00
            command = self.BuildWriteCommand( address, content, True )
            if command.IsSuccess == False : return command
            # Core transaction
            read = self.ReadFromCoreServer( command.Content )
            if read.IsSuccess == False : return read
            # Validate the reply
            valid = OmronFinsNet.ResponseValidAnalysis( read.Content, False )
            if valid.IsSuccess == False : return valid
            return OperateResult.CreateSuccessResult( )
        else:
            return self.WriteBool( address, [values] )
# NetSimplifyClient类
class NetSimplifyClient(NetworkDoubleBase):
    '''Client for requesting well-defined data items from a server.

    NOTE(review): ``self.Token`` is read below but never assigned here —
    presumably inherited from NetworkDoubleBase; confirm.
    '''
    def __init__(self, ipAddress, port):
        '''Create a client object for talking to the server at ipAddress:port.'''
        self.iNetMessage = HslMessage()
        self.byteTransform = RegularByteTransform()
        self.ipAddress = ipAddress
        self.port = port
    def ReadBytesFromServer( self, customer, send = None):
        '''Request raw byte data from the server for the given customer code.'''
        return self.__ReadFromServerBase( HslProtocol.CommandBytes( customer, self.Token, send))
    def ReadStringFromServer( self, customer, send = None):
        '''Request string data from the server; the reply is decoded as UTF-16.'''
        read = self.__ReadFromServerBase( HslProtocol.CommandString( customer, self.Token, send))
        if read.IsSuccess == False:
            return OperateResult.CreateFailedResult( read )
        return OperateResult.CreateSuccessResult( read.Content.decode('utf-16') )
    def __ReadFromServerBase( self, send):
        '''Send a pre-built frame and split the reply into head + analysed content.'''
        read = self.ReadFromCoreServer( send )
        if read.IsSuccess == False:
            return read
        # Split the reply into the fixed-size head and the variable content
        headBytes = bytearray(HslProtocol.HeadByteLength())
        contentBytes = bytearray(len(read.Content) - HslProtocol.HeadByteLength())
        headBytes[0:HslProtocol.HeadByteLength()] = read.Content[0:HslProtocol.HeadByteLength()]
        if len(contentBytes) > 0:
            contentBytes[0:len(contentBytes)] = read.Content[HslProtocol.HeadByteLength():len(read.Content)]
        # CommandAnalysis undoes any compression/encoding flagged in the head
        contentBytes = HslProtocol.CommandAnalysis( headBytes, contentBytes )
        return OperateResult.CreateSuccessResult( contentBytes )
class AppSession:
    '''Per-connection session state kept by the network server.

    NOTE(review): the fields below are class-level attributes (shared
    defaults until an instance assigns its own).  In particular
    ``WorkSocket`` and ``HybirdLockSend`` are created once at class
    definition time and shared until overwritten per instance — confirm
    every code path replaces them before use.
    '''
    IpAddress = "127.0.0.1"        # remote peer IP
    Port = 12345                   # remote peer port
    LoginAlias = ""                # display alias chosen at login
    HeartTime = None               # timestamp of the last heartbeat
    ClientType = ""                # free-form client type string
    ClientUniqueID = ""            # unique id assigned in __init__
    BytesHead = bytearray(32)      # receive buffer for the frame head
    BytesContent = bytearray(0)    # receive buffer for the frame content
    KeyGroup = ""                  # subscription key group
    WorkSocket = socket.socket()   # socket serving this session
    HybirdLockSend = threading.Lock()  # serialises concurrent sends
    def __init__( self ):
        '''Assign a unique id and stamp the initial heartbeat time.'''
        self.ClientUniqueID = SoftBasic.GetUniqueStringByGuidAndRandom()
        self.HeartTime = datetime.datetime.now()
    def Clear( self ):
        '''Reset the receive buffers between frames.'''
        self.BytesHead = bytearray(HslProtocol.HeadByteLength())
        self.BytesContent = None
class NetworkXBase(NetworkBase):
    '''Base class for the multi-purpose network classes (framed send/receive with token check).'''
    ThreadBack = None
    def __init__(self):
        return
    def SendBytesAsync( self, session, content ):
        '''Send *content* on the session socket, serialised by the session send lock.'''
        if content == None : return
        session.HybirdLockSend.acquire()
        self.Send( session.WorkSocket, content )
        session.HybirdLockSend.release()
    def ThreadBackground( self, session ):
        # Receive loop: head frame, then content frame, then dispatch.
        while True:
            if session.WorkSocket == None : break
            readHeadBytes = self.Receive(session.WorkSocket,HslProtocol.HeadByteLength())
            if readHeadBytes.IsSuccess == False :
                self.SocketReceiveException( session )
                return
            # Bytes 28-31 of the head give the content length (little-endian)
            length = struct.unpack( '<i', readHeadBytes.Content[28:32])[0]
            readContent = self.Receive(session.WorkSocket,length)
            if readContent.IsSuccess == False :
                self.SocketReceiveException( session )
                return
            if self.CheckRemoteToken( readHeadBytes.Content ):
                head = readHeadBytes.Content
                content = HslProtocol.CommandAnalysis(head,readContent.Content)
                # Bytes 0-3: protocol id, bytes 4-7: customer code
                protocol = struct.unpack('<i', head[0:4])[0]
                customer = struct.unpack('<i', head[4:8])[0]
                self.DataProcessingCenter(session,protocol,customer,content)
            else:
                # Token mismatch: treat as a remote close
                self.AppSessionRemoteClose( session )
    def BeginReceiveBackground( self, session ):
        '''Start the background receive loop for *session* on a new thread.'''
        ThreadBack = threading.Thread(target=self.ThreadBackground,args=[session])
        ThreadBack.start()
    def DataProcessingCenter( self, session, protocol, customer, content ):
        '''Data-processing hook; subclasses are expected to override this.'''
        return
    def CheckRemoteToken( self, headBytes ):
        '''Return True when the token embedded in the frame head matches ours.'''
        return SoftBasic.IsTwoBytesEquel( headBytes,12, SoftBasic.TokenToBytes(self.Token), 0, 16 )
    def SocketReceiveException( self, session ):
        '''Hook invoked when a receive fails; subclasses may override.'''
        return
    def AppSessionRemoteClose( self, session ):
        '''Hook invoked when the remote client closes the connection.'''
        return
    def SendBaseAndCheckReceive( self, socket, headcode, customer, send ):
        '''[self-checking] Send bytes and confirm the peer acknowledged the full length; closes the socket on mismatch.'''
        # Build the full frame and send it
        send = HslProtocol.CommandBytesBase( headcode, customer, self.Token, send )
        sendResult = self.Send( socket, send )
        if sendResult.IsSuccess == False: return sendResult
        # The peer echoes back the number of bytes it received
        checkResult = self.ReceiveLong( socket )
        if checkResult.IsSuccess == False: return checkResult
        # Length mismatch means a corrupted exchange: abort the connection
        if checkResult.Content != len(send):
            self.CloseSocket(socket)
            return OperateResult( msg = "接收的数据数据长度验证失败")
        return checkResult
    def SendBytesAndCheckReceive( self, socket, customer, send ):
        '''[self-checking] Send byte data and confirm receipt.'''
        return self.SendBaseAndCheckReceive( socket, HslProtocol.ProtocolUserBytes(), customer, send )
    def SendStringAndCheckReceive( self, socket, customer, send ):
        '''[self-checking] Send a string (as unicode bytes) and confirm receipt.'''
        data = SoftBasic.StringToUnicodeBytes(send)
        return self.SendBaseAndCheckReceive( socket, HslProtocol.ProtocolUserString(), customer, data )
    def ReceiveAndCheckBytes( self, socket, timeout ):
        '''[self-checking] Receive one full frame (head + content), verify the token, and acknowledge the length.'''
        # 30-second timeout check (port of the C# thread-pool watchdog, currently disabled)
        # if (timeout > 0) ThreadPool.QueueUserWorkItem( new WaitCallback( ThreadPoolCheckTimeOut ), hslTimeOut );
        # Receive the fixed-length head
        headResult = self.Receive(socket, HslProtocol.HeadByteLength())
        if headResult.IsSuccess == False:
            return OperateResult.CreateFailedResult(headResult)
        # Verify the security token embedded in the head
        if self.CheckRemoteToken(headResult.Content) == False:
            self.CloseSocket(socket)
            return OperateResult( msg = StringResources.TokenCheckFailed() )
        # The last 4 bytes of the head give the content length
        contentLength = struct.unpack( '<i', headResult.Content[(HslProtocol.HeadByteLength() - 4):])[0]
        # Receive the content
        contentResult = self.Receive(socket, contentLength)
        if contentResult.IsSuccess == False:
            return OperateResult.CreateFailedResult( contentResult )
        # Acknowledge the total number of bytes received
        checkResult = self.SendLong(socket, HslProtocol.HeadByteLength() + contentLength)
        if checkResult.IsSuccess == False:
            return OperateResult.CreateFailedResult( checkResult )
        head = headResult.Content
        content = contentResult.Content
        content = HslProtocol.CommandAnalysis(head, content)
        return OperateResult.CreateSuccessResult(head, content)
    def ReceiveStringContentFromSocket( self, socket ):
        '''[self-checking] Receive one string frame; returns (customer code, decoded string).'''
        receive = self.ReceiveAndCheckBytes(socket, 10000)
        if receive.IsSuccess == False: return OperateResult.CreateFailedResult(receive)
        # The protocol id in the head must mark a string frame
        if struct.unpack('<i',receive.Content1[0:4])[0] != HslProtocol.ProtocolUserString():
            self.CloseSocket(socket)
            return OperateResult( msg = "ReceiveStringContentFromSocket异常" )
        if receive.Content2 == None: receive.Content2 = bytearray(0)
        # Content1[4:8] is the customer code; the payload is UTF-16 text
        return OperateResult.CreateSuccessResult(struct.unpack('<i', receive.Content1[4:8])[0], receive.Content2.decode('utf-16'))
    def ReceiveBytesContentFromSocket( self, socket ):
        '''[self-checking] Receive one byte frame; returns (customer code, payload bytes).'''
        receive = self.ReceiveAndCheckBytes( socket, 10000 )
        if receive.IsSuccess == False: return OperateResult.CreateFailedResult(receive)
        # The protocol id in the head must mark a byte frame
        if struct.unpack('<i', receive.Content1[0:4])[0] != HslProtocol.ProtocolUserBytes():
            self.CloseSocket(socket)
            return OperateResult( msg = "字节内容检查失败" )
        # Content1[4:8] is the customer code
        return OperateResult.CreateSuccessResult( struct.unpack('<i', receive.Content1[4:8])[0], receive.Content2 )
    def ReceiveLong( self, socket ):
        '''Receive an 8-byte little-endian unsigned long from the socket.'''
        read = self.Receive(socket, 8)
        if read.IsSuccess == False: return OperateResult.CreateFailedResult(read)
        return OperateResult.CreateSuccessResult(struct.unpack('<Q', read.Content)[0])
    def SendLong( self, socket, value ):
        '''Send *value* as an 8-byte little-endian unsigned long.'''
        return self.Send( socket, struct.pack( '<Q', value ) )
    def CloseSocket(self, socket):
        '''Close the socket if it is open.'''
        if socket != None:
            socket.close()
class NetPushClient(NetworkXBase):
    '''Publish/subscribe client: subscribes with a key word and receives pushed data.'''
    IpAddress = "127.0.0.1"     # server IP
    Port = 12345                # server port
    keyWord = "A"               # subscription key word
    ReConnectTime = 10          # seconds to wait between reconnect attempts
    action = None               # callback: action(keyWord, pushed_string)
    def __init__( self, ipAddress, port, key):
        '''Create a push client for the given server address, port and subscription key.'''
        self.IpAddress = ipAddress
        self.Port = port
        self.keyWord = key
    def DataProcessingCenter( self, session, protocol, customer, content ):
        # Only string pushes are dispatched to the registered callback
        if protocol == HslProtocol.ProtocolUserString():
            if self.action != None: self.action( self.keyWord, content.decode('utf-16') )
    def SocketReceiveException( self, session ):
        # Receive failure: keep retrying the subscription until it succeeds
        while True:
            print('NetPushClient wait 10s to reconnect server')
            sleep( self.ReConnectTime )
            if self.CreatePush( ).IsSuccess == True:
                break
    def CreatePush( self, pushCallBack = None ):
        '''Create the push subscription; with a callback argument it is stored first, then the subscription is created.'''
        if pushCallBack == None:
            # Drop any previous connection before subscribing again
            if self.CoreSocket != None: self.CoreSocket.close( )
            connect = self.CreateSocketAndConnect( self.IpAddress, self.Port, 5000 )
            if connect.IsSuccess == False: return connect
            # Send the subscription key and await the server's verdict
            send = self.SendStringAndCheckReceive( connect.Content, 0, self.keyWord )
            if send.IsSuccess == False: return send
            receive = self.ReceiveStringContentFromSocket( connect.Content )
            if receive.IsSuccess == False : return receive
            # Non-zero customer code carries the server's error message
            if receive.Content1 != 0: return OperateResult( msg = receive.Content2 )
            # Start the background receive loop on the accepted socket
            appSession = AppSession( )
            self.CoreSocket = connect.Content
            appSession.WorkSocket = connect.Content
            self.BeginReceiveBackground( appSession )
            return OperateResult.CreateSuccessResult( )
        else:
            self.action = pushCallBack
            return self.CreatePush( )
    def ClosePush( self ):
        '''Stop the push subscription and close the connection.'''
        self.action = None
        if self.CoreSocket != None:
            # Customer code 100 tells the server we are leaving
            self.Send(self.CoreSocket, struct.pack('<i', 100 ) )
            self.CloseSocket(self.CoreSocket)
|
create_rdb.py | #!/usr/bin/env python
#coding:utf-8
'''
Created on 2019-03-05
@author: yunify
'''
import qingcloud.iaas
import threading
import time
from optparse import OptionParser
import sys
import os
import qingcloud.iaas.constants as const
import common.common as Common
def get_topslave_rdb_instance_id(conn,user_id,rdb_id):
    '''Return the rdb_instance_id of the "topslave" instance of *rdb_id*, or None.'''
    print("get_topslave_rdb_instance_id user_id == %s rdb_id == %s" % (user_id,rdb_id))
    # The API expects a list of RDB ids; wrap a bare id.
    if rdb_id and not isinstance(rdb_id, list):
        rdb_id = [rdb_id]
    print("rdb_id == %s" % (rdb_id))
    # DescribeRDBs
    action = const.ACTION_DESCRIBE_RDBS
    print("action == %s" % (action))
    ret = conn.describe_rdbs(owner=user_id, rdbs=rdb_id, verbose=1)
    print("describe_rdbs ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    rdb_set = ret['rdb_set']
    if not rdb_set:
        print("describe_rdbs rdb_set is None")
        exit(-1)
    # Only the first RDB record is examined (matches original behaviour).
    for rdb in rdb_set:
        found = None
        instances = rdb.get("rdb_instances")
        print("rdb_instances == %s" % (instances))
        for inst in instances:
            print("rdb_instance == %s" % (inst))
            if inst["rdb_instance_role"] == "topslave":
                found = inst["rdb_instance_id"]
        return found
def get_master_rdb_instance_id(conn,user_id,rdb_id):
    '''Return the rdb_instance_id of the "master" instance of *rdb_id*, or None.'''
    print("get_master_rdb_instance_id user_id == %s rdb_id == %s" % (user_id,rdb_id))
    # The API expects a list of RDB ids; wrap a bare id.
    if rdb_id and not isinstance(rdb_id, list):
        rdb_id = [rdb_id]
    print("rdb_id == %s" % (rdb_id))
    # DescribeRDBs
    action = const.ACTION_DESCRIBE_RDBS
    print("action == %s" % (action))
    ret = conn.describe_rdbs(owner=user_id, rdbs=rdb_id, verbose=1)
    print("describe_rdbs ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    rdb_set = ret['rdb_set']
    if not rdb_set:
        print("describe_rdbs rdb_set is None")
        exit(-1)
    # Only the first RDB record is examined (matches original behaviour).
    for rdb in rdb_set:
        found = None
        instances = rdb.get("rdb_instances")
        print("rdb_instances == %s" % (instances))
        for inst in instances:
            print("rdb_instance == %s" % (inst))
            if inst["rdb_instance_role"] == "master":
                found = inst["rdb_instance_id"]
        return found
def get_rdb_topslave_ip(conn,user_id,rdb_id):
    '''Return the private IP of the "topslave" instance of *rdb_id*, or None.'''
    print("get_rdb_topslave_ip user_id == %s rdb_id == %s" % (user_id,rdb_id))
    # The API expects a list of RDB ids; wrap a bare id.
    if rdb_id and not isinstance(rdb_id, list):
        rdb_id = [rdb_id]
    print("rdb_id == %s" % (rdb_id))
    # DescribeRDBs
    action = const.ACTION_DESCRIBE_RDBS
    print("action == %s" % (action))
    ret = conn.describe_rdbs(owner=user_id, rdbs=rdb_id, verbose=1)
    print("describe_rdbs ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    rdb_set = ret['rdb_set']
    if not rdb_set:
        print("describe_rdbs rdb_set is None")
        exit(-1)
    # Only the first RDB record is examined (matches original behaviour).
    for rdb in rdb_set:
        found = None
        instances = rdb.get("rdb_instances")
        print("rdb_instances == %s" % (instances))
        for inst in instances:
            print("rdb_instance == %s" % (inst))
            if inst["rdb_instance_role"] == "topslave":
                found = inst["private_ip"]
        return found
def get_rdb_master_ip(conn,user_id,rdb_id):
    '''Return the master IP of *rdb_id* (first RDB record), or None.'''
    print("get_rdb_master_ip user_id == %s rdb_id == %s" % (user_id,rdb_id))
    # The API expects a list of RDB ids; wrap a bare id.
    if rdb_id and not isinstance(rdb_id, list):
        rdb_id = [rdb_id]
    print("rdb_id == %s" % (rdb_id))
    # DescribeRDBs
    action = const.ACTION_DESCRIBE_RDBS
    print("action == %s" % (action))
    ret = conn.describe_rdbs(owner=user_id, rdbs=rdb_id, verbose=1)
    print("describe_rdbs ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    rdb_set = ret['rdb_set']
    if not rdb_set:
        print("describe_rdbs rdb_set is None")
        exit(-1)
    # Only the first RDB record is examined (matches original behaviour).
    return rdb_set[0].get("master_ip")
def create_rdb(conn,user_id,vxnet_id,master_private_ip,topslave_private_ip):
    '''Create an RDB in *vxnet_id*, wait for the job to finish, then persist
    the resulting ids/IPs to files under /opt and tag the resource.

    Runs as the worker thread started from __main__.
    NOTE(review): credentials are hard-coded in the create_rdb call below.
    '''
    print("子线程启动")
    print("create_rdb user_id == %s vxnet_id == %s master_private_ip == %s topslave_private_ip == %s" % (user_id,vxnet_id,master_private_ip,topslave_private_ip))
    if not master_private_ip:
        print("master_private_ip is None")
        # CreateRDB without pinned private IPs
        action = const.ACTION_CREATE_RDB
        print("action == %s" % (action))
        ret = conn.create_rdb(owner=user_id,vxnet=vxnet_id,rdb_engine='psql',engine_version='9.4',rdb_username='yunify',rdb_password='Zhu88jie',rdb_type=2,storage_size=10,rdb_name='数据库服务',description='数据库')
        print("create_rdb ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    else:
        print("master_private_ip is %s" %(master_private_ip))
        # CreateRDB with explicit master/topslave private IPs
        action = const.ACTION_CREATE_RDB
        print("action == %s" % (action))
        private_ips_list = {"master":master_private_ip,"topslave":topslave_private_ip}
        print("private_ips_list == %s" %(private_ips_list))
        ret = conn.create_rdb(owner=user_id,vxnet=vxnet_id,rdb_engine='psql',engine_version='9.4',rdb_username='yunify',rdb_password='Zhu88jie',rdb_type=2,storage_size=10,rdb_name='数据库服务',description='数据库',private_ips=[private_ips_list])
        print("create_rdb ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    rdb_id = ret['rdb']
    print("job_id == %s" % (job_id))
    print("rdb_id == %s" % (rdb_id))
    # Poll the job status once per second, up to 300 seconds
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn,job_id)
        if status == "successful":
            print("create_rdb successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("create_rdb rdb successful")
        # create_rdb succeeded
        create_rdb_status = "True"
        # Persist create_rdb_status to a file
        create_rdb_status_conf = "/opt/create_rdb_status_conf"
        with open(create_rdb_status_conf, "w+") as f:
            f.write("CREATE_RDB_STATUS %s" % (create_rdb_status))
        # Persist rdb_id to a file
        rdb_id_conf = "/opt/rdb_id_conf"
        with open(rdb_id_conf, "w+") as f:
            f.write("RDB_ID %s" %(rdb_id))
        # Persist the master IP to a file
        rdb_master_ip_conf = "/opt/rdb_master_ip_conf"
        rdb_master_ip = get_rdb_master_ip(conn,user_id,rdb_id)
        print("get_rdb_master_ip rdb_master_ip == %s" %(rdb_master_ip))
        if rdb_master_ip:
            with open(rdb_master_ip_conf, "w+") as f:
                f.write("POSTGRESQL_ADDRESS %s" %(rdb_master_ip))
        # Persist the topslave IP to a file
        rdb_topslave_ip_conf = "/opt/rdb_topslave_ip_conf"
        rdb_topslave_ip = get_rdb_topslave_ip(conn,user_id,rdb_id)
        print("get_rdb_topslave_ip rdb_topslave_ip == %s" %(rdb_topslave_ip))
        if rdb_topslave_ip:
            with open(rdb_topslave_ip_conf, "w+") as f:
                f.write("RDB_TOPSLAVE_IP %s" %(rdb_topslave_ip))
        # Persist the master instance id to a file
        master_rdb_instance_id_conf = "/opt/master_rdb_instance_id_conf"
        master_rdb_instance_id = get_master_rdb_instance_id(conn,user_id,rdb_id)
        print("get_master_rdb_instance_id master_rdb_instance_id == %s" %(master_rdb_instance_id))
        if master_rdb_instance_id:
            with open(master_rdb_instance_id_conf, "w+") as f:
                f.write("MASTER_RDB_INSTANCE_ID %s" %(master_rdb_instance_id))
        # Persist the topslave instance id to a file
        topslave_rdb_instance_id_conf = "/opt/topslave_rdb_instance_id_conf"
        topslave_rdb_instance_id = get_topslave_rdb_instance_id(conn,user_id,rdb_id)
        print("get_topslave_rdb_instance_id topslave_rdb_instance_id == %s" %(topslave_rdb_instance_id))
        if topslave_rdb_instance_id:
            with open(topslave_rdb_instance_id_conf, "w+") as f:
                f.write("TOPSLAVE_RDB_INSTANCE_ID %s" %(topslave_rdb_instance_id))
        # Attach a dated tag to the new RDB resource
        current_time = time.strftime("%Y-%m-%d", time.localtime())
        tag_name = '桌面云数据库 %s' %(current_time)
        Common.attach_tags_to_resource(conn,user_id=user_id,tag_name=tag_name,resource_type='rdb',resource_id=rdb_id)
    print("子线程结束")
if __name__ == "__main__":
    print("主线程启动")
    # Parse command-line arguments
    opt_parser = OptionParser()
    opt_parser.add_option("-z", "--zone_id", action="store", type="string", \
                          dest="zone_id", help='zone id', default="")
    opt_parser.add_option("-a", "--access_key_id", action="store", type="string", \
                          dest="access_key_id", help='access key id', default="")
    opt_parser.add_option("-s", "--secret_access_key", action="store", type="string", \
                          dest="secret_access_key", help='secret access key', default="")
    opt_parser.add_option("-H", "--host", action="store", type="string", \
                          dest="host", help='host', default="")
    opt_parser.add_option("-p", "--port", action="store", type="string", \
                          dest="port", help='port', default="")
    opt_parser.add_option("-P", "--protocol", action="store", type="string", \
                          dest="protocol", help='protocol', default="")
    opt_parser.add_option("-v", "--vxnet_id", action="store", type="string", \
                          dest="vxnet_id", help='vxnet id', default="")
    opt_parser.add_option("-m", "--master_private_ip", action="store", type="string", \
                          dest="master_private_ip", help='master private ip', default="")
    opt_parser.add_option("-t", "--topslave_private_ip", action="store", type="string", \
                          dest="topslave_private_ip", help='topslave private ip', default="")
    (options, _) = opt_parser.parse_args(sys.argv)
    zone_id = options.zone_id
    access_key_id = options.access_key_id
    secret_access_key = options.secret_access_key
    host = options.host
    port = options.port
    protocol = options.protocol
    vxnet_id = options.vxnet_id
    master_private_ip = options.master_private_ip
    topslave_private_ip = options.topslave_private_ip
    print("zone_id:%s" % (zone_id))
    print("access_key_id:%s" % (access_key_id))
    print("secret_access_key:%s" % (secret_access_key))
    print("host:%s" % (host))
    print("port:%s" % (port))
    print("protocol:%s" % (protocol))
    print("vxnet_id:%s" % (vxnet_id))
    print("master_private_ip:%s" % (master_private_ip))
    print("topslave_private_ip:%s" % (topslave_private_ip))
    # Connect to the IaaS backend
    conn = Common.connect_iaas(zone_id, access_key_id, secret_access_key, host,port,protocol)
    print("connect_iaas conn == %s" % (conn))
    # Resolve the account (user) id from the access key
    user_id = Common.get_user_id(conn,access_key_id)
    print("get_user_id user_id == %s" % (user_id))
    # Run the RDB creation on a worker thread and wait for it
    t = threading.Thread(target=create_rdb,args=(conn,user_id,vxnet_id,master_private_ip,topslave_private_ip,))
    t.start()
    t.join()
    print("主线程结束")
|
container.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013 Qin Xuye <qin@qinxuye.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2014-5-17
@author: chine
'''
import os
import multiprocessing
import threading
from cola.core.utils import import_job_desc, get_ip
from cola.core.logs import get_logger
from cola.job.task import Task
from cola.functions.budget import BudgetApplyClient
from cola.functions.speed import SpeedControlClient
from cola.functions.counter import CounterClient
# Consecutive idle polls (5s apart) after which an 'auto'-sized job's
# idle checker stops monitoring (see Container._init_idle_status_checker).
MAX_IDLE_TIMES = 5
class Container(object):
    '''Runs a group of crawler tasks in one worker, wiring each task to the
    counter/budget/speed function servers and monitoring idle status.
    '''
    def __init__(self, container_id, working_dir,
                 job_path, job_name, env, mq,
                 counter_server, budget_server, speed_server,
                 stopped, nonsuspend, idle_statuses, n_tasks=1,
                 is_local=False, master_ip=None, logger=None,
                 task_start_id=0):
        self.container_id = container_id
        self.working_dir = working_dir
        self.mq = mq
        self.env = env
        self.job_name = job_name
        # Prefer a job description already cached in env; fall back to import
        self.job_desc = env.get('job_desc_%s' % job_name) or \
                            import_job_desc(job_path)
        self.counter_server = counter_server
        self.budget_server = budget_server
        self.speed_server = speed_server
        self.stopped = stopped          # shared event: set => shut down
        self.nonsuspend = nonsuspend    # shared event: cleared => pause tasks
        self.idle_statuses = idle_statuses  # shared per-container idle flags
        self.n_tasks = n_tasks
        self.is_local = is_local
        self.master_ip = master_ip
        self.logger = logger
        self.task_start_id = task_start_id
        self.ip = self.env.get('ip', None) or get_ip()
        # One function client per task; filled in by init()
        self.counter_clients = [None for _ in range(self.n_tasks)]
        self.budget_clients = [None for _ in range(self.n_tasks)]
        self.speed_clients = [None for _ in range(self.n_tasks)]
        self.task_threads = []
        self.inited = False
        self.lock = multiprocessing.Lock()
    def init(self):
        '''Idempotent setup: build clients, tasks and helper threads (guarded by a lock).'''
        with self.lock:
            if self.inited: return
            self.log_file = os.path.join(self.working_dir, 'job.log')
            self.logger = self.logger or get_logger(name='cola_task',
                                                    filename=self.log_file,
                                                    server=self.master_ip)
            for i in range(self.n_tasks):
                self.counter_clients[i] = CounterClient(self.counter_server,
                                                        app_name=self.job_name)
                self.budget_clients[i] = BudgetApplyClient(self.budget_server,
                                                           app_name=self.job_name)
                self.speed_clients[i] = SpeedControlClient(self.speed_server, self.ip,
                                                           self.task_start_id+i,
                                                           app_name=self.job_name)
            self.init_tasks()
            self._init_counter_sync()
            self._init_idle_status_checker()
            self.inited = True
    def init_tasks(self):
        '''Create one Task (and its runner thread, not yet started) per task slot.'''
        self.tasks = []
        for i in range(self.n_tasks):
            task_id = self.task_start_id + i
            # Each task gets its own working sub-directory named by task id
            task_dir = os.path.join(self.working_dir, str(task_id))
            task = Task(task_dir, self.job_desc, task_id, self.mq,
                        self.stopped, self.nonsuspend,
                        self.counter_clients[i],
                        self.budget_clients[i],
                        self.speed_clients[i],
                        logger=self.logger, env=self.env,
                        is_local=self.is_local, job_name=self.job_name)
            t = threading.Thread(target=task.run)
            self.tasks.append(task)
            self.task_threads.append(t)
    def _init_counter_sync(self):
        '''Prepare a thread that flushes every task's counters every 5s (and once on shutdown).'''
        def _sync():
            for task in self.tasks:
                task.counter_client.sync()
        def sync():
            try:
                while not self.stopped.is_set():
                    _sync()
                    self.stopped.wait(5)
            finally:
                # Final flush so no counts are lost at shutdown
                _sync()
        self.sync_t = threading.Thread(target=sync)
    def _init_idle_status_checker(self):
        '''Prepare a thread that publishes this container's idle flag every 5s.

        For 'auto'-sized jobs the thread exits after MAX_IDLE_TIMES
        consecutive idle polls.
        '''
        def check():
            idle_times = 0
            while not self.stopped.is_set():
                self.idle_statuses[self.container_id] = \
                    all([task.is_idle() for task in self.tasks])
                if self.idle_statuses[self.container_id]:
                    idle_times += 1
                    if self.job_desc.settings.job.size=='auto' and idle_times > MAX_IDLE_TIMES:
                        break
                else:
                    idle_times = 0
                self.stopped.wait(5)
        self.check_idle_t = threading.Thread(target=check)
    def run(self, block=False):
        '''Start all task threads plus the sync and idle-check helpers; optionally block.'''
        self.init()
        for task in self.task_threads:
            task.start()
        self.sync_t.start()
        self.check_idle_t.start()
        if block:
            self.wait_for_stop()
    def wait_for_stop(self):
        '''Join every worker thread, tolerating KeyboardInterrupt during each join.'''
        if not self.inited: return
        for task in self.task_threads:
            try:
                task.join()
            except KeyboardInterrupt:
                continue
        try:
            self.sync_t.join()
        except KeyboardInterrupt:
            pass
        try:
            self.check_idle_t.join()
        except KeyboardInterrupt:
            pass
__init__.py | '''FLAsk support for OIDC Access Tokens -- FLAAT. A set of decorators for authorising
access to OIDC authenticated REST APIs.'''
# This code is distributed under the MIT License
# pylint
# vim: tw=100 foldmethod=indent
# pylint: disable=invalid-name, superfluous-parens
# pylint: disable=logging-not-lazy, logging-format-interpolation, logging-fstring-interpolation
# pylint: disable=wrong-import-position, no-self-use, line-too-long
from functools import wraps
import json
import os
import sys
from itertools import count
# Python 2/3 compatibility shim: the Queue module was renamed to queue in
# Python 3.  Use sys.version_info (an int tuple) for the check instead of
# string-indexing sys.version, which is fragile and non-idiomatic.
is_py2 = sys.version_info[0] == 2
if is_py2:
    # pylint: disable=import-error
    from Queue import Queue, Empty
else:
    from queue import Queue, Empty
from threading import Thread
import logging
# Gracefully load modules:
available_web_frameworks = ['flask', 'aiohttp', 'fastapi']
try:
from flask import request
except ModuleNotFoundError:
available_web_frameworks.remove('flask')
try:
from aiohttp import web
except ModuleNotFoundError:
available_web_frameworks.remove('aiohttp')
try:
import asyncio
from fastapi.responses import JSONResponse
except ModuleNotFoundError:
available_web_frameworks.remove('fastapi')
from aarc_g002_entitlement import Aarc_g002_entitlement
from aarc_g002_entitlement import Aarc_g002_entitlement_Error
from aarc_g002_entitlement import Aarc_g002_entitlement_ParseError
from . import tokentools
from . import issuertools
from . import flaat_exceptions
from .caches import Issuer_config_cache
logger = logging.getLogger(__name__)
name = "flaat"
# Module-wide defaults; may be overwritten per initialisation of flaat
verbose = 0        # 0: quiet .. 3: maximum (see Flaat.set_verbosity)
verify_tls = True  # disable only for development / debugging
def ensure_is_list(item):
    '''Normalise *item* to a list: a bare string is wrapped in a
    one-element list, anything else is passed through unchanged.'''
    return [item] if isinstance(item, str) else item
def check_environment_for_override(env_key):
    ''' Override the actual group membership, if environment is set.

    Returns the JSON-decoded list stored in the environment variable
    *env_key*, or None when the variable is unset or cannot be decoded.
    '''
    # Fetch outside the try block: in the original code a failure inside
    # os.getenv left env_val unbound, so the logging in the handler raised
    # NameError and masked the real error.
    env_val = os.getenv(env_key)
    if env_val is None:
        return None
    try:
        return json.loads(env_val)
    except (TypeError, json.JSONDecodeError) as e:
        # Same wording as before so existing log scrapers still match.
        logger.error(F"Cannot decode JSON group list from the environment:"
                     F"{env_val}\n{e}")
    return None
def formatted_entitlements(entitlements):
    '''Render a list of AARC-G002 entitlement objects as one human-readable
    multi-line string (with a leading and a trailing newline).'''
    def render(ent):
        '''Return the nicely formatted single entitlement.'''
        role_text = '{}'.format(ent.role) if ent.role else 'n/a'
        lines = [
            ' namespace_id: {}'.format(ent.namespace_id),
            ' delegated_namespace: {}'.format(ent.delegated_namespace),
            ' subnamespaces: {}'.format(','.join('{}'.format(ns) for ns in ent.subnamespaces)),
            ' group: {}'.format(ent.group),
            ' subgroups: {}'.format(','.join('{}'.format(grp) for grp in ent.subgroups)),
            ' role_in_subgroup {}'.format(role_text),
            ' group_authority: {}'.format(ent.group_authority),
        ]
        return '\n'.join(lines)
    return '\n' + '\n\n'.join(render(e) for e in entitlements) + '\n'
class Flaat():
    '''FLAsk support for OIDC Access Tokens.
    Provide decorators and configuration for OIDC'''
    # pylint: disable=too-many-instance-attributes
    def __init__(self):
        # --- trust configuration (see the set_trusted_OP* methods) ---
        self.trusted_op_list = None
        self.iss = None
        self.op_hint = None
        self.trusted_op_file = None
        self.verbose = verbose          # starts from the module-level default
        self.verify_tls = True
        # Only needed to reach the token introspection endpoint:
        self.client_id = None
        self.client_secret = None
        self.last_error = ''
        self.issuer_config_cache = Issuer_config_cache() # maps issuer to issuer configs # formerly issuer_configs
        self.accesstoken_issuer_cache = {} # maps accesstoken to issuer
        self.num_request_workers = 10
        self.client_connect_timeout = 1.2 # seconds
        # No leading slash ('/') in ops_that_support_jwt !!!
        # These OPs embed the issuer into the (JWT) access token, so they are
        # queried directly and excluded from list-/file-based searching.
        self.ops_that_support_jwt = \
                [ 'https://iam-test.indigo-datacloud.eu',
                  'https://iam.deep-hybrid-datacloud.eu',
                  'https://iam.extreme-datacloud.eu',
                  'https://wlcg.cloud.cnaf.infn.it',
                  'https://aai.egi.eu/oidc',
                  'https://aai-dev.egi.eu/oidc',
                  'https://oidc.scc.kit.edu/auth/realms/kit',
                  'https://unity.helmholtz-data-federation.de/oauth2',
                  'https://login.helmholtz-data-federation.de/oauth2',
                  'https://login-dev.helmholtz.de/oauth2',
                  'https://login.helmholtz.de/oauth2',
                  'https://b2access.eudat.eu/oauth2',
                  'https://b2access-integration.fz-juelich.de/oauth2',
                  'https://services.humanbrainproject.eu/oidc',
                  'https://login.elixir-czech.org/oidc',
                ]
        # Where to look for a claim, and in which order:
        self.claim_search_precedence = ['userinfo', 'access_token']
        self.request_id = "unset"       # set per incoming request by the decorators
        self.supported_web_frameworks = available_web_frameworks
        # Default to the first importable framework; override via set_web_framework().
        if 'flask' in available_web_frameworks:
            self.web_framework = 'flask'
        elif 'aiohttp' in available_web_frameworks:
            self.web_framework = 'aiohttp'
        elif 'fastapi' in available_web_frameworks:
            self.web_framework = 'fastapi'
        self.raise_error_on_return = True # else just return an error
def get_request_id(self, request_object):
'''Return a string identifying the request'''
# request_object = self._find_request_based_on_web_framework(request, args, kwargs)
the_id=""
try:
if self.web_framework == "flask":
the_id = F"{str(request_object.remote_addr)}--" \
+ str(request_object.base_url)
elif self.web_framework == "aiohttp":
the_id = str(request_object.remote) + "--" \
+ str(request_object.url)
elif self.web_framework == "fastapi":
the_id = F"{str(request_object.client.host)}:{str(request_object.client.port)}--" \
+ str(request_object.url)
except AttributeError as e:
logger.error(F"Cannot identify the request: {e}\n{the_id}")
return(the_id)
    def set_cache_lifetime(self, lifetime):
        '''Set cache lifetime of requests_cache in seconds, default: 300s'''
        issuertools.cache_options.set_lifetime(lifetime)
    def set_cache_allowable_codes(self, allowable_codes):
        '''Set the http status codes that will be cached'''
        issuertools.cache_options.set_allowable_codes(allowable_codes)
    def set_cache_backend(self, backend):
        '''Set the cache backend used by requests_cache'''
        issuertools.cache_options.backend = backend
    def set_trusted_OP(self, iss):
        '''Define OIDC Provider. Must be a valid URL. E.g. 'https://aai.egi.eu/oidc/'
        This should not be required for OPs that put their address into the AT (e.g. keycloak, mitre,
        shibboleth)'''
        # Stored without a trailing slash so later comparisons are canonical.
        self.iss = iss.rstrip('/')
    def set_trusted_OP_list(self, trusted_op_list):
        '''Define a list of OIDC provider URLs.
        E.g. ['https://iam.deep-hybrid-datacloud.eu/', 'https://login.helmholtz.de/oauth2/', 'https://aai.egi.eu/oidc/'] '''
        self.trusted_op_list = []
        for issuer in trusted_op_list:
            self.trusted_op_list.append(issuer.rstrip('/'))
        # iss_config = issuertools.find_issuer_config_in_list(self.trusted_op_list, self.op_hint,
        #         exclude_list = [])
        # self.issuer_config_cache.add_list(iss_config)
    def set_trusted_OP_file(self, filename='/etc/oidc-agent/issuer.config', hint=None):
        '''Set filename of oidc-agent's issuer.config. Requires oidc-agent to be installed.'''
        self.trusted_op_file = filename
        self.op_hint = hint
    def set_OP_hint(self, hint):
        '''String to specify the hint. This is used for regex searching in lists of providers for
        possible matching ones.'''
        self.op_hint = hint
    def set_verbosity(self, level):
        '''Verbosity level of flaat:
           0: No output
           1: Errors
           2: More info, including token info
           3: Max'''
        # Propagated to the helper modules so all components log consistently.
        self.verbose = level
        tokentools.verbose = level
        issuertools.verbose = level
    def set_verify_tls(self, param_verify_tls=True):
        '''Whether to verify tls connections. Only use for development and debugging'''
        self.verify_tls = param_verify_tls
        issuertools.verify_tls = param_verify_tls
    def set_client_id(self, client_id):
        '''Client id. At the moment this one is sent to all matching providers. This is only
        required if you need to access the token introspection endpoint. I don't have a use case for
        that right now.'''
        # FIXME: consider client_id/client_secret per OP.
        self.client_id = client_id
    def set_client_secret(self, client_secret):
        '''Client Secret. At the moment this one is sent to all matching providers.'''
        self.client_secret = client_secret
    def set_last_error(self, error):
        '''Store an error message (overwrites any previous one)'''
        self.last_error = error
    def extend_last_error(self, error):
        '''Append an error message to the stored one (newline separated)'''
        if self.last_error == '':
            self.last_error = error
        else:
            self.last_error = F"{self.last_error}\n{error}"
    def get_last_error(self):
        '''Retrieve the last error message.  Note: it is NOT cleared here
        (clearing is deliberately disabled below); use
        self_clear_last_error() to reset it.'''
        retval = self.last_error
        # self.last_error = ''
        return retval
    def self_clear_last_error(self):
        '''Clear last error message'''
        self.last_error = ''
    def set_num_request_workers(self, num):
        '''set number of request workers (threads used to poll userinfo endpoints)'''
        self.num_request_workers = num
        issuertools.num_request_workers = num
    def get_num_request_workers(self):
        '''get number of request workers'''
        return (self.num_request_workers)
    def set_client_connect_timeout(self, num):
        '''set timeout (seconds) for flaat connecting to OPs'''
        self.client_connect_timeout = num
    def get_client_connect_timeout(self):
        '''get timeout for flaat connecting to OPs'''
        return (self.client_connect_timeout)
    def set_iss_config_timeout(self, num):
        '''set timeout for connections to get config from OP'''
        issuertools.timeout = num
    def get_iss_config_timeout(self):
        '''get timeout for connections to get config from OP'''
        return (issuertools.timeout)
    def set_timeout(self, num):
        '''set global timeouts for http connections (both issuer-config and client-connect)'''
        self.set_iss_config_timeout(num)
        self.set_client_connect_timeout(num)
    def get_timeout(self):
        '''get global timeouts for http connections as an
        (iss_config_timeout, client_connect_timeout) tuple'''
        return ((self.get_iss_config_timeout(), self.get_client_connect_timeout()))
    def set_claim_search_precedence(self, a_list):
        '''set order in which to search for specific claim'''
        self.claim_search_precedence = a_list
    def get_claim_search_precedence(self):
        '''get order in which to search for specific claim'''
        return (self.claim_search_precedence)
    def set_web_framework(self, framework_name):
        '''specify the web framework. Currently supported are 'flask', 'aiohttp' and 'fastapi'.
        Exits the process (code 42) on an unsupported name.'''
        if framework_name in self.supported_web_frameworks:
            self.web_framework = framework_name
        else:
            logger.error("Specified Web Framework '%s' is not supported" % framework_name)
            sys.exit (42)
    def _find_issuer_config_everywhere(self, access_token):
        '''Use many places to find issuer configs.

        Tries, in order: the per-token cache, the issuer embedded in a JWT
        access token, the single issuer from set_trusted_OP, the list from
        set_trusted_OP_list, and finally oidc-agent's issuer.config file.
        Returns a list of issuer-config dicts, or None when nothing was
        found or the token's issuer is not trusted (last_error is set).
        '''
        # 0: Use accesstoken_issuer cache to find issuerconfig:
        if self.verbose > 0:
            logger.info('0: Trying to find issuer in cache')
        try:
            issuer = self.accesstoken_issuer_cache[access_token]
            iss_config = self.issuer_config_cache.get(issuer)
            if self.verbose > 1:
                logger.info(F" 0: returning {iss_config['issuer']}")
            return [iss_config]
        except KeyError as e:
            # issuer not found in cache
            pass
        # 1: find info in the AT
        if self.verbose > 0:
            logger.info('1: Trying to find issuer in access_token')
        at_iss = tokentools.get_issuer_from_accesstoken_info(access_token)
        if at_iss is not None:
            # The AT names its issuer: enforce that it is one of ours.
            trusted_op_list_buf = []
            if self.trusted_op_list is not None:
                if len(self.trusted_op_list) >0:
                    trusted_op_list_buf = self.trusted_op_list
            if self.iss is not None:
                trusted_op_list_buf.append(self.iss)
            if at_iss.rstrip('/') not in trusted_op_list_buf:
                logger.warning(F'The issuer {at_iss} of the received access_token is not trusted')
                self.set_last_error(F'The issuer {at_iss} of the received access_token is not trusted')
                # newline="\n"
                # logger.warning(F"list: {newline.join(trusted_op_list_buf)}")
                return None
        iss_config = issuertools.find_issuer_config_in_at(access_token)
        if iss_config is not None:
            return [iss_config]
        # 2: use a provided string
        if self.verbose > 0:
            logger.info('2: Trying to find issuer from "set_iss"')
        iss_config = issuertools.find_issuer_config_in_string(self.iss)
        if iss_config is not None:
            return [iss_config]
        # 3: Try the provided list of providers:
        if self.verbose > 0:
            logger.info('3: Trying to find issuer from trusted_op_list')
        iss_config = issuertools.find_issuer_config_in_list(self.trusted_op_list, self.op_hint,
                exclude_list = self.ops_that_support_jwt)
        if iss_config is not None:
            return iss_config
        # 4: Try oidc-agent's issuer config file
        if self.verbose > 0:
            logger.info('Trying to find issuer from "set_OIDC_provider_file"')
        iss_config = issuertools.find_issuer_config_in_file(self.trusted_op_file, self.op_hint,
                exclude_list = self.ops_that_support_jwt)
        if iss_config is not None:
            return iss_config
        self.set_last_error("Issuer config not found")
        return None
    # def verify_at_is_from_truested_iss(self, access_token):
    #     '''verify that the AT is issued by a trusted issuer'''
    def get_info_thats_in_at(self, access_token):
        # FIXME: Add here parameter verify=True, then go and verify the token
        '''Return the information contained inside the access_token itself
        (decoded JWT payload), or None for an empty token.  No signature
        verification is performed here.'''
        accesstoken_info = None
        if access_token:
            accesstoken_info = tokentools.get_accesstoken_info(access_token)
        # at_head=None
        # at_body=None
        # if accesstoken_info is not None and not {}:
        #     at_head = accesstoken_info['header']
        #     at_body = accesstoken_info['body']
        # return (at_head, at_body)
        return (accesstoken_info)
    def get_issuer_from_accesstoken(self, access_token):
        '''get the issuer that issued the accesstoken'''
        try:
            issuer = self.accesstoken_issuer_cache[access_token]
            return(issuer)
        except KeyError:
            # Cache miss: querying the userinfo endpoints fills
            # accesstoken_issuer_cache as a side effect, so retry once.
            self.get_info_from_userinfo_endpoints(access_token)
            try:
                issuer = self.accesstoken_issuer_cache[access_token]
                return(issuer)
            except KeyError:
                return None
    def get_info_from_userinfo_endpoints(self, access_token):
        '''Traverse all reasonable configured userinfo endpoints and query them with the
        access_token. Note: For OPs that include the iss inside the AT, they will be directly
        queried, and are not included in the search (because that makes no sense).
        Returns user_info object or None. If None is returned self.last_error is set with a
        meaningful message.
        Also updates
        - accesstoken_issuer_cache
        - issuer_config_cache
        '''
        # user_info = "" # return value
        user_info = None # return value
        # get a sensible issuer config. In case we don't have a jwt AT, we poll more OPs
        issuer_config_list = self._find_issuer_config_everywhere(access_token)
        self.issuer_config_cache.add_list(issuer_config_list)
        # If there is no issuer in the cache by now, we're dead
        if len(self.issuer_config_cache) == 0 :
            logger.warning('No issuer config found, or issuer not supported')
            return None
        # get userinfo: fan the queries out over a small thread pool.
        param_q = Queue(self.num_request_workers*2)
        result_q = Queue(self.num_request_workers*2)
        def thread_worker_get_userinfo():
            '''Thread worker: pull (token, issuer_config) jobs off param_q,
            query the userinfo endpoint, push the result onto result_q.
            A 5s-empty param_q terminates the worker.'''
            def safe_get(q):
                try:
                    return q.get(timeout=5)
                except Empty:
                    return None
            while True:
                item = safe_get(param_q)
                if item is None:
                    break
                result = issuertools.get_user_info(item['access_token'], item['issuer_config'])
                result_q.put(result)
                param_q.task_done()
                # NOTE(review): task_done() without a matching get() balances
                # the put() above so result_q.join() below can return --
                # unusual use of Queue.join, but apparently intentional.
                result_q.task_done()
        for i in range (self.num_request_workers):
            t = Thread(target=thread_worker_get_userinfo)
            t.daemon = True
            t.start()
        if self.verbose > 0:
            logger.debug (F"len of issuer_config_cache: {len(self.issuer_config_cache)}")
        for issuer_config in self.issuer_config_cache:
            # logger.info(F"tyring to get userinfo from {issuer_config['issuer']}")
            # user_info = issuertools.get_user_info(access_token, issuer_config)
            params = {}
            params['access_token'] = access_token
            params['issuer_config'] = issuer_config
            param_q.put(params)
        # Collect results from threadpool
        param_q.join()
        result_q.join()
        try:
            # First non-None result wins; it also tells us the token's issuer.
            while not result_q.empty():
                retval = result_q.get(block=False, timeout=self.client_connect_timeout)
                if retval is not None:
                    (user_info, issuer_config) = retval
                    issuer = issuer_config['issuer']
                    if self.verbose > 1:
                        logger.debug(F"got issuer: {issuer}")
                    self.issuer_config_cache.add_config(issuer, issuer_config)
                    # logger.info(F"storing in accesstoken cache: {issuer} -=> {access_token}")
                    self.accesstoken_issuer_cache[access_token] = issuer
                    return (user_info)
        except Empty:
            logger.info("EMPTY result in thead join")
            # pass
        except Exception as e:
            logger.error("Error: Uncaught Exception: {}".format(str(e)))
        if user_info is None:
            self.set_last_error ("User Info not found or not accessible. Something may be wrong with the Access Token.")
        return(user_info)
    def get_info_from_introspection_endpoints(self, access_token):
        '''If there's a client_id and client_secret defined, we access the token introspection
        endpoint and return the info obtained from there.
        Returns None when no issuer config is available (last_error is set).'''
        # get introspection_token
        introspection_info = None
        issuer_config_list = self._find_issuer_config_everywhere(access_token)
        self.issuer_config_cache.add_list(issuer_config_list)
        if len(self.issuer_config_cache) == 0 :
            logger.info("Issuer Configs yielded None")
            self.set_last_error("Issuer of Access Token is not supported")
            return None
        # Ask each cached issuer in turn; first successful introspection wins.
        for issuer_config in self.issuer_config_cache:
            introspection_info = issuertools.get_introspected_token_info(access_token, issuer_config,
                    self.client_id, self.client_secret)
            if introspection_info is not None:
                break
        return(introspection_info)
    def get_all_info_by_at(self, access_token):
        '''Collect all possible user info and return them as one json
        object: the decoded AT payload, the userinfo-endpoint response and
        (if client credentials are set) the introspection response, merged.
        Returns None on missing/expired token or when userinfo failed.'''
        if access_token is None:
            self.set_last_error('No access token found')
            return None
        accesstoken_info = self.get_info_thats_in_at(access_token)
        user_info = self.get_info_from_userinfo_endpoints(access_token)
        introspection_info = self.get_info_from_introspection_endpoints(access_token)
        # FIXME: We have to verify the accesstoken
        # And verify that it comes from a trusted issuer!!
        if accesstoken_info is not None:
            # Reject tokens whose exp is already in the past.
            timeleft = tokentools.get_timeleft(accesstoken_info)
            if timeleft < 0:
                self.set_last_error('Token expired for %d seconds' % abs(timeleft))
                return None
        if user_info is None:
            return None
        # return tokentools.merge_tokens ([accesstoken_info['header'], accesstoken_info['body'], user_info, introspection_info])
        return tokentools.merge_tokens ([accesstoken_info, user_info, introspection_info])
def _find_request_based_on_web_framework(self, request, args, kwargs):
'''use configured web_framework and return the actual request object'''
if self.web_framework == 'flask':
return request
if self.web_framework == 'aiohttp':
return args[0]
if self.web_framework == 'fastapi':
return kwargs["request"]
return None
    def _return_formatter_wf(self, return_value, status=200):
        '''Return the object appropriate for the chosen web framework.
        With raise_error_on_return (the default) the framework-specific
        flaat exception is raised instead, which the framework converts
        into an http error response.'''
        if status != 200:
            logger.error(F'Incoming request [{self.request_id}] http status: {status} - {self.get_last_error()}')
        if self.raise_error_on_return:
            if self.web_framework == 'flask':
                raise flaat_exceptions.FlaatExceptionFlask(reason=return_value, status_code=status)
            if self.web_framework == 'aiohttp':
                raise flaat_exceptions.FlaatExceptionAio(reason=return_value, status_code=status)
            if self.web_framework == 'fastapi':
                raise flaat_exceptions.FlaatExceptionFastapi(reason=return_value, status_code=status)
        else:
            # Plain-response mode: build the framework's native response.
            if self.web_framework == 'flask':
                return (return_value, status)
            if self.web_framework == 'aiohttp':
                return web.Response(text=return_value, status=status)
            if self.web_framework == 'fastapi':
                return JSONResponse(content=return_value, status_code=status)
        #return return_value
        return None
    def _get_all_info_from_request(self, param_request):
        '''gather all info about the user that we can find.
        Returns a "supertoken" json structure, or None when the request
        carries no access token (last_error is set in that case).'''
        access_token = tokentools.get_access_token_from_request(param_request)
        if access_token is None:
            self.set_last_error("No Access Token Found.")
            return None
        # logger.info (F"access_token: {access_token}")
        return self.get_all_info_by_at(access_token)
    def _wrap_async_call(self, func, *args, **kwargs):
        '''wrap function call so that it is awaited when necessary,
        depending on the web framework used.
        '''
        def get_or_create_eventloop():
            # asyncio.get_event_loop() raises in threads that have no loop;
            # create and register a fresh one on demand.
            try:
                return asyncio.get_event_loop()
            except RuntimeError as ex:
                if "There is no current event loop in thread" in str(ex):
                    loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(loop)
                    return asyncio.get_event_loop()
        if self.web_framework == 'fastapi':
            # fastapi views may be coroutines; run them to completion here.
            if (asyncio.iscoroutine(func) or asyncio.iscoroutinefunction(func)):
                return get_or_create_eventloop().run_until_complete(func(*args, **kwargs))
        logger.info(F'Incoming request [{self.request_id}] Success')
        return func(*args, **kwargs)
    def login_required(self, on_failure=None):
        '''Decorator to enforce a valid login.
        Optional on_failure is a function that will be invoked if there was no valid user detected.
        Useful for redirecting to some login page'''
        def wrapper(view_func):
            @wraps(view_func)
            def decorated(*args, **kwargs):
                # Escape hatch for local development / testing.
                try:
                    if os.environ['DISABLE_AUTHENTICATION_AND_ASSUME_AUTHENTICATED_USER'].lower() == 'yes':
                        return self._wrap_async_call(view_func, *args, **kwargs)
                except KeyError: # i.e. the environment variable was not set
                    pass
                request_object = self._find_request_based_on_web_framework(request, args, kwargs)
                self.request_id = self.get_request_id(request_object)
                all_info = self._get_all_info_from_request(request_object)
                if all_info is None:
                    if self.verbose > 0:
                        self.extend_last_error(F"No information about user found in {str(self.get_claim_search_precedence())}")
                    logger.warning(self.get_last_error())
                    return self._return_formatter_wf(\
                            ('No valid authentication found: %s' % self.get_last_error()), 401)
                # NOTE(review): on_failure is checked AFTER the all_info-is-None
                # branch has already returned, i.e. on the success path -- this
                # looks inverted compared to group_required; confirm intent.
                if on_failure:
                    return self._return_formatter_wf(on_failure(self.get_last_error()), 401)
                return self._wrap_async_call(view_func, *args, **kwargs)
            return decorated
        return wrapper
def _determine_number_of_required_matches(self, match, req_group_list):
'''determine the number of requi`example.py`red matches from parameters'''
# How many matches do we need?
required_matches = None
if match == 'all':
required_matches = len(req_group_list)
if match == 'one':
required_matches = 1
if isinstance (match, int):
required_matches = match
if required_matches > len(req_group_list):
required_matches = len(req_group_list)
if self.verbose > 1:
logger.info(' required matches: {}'.format(required_matches))
return required_matches
    def _get_entitlements_from_claim(self, all_info, claim):
        '''extract groups / entitlements from given claim (in userinfo or access_token).
        Searches the locations listed in claim_search_precedence, first hit
        wins.  Returns (entries, None) on success or (None, error_message)
        when the claim is missing; a scalar claim value is wrapped in a list.'''
        # search group / entitlement entries in specified claim (in userinfo or access_token)
        for location in self.claim_search_precedence:
            avail_group_entries = None
            if location == "userinfo":
                avail_group_entries = all_info.get(claim)
            if location == "access_token":
                avail_group_entries = all_info['body'].get(claim)
            if avail_group_entries is not None:
                break
        if avail_group_entries is None:
            self.set_last_error('Not authorised (claim does not exist: "%s")' % claim)
            if self.verbose:
                logger.warning('Claim does not exist: "%s".' % claim)
                logger.debug(json.dumps(all_info, sort_keys=True, indent=4, separators=(',', ': ')))
            return (None, self.get_last_error())
        if not isinstance(avail_group_entries, list):
            # Tolerate a single scalar entry: warn, then wrap it in a list.
            self.set_last_error('Not authorised (claim does not point to a list: "%s")' % avail_group_entries)
            if self.verbose:
                logger.debug('Claim does not point to a list: "%s".' % avail_group_entries)
                logger.debug(json.dumps(all_info, sort_keys=True, indent=4, separators=(',', ': ')))
            avail_group_entries = [avail_group_entries]
        return (avail_group_entries, None)
    def group_required(self, group=None, claim=None, on_failure=None, match='all'):
        '''Decorator to enforce membership in a given group.
        group is the name (or list) of the group to match
        match specifies how many of the given groups must be matched. Valid values for match are
        'all', 'one', or an integer
        on_failure is a function that will be invoked if there was no valid user detected.
        Useful for redirecting to some login page'''
        def wrapper(view_func):
            @wraps(view_func)
            def decorated(*args, **kwargs):
                # Escape hatch for local development / testing.
                try:
                    if os.environ['DISABLE_AUTHENTICATION_AND_ASSUME_VALID_GROUPS'].lower() == 'yes':
                        return self._wrap_async_call(view_func, *args, **kwargs)
                except KeyError: # i.e. the environment variable was not set
                    pass
                user_message = 'Not enough required group memberships found.'
                request_object = self._find_request_based_on_web_framework(request, args, kwargs)
                self.request_id = self.get_request_id(request_object)
                all_info = self._get_all_info_from_request(request_object)
                if all_info is None:
                    if on_failure:
                        return self._return_formatter_wf(on_failure(self.get_last_error()), 401)
                    return self._return_formatter_wf('No valid authentication found. %s' % self.get_last_error(), 401)
                req_group_list = ensure_is_list (group)
                required_matches = self._determine_number_of_required_matches(match, req_group_list)
                if not required_matches:
                    logger.error('Error interpreting the "match" parameter')
                    return self._return_formatter_wf('Error interpreting the "match" parameter', 403)
                if self.verbose>1:
                    logger.debug(json.dumps(all_info, sort_keys=True, indent=4, separators=(',', ': ')))
                # copy entries from incoming claim
                (avail_group_entries, user_message) = self._get_entitlements_from_claim(all_info, claim)
                # Environment override for development / testing.
                override_group_entries = check_environment_for_override('DISABLE_AUTHENTICATION_AND_ASSUME_GROUPS')
                if override_group_entries is not None:
                    avail_group_entries = override_group_entries
                if not avail_group_entries:
                    return self._return_formatter_wf(user_message, 403)
                # now we do the actual checking
                # NOTE(review): duplicate entries in either list inflate the
                # count -- presumably both lists are duplicate-free; confirm.
                matches_found = 0
                for entry in avail_group_entries:
                    for g in req_group_list:
                        if entry == g:
                            matches_found += 1
                if self.verbose > 0:
                    logger.info('found %d of %d matches' % (matches_found, required_matches))
                if self.verbose > 1:
                    logger.info(F'Available Groups: {str(avail_group_entries)}')
                    logger.info(F'Required Groups: {str(req_group_list)}')
                if matches_found >= required_matches:
                    return self._wrap_async_call(view_func, *args, **kwargs)
                user_message = 'You are not authorised'
                # Either we returned above or there was no matching group
                if on_failure:
                    return self._return_formatter_wf(on_failure(user_message), 403)
                return self._return_formatter_wf(user_message+ self.get_last_error(), 403)
            return decorated
        return wrapper
def aarc_g002_entitlement_required(self, entitlement=None, claim=None, on_failure=None, match='all'):
'''Decorator to enforce membership in a given group defined according to AARC-G002.
entitlement is the name (or list) of the entitlement to match
match specifies how many of the given groups must be matched. Valid values for match are
'all', 'one', or an integer
on_failure is a function that will be invoked if there was no valid user detected.
Useful for redirecting to some login page'''
return self.aarc_g002_group_required(entitlement, claim, on_failure, match)
    def aarc_g002_group_required(self, group=None, claim=None, on_failure=None, match='all'):
        '''Decorator to enforce membership in a given group defined according to AARC-G002.
        group is the name (or list) of the entitlement to match
        match specifies how many of the given groups must be matched. Valid values for match are
        'all', 'one', or an integer
        on_failure is a function that will be invoked if there was no valid user detected.
        Useful for redirecting to some login page'''
        # rename for clarity, don't use group below
        entitlement=group
        del(group)
        def wrapper(view_func):
            @wraps(view_func)
            def decorated(*args, **kwargs):
                # Escape hatch for local development / testing.
                try:
                    if os.environ['DISABLE_AUTHENTICATION_AND_ASSUME_AUTHENTICATED_USER'].lower() == 'yes':
                        return self._wrap_async_call(view_func, *args, **kwargs)
                except KeyError: # i.e. the environment variable was not set
                    pass
                user_message = 'Not enough required entitlements found.'
                request_object = self._find_request_based_on_web_framework(request, args, kwargs)
                self.request_id = self.get_request_id(request_object)
                all_info = self._get_all_info_from_request(request_object)
                if all_info is None:
                    if on_failure:
                        return self._return_formatter_wf(on_failure(self.get_last_error()), 401)
                    return self._return_formatter_wf('No valid authentication found. %s' % self.get_last_error(), 401)
                req_entitlement_list = ensure_is_list (entitlement)
                required_matches = self._determine_number_of_required_matches(match, req_entitlement_list)
                if not required_matches:
                    logger.error('Error interpreting the "match" parameter')
                    return self._return_formatter_wf('Error interpreting the "match" parameter', 403)
                if self.verbose>1:
                    logger.debug(json.dumps(all_info, sort_keys=True, indent=4, separators=(',', ': ')))
                # copy entries from incoming claim
                (avail_entitlement_entries, user_message) = self._get_entitlements_from_claim(all_info, claim)
                # Environment override for development / testing.
                override_entitlement_entries = check_environment_for_override('DISABLE_AUTHENTICATION_AND_ASSUME_ENTITLEMENTS')
                if override_entitlement_entries is not None:
                    avail_entitlement_entries = override_entitlement_entries
                if not avail_entitlement_entries:
                    return self._return_formatter_wf(user_message, 403)
                if self.verbose > 1:
                    logger.info(F'Available Entitlements: {str(avail_entitlement_entries)}')
                    logger.info(F'Required Entitlements: {str(req_entitlement_list)}')
                # generate entitlement objects from input strings
                def e_expander(es):
                    """Helper function to catch exceptions in list comprehension"""
                    try:
                        return Aarc_g002_entitlement(es, strict=False)
                    except ValueError:
                        return None
                    except Aarc_g002_entitlement_ParseError:
                        return None
                    except Aarc_g002_entitlement_Error:
                        return None
                # Unparsable entries are silently dropped (e_expander -> None).
                try:
                    avail_entitlements = [ e_expander(es) for es in avail_entitlement_entries if e_expander(es) is not None]
                except ValueError as e:
                    logger.error (F"Failed to parse available entitlements: {e}")
                    logger.error (F" available entitlement_entries: {avail_entitlement_entries}")
                try:
                    req_entitlements = [ e_expander(es) for es in req_entitlement_list if e_expander(es) is not None]
                except ValueError as e:
                    logger.error (F"Failed to parse required entitlement(s): {e}")
                    logger.error (F" required entitlement_list: {req_entitlement_list}")
                if self.verbose > 1:
                    logger.info(F'Available Entitlements: {formatted_entitlements(avail_entitlements)}')
                    logger.info(F'Required Entitlements: {formatted_entitlements(req_entitlements)}')
                # now we do the actual checking: a required entitlement matches
                # when it is contained in any available one (AARC-G002 rules).
                matches_found = 0
                for required in req_entitlements:
                    for avail in avail_entitlements:
                        if required.is_contained_in(avail):
                            matches_found += 1
                if self.verbose > 0:
                    logger.info('found %d of %d matches' % (matches_found, required_matches))
                if matches_found >= required_matches:
                    return self._wrap_async_call(view_func, *args, **kwargs)
                user_message = 'You are not authorised'
                # Either we returned above or there was no matching entitlement
                if on_failure:
                    return self._return_formatter_wf(on_failure(user_message ), 403)
                return self._return_formatter_wf(user_message , 403)
            return decorated
        return wrapper
|
history.py | #
# This file is:
# Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# MIT License
#
from . import utils
from . import gui
from oregano import WalletStorage, Wallet
from oregano.address import Address, PublicKey
from oregano.util import timestamp_to_datetime, PrintError, profiler
from oregano.i18n import _, language
from oregano.transaction import Transaction
import time, math, sys, os
from collections import namedtuple
from .uikit_bindings import *
from .custom_objc import *
# One immutable row of the history table: the raw tx data plus pre-formatted
# display strings (amount, balance, date, fiat values) and the status image.
HistoryEntry = namedtuple("HistoryEntry", "tx tx_hash status_str label v_str balance_str date ts conf status value fiat_amount fiat_balance fiat_amount_str fiat_balance_str ccy status_image")
#######################################################################
# HELPER STUFF EXPORTED TO OTHER MODULES ('Addresses' uses these too) #
#######################################################################
StatusImages = [ # Indexed by 'status' from tx info and/or HistoryEntry
    # .retain() bumps the ObjC reference count so these UIImage instances
    # stay alive for the lifetime of this module-level cache.
    UIImage.imageNamed_("warning.png").retain(),
    UIImage.imageNamed_("warning.png").retain(),
    UIImage.imageNamed_("unconfirmed.png").retain(),
    UIImage.imageNamed_("unconfirmed.png").retain(),
    UIImage.imageNamed_("clock1.png").retain(),
    UIImage.imageNamed_("clock2.png").retain(),
    UIImage.imageNamed_("clock3.png").retain(),
    UIImage.imageNamed_("clock4.png").retain(),
    UIImage.imageNamed_("clock5.png").retain(),
    UIImage.imageNamed_("grnchk.png").retain(),
    UIImage.imageNamed_("signed.png").retain(),
    UIImage.imageNamed_("unsigned.png").retain(),
]
class _HistoryListProxy:
    '''
    This class mimics a python list, and may be returned from HistoryMgr.doReloadForKey
    (in some cases an actual list is returned from that method, though).
    This class implements on-demand creation of HistoryEntry entries as they are accessed.
    The rationale behind this class's mechanism is as a performance speedup since iOS allows us
    to load tableView cells on-demand anyway, so there's no sense in pre-populating all the history
    data ahead of time on app gui.refresh_all(), as it may never get seen on-screen anyway.
    '''

    def __init__(self, statusImagesOverride=None, forceNoFX=False):
        self.statusImagesOverride = statusImagesOverride
        self.forceNoFX = forceNoFX
        self.hentries = dict()  # index in self.hitems -> HistoryEntry named tuple (lazy cache)
        self.hitems = list()    # list of tuples as returned by wallet.get_history()
        self.txids = dict()     # map of txid (tx_hash) -> index

    def set_hitems(self, hitems):
        ''' Replace the backing raw-history list; invalidates every cached entry. '''
        self.hentries = dict()
        self.txids = dict()
        self.hitems = hitems if hitems else list()

    def get_by_txid(self, txid):
        ''' Return the HistoryEntry for txid, materializing it on demand.
        Raises KeyError if txid is not in the backing list. '''
        index = self.txids.get(txid)
        if index is None:
            # slow path: scan the raw items (hitem[0] is the tx hash)
            for i, hitem in enumerate(self.hitems):
                if hitem[0] == txid:
                    index = i
                    self.txids[txid] = index
                    break
        if index is not None:
            return self[index]
        else:
            raise KeyError('cannot find txid %s in hitems'%txid)

    def __len__(self):
        return len(self.hitems)

    def __bool__(self):
        return bool(self.hitems)

    def __iter__(self):
        # Lazy iterator: each step goes through __getitem__, so entries are
        # built (and cached) only as they are visited.
        class Iter:
            def __init__(self, hlp):
                self.hlp = hlp
                self.i = 0
            def __next__(self):
                if self.i < len(self.hlp):
                    self.i += 1
                    return self.hlp[self.i-1]
                else:
                    raise StopIteration
        return Iter(self)

    def __getitem__(self, index):
        ''' list-style indexing with negative-index support; builds and caches
        the HistoryEntry for this slot on first access. '''
        if index < 0:
            index += len(self)
        if index >= len(self) or index < 0:
            raise IndexError('_HistoryListProxy index out of range')
        he = self.hentries.get(index)
        if he is None:
            hitem = self.hitems[index]
            he = _HistoryListProxy._build_history_entry(hitem, self.statusImagesOverride, self.forceNoFX) # populate the HistoryEntry if it was missing
            self.hentries[index] = he # save HistoryEntry
            self.txids[he.tx_hash] = index # remember the index for this txid
        return he

    def __setitem__(self, index, val):
        if index < 0:
            index += len(self)
        if index >= len(self) or index < 0:
            raise IndexError('_HistoryListProxy index out of range')
        if not isinstance(val, HistoryEntry):
            raise ValueError('Can only add objects of type HistoryEntry to this list')
        self.hentries[index] = val
        self.txids[val.tx_hash] = index # remember the index for this txid
        return val

    def append(self, hentry):
        ''' Not really supported (see NSLog below); appends a placeholder raw
        item and caches hentry at the new last slot. '''
        self.hitems.append(None) # null hitem is ok here, because if hentry exists for a particular index, hitems are ignored anyway
        # BUGFIX: index off the full backing list, not the (possibly sparse)
        # lazy cache dict -- len(self.hentries) is wrong whenever some entries
        # have not been materialized yet.
        ln = len(self.hitems)
        self[ln-1] = hentry
        NSLog("_HistoryListProxy.append() called -- this isn't really supported. FIXME!")

    def __contains__(self, hentry):
        # membership is by tx_hash; caches the index (and entry) when found by scan
        if not isinstance(hentry, HistoryEntry):
            return False
        index = self.txids.get(hentry.tx_hash)
        if index is not None:
            return True
        for i, hitem in enumerate(self.hitems):
            if hitem[0] == hentry.tx_hash:
                self.txids[hentry.tx_hash] = i
                if i not in self.hentries: self.hentries[i] = hentry
                return True
        return False

    @staticmethod
    def _build_history_entry(h_item, statusImagesOverride, forceNoFX):
        ''' Build one HistoryEntry from a raw wallet.get_history() tuple.
        Returns None if the wallet/daemon went away. '''
        # use the override only if it covers at least as many statuses as the default set
        sImages = StatusImages if not statusImagesOverride or len(statusImagesOverride) < len(StatusImages) else statusImagesOverride
        parent = gui.ElectrumGui.gui
        wallet = parent.wallet
        daemon = parent.daemon
        if wallet is None or daemon is None:
            utils.NSLog("buid_history_entry: wallet and/or daemon was None, returning early")
            return None
        fx = daemon.fx if daemon.fx and daemon.fx.show_history() else None
        ccy = ''
        tx_hash, height, conf, timestamp, value, balance = h_item
        status, status_str = wallet.get_tx_status(tx_hash, height, conf, timestamp)
        has_invoice = wallet.invoices.paid.get(tx_hash)
        v_str = parent.format_amount(value, True, whitespaces=True)
        balance_str = parent.format_amount(balance, whitespaces=True)
        label = wallet.get_label(tx_hash)
        # unconfirmed txns get "now" as their display date/sort key
        date = timestamp_to_datetime(time.time() if conf <= 0 else timestamp)
        ts = timestamp if conf > 0 else time.time()
        fiat_amount = 0
        fiat_balance = 0
        fiat_amount_str = ''
        fiat_balance_str = ''
        if fx: fx.history_used_spot = False
        if not forceNoFX and fx:
            if not ccy:
                ccy = fx.get_currency()
            try:
                hdate = timestamp_to_datetime(time.time() if conf <= 0 else timestamp)
                hamount = fx.historical_value(value, hdate)
                htext = fx.historical_value_str(value, hdate) if hamount else ''
                fiat_amount = hamount if hamount else fiat_amount
                fiat_amount_str = htext if htext else fiat_amount_str
                hamount = fx.historical_value(balance, hdate) if balance else 0
                htext = fx.historical_value_str(balance, hdate) if hamount else ''
                fiat_balance = hamount if hamount else fiat_balance
                fiat_balance_str = htext if htext else fiat_balance_str
            except:
                # fiat is best-effort; fall back to empty strings on any failure
                utils.NSLog("Exception in get_history computing fiat amounts!\n%s",str(sys.exc_info()[1]))
                fiat_amount = fiat_balance = 0
                fiat_amount_str = fiat_balance_str = ''
        if status >= 0 and status < len(sImages):
            img = sImages[status]
        else:
            img = None
        tx = wallet.transactions.get(tx_hash, None)
        if tx is not None and tx.raw:
            # NB: save a copy of the tx in this hentry, because it may get
            # deserialized later, and if we were to deserialize the tx that's
            # in the wallet dict, we'd eat memory.
            tx = Transaction(tx.raw)
        entry = HistoryEntry(tx, tx_hash, status_str, label, v_str, balance_str, date, ts, conf, status, value, fiat_amount, fiat_balance, fiat_amount_str, fiat_balance_str, ccy, img)
        return entry
def get_history(domain : list = None, statusImagesOverride : list = None, forceNoFX : bool = False) -> object:
    ''' For a given set of addresses (or None for all addresses), build and
    return a list-like _HistoryListProxy whose HistoryEntry items are
    materialized lazily on access. '''
    result = _HistoryListProxy(statusImagesOverride, forceNoFX)
    parent = gui.ElectrumGui.gui
    if parent.wallet is None or parent.daemon is None:
        utils.NSLog("get_history: wallet and/or daemon was None, returning early")
        return result
    raw = parent.wallet.get_history(domain)
    raw.reverse()  # newest first
    result.set_hitems(raw)
    return result
from . import txdetail
from . import contacts
from typing import Any
class HistoryMgr(utils.DataMgr):
    '''DataMgr-backed cache of history lists, keyed by a flexible "domain":
    None or [Address] (wallet history), a ContactsEntry (contact history),
    a bare Address, or a str holding either an address or a tx hash.'''
    def doReloadForKey(self, key : Any) -> Any:
        # Cache-miss callback: (re)build the history data for *key*.
        t0 = time.time()
        hist = list()
        unk = False   # set when the key type/value is unrecognized
        duped = ''    # log marker when we recursed via self.get() (result cached under another key)
        if isinstance(key, (type(None), list)):
            # the common case, 'None' or [Address]
            hist = get_history(domain = key)
        # contacts entries
        elif isinstance(key, contacts.ContactsEntry):
            hist = get_contact_history(key.address)
        elif isinstance(key, Address):
            # support for list-less single Address.. call self again with the proper format
            hist = self.get([key])
            duped = ' (duped) '
        elif isinstance(key, str):
            # support for string addresses or tx_hashes.. detect which and act accordingly
            if Address.is_valid(key):
                hist = self.get([Address.from_string(key)]) # recursively call self with proper list data type, which will end up calling get_history (it's ok -- this is to cache results uniformly!)
                duped = ' (duped) '
            elif gui.ElectrumGui.gui.wallet and gui.ElectrumGui.gui.wallet.transactions.get(key, None):
                # key is a tx hash known to the wallet: pull the single entry out of the full history
                fullHist = self.get(None) # recursively call self to get a full history (will be cached so it's ok!)
                try:
                    hentry = fullHist.get_by_txid(key)
                    hist.append(hentry)
                except KeyError:
                    pass
            else:
                unk = True
        else:
            unk = True
        dstr = str(key) if not isinstance(key, contacts.ContactsEntry) else '[ContactsEntry: ' + key.address_str + ']'
        if unk:
            utils.NSLog("HistoryMgr: failed to retrieve any data for unknown domain=%s, returning empty list",dstr[:80])
        else:
            time_taken = time.time()-t0
            utils.NSLog("HistoryMgr: refresh %d entries for domain=%s in %f ms%s (hist result type=%s)", len(hist), dstr[:80],time_taken*1e3,duped,''.join(list(str(type(hist)))[-19:-2]))
            gui.ElectrumGui.gui.refresh_cost('history', time_taken)
        return hist
import threading
class ContactsHistorySynchronizer(utils.PySig):
    '''Background worker that scans the wallet history for transactions that
    touch known contacts and persists per-contact history into wallet storage
    under keys of the form 'contact_history_<address>'. Emits itself (PySig)
    whenever the stored data changes. Woken by the parent's sigHistory /
    sigContacts signals; throttled so it never runs more often than its own
    last run cost.'''
    def __init__(self, parent): # parent must be a gui.ElectrumGui object
        super().__init__()
        self.parent = parent
        self.eventFlag = threading.Event()  # "work requested" wakeup flag
        self.stopFlag = threading.Event()   # "shut down" flag for the worker thread
        #self.lock = threading.RLock()
        self.thread = None

    def __del__(self):
        self.stop()
        super().__del__()

    def wallet_name(self):
        # Basename of the wallet file, for log messages only.
        ret = '<None>'
        if self.parent.wallet and self.parent.wallet.storage:
            ret = os.path.split(self.parent.wallet.storage.path)[1]
        return ret

    def _synchronizer(self):
        ''' Worker-thread main loop: wait for eventFlag, throttle, scan, persist. '''
        self.print_error("Started (wallet=%s)..." % self.wallet_name())
        last_seen_len, announce, last_cost, last_ts = 0, False, 1.0, 0.0
        while not self.stopFlag.is_set():
            if self.eventFlag.wait():
                if self.stopFlag.is_set():
                    break
                self.print_error("Woke Up...")
                # check that we aren't running too often...
                diff_t = time.time() - last_ts
                if diff_t > 0 and diff_t < last_cost:
                    diff_t = last_cost - diff_t # transform to "time left"
                    # sleep diff_t seconds since we were running too often
                    self.print_error(f"Throttled; sleeping {diff_t:1.3f} seconds...")
                    if self.stopFlag.wait(diff_t):
                        # we got a stop signal while sleeping, break out of loop
                        self.print_error("stopped while throttling")
                        break
                #/
                wallet = self.parent.wallet
                storage = wallet.storage if wallet else None
                full = None
                # only scan when the wallet is fully synched, otherwise results would be partial
                if wallet and storage and wallet.is_up_to_date() and wallet.synchronizer and wallet.synchronizer.is_up_to_date():
                    t0 = time.time()
                    if True: #with self.lock:
                        full = self._synch_full(wallet)
                    if full is None:
                        # early return, callee detected stop signal
                        self.print_error("stop caught in inner function")
                        continue
                    # write only the per-contact dicts that actually changed
                    for addrstr, d in full.items():
                        k = 'contact_history_%s' % (addrstr)
                        if storage.get(k) != d:
                            storage.put(k, d)
                            announce = True
                            self.print_error('Wrote %s...' % k)
                    last_cost = time.time() - t0
                full_len = len(full) if full else 0
                if announce or last_seen_len != full_len:
                    self.print_error("Contact history updated, announcing...")
                    self.emit()
                    announce = False
                else:
                    self.print_error("No new contact history.")
                last_seen_len = full_len
                last_ts = time.time()
                self.eventFlag.clear()
        self.print_error("Stopping! (wallet=%s)" % self.wallet_name())

    def _notify_needs_synch(self):
        # Signal handler: wake the worker (idempotent).
        if not self.eventFlag.is_set():
            self.eventFlag.set()

    def restart(self):
        self.stop()
        self.start()

    def start(self):
        ''' Create/start the worker thread and hook the wakeup signals. '''
        if not self.thread:
            self.thread = threading.Thread(name='ContactsHistorySynchronizer', target=self._synchronizer)
            if not self.thread.is_alive():
                self.stopFlag.clear()
                self.thread.start()
                self.parent.sigHistory.connect(self._notify_needs_synch)
                self.parent.sigContacts.connect(self._notify_needs_synch)

    def stop(self):
        ''' Unhook signals, signal the worker to exit, and join it. '''
        if self.thread and self.thread.is_alive():
            self.parent.sigHistory.disconnect(self._notify_needs_synch)
            self.parent.sigContacts.disconnect(self._notify_needs_synch)
            self.stopFlag.set()
            self.eventFlag.set()  # also wake it if it's blocked on eventFlag.wait()
            self.thread.join()
            self.thread = None
            self.stopFlag.clear()
            self.eventFlag.clear()

    def get_history(self, address : Address) -> list:
        ''' Read the persisted history for one contact address from storage,
        newest first. Returns [] when nothing is stored. '''
        ret = list()
        if isinstance(address, str) and Address.is_valid(address):
            address = Address.from_string(address)
        storage = self.parent.wallet.storage if self.parent.wallet else None
        if storage:
            addrstr = address.to_storage_string()
            k = 'contact_history_%s' % (addrstr)
            hdict = storage.get(k)
            if hdict:
                ret = list(hdict.values())
                ret.sort(key=lambda x: x[1], reverse=True)  # sort by height, descending
        return ret

    def delete_history(self, address : Address) -> None:
        ''' Remove the persisted history for one contact address. '''
        if isinstance(address, str) and Address.is_valid(address):
            address = Address.from_string(address)
        storage = self.parent.wallet.storage if self.parent.wallet else None
        if storage:
            if True:#with self.lock:
                addrstr = address.to_storage_string()
                k = 'contact_history_%s' % (addrstr)
                storage.put(k, None)
                self.print_error("Deleted %s" % addrstr)

    @profiler
    def _synch_full(self, wallet):
        ''' Scan the whole wallet history for txns whose inputs/outputs touch a
        contact address. Returns {addr_storage_string: {tx_hash: hitem}}, or
        None if a stop was requested mid-scan. '''
        c = self._get_contacts(wallet)
        if not c:
            # short-circuit abort function early if no contacts exist.
            return dict()
        h = self._get_history(wallet)
        seen = dict() # Address -> dict of tx_hash_str -> hitem tuple
        for hitem in h:
            if self.stopFlag.is_set():
                # early return, another thread requested a stop
                return None
            # loop through ALL the history and see if relevant tx's exist for contacts we care about
            tx_hash = hitem[0]
            tx = wallet.transactions.get(tx_hash)
            if tx and tx.raw:
                tx = Transaction(tx.raw) # take a copy
                ins = tx.inputs() # implicit deserialize
                for x in ins:
                    xa = x['address']
                    if isinstance(xa, PublicKey):
                        xa = xa.toAddress()
                    if isinstance(xa, Address) and xa in c:
                        dct = seen.get(xa, dict())
                        wasEmpty = not dct
                        if tx_hash not in dct:
                            dct[tx_hash] = hitem
                        if wasEmpty: seen[xa] = dct
                outs = tx.outputs()
                for x in outs:
                    typ, xa, dummy = x
                    if isinstance(xa, Address) and xa in c:
                        dct = seen.get(xa, dict())
                        wasEmpty = not dct
                        if tx_hash not in dct:
                            dct[tx_hash] = hitem
                        if wasEmpty: seen[xa] = dct
        # re-key by storage string so the result can be written to wallet storage directly
        storable = dict()
        for addr, d in seen.items():
            addrstr = addr.to_storage_string()
            storable[addrstr] = d
        return storable

    def _get_contacts(self, wallet):
        # Map contact Address -> ContactsEntry for fast membership tests above.
        conts = contacts.get_contacts(wallet=wallet, sort=False)
        ret = dict()
        for c in conts:
            ret[c.address] = c
        return ret

    def _get_history(self, wallet):
        h = wallet.get_history(None, reverse=True)
        return h
def get_contact_history(address : Address) -> list:
    ''' Return a list-like _HistoryListProxy wrapping the persisted contact
    history for *address*. '''
    proxy = _HistoryListProxy()
    proxy.set_hitems(gui.ElectrumGui.gui.contactHistSync.get_history(address))
    return proxy
def delete_contact_history(address : Address) -> None:
    ''' Remove the persisted contact history for *address* from wallet storage. '''
    gui.ElectrumGui.gui.contactHistSync.delete_history(address)
_tx_cell_height = 76.0 # TxHistoryCell height in points
_date_width = None  # cached width (points) of the date column; read lazily from the nib's constraint in cellForRow
_is_ipad = utils.is_ipad()  # cached device flag; affects compact-mode layout below
class TxHistoryHelper(TxHistoryHelperBase):
    '''ObjC-bridged table view dataSource/delegate that renders a tx-history
    list using TxHistoryCell nibs. Its domain (address list) is stashed via
    nspy_put and fetched with _GetDomain/_GetTxs; refreshes are driven by the
    app-wide sigHistory signal.'''
    haveShowMoreTxs = objc_property()  # True when compact mode truncated the list (shows footer)

    @objc_method
    def dealloc(self) -> None:
        #cleanup code here
        print("TxHistoryHelper dealloc")
        gui.ElectrumGui.gui.sigHistory.disconnect(self)
        self.haveShowMoreTxs = None
        utils.nspy_pop(self) # clear 'txs' python dict
        send_super(__class__, self, 'dealloc')

    @objc_method
    def miscSetup(self) -> None:
        # Register the cell nib, attach a refresh control, and reload on history signals.
        nib = UINib.nibWithNibName_bundle_("TxHistoryCell", None)
        self.tv.registerNib_forCellReuseIdentifier_(nib, "TxHistoryCell")
        self.tv.refreshControl = gui.ElectrumGui.gui.helper.createAndBindRefreshControl()
        def gotRefresh() -> None:
            if self.tv:
                if self.tv.refreshControl: self.tv.refreshControl.endRefreshing()
                self.tv.reloadData()
        gui.ElectrumGui.gui.sigHistory.connect(gotRefresh, self)

    @objc_method
    def numberOfSectionsInTableView_(self, tableView) -> int:
        return 1

    @objc_method
    def tableView_numberOfRowsInSection_(self, tableView, section : int) -> int:
        # Compact mode shows only as many rows as fit on-screen (min 1) and
        # flags haveShowMoreTxs when truncated; full mode shows everything.
        h = _GetTxs(self)
        rows = 0
        self.haveShowMoreTxs = False
        len_h = len(h) if h else 0
        if not self.compactMode:
            rows = len_h
        else:
            rows = max(math.floor(tableView.bounds.size.height / _tx_cell_height), 1)
            rows = min(rows, len_h)
            self.haveShowMoreTxs = len_h > rows
        return rows

    @objc_method
    def tableView_viewForFooterInSection_(self, tv, section : int) -> ObjCInstance:
        # When truncated, load the "Show All Transactions" footer from the nib
        # and wire its gesture recognizer; otherwise return an empty view.
        if self.haveShowMoreTxs:
            v = None
            objs = NSBundle.mainBundle.loadNibNamed_owner_options_("WalletsMisc",None,None)
            for o in objs:
                if not v and isinstance(o,UIView) and o.tag == 3000:
                    v = o
                    l = v.viewWithTag_(1)
                    if l: l.text = _("Show All Transactions")
            for o in objs:
                if isinstance(o, UIGestureRecognizer) and o.view and v \
                        and o.view.ptr.value == v.ptr.value:
                    o.addTarget_action_(self, SEL(b'onSeeAllTxs:'))
            return v
        return UIView.alloc().initWithFrame_(CGRectMake(0,0,0,0)).autorelease()

    @objc_method
    def onSeeAllTxs_(self, gr : ObjCInstance) -> None:
        if gr.view.hasAnimations:
            print("onSeeAllTxs: animation already active, ignoring spurious second tap....")
            return
        def seeAllTxs() -> None:
            # Push a new viewcontroller that contains just a tableview.. we create another instance of this
            # class to manage the tableview and set it up properly. This should be fast as we are sharing tx history
            # data with the child instance via our "nspy_put" mechanism.
            vc = UIViewController.new().autorelease()
            vc.title = _("All Transactions")
            bb = UIBarButtonItem.new().autorelease()
            bb.title = _("Back")
            vc.navigationItem.backBarButtonItem = bb
            vc.view = UITableView.alloc().initWithFrame_style_(self.vc.view.frame, UITableViewStylePlain).autorelease()
            vc.view.separatorInset = UIEdgeInsetsZero
            helper = NewTxHistoryHelper(tv = vc.view, vc = self.vc, domain = _GetDomain(self))
            self.vc.navigationController.pushViewController_animated_(vc, True)
        # brief link-color tap animation, then push the full list
        gr.view.viewWithTag_(1).textColorAnimationFromColor_toColor_duration_reverses_completion_(
            utils.uicolor_custom('link'), utils.uicolor_custom('linktapped'), 0.2, True, seeAllTxs
        )

    @objc_method
    def tableView_heightForFooterInSection_(self, tv, section : int) -> float:
        # room for the "Show All Transactions" footer only in compact mode
        if self.compactMode:
            return 50.0
        return 0.0

    @objc_method
    def emptyCell_(self, tableView : ObjCInstance) -> ObjCInstance:
        # Placeholder cell used when there is no history (or a row is missing).
        identifier = "Cell"
        cell = tableView.dequeueReusableCellWithIdentifier_(identifier)
        if cell is None:
            cell = UITableViewCell.alloc().initWithStyle_reuseIdentifier_(UITableViewCellStyleSubtitle, identifier).autorelease()
        cell.textLabel.text = _("No transactions")
        cell.textLabel.textColor = utils.uicolor_custom('dark')
        cell.detailTextLabel.text = _("No transactions were found on the blockchain.")
        cell.detailTextLabel.font = UIFont.italicSystemFontOfSize_(12.0)
        cell.detailTextLabel.textColor = utils.uicolor_custom('light')
        return cell

    @objc_method
    def tableView_cellForRowAtIndexPath_(self, tableView, indexPath) -> ObjCInstance:
        h = _GetTxs(self)
        if not h or indexPath.row >= len(h):
            return self.emptyCell_(tableView)
        identifier = "TxHistoryCell"
        cell = tableView.dequeueReusableCellWithIdentifier_(identifier)
        global _date_width
        if _date_width is None:
            _date_width = cell.dateWidthCS.constant  # remember the nib's designed date width
        #HistoryEntry = tx tx_hash status_str label v_str balance_str date ts conf status value fiat_amount fiat_balance fiat_amount_str fiat_balance_str ccy status_image
        entry = h[indexPath.row]
        if entry is None:
            return self.emptyCell_(tableView)
        ff = '' #str(entry.date)
        if entry.conf and entry.conf > 0 and entry.conf < 6:
            ff = "%s %s"%(entry.conf, _('confirmations'))
        cell.amountTit.setText_withKerning_(_("Amount"), utils._kern)
        cell.balanceTit.setText_withKerning_(_("Balance"), utils._kern)
        cell.statusTit.setText_withKerning_(_("Status"), utils._kern)
        amtStr = utils.stripAmount(entry.v_str)
        balStr = utils.stripAmount(entry.balance_str)
        if (self.compactMode and not _is_ipad) or (not entry.fiat_amount_str and not entry.fiat_balance_str):
            # plain single-line amounts (no fiat, or not enough room on phone compact mode)
            if cell.amount.numberOfLines != 1:
                cell.amount.numberOfLines = 1
                cell.balance.numberOfLines = 1
            if cell.dateWidthCS.constant != _date_width:
                cell.dateWidthCS.constant = _date_width
            cell.amount.text = amtStr
            cell.balance.text = balStr
        else:
            # begin experimental fiat history rates zone
            cell.amount.numberOfLines = 0
            cell.balance.numberOfLines = 0
            cell.dateWidthCS.constant = _date_width
            s1 = ns_from_py(amtStr).sizeWithAttributes_({NSFontAttributeName:utils._f1})
            s2 = ns_from_py(balStr).sizeWithAttributes_({NSFontAttributeName:utils._f1})
            def adjustCS() -> None:
                if _is_ipad:
                    pass
                else:
                    cell.dateWidthCS.constant = _date_width - 24.0
            cell.amount.attributedText = utils.hackyFiatAmtAttrStr(amtStr,utils.stripAmount(entry.fiat_amount_str),entry.ccy,s2.width-s1.width,utils.uicolor_custom('light'),adjustCS,utils._kern*1.25, isIpad=_is_ipad)
            cell.balance.attributedText = utils.hackyFiatAmtAttrStr(balStr,utils.stripAmount(entry.fiat_balance_str),entry.ccy,s1.width-s2.width,utils.uicolor_custom('light'),adjustCS,utils._kern*1.25, isIpad=_is_ipad)
            # end experimental zone...
        cell.desc.setText_withKerning_(entry.label.strip() if isinstance(entry.label, str) else '', utils._kern)
        cell.icon.image = UIImage.imageNamed_("tx_send.png") if entry.value and entry.value < 0 else UIImage.imageNamed_("tx_recv.png")
        if entry.conf > 0:
            cell.date.attributedText = utils.makeFancyDateAttrString(entry.status_str.strip())
        else:
            cell.date.text = entry.status_str.strip()
        cell.status.text = ff #if entry.conf < 6 else ""
        cell.statusIcon.image = entry.status_image
        return cell

    @objc_method
    def tableView_heightForRowAtIndexPath_(self, tv : ObjCInstance, indexPath : ObjCInstance) -> float:
        # normal cell height, except the single "No transactions" placeholder row
        return _tx_cell_height if indexPath.row > 0 or _GetTxs(self) else 44.0

    @objc_method
    def tableView_didSelectRowAtIndexPath_(self, tv, indexPath):
        # Row tap: resolve the entry's Transaction and push the tx-detail screen.
        tv.deselectRowAtIndexPath_animated_(indexPath,True)
        parent = gui.ElectrumGui.gui
        if parent.wallet is None:
            return
        if not self.vc:
            utils.NSLog("TxHistoryHelper: No self.vc defined, cannot proceed to tx detail screen")
            return
        tx = None
        try:
            entry = _GetTxs(self)[indexPath.row]
            if entry.tx:
                tx = entry.tx
            else:
                tx = parent.wallet.transactions.get(entry.tx_hash, None)
                if tx and tx.raw: tx = Transaction(tx.raw)
        except:
            return
        if tx is None:
            # I'm not sure why this would happen but we did get issue #810 where it happened to 1 user.
            # Perhaps a chain split led to an "old" history view on-screen. That's my theory, at least. -Calin
            parent.show_error(_("The requested transaction has dropped out of the wallet history.\n\nIf this problem persists, please contact us at ergon.moe."),
                              title = _("Transaction Not Found"),
                              onOk = lambda: parent.refresh_components('history'))
            return
        txd = txdetail.CreateTxDetailWithEntry(entry,tx=tx)
        self.vc.navigationController.pushViewController_animated_(txd, True)
class TxHistoryHelperWithHeader(TxHistoryHelper):
    '''TxHistoryHelper variant that adds a "Transaction History" section header
    loaded from the TableHeaders nib (view tag 10000, label tag 1).'''
    @objc_method
    def tableView_viewForHeaderInSection_(self, tv : ObjCInstance, section : int) -> ObjCInstance:
        objs = NSBundle.mainBundle.loadNibNamed_owner_options_("TableHeaders", None, None)
        for o in objs:
            if isinstance(o, UIView) and o.tag == 10000:
                label = o.viewWithTag_(1)
                if label: label.text = _("Transaction History")
                return o
        # fallback: empty view if the nib didn't contain the expected header
        return UIView.alloc().initWithFrame_(CGRectMake(0.0,0.0,0.0,0.0)).autorelease()
    @objc_method
    def tableView_heightForHeaderInSection_(self, tv : ObjCInstance, section : int) -> float:
        return 28.0
def NewTxHistoryHelper(tv : ObjCInstance, vc : ObjCInstance, domain : list = None, noRefreshControl = False, cls : ObjCClass = None) -> ObjCInstance:
    ''' Create a TxHistoryHelper (or subclass *cls*), bind it as the table
    view's dataSource+delegate, stash *domain* on it, and tie its ObjC
    lifetime to the table view via an associated-object reference. '''
    if not cls:
        cls = TxHistoryHelper
    helper = cls.new().autorelease()
    # if the table view already had one of our helpers, detach it first
    if tv.delegate and tv.dataSource and tv.delegate == tv.dataSource and isinstance(tv.delegate, TxHistoryHelper):
        TxHistoryHelperDissociate(tv.delegate)
    tv.dataSource = helper
    tv.delegate = helper
    helper.tv = tv
    helper.vc = vc
    # optimization to share the same history data with the new helper class we just created for the full mode view
    # .. hopefully this will keep the UI peppy and responsive!
    if domain is not None:
        utils.nspy_put_byname(helper, domain, 'domain')
    helper.miscSetup()
    if noRefreshControl: helper.tv.refreshControl = None
    # associate helper with the table view (0x301 = OBJC_ASSOCIATION_RETAIN
    # per the ObjC runtime constants) so the autoreleased helper stays alive
    # as long as the table view does
    from rubicon.objc.runtime import libobjc
    libobjc.objc_setAssociatedObject(tv.ptr, helper.ptr, helper.ptr, 0x301)
    return helper
def TxHistoryHelperDissociate(helper):
    ''' Undo NewTxHistoryHelper: unbind the helper from its table view and
    clear the associated-object reference (which releases the helper). '''
    if helper and helper.tv:
        if helper.tv.dataSource: helper.tv.dataSource = None
        if helper.tv.delegate: helper.tv.delegate = None
        helper.vc = None
        # below clears object association -- will auto-release the helper as a side-effect
        from rubicon.objc.runtime import libobjc
        theTV = helper.tv
        helper.tv = None
        if libobjc.objc_getAssociatedObject(theTV.ptr, helper.ptr).value == helper.ptr.value:
            libobjc.objc_setAssociatedObject(theTV.ptr, helper.ptr, None, 0x301)
# this should be a method of TxHistoryHelper but it returns a python object, so it has to be a standalone global function
def _GetTxs(txsHelper : object) -> list:
    ''' Fetch the (possibly cached) history list for the helper's domain. '''
    if not txsHelper:
        raise ValueError('GetTxs: Need to specify a TxHistoryHelper instance')
    domain = _GetDomain(txsHelper)
    return gui.ElectrumGui.gui.sigHistory.get(domain)
def _GetDomain(txsHelper : object) -> list:
    ''' Retrieve the address-domain list stashed on the helper via nspy
    (None means "all addresses"). '''
    if txsHelper:
        return utils.nspy_get_byname(txsHelper, 'domain')
    raise ValueError('GetDomain: Need to specify a TxHistoryHelper instance')
def Find(tx_hash_or_address : str) -> HistoryEntry:
    ''' Look up a single HistoryEntry by tx hash or address string.
    Returns None for non-str input or when nothing is found. '''
    if not isinstance(tx_hash_or_address, str):
        return None
    entries = gui.ElectrumGui.gui.sigHistory.get(tx_hash_or_address)
    return entries[0] if entries and len(entries) else None
|
proxy.py | #!/usr/bin/env python3
import base64
import copy
import datetime
import json
import math
import re
import socket
import threading
from collections import namedtuple
from itertools import count
from urllib.parse import urlparse, ParseResult, parse_qs, urlencode
from subprocess import Popen, PIPE
from http import cookies as hcookies
from PyQt5.QtCore import QThread, QObject, pyqtSlot
class MessageError(Exception):
    '''Error in a proxy protocol message (raise sites not visible in this chunk).'''
    pass
class ProxyException(Exception):
    '''General proxy-side failure (raise sites not visible in this chunk).'''
    pass
class InvalidQuery(Exception):
    '''Raised when a search/filter query fails validation (see RequestContext).'''
    pass
class SocketClosed(Exception):
    '''Raised by SockBuffer when the underlying socket hits EOF or an OS error.'''
    pass
class SockBuffer:
    '''Buffered line-oriented reader/writer over a raw socket.

    Accumulates received byte chunks so readline() can hand back one
    newline-terminated line at a time, keeping leftover bytes for the
    next call.
    '''
    def __init__(self, sock):
        self.buf = []         # list of received bytes chunks not yet consumed
        self.s = sock
        self.closed = False   # set once close() has run

    def close(self):
        '''Shut down and close the socket; always marks the buffer closed.'''
        try:
            self.s.shutdown(socket.SHUT_RDWR)
            self.s.close()
        except OSError:
            # already closed
            pass
        finally:
            self.closed = True

    def _check_newline(self):
        # True when a complete line is already buffered.
        # BUGFIX: chunks are bytes, so search for b'\n' -- the original
        # compared against the str '\n', which never matches bytes.
        for chunk in self.buf:
            if b'\n' in chunk:
                return True
        return False

    def readline(self):
        '''Return the next line as str (newline stripped).
        Raises SocketClosed if the socket errors or hits EOF first.'''
        # BUGFIX: consume a line already sitting in the buffer before calling
        # recv() again -- the original always recv()'d first, blocking even
        # when a previous read had left a complete line in self.buf.
        while not self._check_newline():
            try:
                data = self.s.recv(256)
            except OSError:
                raise SocketClosed()
            if not data:
                raise SocketClosed()
            self.buf.append(data)
        # Combine chunks, split off one line, keep the remainder buffered
        allbytes = b''.join(self.buf)
        head, tail = allbytes.split(b'\n', 1)
        self.buf = [tail]
        return head.decode()

    def send(self, data):
        '''Send all of *data*; raise SocketClosed on any socket error.'''
        try:
            # BUGFIX: sendall() -- plain send() may transmit only a prefix.
            self.s.sendall(data)
        except OSError:
            raise SocketClosed()
class ProxyThread(QThread):
    '''QThread wrapper that runs target(*args) and tracks all live instances.

    Every instance registers itself in the class-level ``threads`` dict so
    waitall() can join everything; a slot built by clean_thread() removes the
    entry when the Qt "finished" signal fires.
    '''
    threads = {}        # tid -> ProxyThread for every registered thread
    tiditer = count()   # monotonically increasing thread-id source

    def __init__(self, target=None, args=tuple()):
        # BUGFIX: removed dead "global mainWidg" declaration -- mainWidg is
        # never defined or referenced in this module.
        QThread.__init__(self)
        self.f = target
        self.args = args
        self.tid = next(ProxyThread.tiditer)
        ProxyThread.threads[self.tid] = self
        # deregister from .threads once the thread finishes
        self.finished.connect(clean_thread(self.tid))

    def run(self):
        # QThread entry point: invoke the wrapped callable
        self.f(*self.args)

    def wait(self):
        QThread.wait(self)

    @classmethod
    def waitall(cls):
        '''Block until every registered thread has finished.'''
        # snapshot first: the finished-signal slot mutates cls.threads
        ts = [(tid, thread) for tid, thread in cls.threads.items()]
        for tid, thread in ts:
            thread.wait()
def clean_thread(tid):
    '''Build a Qt slot that drops thread *tid* from ProxyThread's registry.'''
    @pyqtSlot()
    def _remove():
        ProxyThread.threads.pop(tid)
    return _remove
class Headers:
    '''Case-insensitive multimap of HTTP headers that preserves each header's
    original capitalization and insertion order per name.

    Internally keyed by the lowercased name; each value is a list of
    (original-case name, value) pairs.
    '''
    def __init__(self, headers=None):
        self.headers = {}
        if headers is None:
            return
        if isinstance(headers, Headers):
            # copy-construct from another Headers instance
            pair_iter = ((k, v) for pairs in headers.headers.values() for k, v in pairs)
        else:
            # dict of name -> list of values
            pair_iter = ((k, v) for k, vs in headers.items() for v in vs)
        for name, value in pair_iter:
            self.add(name, value)

    def __contains__(self, hd):
        # keys are stored lowercased, so a direct lookup suffices
        return hd.lower() in self.headers

    def add(self, k, v):
        '''Append value *v* under name *k* (keeps any existing values).'''
        self.headers.setdefault(k.lower(), []).append((k, v))

    def set(self, k, v):
        '''Replace all values for name *k* with the single value *v*.'''
        self.headers[k.lower()] = [(k, v)]

    def get(self, k):
        '''Return the first value for *k*; raises KeyError if absent.'''
        return self.headers[k.lower()][0][1]

    def delete(self, k):
        '''Remove all values for *k*; silently ignores a missing name.'''
        self.headers.pop(k.lower(), None)

    def pairs(self, key=None):
        '''Yield (original-case name, value) pairs, optionally filtered by name.'''
        want = key.lower() if key is not None else None
        for kvs in self.headers.values():
            for k, v in kvs:
                if want is None or k.lower() == want:
                    yield (k, v)

    def dict(self):
        '''Return {original-case name: [values]} (distinct casings stay distinct).'''
        retdict = {}
        for k, v in self.pairs():
            retdict.setdefault(k, []).append(v)
        return retdict
class RequestContext:
    '''Tracks the client's current query (a list of phrases), validating every
    mutation through the client before applying it.'''
    def __init__(self, client, query=None):
        self.client = client
        # note: an initial query supplied here is NOT validated
        self._current_query = query if query is not None else []

    def _validate(self, query):
        # Delegates to the client, which raises on an invalid query
        self.client.validate_query(query)

    def set_query(self, query):
        '''Replace the whole query (validated first).'''
        self._validate(query)
        self._current_query = query

    def apply_phrase(self, phrase):
        '''Append one phrase (validated as a standalone query first).'''
        self._validate([phrase])
        self._current_query.append(phrase)

    def pop_phrase(self):
        '''Drop the most recent phrase; no-op on an empty query.'''
        if self._current_query:
            self._current_query.pop()

    def apply_filter(self, filt):
        '''Append a single-filter phrase (validated first).'''
        self._validate([[filt]])
        self._current_query.append([filt])

    @property
    def query(self):
        # deep copy so callers cannot mutate our state behind our back
        return copy.deepcopy(self._current_query)
class URL:
    '''Mutable wrapper around a parsed URL with query-parameter helpers.

    URL(None) yields an empty URL with path "/".
    '''
    def __init__(self, url):
        # BUGFIX: only call urlparse when url is not None. The original parsed
        # unconditionally before the None check, so URL(None) raised and the
        # fallback branch below was unreachable.
        if url is not None:
            parsed = urlparse(url)
            self.scheme = parsed.scheme
            self.netloc = parsed.netloc
            self.path = parsed.path
            self.params = parsed.params
            self.query = parsed.query
            self.fragment = parsed.fragment
        else:
            self.scheme = ""
            self.netloc = ""
            self.path = "/"
            self.params = ""
            self.query = ""
            self.fragment = ""

    def geturl(self, include_params=True):
        '''Reassemble the URL; include_params=False drops params/query/fragment.'''
        params = self.params
        query = self.query
        fragment = self.fragment
        if not include_params:
            params = ""
            query = ""
            fragment = ""
        r = ParseResult(scheme=self.scheme,
                        netloc=self.netloc,
                        path=self.path,
                        params=params,
                        query=query,
                        fragment=fragment)
        return r.geturl()

    def parameters(self):
        '''Return the query string as {key: [values]}; {} on parse failure.'''
        try:
            return parse_qs(self.query, keep_blank_values=True)
        except Exception:
            return {}

    def param_iter(self):
        '''Yield every (key, value) query pair, one per repeated value.'''
        for k, vs in self.parameters().items():
            for v in vs:
                yield k, v

    # BUGFIX (all mutators below): urlencode needs doseq=True because
    # parameters() returns lists of values -- without it the Python list repr
    # itself was encoded (e.g. "x=%5B%271%27...%5D"), corrupting every
    # multi-valued parameter.
    def set_param(self, key, val):
        '''Replace all values of *key* with the single value *val*.'''
        params = self.parameters()
        params[key] = [val]
        self.query = urlencode(params, doseq=True)

    def add_param(self, key, val):
        '''Append an additional value for *key*.'''
        params = self.parameters()
        if key in params:
            params[key].append(val)
        else:
            params[key] = [val]
        self.query = urlencode(params, doseq=True)

    def del_param(self, key):
        '''Remove *key* entirely; raises KeyError if absent.'''
        params = self.parameters()
        del params[key]
        self.query = urlencode(params, doseq=True)

    def set_params(self, params):
        '''Replace the whole query string from a mapping.'''
        self.query = urlencode(params, doseq=True)
class InterceptMacro:
    """
    A class representing a macro that modifies requests as they pass through the
    proxy
    """
    def __init__(self):
        self.name = ''
        # opt-in flags: which message kinds this macro wants to intercept
        self.intercept_requests = False
        self.intercept_responses = False
        self.intercept_ws = False

    def __repr__(self):
        return "<InterceptingMacro (%s)>" % self.name

    # The mangle_* hooks below are identity transforms by default;
    # concrete macros override only the ones they care about.

    def mangle_request(self, request):
        '''Hook: transform an outgoing request; default passes it through.'''
        return request

    def mangle_response(self, request, response):
        '''Hook: transform a response; default passes it through.'''
        return response

    def mangle_websocket(self, request, response, message):
        '''Hook: transform a websocket message; default passes it through.'''
        return message
class HTTPRequest:
def __init__(self, method="GET", path="/", proto_major=1, proto_minor=1,
headers=None, body=bytes(), dest_host="", dest_port=80,
use_tls=False, time_start=None, time_end=None, db_id="",
tags=None, headers_only=False, storage_id=0):
# http info
self.method = method
self.url = URL(path)
self.proto_major = proto_major
self.proto_minor = proto_minor
self.headers = Headers(headers)
self.headers_only = headers_only
self._body = bytes()
if not headers_only:
self.body = body
# metadata
self.dest_host = dest_host
self.dest_port = dest_port
self.use_tls = use_tls
self.time_start = time_start
self.time_end = time_end
self.response = None
self.unmangled = None
self.ws_messages = []
self.db_id = db_id
self.storage_id = storage_id
if tags is not None:
self.tags = set(tags)
else:
self.tags = set()
@property
def body(self):
return self._body
@body.setter
def body(self, bs):
self.headers_only = False
if type(bs) is str:
self._body = bs.encode()
elif type(bs) is bytes:
self._body = bs
else:
raise Exception("invalid body type: {}".format(type(bs)))
self.headers.set("Content-Length", str(len(self._body)))
@property
def content_length(self):
if 'content-length' in self.headers:
return int(self.headers.get('content-length'))
return len(self.body)
def status_line(self):
sline = "{method} {path} HTTP/{proto_major}.{proto_minor}".format(
method=self.method, path=self.url.geturl(), proto_major=self.proto_major,
proto_minor=self.proto_minor).encode()
return sline
def headers_section(self):
message = self.status_line() + b"\r\n"
for k, v in self.headers.pairs():
message += "{}: {}\r\n".format(k, v).encode()
return message
def full_message(self):
message = self.headers_section()
message += b"\r\n"
message += self.body
return message
def parameters(self):
try:
return parse_qs(self.body.decode(), keep_blank_values=True)
except Exception:
return {}
def param_iter(self, ignore_content_type=False):
if not ignore_content_type:
if "content-type" not in self.headers:
return
if "www-form-urlencoded" not in self.headers.get("content-type").lower():
return
for k, vs in self.parameters().items():
for v in vs:
yield k, v
def set_param(self, key, val):
params = self.parameters()
params[key] = [val]
self.body = urlencode(params)
def add_param(self, key, val):
params = self.parameters()
if key in params:
params[key].append(val)
else:
params[key] = [val]
self.body = urlencode(params)
def del_param(self, key):
params = self.parameters()
del params[key]
self.body = urlencode(params)
def set_params(self, params):
self.body = urlencode(params)
def cookies(self):
try:
cookie = hcookies.BaseCookie()
cookie.load(self.headers.get("cookie"))
return cookie
except Exception as e:
return hcookies.BaseCookie()
def cookie_iter(self):
c = self.cookies()
for k in c:
yield k, c[k].value
def set_cookie(self, key, val):
c = self.cookies()
c[key] = val
self.set_cookies(c)
def del_cookie(self, key):
c = self.cookies()
del c[key]
self.set_cookies(c)
    def set_cookies(self, c):
        """Replace the request's Cookie header from c.

        c may be a BaseCookie, another HTTPRequest (its Cookie header is
        copied verbatim), or a plain dict of name -> value.  When the
        result is empty the Cookie header is removed entirely.
        """
        if isinstance(c, hcookies.BaseCookie):
            # it's a basecookie
            cookie_pairs = []
            for k in c:
                cookie_pairs.append('{}={}'.format(k, c[k].value))
            header_str = '; '.join(cookie_pairs)
        elif isinstance(c, HTTPRequest):
            # it's a request we should copy cookies from
            try:
                header_str = c.headers.get("Cookie")
            except KeyError:
                header_str = ""
        else:
            # it's a dictionary
            cookie_pairs = []
            for k, v in c.items():
                cookie_pairs.append('{}={}'.format(k, v))
            header_str = '; '.join(cookie_pairs)
        # no cookies at all -> drop the header rather than send an empty one
        if header_str == '':
            try:
                self.headers.delete("Cookie")
            except KeyError:
                pass
        else:
            self.headers.set("Cookie", header_str)
    def add_cookies(self, c):
        """Merge cookies from c into the request's existing cookies.

        c may be a BaseCookie, an HTTPRequest, an HTTPResponse (its
        Set-Cookie values), or a plain dict of name -> value.  Existing
        cookies with the same name are overwritten.
        """
        new_cookies = self.cookies()
        if isinstance(c, hcookies.BaseCookie):
            for k in c:
                new_cookies[k] = c[k].value
        elif isinstance(c, HTTPRequest):
            for k, v in c.cookie_iter():
                new_cookies[k] = v
        elif isinstance(c, HTTPResponse):
            for k, v in c.cookie_iter():
                new_cookies[k] = v
        else:
            for k, v in c.items():
                new_cookies[k] = v
        self.set_cookies(new_cookies)
    def full_url(self):
        """Absolute URL for this request (scheme://host[:port]/path...)."""
        return get_full_url(self)
    def copy(self):
        """Duplicate of this request; tags are deep-copied, while db ids,
        response, and unmangled history are not carried over.

        NOTE(review): passes self.headers (a Headers object) where
        HTTPResponse.copy passes self.headers.headers -- confirm the
        constructor accepts both shapes before relying on this.
        """
        return HTTPRequest(
            method=self.method,
            path=self.url.geturl(),
            proto_major=self.proto_major,
            proto_minor=self.proto_minor,
            headers=self.headers,
            body=self.body,
            dest_host=self.dest_host,
            dest_port=self.dest_port,
            use_tls=self.use_tls,
            tags=copy.deepcopy(self.tags),
            headers_only=self.headers_only,
        )
class HTTPResponse:
    """An HTTP response as exchanged with the proxy daemon.

    `headers` may be a dict mapping header names to lists of values.
    The Content-Length header is kept in sync whenever the body is
    assigned through the `body` property.
    """
    def __init__(self, status_code=200, reason="OK", proto_major=1, proto_minor=1,
                 headers=None, body=bytes(), db_id="", headers_only=False, storage_id=0):
        self.status_code = status_code
        self.reason = reason
        self.proto_major = proto_major
        self.proto_minor = proto_minor
        self.headers = Headers()
        if headers is not None:
            for k, vs in headers.items():
                for v in vs:
                    self.headers.add(k, v)
        self.headers_only = headers_only
        self._body = bytes()
        if not headers_only:
            self.body = body  # property setter also sets Content-Length
        self.unmangled = None
        self.db_id = db_id
        self.storage = storage_id
    @property
    def body(self):
        return self._body
    @body.setter
    def body(self, bs):
        """Assign the body (str or bytes) and sync Content-Length."""
        self.headers_only = False
        if type(bs) is str:
            self._body = bs.encode()
        elif type(bs) is bytes:
            self._body = bs
        else:
            raise Exception("invalid body type: {}".format(type(bs)))
        self.headers.set("Content-Length", str(len(self._body)))
    @property
    def content_length(self):
        """Declared Content-Length if present, otherwise the body length."""
        if 'content-length' in self.headers:
            return int(self.headers.get('content-length'))
        return len(self.body)
    def status_line(self):
        """Status line as bytes, e.g. b"HTTP/1.1 200 OK"."""
        sline = "HTTP/{proto_major}.{proto_minor} {status_code} {reason}".format(
            proto_major=self.proto_major, proto_minor=self.proto_minor,
            status_code=self.status_code, reason=self.reason).encode()
        return sline
    def headers_section(self):
        """Status line plus all header lines, CRLF-terminated."""
        message = self.status_line() + b"\r\n"
        for k, v in self.headers.pairs():
            message += "{}: {}\r\n".format(k, v).encode()
        return message
    def full_message(self):
        """Complete serialized response: headers, blank line, body."""
        message = self.headers_section()
        message += b"\r\n"
        message += self.body
        return message
    def cookies(self):
        """All Set-Cookie headers folded into a BaseCookie (empty on error)."""
        try:
            cookie = hcookies.BaseCookie()
            for _, v in self.headers.pairs('set-cookie'):
                cookie.load(v)
            return cookie
        except Exception:
            return hcookies.BaseCookie()
    def cookie_iter(self):
        """Iterate (name, value) pairs for every cookie set by the response."""
        c = self.cookies()
        for k in c:
            yield k, c[k].value
    def set_cookie(self, key, val):
        """Set one cookie, keeping all other Set-Cookie headers."""
        c = self.cookies()
        c[key] = val
        self.set_cookies(c)
    def del_cookie(self, key):
        """Remove one cookie from the response's Set-Cookie headers."""
        c = self.cookies()
        del c[key]
        self.set_cookies(c)
    def set_cookies(self, c):
        """Replace every Set-Cookie header from c (a BaseCookie or a plain
        name -> value dict)."""
        self.headers.delete("set-cookie")
        if isinstance(c, hcookies.BaseCookie):
            cookies = c
        else:
            cookies = hcookies.BaseCookie()
            for k, v in c.items():
                cookies[k] = v
        # BUG FIX: iterate the normalized BaseCookie rather than the raw
        # input; with a plain dict the values are strings, which have no
        # .OutputString() and crashed here
        for _, morsel in cookies.items():
            self.headers.add("Set-Cookie", morsel.OutputString())
    def copy(self):
        """Duplicate of this response (unmangled/db_id not carried over)."""
        return HTTPResponse(
            status_code=self.status_code,
            reason=self.reason,
            proto_major=self.proto_major,
            proto_minor=self.proto_minor,
            headers=self.headers.headers,
            body=self.body,
            headers_only=self.headers_only,
        )
class WSMessage:
    """A single websocket frame passing through the proxy."""
    def __init__(self, is_binary=True, message=bytes(), to_server=True,
                 timestamp=None, db_id="", storage_id=0):
        self.is_binary = is_binary
        self.message = message
        self.to_server = to_server
        # use the Unix epoch as a placeholder when no timestamp was recorded
        self.timestamp = timestamp or datetime.datetime(1970, 1, 1)
        self.unmangled = None
        self.db_id = db_id
        self.storage = storage_id
    def copy(self):
        """Fresh WSMessage carrying only the payload fields; timestamp and
        db bookkeeping (db_id, storage, unmangled) are not carried over."""
        duplicate = WSMessage(
            is_binary=self.is_binary,
            message=self.message,
            to_server=self.to_server,
        )
        return duplicate
# Lightweight records returned by the messaging commands below.
ScopeResult = namedtuple("ScopeResult", ["is_custom", "filter"])
# (listener id, listen address) for a proxy listener
ListenerResult = namedtuple("ListenerResult", ["lid", "addr"])
# PEM-encoded key/certificate pair generated by the proxy
GenPemCertsResult = namedtuple("GenPemCertsResult", ["key_pem", "cert_pem"])
# a named filter query stored in a storage backend
SavedQuery = namedtuple("SavedQuery", ["name", "query"])
# (id, description) of a storage backend known to the proxy
SavedStorage = namedtuple("SavedStorage", ["storage_id", "description"])
def messagingFunction(func):
    """Decorator for ProxyConnection methods that exchange messages.

    Rejects the call while another message is interactively using the
    connection or after the connection is closed, and serializes all
    messaging through the connection's lock.
    """
    # local import keeps the file's top-level import block untouched
    import functools

    # BUG FIX: without wraps() every decorated command lost its
    # __name__/__doc__, which breaks introspection and debugging
    @functools.wraps(func)
    def f(self, *args, **kwargs):
        if self.is_interactive:
            raise MessageError("cannot be called while other message is interactive")
        if self.closed:
            raise MessageError("connection is closed")
        with self.message_lock:
            return func(self, *args, **kwargs)
    return f
class ProxyConnection:
    """One socket connection to the proxy daemon's message port.

    Each command is a newline-terminated JSON object and receives one
    JSON reply.  Methods decorated with @messagingFunction hold
    message_lock for the duration of one exchange and refuse to run
    while an interactive command (intercept/watch_storage) owns the
    connection.
    """
    next_id = 1
    def __init__(self, kind="", addr=""):
        self.connid = ProxyConnection.next_id
        ProxyConnection.next_id += 1
        self.sbuf = None
        self.buf = bytes()
        self.parent_client = None
        self.debug = False
        self.is_interactive = False
        self.closed = True
        self.message_lock = threading.Lock()
        self.kind = None
        self.addr = None
        self.int_thread = None
        if kind.lower() == "tcp":
            tcpaddr, port = addr.rsplit(":", 1)
            self.connect_tcp(tcpaddr, int(port))
        elif kind.lower() == "unix":
            self.connect_unix(addr)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def connect_tcp(self, addr, port):
        """Connect to the daemon over TCP."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((addr, port))
        self.sbuf = SockBuffer(s)
        self.closed = False
        self.kind = "tcp"
        self.addr = "{}:{}".format(addr, port)
    def connect_unix(self, addr):
        """Connect to the daemon over a unix domain socket."""
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(addr)
        self.sbuf = SockBuffer(s)
        self.closed = False
        self.kind = "unix"
        self.addr = addr
    @property
    def maddr(self):
        """"kind:addr" string for this connection, or None if unconnected."""
        if self.kind is not None:
            return "{}:{}".format(self.kind, self.addr)
        else:
            return None
    def close(self):
        """Close the socket and detach from the parent client."""
        # BUG FIX: a connection that was never established (default
        # kind="") has sbuf=None; closing it used to raise AttributeError
        if self.sbuf is not None:
            self.sbuf.close()
        if self.parent_client is not None:
            try:
                self.parent_client.conns.remove(self)
            except KeyError:
                pass
        self.closed = True
    def read_message(self):
        """Read one JSON reply; raise MessageError if it reports failure."""
        ln = self.sbuf.readline()
        if self.debug:
            print("<({}) {}".format(self.connid, ln))
        j = json.loads(ln)
        if ("Success" in j) and (j["Success"] is False):
            if "Reason" in j:
                raise MessageError(j["Reason"])
            raise MessageError("unknown error")
        return j
    def submit_command(self, cmd):
        """Serialize cmd as one newline-terminated JSON line and send it."""
        ln = json.dumps(cmd).encode() + b"\n"
        if self.debug:
            print(">({}) {}".format(self.connid, ln.decode()[:-1]))
        self.sbuf.send(ln)
    def reqrsp_cmd(self, cmd):
        """Send cmd and return its reply."""
        self.submit_command(cmd)
        ret = self.read_message()
        if ret is None:
            raise Exception()
        return ret
    ###########
    # Commands
    @messagingFunction
    def ping(self):
        """Ping the daemon; returns its reply payload."""
        cmd = {"Command": "Ping"}
        result = self.reqrsp_cmd(cmd)
        return result["Ping"]
    @messagingFunction
    def submit(self, req, storage=0):
        """Submit req through the proxy, updating it in place with the
        response, timing, and db bookkeeping from the reply."""
        cmd = {
            "Command": "Submit",
            "Request": encode_req(req),
            "Storage": 0,
        }
        if storage is not None:
            cmd["Storage"] = storage
        result = self.reqrsp_cmd(cmd)
        if "SubmittedRequest" not in result:
            raise MessageError("no request returned")
        newreq = decode_req(result["SubmittedRequest"], storage=storage)
        req.response = newreq.response
        req.unmangled = newreq.unmangled
        req.time_start = newreq.time_start
        req.time_end = newreq.time_end
        req.db_id = newreq.db_id
        # NOTE(review): when storage is None this leaves storage_id=None
        # even though the command was sent with Storage=0 -- confirm intended
        req.storage_id = storage
    @messagingFunction
    def save_new(self, req, storage):
        """Save req as a new record in storage; returns the new db id."""
        cmd = {
            "Command": "SaveNew",
            "Request": encode_req(req),
            "Storage": storage,
        }
        result = self.reqrsp_cmd(cmd)
        req.db_id = result["DbId"]
        req.storage_id = storage
        return result["DbId"]
    def _query_storage(self, q, storage, headers_only=False, max_results=0):
        """Run a storage query and return decoded requests, filtering out
        requests that only appear as the unmangled version of another."""
        cmd = {
            "Command": "StorageQuery",
            "Query": q,
            "HeadersOnly": headers_only,
            "MaxResults": max_results,
            "Storage": storage,
        }
        result = self.reqrsp_cmd(cmd)
        reqs = []
        unmangled = set()
        for reqd in result["Results"]:
            req = decode_req(reqd, headers_only=headers_only, storage=storage)
            req.storage_id = storage
            reqs.append(req)
            if req.unmangled is not None:
                unmangled.add(req.unmangled.db_id)
        return [r for r in reqs if r.db_id not in unmangled]
    @messagingFunction
    def query_storage(self, q, storage, max_results=0, headers_only=False):
        """Query one storage backend for requests matching q."""
        return self._query_storage(q, storage, headers_only=headers_only, max_results=max_results)
    @messagingFunction
    def req_by_id(self, reqid, storage, headers_only=False):
        """Fetch a single request by db id; raise MessageError if missing."""
        results = self._query_storage([[["dbid", "is", reqid]]], storage,
                                      headers_only=headers_only, max_results=1)
        if len(results) == 0:
            raise MessageError("request with id {} does not exist".format(reqid))
        return results[0]
    @messagingFunction
    def set_scope(self, filt):
        """Set the proxy's scope to the given filter query."""
        cmd = {
            "Command": "SetScope",
            "Query": filt,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def get_scope(self):
        """Return the proxy's current scope as a ScopeResult."""
        cmd = {
            "Command": "ViewScope",
        }
        result = self.reqrsp_cmd(cmd)
        ret = ScopeResult(result["IsCustom"], result["Query"])
        return ret
    @messagingFunction
    def add_tag(self, reqid, tag, storage):
        """Add a tag to a stored request."""
        cmd = {
            "Command": "AddTag",
            "ReqId": reqid,
            "Tag": tag,
            "Storage": storage,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def remove_tag(self, reqid, tag, storage):
        """Remove a tag from a stored request."""
        cmd = {
            "Command": "RemoveTag",
            "ReqId": reqid,
            "Tag": tag,
            "Storage": storage,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def clear_tag(self, reqid, storage):
        """Remove every tag from a stored request."""
        cmd = {
            "Command": "ClearTag",
            "ReqId": reqid,
            "Storage": storage,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def all_saved_queries(self, storage):
        """Return every SavedQuery stored in the given storage."""
        cmd = {
            "Command": "AllSavedQueries",
            "Storage": storage,
        }
        results = self.reqrsp_cmd(cmd)
        queries = []
        for result in results["Queries"]:
            queries.append(SavedQuery(name=result["Name"], query=result["Query"]))
        return queries
    @messagingFunction
    def save_query(self, name, filt, storage):
        """Save a filter query under a name."""
        cmd = {
            "Command": "SaveQuery",
            "Name": name,
            "Query": filt,
            "Storage": storage,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def load_query(self, name, storage):
        """Load and return the filter query saved under name."""
        cmd = {
            "Command": "LoadQuery",
            "Name": name,
            "Storage": storage,
        }
        result = self.reqrsp_cmd(cmd)
        return result["Query"]
    @messagingFunction
    def delete_query(self, name, storage):
        """Delete the filter query saved under name."""
        cmd = {
            "Command": "DeleteQuery",
            "Name": name,
            "Storage": storage,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def add_listener(self, addr, port, transparent=False, destHost="",
                     destPort=0, destUseTLS=False):
        """Start a TCP listener on addr:port; returns the listener id."""
        laddr = "{}:{}".format(addr, port)
        cmd = {
            "Command": "AddListener",
            "Type": "tcp",
            "Addr": laddr,
            "TransparentMode": transparent,
            "DestHost": destHost,
            "DestPort": destPort,
            "DestUseTLS": destUseTLS,
        }
        result = self.reqrsp_cmd(cmd)
        lid = result["Id"]
        return lid
    @messagingFunction
    def remove_listener(self, lid):
        """Stop the listener with the given id."""
        cmd = {
            "Command": "RemoveListener",
            "Id": lid,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def get_listeners(self):
        """Return (id, addr) pairs for every active listener."""
        cmd = {
            "Command": "GetListeners",
        }
        result = self.reqrsp_cmd(cmd)
        results = []
        for r in result["Results"]:
            results.append((r["Id"], r["Addr"]))
        return results
    @messagingFunction
    def load_certificates(self, cert_file, pkey_file):
        """Load the CA certificate/key from files on the daemon's side."""
        cmd = {
            "Command": "LoadCerts",
            "KeyFile": pkey_file,
            "CertificateFile": cert_file,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def set_certificates(self, pkey_pem, cert_pem):
        """Install the given PEM-encoded CA certificate/key."""
        cmd = {
            "Command": "SetCerts",
            "KeyPEMData": pkey_pem,
            "CertificatePEMData": cert_pem,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def clear_certificates(self):
        """Clear the daemon's CA certificate/key."""
        cmd = {
            "Command": "ClearCerts",
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def generate_certificates(self, pkey_file, cert_file):
        """Generate a CA certificate/key and write them to files."""
        cmd = {
            "Command": "GenCerts",
            "KeyFile": pkey_file,
            "CertFile": cert_file,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def generate_pem_certificates(self):
        """Generate and return a PEM-encoded CA key/cert pair."""
        cmd = {
            "Command": "GenPEMCerts",
        }
        result = self.reqrsp_cmd(cmd)
        ret = GenPemCertsResult(result["KeyPEMData"], result["CertificatePEMData"])
        return ret
    @messagingFunction
    def validate_query(self, query):
        """Raise InvalidQuery if the daemon rejects the filter query."""
        cmd = {
            "Command": "ValidateQuery",
            "Query": query,
        }
        try:
            self.reqrsp_cmd(cmd)
        except MessageError as e:
            raise InvalidQuery(str(e))
    @messagingFunction
    def check_request(self, query, req=None, storage_id=-1, db_id=""):
        """Check whether a request (inline or by db id) matches query."""
        cmd = {
            "Command": "checkrequest",
            "Query": query,
        }
        if req:
            cmd["Request"] = encode_req(req)
        if db_id != "":
            cmd["DbId"] = db_id
            cmd["StorageId"] = storage_id
        result = self.reqrsp_cmd(cmd)
        return result["Result"]
    @messagingFunction
    def add_sqlite_storage(self, path, desc):
        """Attach a SQLite-backed storage; returns its storage id."""
        cmd = {
            "Command": "AddSQLiteStorage",
            "Path": path,
            "Description": desc
        }
        result = self.reqrsp_cmd(cmd)
        return result["StorageId"]
    @messagingFunction
    def add_in_memory_storage(self, desc):
        """Attach an in-memory storage; returns its storage id."""
        cmd = {
            "Command": "AddInMemoryStorage",
            "Description": desc
        }
        result = self.reqrsp_cmd(cmd)
        return result["StorageId"]
    @messagingFunction
    def close_storage(self, storage_id):
        """Detach the storage with the given id."""
        cmd = {
            "Command": "CloseStorage",
            "StorageId": storage_id,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def set_proxy_storage(self, storage_id):
        """Make the given storage the one intercepted traffic is saved to."""
        cmd = {
            "Command": "SetProxyStorage",
            "StorageId": storage_id,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def list_storage(self):
        """Return a SavedStorage for every attached storage."""
        cmd = {
            "Command": "ListStorage",
        }
        result = self.reqrsp_cmd(cmd)
        ret = []
        for ss in result["Storages"]:
            ret.append(SavedStorage(ss["Id"], ss["Description"]))
        return ret
    @messagingFunction
    def set_proxy(self, use_proxy=False, proxy_host="", proxy_port=0, use_creds=False,
                  username="", password="", is_socks=False):
        """Configure the upstream proxy the daemon should use."""
        cmd = {
            "Command": "SetProxy",
            "UseProxy": use_proxy,
            "ProxyHost": proxy_host,
            "ProxyPort": proxy_port,
            "ProxyIsSOCKS": is_socks,
            "UseCredentials": use_creds,
            "Username": username,
            "Password": password,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def intercept(self, macro):
        """Run an intercepting macro until the connection is closed.

        Marks the connection interactive, starts interception on the
        daemon, then spawns a background thread that mangles each
        incoming message via the macro (one worker thread per message)
        and sends the verdict back.
        """
        # Run an intercepting macro until closed
        # Start intercepting
        self.is_interactive = True
        cmd = {
            "Command": "Intercept",
            "InterceptRequests": macro.intercept_requests,
            "InterceptResponses": macro.intercept_responses,
            "InterceptWS": macro.intercept_ws,
        }
        try:
            self.reqrsp_cmd(cmd)
        except Exception as e:
            self.is_interactive = False
            raise e
        def run_macro():
            iditer = count()
            threads = {}
            while True:
                try:
                    msg = self.read_message()
                except MessageError as e:
                    return
                except SocketClosed:
                    return
                def mangle_and_respond(msg):
                    retCmd = None
                    if msg["Type"] == "httprequest":
                        req = decode_req(msg["Request"])
                        newReq = macro.mangle_request(req)
                        # None from the macro means "drop the message"
                        if newReq is None:
                            retCmd = {
                                "Id": msg["Id"],
                                "Dropped": True,
                            }
                        else:
                            newReq.unmangled = None
                            newReq.response = None
                            newReq.ws_messages = []
                            retCmd = {
                                "Id": msg["Id"],
                                "Dropped": False,
                                "Request": encode_req(newReq),
                            }
                    elif msg["Type"] == "httpresponse":
                        req = decode_req(msg["Request"])
                        rsp = decode_rsp(msg["Response"])
                        newRsp = macro.mangle_response(req, rsp)
                        if newRsp is None:
                            retCmd = {
                                "Id": msg["Id"],
                                "Dropped": True,
                            }
                        else:
                            newRsp.unmangled = None
                            retCmd = {
                                "Id": msg["Id"],
                                "Dropped": False,
                                "Response": encode_rsp(newRsp),
                            }
                    elif msg["Type"] == "wstoserver" or msg["Type"] == "wstoclient":
                        req = decode_req(msg["Request"])
                        rsp = decode_rsp(msg["Response"])
                        wsm = decode_ws(msg["WSMessage"])
                        newWsm = macro.mangle_websocket(req, rsp, wsm)
                        if newWsm is None:
                            retCmd = {
                                "Id": msg["Id"],
                                "Dropped": True,
                            }
                        else:
                            newWsm.unmangled = None
                            retCmd = {
                                "Id": msg["Id"],
                                "Dropped": False,
                                "WSMessage": encode_ws(newWsm),
                            }
                    else:
                        raise Exception("Unknown message type: " + msg["Type"])
                    if retCmd is not None:
                        try:
                            self.submit_command(retCmd)
                        except SocketClosed:
                            return
                tid = next(iditer)
                mangle_thread = ProxyThread(target=mangle_and_respond,
                                            args=(msg,))
                threads[tid] = mangle_thread
                mangle_thread.start()
        self.int_thread = ProxyThread(target=run_macro)
        self.int_thread.start()
    @messagingFunction
    def watch_storage(self, storage_id=-1, headers_only=True):
        """Generator yielding storage events (with decoded request,
        response, and websocket message fields) as the proxy saves them.

        NOTE(review): since this is a generator, the decorator's checks
        and lock only cover code up to the first yield -- confirm that is
        the intended locking behavior.
        """
        # Generator that generates request, response, and wsmessages as they
        # are stored by the proxy
        cmd = {
            "Command": "WatchStorage",
            "StorageId": storage_id,
            "HeadersOnly": headers_only,
        }
        try:
            self.reqrsp_cmd(cmd)
        except Exception as e:
            self.is_interactive = False
            raise e
        while True:
            msg = self.read_message()
            if msg["Request"]:
                msg["Request"] = decode_req(msg["Request"],
                                            storage=msg["StorageId"],
                                            headers_only=headers_only)
            if msg["Response"]:
                msg["Response"] = decode_rsp(msg["Response"],
                                             storage=msg["StorageId"],
                                             headers_only=headers_only)
            if msg["WSMessage"]:
                msg["WSMessage"] = decode_ws(msg["WSMessage"],
                                             storage=msg["StorageId"],
                                             headers_only=headers_only)
            yield msg
    @messagingFunction
    def set_plugin_value(self, key, value, storage_id):
        """Store a plugin key/value pair in the given storage."""
        cmd = {
            "Command": "SetPluginValue",
            "Storage": storage_id,
            "Key": key,
            "Value": value,
        }
        self.reqrsp_cmd(cmd)
    @messagingFunction
    def get_plugin_value(self, key, storage_id):
        """Fetch a plugin value by key from the given storage."""
        cmd = {
            "Command": "GetPluginValue",
            "Storage": storage_id,
            "Key": key,
        }
        result = self.reqrsp_cmd(cmd)
        return result["Value"]
ActiveStorage = namedtuple("ActiveStorage", ["type", "storage_id", "prefix"])
def _serialize_storage(stype, prefix):
return "{}|{}".format(stype, prefix)
class ProxyClient:
    """High-level client for the proxy daemon.

    Owns the primary message connection (plus any extra connections),
    tracks attached storages and their reqid prefixes, and forwards a
    whitelisted set of simple commands straight to the underlying
    ProxyConnection via __getattr__.
    """
    def __init__(self, binary=None, debug=False, conn_addr=None):
        self.binloc = binary
        self.proxy_proc = None
        self.ltype = None
        self.laddr = None
        self.debug = debug
        self.conn_addr = conn_addr
        self.conns = set()
        self.msg_conn = None # conn for single req/rsp messages
        self.context = RequestContext(self)
        self.storage_by_id = {}
        self.storage_by_prefix = {}
        self.proxy_storage = None
        self.inmem_storage = None
        # commands forwarded verbatim to self.msg_conn; commented entries
        # have storage-aware wrappers defined below
        self.reqrsp_methods = {
            "submit_command",
            # "reqrsp_cmd",
            "ping",
            # "submit",
            # "save_new",
            # "query_storage",
            # "req_by_id",
            "set_scope",
            "get_scope",
            # "add_tag",
            # "remove_tag",
            # "clear_tag",
            "all_saved_queries",
            "save_query",
            "load_query",
            "delete_query",
            "add_listener",
            "remove_listener",
            "get_listeners",
            "load_certificates",
            "set_certificates",
            "clear_certificates",
            "generate_certificates",
            "generate_pem_certificates",
            "validate_query",
            # "check_request",
            "list_storage",
            # "add_sqlite_storage",
            # "add_in_memory_storage",
            # "close_storage",
            # "set_proxy_storage",
            "set_proxy",
            # "set_plugin_value",
            # "get_plugin_value",
        }
    def __enter__(self):
        if self.conn_addr is not None:
            self.msg_connect(self.conn_addr)
        else:
            self.execute_binary(binary=self.binloc, debug=self.debug)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def __getattr__(self, name):
        # NOTE(review): raising NotImplementedError instead of
        # AttributeError makes hasattr()/copy() misbehave on this class --
        # confirm no caller relies on it before changing
        if name in self.reqrsp_methods:
            return getattr(self.msg_conn, name)
        raise NotImplementedError(name)
    @property
    def maddr(self):
        """"kind:addr" string for the message port, or None if unknown."""
        if self.ltype is not None:
            return "{}:{}".format(self.ltype, self.laddr)
        else:
            return None
    def execute_binary(self, binary=None, debug=False, listen_addr=None):
        """Launch the proxy binary and connect to the address it prints."""
        self.binloc = binary
        args = [self.binloc]
        if listen_addr is not None:
            args += ["--msglisten", listen_addr]
        else:
            args += ["--msgauto"]
        if debug:
            args += ["--dbg"]
        self.proxy_proc = Popen(args, stdout=PIPE, stderr=PIPE)
        # Wait for it to start and make connection
        listenstr = self.proxy_proc.stdout.readline().rstrip()
        self.msg_connect(listenstr.decode())
    def msg_connect(self, addr):
        """Connect the primary message connection to "kind:addr"."""
        self.ltype, self.laddr = addr.split(":", 1)
        self.msg_conn = self.new_conn()
        self._get_storage()
    def close(self):
        """Close every connection and terminate a spawned proxy process."""
        conns = list(self.conns)
        for conn in conns:
            conn.close()
        if self.proxy_proc is not None:
            self.proxy_proc.terminate()
    def new_conn(self):
        """Open and register an additional ProxyConnection."""
        conn = ProxyConnection(kind=self.ltype, addr=self.laddr)
        conn.parent_client = self
        conn.debug = self.debug
        self.conns.add(conn)
        return conn
    # functions involving storage
    def _add_storage(self, storage, prefix):
        """Index an ActiveStorage by both prefix and id."""
        self.storage_by_prefix[prefix] = storage
        self.storage_by_id[storage.storage_id] = storage
    def _clear_storage(self):
        """Forget every known storage."""
        self.storage_by_prefix = {}
        self.storage_by_id = {}
    def _get_storage(self):
        """Rebuild the storage indexes from the daemon's storage list."""
        self._clear_storage()
        storages = self.list_storage()
        for s in storages:
            stype, prefix = s.description.split("|")
            storage = ActiveStorage(stype, s.storage_id, prefix)
            self._add_storage(storage, prefix)
    def parse_reqid(self, reqid):
        """Split a prefixed reqid into (ActiveStorage, db id).

        The special prefixes `u` and `s` resolve to the unmangled version
        of the request / its response respectively.
        """
        if reqid[0].isalpha():
            prefix = reqid[0]
            realid = reqid[1:]
        else:
            prefix = ""
            realid = reqid
        # `u`, `s` are special cases for the unmangled version of req and rsp
        if prefix == 'u':
            req = self.req_by_id(realid)
            if req.unmangled is None:
                raise MessageError("request %s was not mangled" % reqid)
            ureq = req.unmangled
            return self.storage_by_id[ureq.storage_id], ureq.db_id
        elif prefix == 's':
            req = self.req_by_id(realid)
            if req.response is None:
                raise MessageError("response %s was not mangled" % reqid)
            if req.response.unmangled is None:
                raise MessageError("response %s was not mangled" % reqid)
            return self.storage_by_id[req.storage_id], req.db_id
        else:
            storage = self.storage_by_prefix[prefix]
            return storage, realid
    def storage_iter(self):
        """Iterate over every known ActiveStorage."""
        for _, s in self.storage_by_id.items():
            yield s
    def _stg_or_def(self, storage):
        """Return storage, or the default proxy storage when None."""
        if storage is None:
            return self.proxy_storage
        return storage
    def is_in_context(self, req):
        """True if req matches the current context query."""
        return self.check_request(self.context.query, req)
    def in_context_requests(self, headers_only=False, max_results=0):
        """All stored requests matching the current context query."""
        return self.query_storage(self.context.query,
                                  headers_only=headers_only,
                                  max_results=max_results)
    def in_context_requests_async(self, slot, headers_only=False, max_results=0, *args, **kwargs):
        """Async variant of in_context_requests; results are emitted on slot."""
        # BUG FIX: previously called query_storage(slot, ...) passing the
        # slot as the query; route through query_storage_async instead
        return self.query_storage_async(slot,
                                        self.context.query,
                                        headers_only=headers_only,
                                        max_results=max_results)
    def in_context_requests_iter(self, headers_only=False, max_results=0):
        """Yield fully-loaded in-context requests one at a time."""
        results = self.query_storage(self.context.query,
                                     headers_only=headers_only,
                                     max_results=max_results)
        ret = results
        if max_results > 0 and len(results) > max_results:
            ret = results[:max_results]
        for reqh in ret:
            req = self.req_by_id(reqh.db_id, storage_id=reqh.storage_id)
            yield req
    def get_reqid(self, req):
        """Prefixed reqid ("<storage prefix><db id>") for a request."""
        prefix = ""
        if req.storage_id in self.storage_by_id:
            s = self.storage_by_id[req.storage_id]
            prefix = s.prefix
        return "{}{}".format(prefix, req.db_id)
    # functions that don't just pass through to underlying conn
    def add_sqlite_storage(self, path, prefix):
        """Attach a SQLite storage under the given reqid prefix."""
        desc = _serialize_storage("sqlite", prefix)
        sid = self.msg_conn.add_sqlite_storage(path, desc)
        s = ActiveStorage(type="sqlite", storage_id=sid, prefix=prefix)
        self._add_storage(s, prefix)
        return s
    def add_in_memory_storage(self, prefix):
        """Attach an in-memory storage under the given reqid prefix."""
        desc = _serialize_storage("inmem", prefix)
        sid = self.msg_conn.add_in_memory_storage(desc)
        s = ActiveStorage(type="inmem", storage_id=sid, prefix=prefix)
        self._add_storage(s, prefix)
        return s
    def close_storage(self, storage_id):
        """Detach a storage and drop it from the indexes."""
        s = self.storage_by_id[storage_id]
        self.msg_conn.close_storage(s.storage_id)
        del self.storage_by_id[s.storage_id]
        del self.storage_by_prefix[s.prefix]
    def set_proxy_storage(self, storage_id):
        """Make storage_id the storage intercepted traffic is saved to."""
        s = self.storage_by_id[storage_id]
        self.msg_conn.set_proxy_storage(s.storage_id)
        self.proxy_storage = storage_id
    def set_storage_prefix(self, storage_id, prefix):
        """Re-key an attached storage under a new (unused) prefix."""
        if prefix in self.storage_by_prefix:
            raise Exception("prefix already exists")
        s = self.storage_by_id[storage_id]
        del self.storage_by_prefix[s.prefix]
        news = ActiveStorage(type=s.type, prefix=prefix, storage_id=s.storage_id)
        self.storage_by_prefix[news.prefix] = news
        self.storage_by_id[storage_id] = news
    def save_new(self, req, inmem=False, storage=None):
        """Save req as a new record; returns its new db id."""
        if inmem:
            storage = self.inmem_storage
        else:
            storage = self._stg_or_def(storage)
        # BUG FIX: return the DbId the connection already produces instead
        # of silently discarding it
        return self.msg_conn.save_new(req, storage=storage)
    def submit(self, req, save=False, inmem=False, storage=None):
        """Submit req through the proxy, optionally saving it."""
        if save:
            storage = self._stg_or_def(storage)
        if inmem:
            storage = self.inmem_storage
        self.msg_conn.submit(req, storage=storage)
    def query_storage(self, q, max_results=0, headers_only=False, storage=None, conn=None):
        """Query one storage (or all of them) and return requests sorted
        newest-first by start time."""
        results = []
        conn = conn or self.msg_conn
        if storage is None:
            for s in self.storage_iter():
                results += conn.query_storage(q, max_results=max_results,
                                              headers_only=headers_only,
                                              storage=s.storage_id)
        else:
            results += conn.query_storage(q, max_results=max_results,
                                          headers_only=headers_only,
                                          storage=storage)
        def kfunc(req):
            # requests without timing sort as the epoch (oldest)
            if req.time_start is None:
                return datetime.datetime.utcfromtimestamp(0)
            return req.time_start
        results.sort(key=kfunc)
        results = [r for r in reversed(results)]
        return results
    def query_storage_async(self, slot, *args, **kwargs):
        """Run query_storage on a fresh connection in a background thread,
        emitting the results on slot."""
        def perform_query():
            try:
                with self.new_conn() as c:
                    r = self.query_storage(*args, conn=c, **kwargs)
                slot.emit(r)
            except Exception:
                pass
        ProxyThread(target=perform_query).start()
    def req_by_id(self, reqid, storage_id=None, headers_only=False):
        """Fetch a request by prefixed reqid (or raw db id + storage_id)."""
        if storage_id is None:
            storage, db_id = self.parse_reqid(reqid)
            storage_id = storage.storage_id
        else:
            db_id = reqid
        retreq = self.msg_conn.req_by_id(db_id, headers_only=headers_only,
                                         storage=storage_id)
        # NOTE(review): this prefix check also fires when an explicit
        # storage_id was given and the raw db id happens to start with 's'
        # -- confirm db ids can never be alphabetic before relying on it
        if reqid[0] == 's': # `u` is handled by parse_reqid
            retreq.response = retreq.response.unmangled
        return retreq
    def check_request(self, query, req=None, reqid=""):
        """Check whether a request (given directly or by reqid) matches
        the filter query."""
        if req is not None:
            return self.msg_conn.check_request(query, req=req)
        if reqid:
            storage, db_id = self.parse_reqid(reqid)
            storage_id = storage.storage_id
            return self.msg_conn.check_request(query, storage_id=storage_id, db_id=db_id)
        # BUG FIX: previously unreachable -- both branches above returned
        raise Exception("check_request requires either a request or reqid")
    # for these and submit, might need storage stored on the request itself
    def add_tag(self, reqid, tag, storage=None):
        """Add a tag to a stored request."""
        self.msg_conn.add_tag(reqid, tag, storage=self._stg_or_def(storage))
    def remove_tag(self, reqid, tag, storage=None):
        """Remove a tag from a stored request."""
        self.msg_conn.remove_tag(reqid, tag, storage=self._stg_or_def(storage))
    def clear_tag(self, reqid, storage=None):
        """Remove every tag from a stored request."""
        self.msg_conn.clear_tag(reqid, storage=self._stg_or_def(storage))
    def all_saved_queries(self, storage=None):
        """Return the saved queries in the given (or default) storage."""
        # BUG FIX: previously passed storage=None unconditionally and
        # dropped the return value
        return self.msg_conn.all_saved_queries(storage=self._stg_or_def(storage))
    def save_query(self, name, filt, storage=None):
        """Save a filter query under a name."""
        self.msg_conn.save_query(name, filt, storage=self._stg_or_def(storage))
    def load_query(self, name, storage=None):
        """Load and return the filter query saved under name."""
        # BUG FIX: previously dropped the loaded query
        return self.msg_conn.load_query(name, storage=self._stg_or_def(storage))
    def delete_query(self, name, storage=None):
        """Delete the filter query saved under name."""
        self.msg_conn.delete_query(name, storage=self._stg_or_def(storage))
    def set_plugin_value(self, key, value, storage=None):
        """Store a plugin key/value pair."""
        self.msg_conn.set_plugin_value(key, value, self._stg_or_def(storage))
    def get_plugin_value(self, key, storage=None):
        """Fetch a plugin value by key."""
        return self.msg_conn.get_plugin_value(key, self._stg_or_def(storage))
def decode_req(result, headers_only=False, storage=0):
    """Build an HTTPRequest from a decoded JSON message dict, recursively
    decoding any unmangled request, response, and websocket messages."""
    if "StartTime" in result and result["StartTime"] > 0:
        time_start = time_from_nsecs(result["StartTime"])
    else:
        time_start = None
    if "EndTime" in result and result["EndTime"] > 0:
        time_end = time_from_nsecs(result["EndTime"])
    else:
        time_end = None
    if "DbId" in result:
        db_id = result["DbId"]
    else:
        db_id = ""
    if "Tags" in result:
        tags = result["Tags"]
    else:
        tags = ""  # NOTE(review): "" where "Tags" is otherwise a list -- confirm intended
    ret = HTTPRequest(
        method=result["Method"],
        path=result["Path"],
        proto_major=result["ProtoMajor"],
        proto_minor=result["ProtoMinor"],
        headers=copy.deepcopy(result["Headers"]),
        body=base64.b64decode(result["Body"]),
        dest_host=result["DestHost"],
        dest_port=result["DestPort"],
        use_tls=result["UseTLS"],
        time_start=time_start,
        time_end=time_end,
        tags=tags,
        headers_only=headers_only,
        db_id=db_id,
        storage_id=storage)
    if "Unmangled" in result:
        ret.unmangled = decode_req(result["Unmangled"], headers_only=headers_only, storage=storage)
    if "Response" in result:
        ret.response = decode_rsp(result["Response"], headers_only=headers_only, storage=storage)
    if "WSMessages" in result:
        for wsm in result["WSMessages"]:
            ret.ws_messages.append(decode_ws(wsm, storage=storage))
    return ret
def decode_rsp(result, headers_only=False, storage=0):
    """Build an HTTPResponse from a decoded JSON message dict, recursively
    decoding any unmangled response."""
    ret = HTTPResponse(
        status_code=result["StatusCode"],
        reason=result["Reason"],
        proto_major=result["ProtoMajor"],
        proto_minor=result["ProtoMinor"],
        headers=copy.deepcopy(result["Headers"]),
        body=base64.b64decode(result["Body"]),
        headers_only=headers_only,
        storage_id=storage,
    )
    if "Unmangled" in result:
        ret.unmangled = decode_rsp(result["Unmangled"], headers_only=headers_only, storage=storage)
    return ret
def decode_ws(result, storage=0):
    """Build a WSMessage from a decoded JSON message dict, recursively
    decoding any unmangled message."""
    timestamp = None
    db_id = ""
    if "Timestamp" in result:
        timestamp = time_from_nsecs(result["Timestamp"])
    if "DbId" in result:
        db_id = result["DbId"]
    ret = WSMessage(
        is_binary=result["IsBinary"],
        message=base64.b64decode(result["Message"]),
        to_server=result["ToServer"],
        timestamp=timestamp,
        db_id=db_id,
        storage_id=storage,
    )
    if "Unmangled" in result:
        ret.unmangled = decode_ws(result["Unmangled"], storage=storage)
    return ret
def encode_req(req, int_rsp=False):
    """Serialize an HTTPRequest into the proxy daemon's JSON message dict.

    When int_rsp is True only the fields needed for an intercept reply
    are included (timing, unmangled/response/websocket history omitted).
    """
    msg = {
        "DestHost": req.dest_host,
        "DestPort": req.dest_port,
        "UseTLS": req.use_tls,
        "Method": req.method,
        "Path": req.url.geturl(),
        "ProtoMajor": req.proto_major,
        # BUG FIX: was req.proto_major, which silently mangled the minor
        # protocol version on every serialized request
        "ProtoMinor": req.proto_minor,
        "Headers": req.headers.dict(),
        "Tags": list(req.tags),
        "Body": base64.b64encode(copy.copy(req.body)).decode(),
    }
    if not int_rsp:
        msg["StartTime"] = time_to_nsecs(req.time_start)
        msg["EndTime"] = time_to_nsecs(req.time_end)
        if req.unmangled is not None:
            msg["Unmangled"] = encode_req(req.unmangled)
        if req.response is not None:
            msg["Response"] = encode_rsp(req.response)
        msg["WSMessages"] = []
        for wsm in req.ws_messages:
            msg["WSMessages"].append(encode_ws(wsm))
    return msg
def encode_rsp(rsp, int_rsp=False):
    """Serialize an HTTPResponse into the proxy daemon's JSON message dict.

    When int_rsp is True the unmangled history is omitted.
    """
    msg = {}
    msg["ProtoMajor"] = rsp.proto_major
    msg["ProtoMinor"] = rsp.proto_minor
    msg["StatusCode"] = rsp.status_code
    msg["Reason"] = rsp.reason
    msg["Headers"] = rsp.headers.dict()
    msg["Body"] = base64.b64encode(copy.copy(rsp.body)).decode()
    if not int_rsp and rsp.unmangled is not None:
        msg["Unmangled"] = encode_rsp(rsp.unmangled)
    return msg
def encode_ws(ws, int_rsp=False):
    """Serialize a WSMessage into the proxy daemon's JSON message dict.

    When int_rsp is True the unmangled/timestamp/db fields are omitted.
    """
    msg = {}
    msg["Message"] = base64.b64encode(ws.message).decode()
    msg["IsBinary"] = ws.is_binary
    # NOTE(review): lowercase "toServer" differs from the "ToServer" key
    # read in decode_ws -- presumably tolerated by a case-insensitive JSON
    # decoder on the other end; confirm before changing
    msg["toServer"] = ws.to_server
    if not int_rsp:
        if ws.unmangled is not None:
            msg["Unmangled"] = encode_ws(ws.unmangled)
        msg["Timestamp"] = time_to_nsecs(ws.timestamp)
        msg["DbId"] = ws.db_id
    return msg
def time_from_nsecs(nsecs):
    """Convert nanoseconds since the Unix epoch to a naive UTC datetime."""
    return datetime.datetime.utcfromtimestamp(nsecs / 1000000000)
def time_to_nsecs(t):
    """Convert a naive UTC datetime to integer nanoseconds since the
    Unix epoch; None passes through as None."""
    if t is None:
        return None
    delta = t - datetime.datetime(1970, 1, 1)
    return int(math.floor(delta.total_seconds() * 1000000000))
# Parsed request / response status lines.
RequestStatusLine = namedtuple("RequestStatusLine", ["method", "path", "proto_major", "proto_minor"])
ResponseStatusLine = namedtuple("ResponseStatusLine", ["proto_major", "proto_minor", "status_code", "reason"])
def parse_req_sline(sline):
    """Parse a request status line like b"GET /path HTTP/1.1".

    A two-field line (no path) yields an empty path.
    """
    fields = sline.split(b' ')
    if len(fields) == 3:
        verb, path, version = fields
    elif len(fields) == 2:
        verb, version = fields
        path = b''
    else:
        raise Exception("malformed statusline")
    pmajor, pminor = version[5:].split(b'.', 1)  # version[5:] strips b"HTTP/"
    return RequestStatusLine(verb.decode(), path.decode(), int(pmajor), int(pminor))
def parse_rsp_sline(sline):
    """Parse a response status line like b"HTTP/1.1 200 OK".

    The reason phrase is optional; a two-field line yields an empty reason.
    """
    if len(sline.split(b' ')) > 2:
        version, status_code, reason = sline.split(b' ', 2)
    else:
        version, status_code = sline.split(b' ', 1)
        # BUG FIX: must be a bytes literal -- the str '' has no .decode()
        # and crashed on reason.decode() below
        reason = b''
    raw_version = version[5:] # strip HTTP/
    pmajor, pminor = raw_version.split(b'.', 1)
    return ResponseStatusLine(int(pmajor), int(pminor), int(status_code), reason.decode())
def _parse_message(bs, sline_parser):
    """Split raw HTTP bytes into (parsed status line, Headers, body).

    Any Content-Length from the wire is discarded and replaced with the
    actual body length so the parsed message is internally consistent.
    """
    header_env, body = re.split(br"(?:\r\n|\n)(?:\r\n|\n)", bs, maxsplit=1)
    status_line, header_bytes = re.split(b"\r?\n", header_env, maxsplit=1)
    h = Headers()
    for l in re.split(br"\r?\n", header_bytes):
        k, v = l.split(b": ", 1)
        # BUG FIX: was `k.lower != 'content-length'` -- a bound method
        # compared to a str is always truthy, so the wire Content-Length
        # was never skipped and ended up duplicated by the add() below
        if k.lower() != b'content-length':
            h.add(k.decode(), v.decode())
    h.add("Content-Length", str(len(body)))
    return (sline_parser(status_line), h, body)
def parse_request(bs, dest_host='', dest_port=80, use_tls=False):
    """Parse raw request bytes into an HTTPRequest bound for the given
    destination host/port."""
    sline, hdrs, payload = _parse_message(bs, parse_req_sline)
    return HTTPRequest(
        method=sline.method,
        path=sline.path,
        proto_major=sline.proto_major,
        proto_minor=sline.proto_minor,
        headers=hdrs.dict(),
        body=payload,
        dest_host=dest_host,
        dest_port=dest_port,
        use_tls=use_tls)
def parse_response(bs):
    """Parse raw response bytes into an HTTPResponse."""
    sline, hdrs, payload = _parse_message(bs, parse_rsp_sline)
    return HTTPResponse(
        status_code=sline.status_code,
        reason=sline.reason,
        proto_major=sline.proto_major,
        proto_minor=sline.proto_minor,
        headers=hdrs.dict(),
        body=payload)
def get_full_url(req):
    """Rebuild the absolute URL for req from its destination info and
    relative URL, eliding the port when it matches the scheme default."""
    if req.use_tls:
        scheme, default_port = "https", 443
    else:
        scheme, default_port = "http", 80
    netloc = req.dest_host
    if req.dest_port != default_port:
        netloc = "%s:%d" % (req.dest_host, req.dest_port)
    rpath = req.url
    u = URL("")
    u.scheme = scheme
    u.netloc = netloc
    u.path = rpath.path
    u.params = rpath.params
    u.query = rpath.query
    u.fragment = rpath.fragment
    return u.geturl()
|
connection.py | import argparse
import threading
import socket
import queue
import enum
# Default endpoints: where this process listens and where its peer listens.
DEFAULT_LOCAL_IP_ADDRESS = '0.0.0.0'
DEFAULT_LOCAL_PORT_NUMBER = 8888
DEFAULT_REMOTE_IP_ADDRESS = 'localhost'
DEFAULT_REMOTE_PORT_NUMBER = 9999
# Maximum number of bytes read per recv() call
DEFAULT_BUFFER_SIZE = 1024
class LocalStatus(enum.Enum):
    """Lifecycle states of the local (listening) socket."""
    SERVER_INITIALIZED = 'Local socket initialized.'
    SERVER_LISTENING = 'Local socket listening.'
    SERVER_SHUTDOWN = 'Local socket shutdown.'
class RemoteStatus(enum.Enum):
    """Progress states of the outbound handshake with the remote peer."""
    HANDSHAKE_INITIALIZED = 'Handshake initialized.'
    HANDSHAKE_SUCCESSFUL = 'Handshake successful.'
class Connection:
    """Bidirectional, line-oriented link between two peers.

    A daemon thread accepts incoming messages on the local socket and queues
    them, while another daemon thread performs a SYN/ACK/SYN-ACK handshake
    with the remote peer.  The wire protocol is newline-terminated bytes.
    """

    def __init__(self, local_ip_address, local_port_number, remote_ip_address,
                 remote_port_number, buffer_size):
        self.local_socket = None
        self.local_ip_address = local_ip_address
        self.local_port_number = local_port_number
        self.remote_ip_address = remote_ip_address
        self.remote_port_number = remote_port_number
        self.buffer_size = buffer_size
        self.local_queue = queue.Queue()
        self.local_status = None
        self.local_thread = threading.Thread(name='localThread',
                                             target=self.open_local_socket)
        self.local_thread.daemon = True
        self.local_thread.start()
        self.remote_status = None
        self.handshake_thread = threading.Thread(name='handshakeThread',
                                                 target=self.initiate_handshake)
        self.handshake_thread.daemon = True
        self.handshake_thread.start()

    def initiate_handshake(self):
        """Retry connecting to the remote peer until the handshake completes."""
        self.remote_status = RemoteStatus.HANDSHAKE_INITIALIZED
        print(self.remote_status)
        while True:
            remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            remote_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                remote_socket.connect((self.remote_ip_address, self.remote_port_number))
                # BUG FIX: sockets carry bytes on Python 3 — the old str
                # literals raised TypeError and silently killed this thread,
                # and recv() == 'ACK\n' (bytes vs str) was always False.
                remote_socket.send(b'SYN\n')
                if remote_socket.recv(self.buffer_size) == b'ACK\n':
                    remote_socket.send(b'SYN-ACK\n')
                    remote_socket.shutdown(socket.SHUT_WR)
                    remote_socket.close()
                    self.remote_status = RemoteStatus.HANDSHAKE_SUCCESSFUL
                    print(self.remote_status)
                break
            except socket.error:
                continue

    def send(self, message):
        """Send *message* (str or bytes) to the remote peer, newline-terminated.

        Errors are swallowed deliberately: this is a best-effort transport.
        """
        if isinstance(message, str):
            message = message.encode()
        remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        remote_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            remote_socket.connect((self.remote_ip_address, self.remote_port_number))
            # old form `print ('...'), message` was a Python 2 leftover that
            # printed only the label on Python 3
            print('Remote socket sent:', message)
            remote_socket.send(message + b'\n')
            remote_socket.shutdown(socket.SHUT_WR)
            remote_socket.close()
        except socket.error:
            pass

    def get_message(self):
        """Pop and return the next queued message (bytes), or None if empty."""
        if not self.local_queue.empty():
            return self.local_queue.get()
        return None

    def open_local_socket(self):
        """Accept loop: answer handshake SYNs with ACK, queue everything else."""
        self.local_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.local_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.local_socket.bind((self.local_ip_address, self.local_port_number))
        self.local_socket.listen(1)
        self.local_status = LocalStatus.SERVER_LISTENING
        print(self.local_status)
        while True:
            try:
                connection, address = self.local_socket.accept()
                message = connection.recv(self.buffer_size)
                if message == b'SYN\n':
                    connection.send(b'ACK\n')
                else:
                    print('Local socket received:', message.rstrip())
                    self.local_queue.put(message)
                # close the accepted connection; it was previously leaked
                connection.close()
            except socket.error:
                break

    def close_server_socket(self):
        """Shut down the listening socket; daemon threads then exit on error/EOF.

        (The old `thread.stop = True` lines only set an unused attribute and
        have been removed — threading.Thread has no stop mechanism.)
        """
        try:
            self.local_socket.shutdown(socket.SHUT_RD)
            self.local_socket.close()
            self.local_status = LocalStatus.SERVER_SHUTDOWN
            print(self.local_status)
        except socket.error:
            pass
def main(local_ip_address, local_port_number,
         remote_ip_address, remote_port_number,
         buffer_size):
    """Build and return a Connection wired to the given endpoints."""
    conn = Connection(local_ip_address, local_port_number,
                      remote_ip_address, remote_port_number,
                      buffer_size)
    return conn
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-lip', '--localIPAddress', help='local IP address',
                        required=False, default=DEFAULT_LOCAL_IP_ADDRESS)
    # defaults were wrapped in str() and round-tripped through type=int;
    # pass the int constants directly
    parser.add_argument('-lpn', '--localPortNumber', help='local port number',
                        required=False, type=int, default=DEFAULT_LOCAL_PORT_NUMBER)
    parser.add_argument('-rip', '--remoteIPAddress', help='remote IP address',
                        required=False, default=DEFAULT_REMOTE_IP_ADDRESS)
    parser.add_argument('-rpn', '--remotePortNumber', help='remote port number',
                        required=False, type=int, default=DEFAULT_REMOTE_PORT_NUMBER)
    parser.add_argument('-bs', '--buffer_size', help='buffer size',
                        required=False, type=int, default=DEFAULT_BUFFER_SIZE)
    args = parser.parse_args()
    # the old `print ('label:'), value` statements were Python 2 leftovers
    # that printed only the label, and the remote values were mislabeled
    print('localIPAddress:', args.localIPAddress)
    print('localPortNumber:', args.localPortNumber)
    print('remoteIPAddress:', args.remoteIPAddress)
    print('remotePortNumber:', args.remotePortNumber)
    print('buffer_size:', args.buffer_size)
    main(args.localIPAddress, args.localPortNumber,
         args.remoteIPAddress, args.remotePortNumber,
         args.buffer_size)
|
framereader.py | # pylint: skip-file
import json
import os
import pickle
import struct
import subprocess
import tempfile
import threading
from enum import IntEnum
from functools import wraps
import numpy as np
from lru import LRU
import _io
from tools.lib.cache import cache_path_for_file_path
from tools.lib.exceptions import DataUnreadableError
from common.file_helpers import atomic_write_in_dir
try:
from xx.chffr.lib.filereader import FileReader
except ImportError:
from tools.lib.filereader import FileReader
HEVC_SLICE_B = 0
HEVC_SLICE_P = 1
HEVC_SLICE_I = 2
class GOPReader:
    """Interface for sources that can serve whole groups of pictures (GOPs)."""
    def get_gop(self, num):
        # returns (start_frame_num, num_frames, frames_to_skip, gop_data)
        raise NotImplementedError
class DoNothingContextManager:
    """No-op context manager; stands in for a lock when caching is single-threaded."""
    def __enter__(self):
        return self
    def __exit__(self, *x):
        pass
class FrameType(IntEnum):
    """Container formats recognized by fingerprint_video()."""
    raw = 1
    h265_stream = 2
def fingerprint_video(fn):
    """Identify the frame container type of *fn* from its first four bytes."""
    with FileReader(fn) as f:
        magic = f.read(4)
    if len(magic) == 0:
        raise DataUnreadableError(f"{fn} is empty")
    if magic == b"\x00\xc0\x12\x00":
        return FrameType.raw
    if magic == b"\x00\x00\x00\x01":
        # Annex-B start code; only hevc-named files are supported
        if 'hevc' in fn:
            return FrameType.h265_stream
        raise NotImplementedError(fn)
    raise NotImplementedError(fn)
def ffprobe(fn, fmt=None):
    """Run ffprobe on *fn* and return its parsed JSON metadata.

    fmt, when given, is passed as ffprobe's -f (input format) option.
    Raises DataUnreadableError if ffprobe exits nonzero.
    """
    cmd = ["ffprobe",
           "-v", "quiet",
           "-print_format", "json",
           "-show_format", "-show_streams"]
    if fmt:
        cmd += ["-f", fmt]
    cmd.append(fn)
    try:
        ffprobe_output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        # chain the original error so the ffprobe exit status isn't lost
        raise DataUnreadableError(fn) from e
    return json.loads(ffprobe_output)
def vidindex(fn, typ):
    """Index *fn* with the bundled vidindex tool.

    Returns (index, prefix): index is an (N, 2) uint32 array whose final
    sentinel row is (0xFFFFFFFF, file_size); prefix is the stream's global
    header bytes.  Raises DataUnreadableError when vidindex fails.
    """
    vidindex_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "vidindex")
    # renamed local so it no longer shadows this function's own name
    vidindex_bin = os.path.join(vidindex_dir, "vidindex")
    # DEVNULL instead of open("/dev/null", "w"): the old call leaked the fd
    subprocess.check_call(["make"], cwd=vidindex_dir, stdout=subprocess.DEVNULL)
    with tempfile.NamedTemporaryFile() as prefix_f, \
         tempfile.NamedTemporaryFile() as index_f:
        try:
            subprocess.check_call([vidindex_bin, typ, fn, prefix_f.name, index_f.name])
        except subprocess.CalledProcessError as e:
            raise DataUnreadableError(f"vidindex failed on file {fn}") from e
        with open(index_f.name, "rb") as f:
            index = f.read()
        with open(prefix_f.name, "rb") as f:
            prefix = f.read()
    index = np.frombuffer(index, np.uint32).reshape(-1, 2)
    assert index[-1, 0] == 0xFFFFFFFF
    assert index[-1, 1] == os.path.getsize(fn)
    return index, prefix
def cache_fn(func):
    """Decorator: memoize func(fn, ...) on disk, keyed by the input file path.

    Callers may pass no_cache=True to bypass caching, or cache_prefix=... to
    relocate the cache; both keywords are consumed here.
    """
    @wraps(func)
    def cache_inner(fn, *args, **kwargs):
        if kwargs.pop('no_cache', None):
            cache_path = None
        else:
            prefix = kwargs.pop('cache_prefix', None)
            cache_path = cache_path_for_file_path(fn, prefix)
        if cache_path and os.path.exists(cache_path):
            # cache hit: return the pickled result directly
            with open(cache_path, "rb") as fh:
                return pickle.load(fh)
        result = func(fn, *args, **kwargs)
        if cache_path:
            with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as fh:
                pickle.dump(result, fh, -1)
        return result
    return cache_inner
@cache_fn
def index_stream(fn, typ):
    """Index an h265 stream file: frame index, global prefix and ffprobe data."""
    assert typ in ("hevc", )
    with FileReader(fn) as f:
        assert os.path.exists(f.name), fn
        index, prefix = vidindex(f.name, typ)
        probe = ffprobe(f.name, typ)
    return {
        'index': index,
        'global_prefix': prefix,
        'probe': probe,
    }
def index_videos(camera_paths, cache_prefix=None):
    """Requires that paths in camera_paths are contiguous and of the same type."""
    if not camera_paths:
        raise ValueError("must provide at least one video to index")
    # fingerprint once; all segments are assumed to share the same type
    frame_type = fingerprint_video(camera_paths[0])
    for path in camera_paths:
        index_video(path, frame_type, cache_prefix)
def index_video(fn, frame_type=None, cache_prefix=None):
    """Index a single video file unless a cached index already exists."""
    cache_path = cache_path_for_file_path(fn, cache_prefix)
    if os.path.exists(cache_path):
        return
    if frame_type is None:
        # BUG FIX: fn is a path string here, so the old fingerprint_video(fn[0])
        # passed only its first character instead of the file path
        frame_type = fingerprint_video(fn)
    if frame_type == FrameType.h265_stream:
        index_stream(fn, "hevc", cache_prefix=cache_prefix)
    else:
        raise NotImplementedError("Only h265 supported")
def get_video_index(fn, frame_type, cache_prefix=None):
    """Return the cached index dict for *fn*, building it on demand.

    Returns None when indexing failed to produce a cache file.
    """
    path = cache_path_for_file_path(fn, cache_prefix)
    if not os.path.exists(path):
        index_video(fn, frame_type, cache_prefix)
    if not os.path.exists(path):
        return None
    with open(path, "rb") as fh:
        return pickle.load(fh)
def read_file_check_size(f, sz, cookie):
    """Read exactly *sz* bytes from *f* into a fresh bytearray.

    Asserts that the full amount was read; *cookie* is accepted for interface
    compatibility but unused here.
    """
    out = bytearray(sz)
    got = f.readinto(out)
    assert got == sz, (got, sz)
    return out
def rgb24toyuv420(rgb):
    """Convert an (H, W, 3) RGB24 image into a flat planar YUV420 byte array."""
    yuv_from_rgb = np.array([
        [0.299, 0.587, 0.114],
        [-0.14714119, -0.28886916, 0.43601035],
        [0.61497538, -0.51496512, -0.10001026],
    ])
    img = np.dot(rgb.reshape(-1, 3), yuv_from_rgb.T).reshape(rgb.shape)
    y_len = img.shape[0] * img.shape[1]
    uv_len = y_len // 4
    # 2x2 box-average the chroma planes and bias them into the unsigned range
    u_plane = (img[::2, ::2, 1] + img[1::2, ::2, 1] + img[::2, 1::2, 1] + img[1::2, 1::2, 1]) / 4 + 128
    v_plane = (img[::2, ::2, 2] + img[1::2, ::2, 2] + img[::2, 1::2, 2] + img[1::2, 1::2, 2]) / 4 + 128
    planes = np.empty(y_len + 2 * uv_len, dtype=img.dtype)
    planes[:y_len] = img[:, :, 0].reshape(-1)
    planes[y_len:y_len + uv_len] = u_plane.reshape(-1)
    planes[y_len + uv_len:y_len + 2 * uv_len] = v_plane.reshape(-1)
    return planes.clip(0, 255).astype('uint8')
def decompress_video_data(rawdat, vid_fmt, w, h, pix_fmt):
    """Decode compressed video bytes via an ffmpeg subprocess.

    Returns a numpy uint8 array shaped per *pix_fmt*.  Raises
    DataUnreadableError when ffmpeg exits nonzero.
    """
    # using a tempfile is much faster than proc.communicate for some reason
    with tempfile.TemporaryFile() as tmpf:
        tmpf.write(rawdat)
        tmpf.seek(0)
        threads = os.getenv("FFMPEG_THREADS", "0")
        cuda = os.getenv("FFMPEG_CUDA", "0") == "1"
        proc = subprocess.Popen(
            ["ffmpeg",
             "-threads", threads,
             "-hwaccel", "none" if not cuda else "cuda",
             "-c:v", "hevc",
             "-vsync", "0",
             "-f", vid_fmt,
             "-flags2", "showall",
             "-i", "pipe:0",
             "-threads", threads,
             "-f", "rawvideo",
             "-pix_fmt", pix_fmt,
             "pipe:1"],
            # BUG FIX: stderr was open("/dev/null") — read-only AND a leaked
            # file descriptor; DEVNULL is the correct sink
            stdin=tmpf, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
        dat = proc.stdout.read()
        if proc.wait() != 0:
            raise DataUnreadableError("ffmpeg failed")
    if pix_fmt == "rgb24":
        ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, h, w, 3)
    elif pix_fmt == "yuv420p":
        ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, (h*w*3//2))
    elif pix_fmt == "yuv444p":
        ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, 3, h, w)
    else:
        raise NotImplementedError
    return ret
class BaseFrameReader:
    """Abstract frame source; subclasses provide frame_type, frame_count, w, h."""
    # properties: frame_type, frame_count, w, h
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
    def close(self):
        # default implementation: nothing to release
        pass
    def get(self, num, count=1, pix_fmt="yuv420p"):
        """Return *count* decoded frames starting at frame *num*."""
        raise NotImplementedError
def FrameReader(fn, cache_prefix=None, readahead=False, readbehind=False, index_data=None):
    """Open *fn* and return the frame reader matching its container type."""
    ftype = fingerprint_video(fn)
    if ftype == FrameType.raw:
        return RawFrameReader(fn)
    if ftype not in (FrameType.h265_stream,):
        raise NotImplementedError(ftype)
    if not index_data:
        index_data = get_video_index(fn, ftype, cache_prefix)
    return StreamFrameReader(fn, ftype, index_data,
                             readahead=readahead, readbehind=readbehind)
class RawData:
    """Random access over a file of fixed-size, length-prefixed records.

    Layout: every record is preceded by a 4-byte (native-endian) length field,
    and all records share the length read from the first one.
    """
    def __init__(self, f):
        self.f = _io.FileIO(f, 'rb')
        self.lenn = struct.unpack("I", self.f.read(4))[0]
        # BUG FIX: use integer division — true division made the record count
        # (and hence frame_count) a float
        self.count = os.path.getsize(f) // (self.lenn + 4)
    def read(self, i):
        """Return the payload bytes of record *i*."""
        self.f.seek((self.lenn + 4) * i + 4)
        return self.f.read(self.lenn)
class RawFrameReader(BaseFrameReader):
    """Frame reader for raw (bayer) camera dump files."""
    def __init__(self, fn):
        # raw camera
        self.fn = fn
        self.frame_type = FrameType.raw
        self.rawfile = RawData(self.fn)
        self.frame_count = self.rawfile.count
        self.w, self.h = 640, 480
    def load_and_debayer(self, img):
        """Demosaic one raw frame into RGB (green = mean of the two green sites)."""
        raw = np.frombuffer(img, dtype='uint8').reshape(960, 1280)
        green = ((raw[0::2, 0::2].astype("uint16") + raw[1::2, 1::2].astype("uint16")) >> 1).astype("uint8")
        return np.dstack([raw[0::2, 1::2], green, raw[1::2, 0::2]])
    def get(self, num, count=1, pix_fmt="yuv420p"):
        """Return frames [num, num+count) in the requested pixel format."""
        assert self.frame_count is not None
        assert num + count <= self.frame_count
        if pix_fmt not in ("yuv420p", "rgb24"):
            raise ValueError(f"Unsupported pixel format {pix_fmt!r}")
        frames = []
        for i in range(num, num + count):
            rgb = self.load_and_debayer(self.rawfile.read(i))
            frames.append(rgb if pix_fmt == "rgb24" else rgb24toyuv420(rgb))
        return frames
class VideoStreamDecompressor:
    """Decode a whole video stream through an ffmpeg subprocess.

    A daemon thread feeds the compressed file into ffmpeg's stdin while
    read() yields decoded frames from its stdout.
    """
    def __init__(self, fn, vid_fmt, w, h, pix_fmt):
        self.fn = fn
        self.vid_fmt = vid_fmt
        self.w = w
        self.h = h
        self.pix_fmt = pix_fmt
        # bytes per decoded frame for the requested output pixel format
        if pix_fmt == "yuv420p":
            self.out_size = w*h*3//2  # yuv420p
        elif pix_fmt in ("rgb24", "yuv444p"):
            self.out_size = w*h*3
        else:
            raise NotImplementedError
        self.proc = None
        # writer thread is created here but only started inside read()
        self.t = threading.Thread(target=self.write_thread)
        self.t.daemon = True
    def write_thread(self):
        """Stream the source file into ffmpeg's stdin in 1 MiB chunks."""
        try:
            with FileReader(self.fn) as f:
                while True:
                    r = f.read(1024*1024)
                    if len(r) == 0:
                        break
                    self.proc.stdin.write(r)
        finally:
            # always close stdin so ffmpeg sees EOF, even after a read error
            self.proc.stdin.close()
    def read(self):
        """Generator yielding one decoded frame (numpy uint8 array) at a time."""
        threads = os.getenv("FFMPEG_THREADS", "0")
        cuda = os.getenv("FFMPEG_CUDA", "0") == "1"
        cmd = [
            "ffmpeg",
            "-threads", threads,
            "-hwaccel", "none" if not cuda else "cuda",
            "-c:v", "hevc",
            # "-avioflags", "direct",
            "-analyzeduration", "0",
            "-probesize", "32",
            "-flush_packets", "0",
            # "-fflags", "nobuffer",
            "-vsync", "0",
            "-f", self.vid_fmt,
            "-i", "pipe:0",
            "-threads", threads,
            "-f", "rawvideo",
            "-pix_fmt", self.pix_fmt,
            "pipe:1"
        ]
        self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
        try:
            self.t.start()
            while True:
                # read exactly one frame's worth of bytes per iteration
                dat = self.proc.stdout.read(self.out_size)
                if len(dat) == 0:
                    break
                assert len(dat) == self.out_size
                if self.pix_fmt == "rgb24":
                    ret = np.frombuffer(dat, dtype=np.uint8).reshape((self.h, self.w, 3))
                elif self.pix_fmt == "yuv420p":
                    ret = np.frombuffer(dat, dtype=np.uint8)
                elif self.pix_fmt == "yuv444p":
                    ret = np.frombuffer(dat, dtype=np.uint8).reshape((3, self.h, self.w))
                else:
                    assert False
                yield ret
            result_code = self.proc.wait()
            assert result_code == 0, result_code
        finally:
            self.proc.kill()
            self.t.join()
class StreamGOPReader(GOPReader):
    """GOP lookup over an indexed h265 stream file."""
    def __init__(self, fn, frame_type, index_data):
        assert frame_type == FrameType.h265_stream
        self.fn = fn
        self.frame_type = frame_type
        self.frame_count = None
        self.w, self.h = None, None
        self.prefix = None
        self.index = None
        # index rows: (slice_type, byte_offset); last row is a sentinel
        self.index = index_data['index']
        self.prefix = index_data['global_prefix']
        probe = index_data['probe']
        self.prefix_frame_data = None
        self.num_prefix_frames = 0
        self.vid_fmt = "hevc"
        i = 0
        # locate the first I-frame; these streams are expected to start on one
        while i < self.index.shape[0] and self.index[i, 0] != HEVC_SLICE_I:
            i += 1
        self.first_iframe = i
        assert self.first_iframe == 0
        self.frame_count = len(self.index) - 1
        self.w = probe['streams'][0]['width']
        self.h = probe['streams'][0]['height']
    def _lookup_gop(self, num):
        """Find the GOP containing frame *num*: walk back to its I-frame and
        forward to the next one; returns (frame_b, frame_e, offset_b, offset_e)."""
        frame_b = num
        while frame_b > 0 and self.index[frame_b, 0] != HEVC_SLICE_I:
            frame_b -= 1
        frame_e = num + 1
        while frame_e < (len(self.index) - 1) and self.index[frame_e, 0] != HEVC_SLICE_I:
            frame_e += 1
        offset_b = self.index[frame_b, 1]
        offset_e = self.index[frame_e, 1]
        return (frame_b, frame_e, offset_b, offset_e)
    def get_gop(self, num):
        """Return (start_frame, num_frames, frames_to_skip, raw hevc bytes)
        for the GOP containing frame *num*, with the global prefix prepended."""
        frame_b, frame_e, offset_b, offset_e = self._lookup_gop(num)
        assert frame_b <= num < frame_e
        num_frames = frame_e - frame_b
        with FileReader(self.fn) as f:
            f.seek(offset_b)
            rawdat = f.read(offset_e - offset_b)
        if num < self.first_iframe:
            assert self.prefix_frame_data
            rawdat = self.prefix_frame_data + rawdat
        rawdat = self.prefix + rawdat
        skip_frames = 0
        if num < self.first_iframe:
            skip_frames = self.num_prefix_frames
        return frame_b, num_frames, skip_frames, rawdat
class GOPFrameReader(BaseFrameReader):
    #FrameReader with caching and readahead for formats that are group-of-picture based
    def __init__(self, readahead=False, readbehind=False):
        self.open_ = True
        self.readahead = readahead
        self.readbehind = readbehind
        # decoded-frame cache keyed by (frame_num, pix_fmt)
        self.frame_cache = LRU(64)
        if self.readahead:
            self.cache_lock = threading.RLock()
            self.readahead_last = None
            self.readahead_len = 30
            self.readahead_c = threading.Condition()
            self.readahead_thread = threading.Thread(target=self._readahead_thread)
            self.readahead_thread.daemon = True
            self.readahead_thread.start()
        else:
            # no background thread -> no real locking needed
            self.cache_lock = DoNothingContextManager()
    def close(self):
        if not self.open_:
            return
        self.open_ = False
        if self.readahead:
            # wake the readahead thread so it notices open_ == False and exits
            self.readahead_c.acquire()
            self.readahead_c.notify()
            self.readahead_c.release()
            self.readahead_thread.join()
    def _readahead_thread(self):
        """Background loop: after each get(), prefetch frames around the cursor."""
        while True:
            self.readahead_c.acquire()
            try:
                if not self.open_:
                    break
                self.readahead_c.wait()
            finally:
                self.readahead_c.release()
            if not self.open_:
                break
            assert self.readahead_last
            num, pix_fmt = self.readahead_last
            if self.readbehind:
                # prefetch backwards (useful when scrubbing in reverse)
                for k in range(num - 1, max(0, num - self.readahead_len), -1):
                    self._get_one(k, pix_fmt)
            else:
                for k in range(num, min(self.frame_count, num + self.readahead_len)):
                    self._get_one(k, pix_fmt)
    def _get_one(self, num, pix_fmt):
        """Return frame *num*, decoding its whole GOP into the cache on a miss."""
        assert num < self.frame_count
        if (num, pix_fmt) in self.frame_cache:
            return self.frame_cache[(num, pix_fmt)]
        with self.cache_lock:
            # re-check under the lock: the readahead thread may have filled it
            if (num, pix_fmt) in self.frame_cache:
                return self.frame_cache[(num, pix_fmt)]
            frame_b, num_frames, skip_frames, rawdat = self.get_gop(num)
            ret = decompress_video_data(rawdat, self.vid_fmt, self.w, self.h, pix_fmt)
            ret = ret[skip_frames:]
            assert ret.shape[0] == num_frames
            for i in range(ret.shape[0]):
                self.frame_cache[(frame_b+i, pix_fmt)] = ret[i]
            return self.frame_cache[(num, pix_fmt)]
    def get(self, num, count=1, pix_fmt="yuv420p"):
        """Return frames [num, num+count) and kick off readahead past them."""
        assert self.frame_count is not None
        if num + count > self.frame_count:
            raise ValueError(f"{num + count} > {self.frame_count}")
        if pix_fmt not in ("yuv420p", "rgb24", "yuv444p"):
            raise ValueError(f"Unsupported pixel format {pix_fmt!r}")
        ret = [self._get_one(num + i, pix_fmt) for i in range(count)]
        if self.readahead:
            self.readahead_last = (num+count, pix_fmt)
            self.readahead_c.acquire()
            self.readahead_c.notify()
            self.readahead_c.release()
        return ret
class StreamFrameReader(StreamGOPReader, GOPFrameReader):
    """Frame reader for indexed h265 streams: GOP lookup from StreamGOPReader
    plus caching/readahead from GOPFrameReader."""
    def __init__(self, fn, frame_type, index_data, readahead=False, readbehind=False):
        StreamGOPReader.__init__(self, fn, frame_type, index_data)
        GOPFrameReader.__init__(self, readahead, readbehind)
def GOPFrameIterator(gop_reader, pix_fmt):
    """Yield every frame of *gop_reader*'s stream via a streaming decoder."""
    decoder = VideoStreamDecompressor(
        gop_reader.fn, gop_reader.vid_fmt, gop_reader.w, gop_reader.h, pix_fmt)
    yield from decoder.read()
def FrameIterator(fn, pix_fmt, **kwargs):
    """Yield the decoded frames of *fn* one at a time."""
    reader = FrameReader(fn, **kwargs)
    if isinstance(reader, GOPReader):
        # stream-decode GOP-based formats in one pass
        yield from GOPFrameIterator(reader, pix_fmt)
    else:
        for idx in range(reader.frame_count):
            yield reader.get(idx, pix_fmt=pix_fmt)[0]
|
camera.py | import configparser
import logging
import math
import os
import pathlib
import threading
import time
import glob
from contextlib import contextmanager
from functools import wraps
from io import BytesIO
from pathlib import Path
from queue import Queue
from typing import List
import cv2
from PIL import Image, _webp
from telegram import Message
from configuration import ConfigWrapper
from klippy import Klippy
from power_device import PowerDevice
logger = logging.getLogger(__name__)
def cam_light_toggle(func):
    """Decorator: ensure the light device is on around *func* and schedule a
    delayed switch-off once no photo/video request still needs it."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self.use_light()
        # first request while the light is off: switch it on and hold the
        # lock for light_timeout seconds so the scene is lit before capture
        if self.light_timeout > 0 and self.light_device and not self.light_device.device_state and not self.light_lock.locked():
            self.light_timer_event.clear()
            self.light_lock.acquire()
            self.light_need_off = True
            self.light_device.switch_device(True)
            time.sleep(self.light_timeout)
            self.light_timer_event.set()
        # other callers block here until the warm-up above has finished
        self.light_timer_event.wait()
        # Todo: maybe add try block?
        result = func(self, *args, **kwargs)
        self.free_light()
        def delayed_light_off():
            # switch off only if no new request arrived while the timer ran
            if self.light_requests == 0:
                if self.light_lock.locked():
                    self.light_lock.release()
                self.light_need_off = False
                self.light_device.switch_device(False)
            else:
                logger.debug(f"light requests count: {self.light_requests}")
        if self.light_need_off and self.light_requests == 0:
            threading.Timer(self.light_timeout, delayed_light_off).start()
        return result
    return wrapper
class Camera:
def __init__(self, config: ConfigWrapper, klippy: Klippy, light_device: PowerDevice, logging_handler: logging.Handler = None):
self.enabled: bool = True if config.camera.enabled and config.camera.host else False
self._host = int(config.camera.host) if str.isdigit(config.camera.host) else config.camera.host
self._threads: int = config.camera.threads
self._flip_vertically: bool = config.camera.flip_vertically
self._flip_horizontally: bool = config.camera.flip_horizontally
self._fourcc: str = config.camera.fourcc
self._video_duration: int = config.camera.video_duration
self._video_buffer_size: int = config.camera.video_buffer_size
self._stream_fps: int = config.camera.stream_fps
self._klippy: Klippy = klippy
# Todo: refactor into timelapse class
self._base_dir: str = config.timelapse.base_dir
self._ready_dir: str = config.timelapse.ready_dir
self._cleanup: bool = config.timelapse.cleanup
self._target_fps: int = 15
self._min_lapse_duration: int = 0
self._max_lapse_duration: int = 0
self._last_frame_duration: int = 5
self._light_need_off: bool = False
self._light_need_off_lock = threading.Lock()
self.light_timeout: int = config.camera.light_timeout
self.light_device: PowerDevice = light_device
self._camera_lock = threading.Lock()
self.light_lock = threading.Lock()
self.light_timer_event = threading.Event()
self.light_timer_event.set()
self._hw_accel: bool = False
if config.camera.picture_quality == 'low':
self._img_extension: str = 'jpeg'
elif config.camera.picture_quality == 'high':
self._img_extension: str = 'webp'
else:
self._img_extension: str = config.camera.picture_quality
self._light_requests: int = 0
self._light_request_lock = threading.Lock()
if self._flip_vertically and self._flip_horizontally:
self._flip = -1
elif self._flip_horizontally:
self._flip = 1
elif self._flip_vertically:
self._flip = 0
if config.camera.rotate == '90_cw':
self._rotate_code: int = cv2.ROTATE_90_CLOCKWISE
elif config.camera.rotate == '90_ccw':
self._rotate_code: int = cv2.ROTATE_90_COUNTERCLOCKWISE
elif config.camera.rotate == '180':
self._rotate_code: int = cv2.ROTATE_180
else:
self._rotate_code: int = -10
if logging_handler:
logger.addHandler(logging_handler)
if config.bot.debug:
logger.setLevel(logging.DEBUG)
logger.debug(cv2.getBuildInformation())
os.environ["OPENCV_VIDEOIO_DEBUG"] = "1"
# Fixme: deprecated! use T-API https://learnopencv.com/opencv-transparent-api/
if cv2.ocl.haveOpenCL():
logger.debug('OpenCL is available')
cv2.ocl.setUseOpenCL(True)
logger.debug(f'OpenCL in OpenCV is enabled: {cv2.ocl.useOpenCL()}')
cv2.setNumThreads(self._threads)
self.cam_cam = cv2.VideoCapture()
self.cam_cam.set(cv2.CAP_PROP_BUFFERSIZE, 1)
@property
def light_need_off(self) -> bool:
with self._light_need_off_lock:
return self._light_need_off
@light_need_off.setter
def light_need_off(self, new_value: bool):
with self._light_need_off_lock:
self._light_need_off = new_value
@property
def lapse_dir(self) -> str:
return f'{self._base_dir}/{self._klippy.printing_filename_with_time}'
@property
def light_requests(self) -> int:
with self._light_request_lock:
return self._light_requests
def use_light(self):
with self._light_request_lock:
self._light_requests += 1
def free_light(self):
with self._light_request_lock:
self._light_requests -= 1
@property
def target_fps(self) -> int:
return self._target_fps
@target_fps.setter
def target_fps(self, new_value: int):
self._target_fps = new_value
@property
def min_lapse_duration(self) -> int:
return self._min_lapse_duration
@min_lapse_duration.setter
def min_lapse_duration(self, new_value: int):
if new_value >= 0:
self._min_lapse_duration = new_value
@property
def max_lapse_duration(self) -> int:
return self._max_lapse_duration
@max_lapse_duration.setter
def max_lapse_duration(self, new_value: int):
if new_value >= 0:
self._max_lapse_duration = new_value
@property
def last_frame_duration(self) -> int:
return self._last_frame_duration
@last_frame_duration.setter
def last_frame_duration(self, new_value: int):
if new_value >= 0:
self._last_frame_duration = new_value
@staticmethod
def _create_thumb(image) -> BytesIO:
# cv2.cvtColor cause segfaults!
img = Image.fromarray(image[:, :, [2, 1, 0]])
bio = BytesIO()
bio.name = 'thumbnail.jpeg'
img.thumbnail((320, 320))
img.save(bio, 'JPEG', quality=100, optimize=True)
bio.seek(0)
img.close()
del img
return bio
@cam_light_toggle
def take_photo(self) -> BytesIO:
with self._camera_lock:
self.cam_cam.open(self._host)
self.cam_cam.set(cv2.CAP_PROP_BUFFERSIZE, 1)
success, image = self.cam_cam.read()
self.cam_cam.release()
if not success:
logger.debug("failed to get camera frame for photo")
# Todo: resize to cam resolution!
img = Image.open('../imgs/nosignal.png')
else:
if self._hw_accel:
image_um = cv2.UMat(image)
if self._flip_vertically or self._flip_horizontally:
image_um = cv2.flip(image_um, self._flip)
img = Image.fromarray(cv2.UMat.get(cv2.cvtColor(image_um, cv2.COLOR_BGR2RGB)))
image_um = None
del image_um
else:
if self._flip_vertically or self._flip_horizontally:
image = cv2.flip(image, self._flip)
# Todo: check memory leaks
if self._rotate_code > -10:
image = cv2.rotate(image, rotateCode=self._rotate_code)
# # cv2.cvtColor cause segfaults!
# rgb = image[:, :, ::-1]
rgb = image[:, :, [2, 1, 0]]
img = Image.fromarray(rgb)
rgb = None
del rgb
image = None
del image, success
bio = BytesIO()
bio.name = f'status.{self._img_extension}'
if self._img_extension in ['jpg', 'jpeg']:
img.save(bio, 'JPEG', quality=80, subsampling=0)
elif self._img_extension == 'webp':
# https://github.com/python-pillow/Pillow/issues/4364
_webp.HAVE_WEBPANIM = False
img.save(bio, 'WebP', quality=0, lossless=True)
elif self._img_extension == 'png':
img.save(bio, 'PNG')
bio.seek(0)
img.close()
del img
return bio
@contextmanager
def take_video_generator(self):
(video_bio, thumb_bio, width, height) = self.take_video()
try:
yield video_bio, thumb_bio, width, height
finally:
video_bio.close()
thumb_bio.close()
@cam_light_toggle
def take_video(self) -> (BytesIO, BytesIO, int, int):
def process_video_frame(frame_local):
if self._flip_vertically or self._flip_horizontally:
if self._hw_accel:
frame_loc_ = cv2.UMat(frame_local)
frame_loc_ = cv2.flip(frame_loc_, self._flip)
frame_local = cv2.UMat.get(frame_loc_)
del frame_loc_
else:
frame_local = cv2.flip(frame_local, self._flip)
# Todo: check memory leaks
if self._rotate_code > -10:
frame_local = cv2.rotate(frame_local, rotateCode=self._rotate_code)
return frame_local
def write_video():
cv2.setNumThreads(self._threads)
out = cv2.VideoWriter(filepath, fourcc=cv2.VideoWriter_fourcc(*self._fourcc), fps=fps_cam, frameSize=(width, height))
while video_lock.locked():
try:
frame_local = frame_queue.get(block=False)
except Exception as ex:
logger.warning(f'Reading video frames queue exception {ex.with_traceback}')
frame_local = frame_queue.get()
out.write(process_video_frame(frame_local))
# frame_local = None
# del frame_local
while not frame_queue.empty():
frame_local = frame_queue.get()
out.write(process_video_frame(frame_local))
# frame_local = None
# del frame_local
out.release()
video_written_event.set()
with self._camera_lock:
cv2.setNumThreads(self._threads) # TOdo: check self set and remove!
self.cam_cam.open(self._host)
self.cam_cam.set(cv2.CAP_PROP_BUFFERSIZE, 1)
success, frame = self.cam_cam.read()
if not success:
logger.debug("failed to get camera frame for video")
# Todo: get picture from imgs?
frame = process_video_frame(frame)
height, width, channels = frame.shape
thumb_bio = self._create_thumb(frame)
del frame, channels
fps_cam = self.cam_cam.get(cv2.CAP_PROP_FPS) if self._stream_fps == 0 else self._stream_fps
filepath = os.path.join('/tmp/', 'video.mp4')
frame_queue = Queue(fps_cam * self._video_buffer_size)
video_lock = threading.Lock()
video_written_event = threading.Event()
video_written_event.clear()
video_lock.acquire()
threading.Thread(target=write_video, args=()).start()
t_end = time.time() + self._video_duration
while success and time.time() <= t_end:
success, frame_loc = self.cam_cam.read()
try:
frame_queue.put(frame_loc, block=False)
except Exception as ex:
logger.warning(f'Writing video frames queue exception {ex.with_traceback}')
frame_queue.put(frame_loc)
# frame_loc = None
# del frame_loc
video_lock.release()
video_written_event.wait()
self.cam_cam.release()
video_bio = BytesIO()
video_bio.name = 'video.mp4'
with open(filepath, 'rb') as fh:
video_bio.write(fh.read())
os.remove(filepath)
video_bio.seek(0)
return video_bio, thumb_bio, width, height
def take_lapse_photo(self) -> None:
# Todo: check for space available?
Path(self.lapse_dir).mkdir(parents=True, exist_ok=True)
# never add self in params there!
with self.take_photo() as photo:
filename = f'{self.lapse_dir}/{time.time()}.{self._img_extension}'
with open(filename, "wb") as outfile:
outfile.write(photo.getvalue())
photo.close()
def create_timelapse(self, printing_filename: str, gcode_name: str, info_mess: Message) -> (BytesIO, BytesIO, int, int, str, str):
return self._create_timelapse(printing_filename, gcode_name, info_mess)
def create_timelapse_for_file(self, filename: str, info_mess: Message) -> (BytesIO, BytesIO, int, int, str, str):
return self._create_timelapse(filename, filename, info_mess)
def _calculate_fps(self, frames_count: int) -> int:
actual_duration = frames_count / self._target_fps
# Todo: check _max_lapse_duration > _min_lapse_duration
if (self._min_lapse_duration == 0 and self._max_lapse_duration == 0) or (self._min_lapse_duration <= actual_duration <= self._max_lapse_duration and self._max_lapse_duration > 0) or (
actual_duration > self._min_lapse_duration and self._max_lapse_duration == 0):
return self._target_fps
elif actual_duration < self._min_lapse_duration and self._min_lapse_duration > 0:
fps = math.ceil(frames_count / self._min_lapse_duration)
return fps if fps >= 1 else 1
elif actual_duration > self._max_lapse_duration > 0:
return math.ceil(frames_count / self._max_lapse_duration)
else:
logger.error(f"Unknown fps calculation state for durations min:{self._min_lapse_duration} and max:{self._max_lapse_duration} and actual:{actual_duration}")
return self._target_fps
def _create_timelapse(self, printing_filename: str, gcode_name: str, info_mess: Message) -> (BytesIO, BytesIO, int, int, str, str):
if not printing_filename:
raise ValueError(f'Gcode file name is empty')
while self.light_need_off:
time.sleep(1)
lapse_dir = f'{self._base_dir}/{printing_filename}'
if not Path(f'{lapse_dir}/lapse.lock').is_file():
open(f'{lapse_dir}/lapse.lock', mode='a').close()
# Todo: check for nonempty photos!
photos = glob.glob(f'{glob.escape(lapse_dir)}/*.{self._img_extension}')
photos.sort(key=os.path.getmtime)
photo_count = len(photos)
if photo_count == 0:
raise ValueError(f"Empty photos list for {printing_filename} in lapse path {lapse_dir}")
info_mess.edit_text(text=f"Creating thumbnail")
last_photo = photos[-1]
img = cv2.imread(last_photo)
height, width, layers = img.shape
thumb_bio = self._create_thumb(img)
video_filepath = f'{lapse_dir}/lapse.mp4'
if Path(video_filepath).is_file():
os.remove(video_filepath)
lapse_fps = self._calculate_fps(photo_count)
with self._camera_lock:
cv2.setNumThreads(self._threads) # TOdo: check self set and remove!
out = cv2.VideoWriter(video_filepath, fourcc=cv2.VideoWriter_fourcc(*self._fourcc), fps=lapse_fps, frameSize=(width, height))
info_mess.edit_text(text=f"Images recoding")
last_update_time = time.time()
for fnum, filename in enumerate(photos):
if time.time() >= last_update_time + 3:
info_mess.edit_text(text=f"Images recoded {fnum}/{photo_count}")
last_update_time = time.time()
out.write(cv2.imread(filename))
info_mess.edit_text(text=f"Repeating last image for {self._last_frame_duration} seconds")
for _ in range(lapse_fps * self._last_frame_duration):
out.write(img)
out.release()
cv2.destroyAllWindows()
del out
del photos, img, layers
# Todo: some error handling?
video_bio = BytesIO()
video_bio.name = f'{printing_filename}.mp4'
target_video_file = f'{self._ready_dir}/{printing_filename}.mp4'
with open(video_filepath, 'rb') as fh:
video_bio.write(fh.read())
if self._ready_dir and os.path.isdir(self._ready_dir):
info_mess.edit_text(text=f"Copy lapse to target ditectory")
Path(target_video_file).parent.mkdir(parents=True, exist_ok=True)
with open(f"{target_video_file}", 'wb') as cpf:
cpf.write(video_bio.getvalue())
video_bio.seek(0)
os.remove(f'{lapse_dir}/lapse.lock')
if self._cleanup:
info_mess.edit_text(text=f"Performing cleanups")
for filename in glob.glob(f'{glob.escape(lapse_dir)}/*.{self._img_extension}'):
os.remove(filename)
if video_bio.getbuffer().nbytes < 52428800:
for filename in glob.glob(f'{glob.escape(lapse_dir)}/*'):
os.remove(filename)
Path(lapse_dir).rmdir()
return video_bio, thumb_bio, width, height, video_filepath, gcode_name
def clean(self) -> None:
    """Delete every file in the current lapse directory.

    Runs only when cleanup is enabled and a print is in progress
    (otherwise the directory may belong to a finished lapse).
    """
    if not (self._cleanup and self._klippy.printing_filename):
        return
    if not os.path.isdir(self.lapse_dir):
        return
    # glob.escape guards against wildcard characters in the print name.
    for frame_path in glob.glob(f'{glob.escape(self.lapse_dir)}/*'):
        os.remove(frame_path)
# Todo: refactor into timelapse class
# Todo: check for 64 symbols length in lapse names
def detect_unfinished_lapses(self) -> List[str]:
    """Return the names of lapse directories that still contain a lock file."""
    # Todo: detect unstarted timelapse builds? folder with pics and no mp4 files
    lock_files = glob.glob(f'{self._base_dir}/*/*.lock')
    return [pathlib.PurePath(lock).parent.name for lock in lock_files]
|
test_socket.py | import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
try:
import _socket
except ImportError:
_socket = None
def get_cid():
    """Return the local AF_VSOCK context ID (CID), or None if unavailable.

    None is returned when fcntl is missing (non-Unix platform) or when
    /dev/vsock cannot be opened / the ioctl fails.
    """
    if fcntl is None:
        return None
    try:
        with open("/dev/vsock", "rb") as f:
            # The ioctl fills the supplied buffer with a 32-bit CID, so the
            # buffer must be exactly 4 bytes long; a shorter buffer would
            # make struct.unpack("I", ...) below raise struct.error.
            r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, b'\x00' * 4)
    except OSError:
        return None
    else:
        return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
    """Check whether AF_VSOCK sockets are supported on this host."""
    # A local CID can only be read when the vsock device is usable.
    return get_cid() is not None
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
    """Temporarily install *timeout* as the module-wide default socket timeout.

    The previous default is restored on exit, even if the body raises.
    """
    saved = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        yield
    finally:
        socket.setdefaulttimeout(saved)
# Feature probes evaluated once at import time; the skip decorators on the
# test classes below read these flags instead of re-probing per test.
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
    """Fixture providing a listening TCP server socket bound to a free port."""

    def setUp(self):
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # bind_port picks an unused ephemeral port and returns it.
        self.port = support.bind_port(listener)
        listener.listen()
        self.serv = listener

    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketUDPTest(unittest.TestCase):
    """Fixture providing a UDP server socket bound to a free port."""

    def setUp(self):
        datagram_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.port = support.bind_port(datagram_sock)
        self.serv = datagram_sock

    def tearDown(self):
        self.serv.close()
        self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
    """TestCase whose cleanup bookkeeping is serialized with a recursive lock.

    addCleanup() and doCleanups() may be invoked from a client thread as
    well as the main thread, so both are guarded by the same RLock.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._cleanup_lock = threading.RLock()

    def addCleanup(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().addCleanup(*args, **kwargs)

    def doCleanups(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
    """To be able to run this test, a `vcan0` CAN interface can be created with
    the following commands:
    # modprobe vcan
    # ip link add dev vcan0 type vcan
    # ifconfig vcan0 up
    """
    # Name of the (virtual) CAN interface the tests bind to.
    interface = 'vcan0'
    # Receive buffer size used by the CAN tests.
    bufsize = 128

    # NOTE: the triple-quoted strings below are reference documentation kept
    # as bare expression statements, not docstrings.
    """The CAN frame structure is defined in <linux/can.h>:
    struct can_frame {
    canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
    __u8    can_dlc; /* data length code: 0 .. 8 */
    __u8    data[8] __attribute__((aligned(8)));
    };
    """
    # "=IB3x8s": native byte order, u32 id, u8 dlc, 3 pad bytes, 8 data bytes.
    can_frame_fmt = "=IB3x8s"
    can_frame_size = struct.calcsize(can_frame_fmt)

    """The Broadcast Management Command frame structure is defined
    in <linux/can/bcm.h>:
    struct bcm_msg_head {
    __u32 opcode;
    __u32 flags;
    __u32 count;
    struct timeval ival1, ival2;
    canid_t can_id;
    __u32 nframes;
    struct can_frame frames[0];
    }
    `bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
    `struct can_frame` definition). Must use native not standard types for packing.
    """
    bcm_cmd_msg_fmt = "@3I4l2I"
    # Pad the format so the header size is a multiple of 8 (alignment of
    # the trailing `frames` member).
    bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)

    def setUp(self):
        # Raw CAN socket; skip the whole test if the interface is missing
        # (binding is the only way to detect that).
        self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        self.addCleanup(self.s.close)
        try:
            self.s.bind((self.interface,))
        except OSError:
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
class SocketRDSTest(unittest.TestCase):
    """To be able to run this test, the `rds` kernel module must be loaded:
    # modprobe rds
    """
    # Receive buffer size used by the RDS tests.
    bufsize = 8192

    def setUp(self):
        # RDS support may be compiled out; a failed bind means "skip".
        self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(self.serv.close)
        try:
            self.port = support.bind_port(self.serv)
        except OSError:
            self.skipTest('unable to bind RDS socket')
class ThreadableTest:
    """Threadable Test class

    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:

        class NewClass (OldClass, ThreadableTest):
            pass

    This class defines two new fixture functions with obvious
    purposes for overriding:

        clientSetUp ()
        clientTearDown ()

    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:

        def testFoo(self):
            # Server portion

        def _testFoo(self):
            # Client portion

    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.

    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp().
    """

    def __init__(self):
        # Swap the true setup function: self.setUp/tearDown now point at the
        # threaded wrappers, which invoke the originals via name mangling.
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown

    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()

    def _setUp(self):
        # Keep the wait_threads context open across the whole test; it is
        # exited in _tearDown once the client thread has finished.
        self.wait_threads = support.wait_threads_exit()
        self.wait_threads.__enter__()

        # Events coordinating the server (main) and client threads, plus a
        # one-slot queue used to transfer client-side exceptions back here.
        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        self.queue = queue.Queue(1)
        self.server_crashed = False

        # Do some munging to start the client test.
        # Derive the client method name ("_testFoo") from the running test id.
        methodname = self.id()
        i = methodname.rfind('.')
        methodname = methodname[i+1:]
        test_method = getattr(self, '_' + methodname)
        self.client_thread = thread.start_new_thread(
            self.clientRun, (test_method,))

        try:
            self.__setUp()
        except:
            # Let the client know the server never came up, then re-raise.
            self.server_crashed = True
            raise
        finally:
            # Release the client whether or not server setup succeeded.
            self.server_ready.set()
        self.client_ready.wait()

    def _tearDown(self):
        self.__tearDown()
        # Wait for the client thread to fully finish before propagating any
        # exception it queued.
        self.done.wait()
        self.wait_threads.__exit__(None, None, None)

        if self.queue.qsize():
            exc = self.queue.get()
            raise exc

    def clientRun(self, test_func):
        # Runs in the client thread. Mirror of the server-side handshake.
        self.server_ready.wait()
        try:
            self.clientSetUp()
        except BaseException as e:
            # Ship the setup failure to the main thread and bail out.
            self.queue.put(e)
            self.clientTearDown()
            return
        finally:
            self.client_ready.set()
        if self.server_crashed:
            self.clientTearDown()
            return
        if not hasattr(test_func, '__call__'):
            raise TypeError("test_func must be a callable function")
        try:
            test_func()
        except BaseException as e:
            # Transfer any client-side failure to the main thread.
            self.queue.put(e)
        finally:
            self.clientTearDown()

    def clientSetUp(self):
        raise NotImplementedError("clientSetUp must be implemented.")

    def clientTearDown(self):
        # Signal _tearDown that the client is done, then kill this thread.
        self.done.set()
        thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
    # TCP server fixture plus a client socket driven from a second thread.

    def __init__(self, methodName='runTest'):
        # Both bases are initialized explicitly: ThreadableTest.__init__ must
        # run after TestCase.__init__ so it can wrap setUp/tearDown.
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread (see ThreadableTest).
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
    # UDP server fixture plus a client socket driven from a second thread.

    def __init__(self, methodName='runTest'):
        # Explicit base-class init order: ThreadableTest wraps setUp/tearDown.
        SocketUDPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread (see ThreadableTest).
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
    # CAN server fixture plus a client CAN socket in a second thread.

    def __init__(self, methodName='runTest'):
        SocketCANTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        try:
            self.cli.bind((self.interface,))
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
    # RDS server fixture plus a bound client RDS socket in a second thread.

    def __init__(self, methodName='runTest'):
        SocketRDSTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        try:
            # RDS sockets must be bound explicitly to send or receive data
            self.cli.bind((HOST, 0))
            self.cli_addr = self.cli.getsockname()
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
          'VSOCK sockets required for this test.')
# NOTE(review): CID 2 appears to be the host CID, so this runs the test only
# inside a guest — confirm against the platform's vsock constants.
@unittest.skipUnless(get_cid() != 2,
          "This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
    # Guest-side VSOCK stream test: server accepts, client connects and sends.

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.serv.close)
        self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
        self.serv.listen()
        # accept() blocks on the client, so release the client thread first.
        self.serverExplicitReady()
        self.conn, self.connaddr = self.serv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        # Give the server a moment to reach accept().
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        cid = get_cid()
        self.cli.connect((cid, VSOCKPORT))

    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
    """Socket tests for client-server connection.

    self.cli_conn is a client socket connected to the server.  The
    setUp() method guarantees that it is connected to the server.
    """

    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        ThreadedTCPSocketTest.setUp(self)
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        ThreadedTCPSocketTest.tearDown(self)

    def clientSetUp(self):
        # Runs in the client thread: connect and expose the connected socket
        # under the server-facing name.
        ThreadedTCPSocketTest.clientSetUp(self)
        self.cli.connect((HOST, self.port))
        self.serv_conn = self.cli

    def clientTearDown(self):
        self.serv_conn.close()
        self.serv_conn = None
        ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
    # Fixture built on socketpair(): self.serv and self.cli are the two ends.

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.serv, self.cli = socket.socketpair()

    def tearDown(self):
        self.serv.close()
        self.serv = None

    def clientSetUp(self):
        # Nothing to do: the client end was created in setUp().
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
    """A base class for socket tests.

    Subclasses must provide methods newSocket() to return a new socket
    and bindSock(sock) to bind it to an unused address.

    Creates a socket self.serv and sets self.serv_addr to its address.
    """

    def setUp(self):
        self.serv = self.newSocket()
        self.bindServer()

    def tearDown(self):
        self.serv.close()
        self.serv = None

    def bindServer(self):
        """Bind server socket and set self.serv_addr to its address."""
        self.bindSock(self.serv)
        self.serv_addr = self.serv.getsockname()
class SocketListeningTestMixin(SocketTestBase):
    """Mixin that puts the bound server socket into listening mode."""

    def setUp(self):
        super().setUp()
        self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
            ThreadableTest):
    """Mixin to add client socket and allow client/server tests.

    Client socket is self.cli and its address is self.cli_addr.  See
    ThreadableTest for usage information.
    """

    def __init__(self, *args, **kwargs):
        # Cooperative super() handles the TestCase chain; ThreadableTest is
        # initialized explicitly afterwards so it can wrap setUp/tearDown.
        super().__init__(*args, **kwargs)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread (see ThreadableTest).
        self.cli = self.newClientSocket()
        self.bindClient()

    def newClientSocket(self):
        """Return a new socket for use as client."""
        return self.newSocket()

    def bindClient(self):
        """Bind client socket and set self.cli_addr to its address."""
        self.bindSock(self.cli)
        self.cli_addr = self.cli.getsockname()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
            ThreadedSocketTestMixin):
    """Mixin to allow client/server stream tests with connected client.

    Server's socket representing connection to client is self.cli_conn
    and client's connection to server is self.serv_conn.  (Based on
    SocketConnectedTest.)
    """

    def setUp(self):
        super().setUp()
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        super().tearDown()

    def clientSetUp(self):
        # Runs in the client thread: connect to the server address recorded
        # by SocketTestBase.bindServer().
        super().clientSetUp()
        self.cli.connect(self.serv_addr)
        self.serv_conn = self.cli

    def clientTearDown(self):
        try:
            self.serv_conn.close()
            self.serv_conn = None
        except AttributeError:
            # clientSetUp() may have failed before serv_conn was assigned.
            pass
        super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
    """Base class for Unix-domain socket tests."""

    # This class is used for file descriptor passing tests, so we
    # create the sockets in a private directory so that other users
    # can't send anything that might be problematic for a privileged
    # user running the tests.

    def setUp(self):
        self.dir_path = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, self.dir_path)
        super().setUp()

    def bindSock(self, sock):
        sock_path = tempfile.mktemp(dir=self.dir_path)
        support.bind_unix_socket(sock, sock_path)
        self.addCleanup(support.unlink, sock_path)
class UnixStreamBase(UnixSocketTestBase):
    """Base class for Unix-domain SOCK_STREAM tests."""

    def newSocket(self):
        return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
    """Base class for IPv4 socket tests."""

    host = HOST

    def setUp(self):
        super().setUp()
        # Convenience alias for the port chosen by bind_port().
        self.port = self.serv_addr[1]

    def bindSock(self, sock):
        support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
    """Base class for TCP-over-IPv4 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
    """Base class for UDP-over-IPv4 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
    """Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                             socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
    """Base class for IPv6 socket tests."""

    host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
    """Base class for UDP-over-IPv6 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
    """Skip decorated test if condition is true, add client_skip decorator.

    If the decorated object is not a class, sets its attribute
    "client_skip" to a decorator which will return an empty function
    if the test is to be skipped, or the original function if it is
    not.  This can be used to avoid running the client part of a
    skipped test when using ThreadableTest.
    """
    def _noop_client(*args, **kwargs):
        pass

    def _skipping(obj):
        decorated = unittest.skip(reason)(obj)
        if not isinstance(obj, type):
            # Plain function: make the paired client half a no-op too.
            decorated.client_skip = lambda f: _noop_client
        return decorated

    def _passthrough(obj):
        if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
            obj.client_skip = lambda f: f
        return obj

    if condition:
        return _skipping
    return _passthrough
def requireAttrs(obj, *attributes):
    """Skip decorated test if obj is missing any of the given attributes.

    Sets client_skip attribute as skipWithClientIf() does.
    """
    absent = [attr for attr in attributes if not hasattr(obj, attr)]
    return skipWithClientIf(
        absent, "don't have " + ", ".join(attr for attr in absent))
def requireSocket(*args):
    """Skip decorated test if a socket cannot be created with given arguments.

    When an argument is given as a string, will use the value of that
    attribute of the socket module, or skip the test if it doesn't
    exist.  Sets client_skip attribute as skipWithClientIf() does.
    """
    err = None
    # String arguments name socket-module constants; missing names mean skip.
    missing = [arg for arg in args
               if isinstance(arg, str) and not hasattr(socket, arg)]
    if missing:
        err = "don't have " + ", ".join(name for name in missing)
    else:
        resolved = [getattr(socket, arg) if isinstance(arg, str) else arg
                    for arg in args]
        try:
            probe = socket.socket(*resolved)
        except OSError as e:
            # XXX: check errno?
            err = str(e)
        else:
            probe.close()
    return skipWithClientIf(
        err is not None,
        "can't create socket({0}): {1}".format(
            ", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv6toString(self):
    # inet_pton(AF_INET6, ...): valid textual forms (zero compression,
    # embedded IPv4) produce the expected 16 packed bytes; malformed
    # addresses raise.
    try:
        from socket import inet_pton, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_pton(AF_INET6, '::')
        except OSError as e:
            # WSAEINVAL (10022) here indicates no IPv6 stack.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_pton(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )

    self.assertEqual(b'\x00' * 16, f('::'))
    self.assertEqual(b'\x00' * 16, f('0::0'))
    self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
    self.assertEqual(
        b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
        f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
    )
    self.assertEqual(
        b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
        f('ad42:abc::127:0:254:2')
    )
    self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
    assertInvalid('0x20::')
    assertInvalid(':::')
    assertInvalid('::0::')
    assertInvalid('1::abc::')
    assertInvalid('1::abc::def')
    assertInvalid('1:2:3:4:5:6')
    assertInvalid('1:2:3:4:5:6:7:8:0')
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not sys.platform.startswith('aix'):
        assertInvalid('1:2:3:4:5:6:')
        assertInvalid('1:2:3:4:5:6:7:8:')

    # Embedded IPv4 (dotted-quad tail) forms.
    self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
        f('::254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
        f('42::a29b:254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
        f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
    )
    assertInvalid('255.254.253.252')
    assertInvalid('1::260.2.3.0')
    assertInvalid('1::0.be.e.0')
    assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
    assertInvalid('::1.2.3.4:0')
    assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv4(self):
    # inet_ntoa() and inet_ntop(AF_INET, ...) must agree: 4-byte packed
    # input yields dotted-quad text, any other length raises.
    from socket import inet_ntoa as f, inet_ntop, AF_INET
    g = lambda a: inet_ntop(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )

    self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
    self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
    self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
    assertInvalid(f, b'\x00' * 3)
    assertInvalid(f, b'\x00' * 5)
    assertInvalid(f, b'\x00' * 16)
    # bytearray input is accepted as well as bytes.
    self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))

    self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
    self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
    assertInvalid(g, b'\x00' * 3)
    assertInvalid(g, b'\x00' * 5)
    assertInvalid(g, b'\x00' * 16)
    self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv6(self):
    # inet_ntop(AF_INET6, ...): 16-byte packed input yields the
    # compressed textual form; any other length raises.
    try:
        from socket import inet_ntop, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_ntop(AF_INET6, b'\x00' * 16)
        except OSError as e:
            # WSAEINVAL (10022) here indicates no IPv6 stack.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_ntop(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )

    self.assertEqual('::', f(b'\x00' * 16))
    self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
    self.assertEqual(
        'aef:b01:506:1001:ffff:9997:55:170',
        f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
    )
    # bytearray input is accepted as well as bytes.
    self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))

    assertInvalid(b'\x12' * 15)
    assertInvalid(b'\x12' * 17)
    assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
    """getsockname() on a bound socket reports the bound address and port."""
    # Testing getsockname()
    port = support.find_unused_port()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.bind(("0.0.0.0", port))
    name = sock.getsockname()
    # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
    # it reasonable to get the host's addr in addition to 0.0.0.0.
    # At least for eCos. This is required for the S/390 to pass.
    try:
        my_ip_addr = socket.gethostbyname(socket.gethostname())
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
    self.assertEqual(name[1], port)
def testGetSockOpt(self):
    # Testing getsockopt()
    # A freshly created socket should have SO_REUSEADDR == 0 (off).
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
    """setsockopt(SO_REUSEADDR, 1) must be observable via getsockopt()."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Read the option back; it must now be non-zero.
    value = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    self.assertFalse(value == 0, "failed to set reuse mode")
def testSendAfterClose(self):
    """send() on a closed socket (with a timeout set) must raise OSError."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(1)
    sock.close()
    with self.assertRaises(OSError):
        sock.send(b"spam")
def testCloseException(self):
    """Closing a socket whose fd was already closed raises EBADF/ENOTSOCK."""
    sock = socket.socket()
    sock.bind((socket._LOCALHOST, 0))
    # Wrap the same fd in a second socket object and close it; the
    # original socket's fd is now invalid.
    socket.socket(fileno=sock.fileno()).close()
    try:
        sock.close()
    except OSError as err:
        # Winsock apparently raises ENOTSOCK
        self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
    else:
        self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
    """The .family, .type and .proto attributes reflect the constructor args."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.assertEqual(sock.family, socket.AF_INET)
    if not hasattr(socket, 'SOCK_CLOEXEC'):
        self.assertEqual(sock.type, socket.SOCK_STREAM)
    else:
        # The platform may fold SOCK_CLOEXEC into the reported type.
        acceptable = (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                      socket.SOCK_STREAM)
        self.assertIn(sock.type, acceptable)
    self.assertEqual(sock.proto, 0)
    sock.close()
def test_getsockaddrarg(self):
    """bind() rejects out-of-range port numbers with OverflowError."""
    sock = socket.socket()
    self.addCleanup(sock.close)
    port = support.find_unused_port()
    big_port = port + 65536
    neg_port = port - 65536
    self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
    self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
    # Since find_unused_port() is inherently subject to race conditions, we
    # call it a couple times if necessary.
    for i in itertools.count():
        port = support.find_unused_port()
        try:
            sock.bind((HOST, port))
        except OSError as e:
            # Retry on EADDRINUSE, but give up after 5 attempts.
            if e.errno != errno.EADDRINUSE or i == 5:
                raise
        else:
            break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
    """Windows sockets expose ioctl() and the SIO_*/RCVALL_* constants."""
    self.assertTrue(hasattr(socket.socket, 'ioctl'))
    self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
    self.assertTrue(hasattr(socket, 'RCVALL_ON'))
    self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
    self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
    s = socket.socket()
    self.addCleanup(s.close)
    # An unknown control code is rejected.
    self.assertRaises(ValueError, s.ioctl, -1, None)
    s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
                     'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
    """ioctl(SIO_LOOPBACK_FAST_PATH) accepts a boolean and rejects None."""
    s = socket.socket()
    self.addCleanup(s.close)
    try:
        s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
    except OSError as exc:
        WSAEOPNOTSUPP = 10045
        if exc.winerror == WSAEOPNOTSUPP:
            # Fixed ungrammatical skip message (was "doesn't implemented").
            self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                          "not implemented in this Windows version")
        raise
    self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
    """Exercise getaddrinfo(): result shape, filters, and keyword args."""
    try:
        socket.getaddrinfo('localhost', 80)
    except socket.gaierror as err:
        if err.errno == socket.EAI_SERVICE:
            # see http://bugs.python.org/issue1282647
            self.skipTest("buggy libc version")
        raise
    # len of every sequence is supposed to be == 5
    for info in socket.getaddrinfo(HOST, None):
        self.assertEqual(len(info), 5)
    # host can be a domain name, a string representation of an
    # IPv4/v6 address or None
    socket.getaddrinfo('localhost', 80)
    socket.getaddrinfo('127.0.0.1', 80)
    socket.getaddrinfo(None, 80)
    if support.IPV6_ENABLED:
        socket.getaddrinfo('::1', 80)
    # port can be a string service name such as "http", a numeric
    # port number or None
    # Issue #26936: Android getaddrinfo() was broken before API level 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        socket.getaddrinfo(HOST, "http")
    socket.getaddrinfo(HOST, 80)
    socket.getaddrinfo(HOST, None)
    # test family and socktype filters
    infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
    for family, type, _, _, _ in infos:
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(str(family), 'AddressFamily.AF_INET')
        self.assertEqual(type, socket.SOCK_STREAM)
        self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
    infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    for _, socktype, _, _, _ in infos:
        self.assertEqual(socktype, socket.SOCK_STREAM)
    # test proto and flags arguments
    socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    # a server willing to support both IPv4 and IPv6 will
    # usually do this
    socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                       socket.AI_PASSIVE)
    # test keyword arguments: each keyword form must match its
    # positional equivalent.
    a = socket.getaddrinfo(HOST, None)
    b = socket.getaddrinfo(host=HOST, port=None)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, socket.AF_INET)
    b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
    b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                           type=socket.SOCK_STREAM, proto=0,
                           flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    # Issue #6697.
    self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
    # Issue 17269: test workaround for OS X platform bug segfault
    if hasattr(socket, 'AI_NUMERICSERV'):
        try:
            # The arguments here are undefined and the call may succeed
            # or fail. All we care here is that it doesn't segfault.
            socket.getaddrinfo("localhost", None, 0, 0, 0,
                               socket.AI_NUMERICSERV)
        except socket.gaierror:
            pass
def test_getnameinfo(self):
    """getnameinfo() accepts only numeric addresses, not host names."""
    with self.assertRaises(OSError):
        socket.getnameinfo(('mail.python.org', 0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
                     'network is not enabled')
def test_idna(self):
    """Non-ASCII (IDNA) host names work with the resolver APIs."""
    # Check for internet access before running test
    # (issue #12804, issue #25138).
    with support.transient_internet('python.org'):
        socket.gethostbyname('python.org')

    # these should all be successful
    domain = 'испытание.pythontest.net'
    socket.gethostbyname(domain)
    socket.gethostbyname_ex(domain)
    socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
    # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
    # have a reverse entry yet
    # socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
    """Check sendall() behaviour when interrupted by SIGALRM.

    With no timeout, an exception raised by the signal handler must
    propagate out of sendall(); with a timeout, a handled signal must
    not prevent socket.timeout from being raised.
    """
    # socketpair() is not strictly required, but it makes things easier.
    if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
        self.skipTest("signal.alarm and socket.socketpair required for this test")
    # Our signal handlers clobber the C errno by calling a math function
    # with an invalid domain value.
    def ok_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
    def raising_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
        1 // 0
    c, s = socket.socketpair()
    old_alarm = signal.signal(signal.SIGALRM, raising_handler)
    try:
        if with_timeout:
            # Just above the one second minimum for signal.alarm
            c.settimeout(1.5)
        with self.assertRaises(ZeroDivisionError):
            signal.alarm(1)
            c.sendall(b"x" * support.SOCK_MAX_SIZE)
        if with_timeout:
            signal.signal(signal.SIGALRM, ok_handler)
            signal.alarm(1)
            self.assertRaises(socket.timeout, c.sendall,
                              b"x" * support.SOCK_MAX_SIZE)
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_alarm)
        # Bug fix: close both sockets even when an assertion above fails;
        # previously they leaked because the closes sat after the try block.
        c.close()
        s.close()
def test_sendall_interrupted(self):
    # No timeout: the handler's exception must abort sendall().
    self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
    # With a timeout: sendall() must still time out despite handled signals.
    self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
    """Dropping the last reference to an open socket emits ResourceWarning."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    r = repr(sock)
    with self.assertWarns(ResourceWarning) as cm:
        sock = None
        support.gc_collect()
    # The warning message identifies the leaked socket.
    self.assertIn(r, str(cm.warning.args[0]))
    # An open socket file object gets dereferenced after the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    f = sock.makefile('rb')
    r = repr(sock)
    sock = None
    support.gc_collect()
    with self.assertWarns(ResourceWarning):
        f = None
        support.gc_collect()
def test_name_closed_socketio(self):
    """repr() of a closed socket file object shows name=-1."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        fp = sock.makefile("rb")
        fp.close()
        self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
    """Capability queries raise ValueError once the socket file is closed."""
    with socket.socket() as sock:
        fp = sock.makefile("rb", buffering=0)
        # Open: a readable, non-writable, non-seekable stream.
        self.assertTrue(fp.readable())
        self.assertFalse(fp.writable())
        self.assertFalse(fp.seekable())
        fp.close()
        # Closed: every capability query must fail.
        for probe in (fp.readable, fp.writable, fp.seekable):
            self.assertRaises(ValueError, probe)
def test_socket_close(self):
    """socket.close(fd) closes the fd; bad arguments raise."""
    sock = socket.socket()
    try:
        sock.bind((HOST, 0))
        socket.close(sock.fileno())
        # The underlying fd is gone, so socket methods must now fail.
        with self.assertRaises(OSError):
            sock.listen(1)
    finally:
        with self.assertRaises(OSError):
            # sock.close() fails with EBADF
            sock.close()
    with self.assertRaises(TypeError):
        socket.close(None)
    with self.assertRaises(OSError):
        socket.close(-1)
def test_makefile_mode(self):
    """makefile() preserves each supported mode string on the returned file."""
    valid_modes = ('r', 'rb', 'rw', 'w', 'wb')
    for mode in valid_modes:
        with self.subTest(mode=mode):
            with socket.socket() as sock:
                with sock.makefile(mode) as fp:
                    self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
    """makefile() rejects unsupported mode strings with ValueError."""
    invalid_modes = ('rt', 'x', '+', 'a')
    for mode in invalid_modes:
        with self.subTest(mode=mode):
            with socket.socket() as sock, \
                    self.assertRaisesRegex(ValueError, 'invalid mode'):
                sock.makefile(mode)
def test_pickle(self):
    """Sockets are not picklable, but AF_*/SOCK_* enums round-trip."""
    sock = socket.socket()
    with sock:
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertRaises(TypeError, pickle.dumps, sock, protocol)
    for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
        family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
        self.assertEqual(family, socket.AF_INET)
        type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
        self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
    """listen() accepts zero/negative backlogs and works with no argument."""
    def bound_server():
        # Fresh TCP socket bound to an ephemeral port.
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.bind((HOST, 0))
        return server

    for backlog in (0, -1):
        with bound_server() as server:
            server.listen(backlog)
    # The backlog argument is optional.
    with bound_server() as server:
        server.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
    # Issue 15989: a backlog that does not fit into a C int must raise
    # OverflowError rather than being silently truncated.
    import _testcapi
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((HOST, 0))
    self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
    srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
    # Out-of-range IPv6 flowinfo values must raise OverflowError.
    self.assertRaises(OverflowError, socket.getnameinfo,
                      (support.HOSTv6, 0, 0xffffffff), 0)
    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
        self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
    """getaddrinfo() normalizes IPv6 hex digits to lower case."""
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D',  # Note capital letter `D`.
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    hasattr(socket, 'if_nameindex'),
    'if_nameindex is not supported')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
    """A symbolic scope id (addr%ifname) resolves to the interface index."""
    # Just pick up any network interface (Linux, Mac OS X)
    (ifindex, test_interface) = socket.if_nameindex()[0]
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + test_interface,
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
    """A numeric scope id (addr%N) carries through to sockaddr[3]."""
    # Also works on Linux and Mac OS X, but is not documented (?)
    # Windows, Linux and Max OS X allow nonexistent interface numbers here.
    ifindex = 42
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + str(ifindex),
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    hasattr(socket, 'if_nameindex'),
    'if_nameindex is not supported')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
    """getnameinfo() renders the scope id symbolically (addr%ifname)."""
    # Just pick up any network interface.
    (ifindex, test_interface) = socket.if_nameindex()[0]
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
    """getnameinfo() renders a numeric scope id as addr%N."""
    # Also works on Linux (undocumented), but does not work on Mac OS X
    # Windows and Linux allow nonexistent interface numbers here.
    ifindex = 42
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
    """AF_*/SOCK_* attributes stringify with enum-style reprs."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        family_str = str(s.family)
        type_str = str(s.type)
        self.assertEqual(family_str, 'AddressFamily.AF_INET')
        self.assertEqual(type_str, 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
    """s.type stays SOCK_STREAM no matter how blocking/timeout is toggled."""
    nonblock_flag = getattr(socket, 'SOCK_NONBLOCK', 0)
    cloexec_flag = getattr(socket, 'SOCK_CLOEXEC', 0)
    requested_type = socket.SOCK_STREAM | nonblock_flag | cloexec_flag
    with socket.socket(socket.AF_INET, requested_type) as s:
        self.assertEqual(s.type, socket.SOCK_STREAM)
        # Flip blocking mode every possible way; the reported type must
        # never change.
        for toggle in (lambda: s.settimeout(1),
                       lambda: s.settimeout(0),
                       lambda: s.setblocking(True),
                       lambda: s.setblocking(False)):
            toggle()
            self.assertEqual(s.type, socket.SOCK_STREAM)
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
    # Test that when created with a family that's not one of the known
    # AF_*/SOCK_* constants, socket.family just returns the number.
    #
    # To do this we fool socket.socket into believing it already has an
    # open fd because on this path it doesn't actually verify the family and
    # type and populates the socket object.
    #
    # On Windows this trick won't work, so the test is skipped.
    fd, path = tempfile.mkstemp()
    self.addCleanup(os.unlink, path)
    # One past the largest known value in each enum.
    unknown_family = max(socket.AddressFamily.__members__.values()) + 1

    unknown_type = max(
        kind
        for name, kind in socket.SocketKind.__members__.items()
        if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
    ) + 1

    with socket.socket(
            family=unknown_family, type=unknown_type, proto=23,
            fileno=fd) as s:
        self.assertEqual(s.family, unknown_family)
        self.assertEqual(s.type, unknown_type)
        # some OS like macOS ignore proto
        self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
    """_sendfile_use_sendfile() fails cleanly on bad file descriptors."""
    class File:
        # Minimal stand-in exposing only fileno().
        def __init__(self, fd):
            self.fd = fd

        def fileno(self):
            return self.fd
    with socket.socket() as sock:
        fd = os.open(os.curdir, os.O_RDONLY)
        os.close(fd)
        # A closed fd makes the implementation give up on sendfile.
        with self.assertRaises(socket._GiveupOnSendfile):
            sock._sendfile_use_sendfile(File(fd))
        with self.assertRaises(OverflowError):
            sock._sendfile_use_sendfile(File(2**1000))
        with self.assertRaises(TypeError):
            sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
    """Helper: rebuild a socket from s.fileno() and check its metadata."""
    self.assertEqual(s.family, family)
    self.assertEqual(s.type, stype)

    fd = s.fileno()
    s2 = socket.socket(fileno=fd)
    self.addCleanup(s2.close)
    # detach old fd to avoid double close
    s.detach()
    self.assertEqual(s2.family, family)
    self.assertEqual(s2.type, stype)
    self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
    """socket(fileno=...) reconstructs family/type for each address family."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(s.close)
    s.bind((support.HOST, 0))
    self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)

    if hasattr(socket, "SOCK_DGRAM"):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind((support.HOST, 0))
        self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)

    if support.IPV6_ENABLED:
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        s.bind((support.HOSTv6, 0, 0, 0))
        self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)

    if hasattr(socket, "AF_UNIX"):
        tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmpdir)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        s.bind(os.path.join(tmpdir, 'socket'))
        self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
    """Smoke tests for AF_CAN sockets that need no CAN traffic."""

    def testCrucialConstants(self):
        # The module must expose the core CAN constants.
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_RAW

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCMConstants(self):
        socket.CAN_BCM

        # opcodes
        socket.CAN_BCM_TX_SETUP     # create (cyclic) transmission task
        socket.CAN_BCM_TX_DELETE    # remove (cyclic) transmission task
        socket.CAN_BCM_TX_READ      # read properties of (cyclic) transmission task
        socket.CAN_BCM_TX_SEND      # send one CAN frame
        socket.CAN_BCM_RX_SETUP     # create RX content filter subscription
        socket.CAN_BCM_RX_DELETE    # remove RX content filter subscription
        socket.CAN_BCM_RX_READ      # read properties of RX content filter subscription
        socket.CAN_BCM_TX_STATUS    # reply to TX_READ request
        socket.CAN_BCM_TX_EXPIRED   # notification on performed transmissions (count=0)
        socket.CAN_BCM_RX_STATUS    # reply to RX_READ request
        socket.CAN_BCM_RX_TIMEOUT   # cyclic message is absent
        socket.CAN_BCM_RX_CHANGED   # updated CAN frame (detected content change)

    def testCreateSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testCreateBCMSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
            pass

    def testBindAny(self):
        # An empty interface name binds to all interfaces.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.bind(('', ))

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            self.assertRaisesRegex(OSError, 'interface name too long',
                                   s.bind, ('x' * 1024,))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
                         'socket.CAN_RAW_LOOPBACK required for this test.')
    def testLoopback(self):
        # CAN_RAW_LOOPBACK is a read/write boolean option.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            for loopback in (0, 1):
                s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
                             loopback)
                self.assertEqual(loopback,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
                         'socket.CAN_RAW_FILTER required for this test.')
    def testFilter(self):
        # CAN_RAW_FILTER round-trips through set/getsockopt; bytearray
        # values are accepted too.
        can_id, can_mask = 0x200, 0x700
        can_filter = struct.pack("=II", can_id, can_mask)
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
            self.assertEqual(can_filter,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
    """Client/server tests that exchange frames over a CAN interface."""

    def __init__(self, methodName='runTest'):
        ThreadedCANSocketTest.__init__(self, methodName=methodName)

    @classmethod
    def build_can_frame(cls, can_id, data):
        """Build a CAN frame: pad the payload to 8 bytes and pack it."""
        can_dlc = len(data)
        data = data.ljust(8, b'\x00')
        return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)

    @classmethod
    def dissect_can_frame(cls, frame):
        """Dissect a CAN frame into (can_id, can_dlc, payload)."""
        can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
        return (can_id, can_dlc, data[:can_dlc])

    def testSendFrame(self):
        # Server side: frame and sender address must match what was sent.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        self.assertEqual(addr[0], self.interface)
        self.assertEqual(addr[1], socket.AF_CAN)

    def _testSendFrame(self):
        # Client side: send a 5-byte payload frame.
        self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
        self.cli.send(self.cf)

    def testSendMaxFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)

    def _testSendMaxFrame(self):
        # 8 bytes is the classic CAN maximum payload used here.
        self.cf = self.build_can_frame(0x00, b'\x07' * 8)
        self.cli.send(self.cf)

    def testSendMultiFrames(self):
        # Frames must arrive separately, in the order they were sent.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf1, cf)

        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf2, cf)

    def _testSendMultiFrames(self):
        self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
        self.cli.send(self.cf1)

        self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
        self.cli.send(self.cf2)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def _testBCM(self):
        # Client side: receive the frame sent through the BCM socket.
        cf, addr = self.cli.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        can_id, can_dlc, data = self.dissect_can_frame(cf)
        self.assertEqual(self.can_id, can_id)
        self.assertEqual(self.data, data)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCM(self):
        # Build a TX_SEND command (header + one frame) and send it via a
        # broadcast-manager socket.
        bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
        self.addCleanup(bcm.close)
        bcm.connect((self.interface,))
        self.can_id = 0x123
        self.data = bytes([0xc0, 0xff, 0xee])
        self.cf = self.build_can_frame(self.can_id, self.data)
        opcode = socket.CAN_BCM_TX_SEND
        flags = 0
        count = 0
        ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
        bcm_can_id = 0x0222
        nframes = 1
        assert len(self.cf) == 16
        header = struct.pack(self.bcm_cmd_msg_fmt,
                             opcode,
                             flags,
                             count,
                             ival1_seconds,
                             ival1_usec,
                             ival2_seconds,
                             ival2_usec,
                             bcm_can_id,
                             nframes,
                             )
        header_plus_frame = header + self.cf
        bytes_sent = bcm.send(header_plus_frame)
        self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
    """Creation/bind tests for CAN ISO-TP sockets (bind needs vcan0)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Virtual CAN interface that testBind expects to exist.
        self.interface = "vcan0"

    def testCrucialConstants(self):
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_ISOTP
        socket.SOCK_DGRAM

    def testCreateSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
                         'socket.CAN_ISOTP required for this test.')
    def testCreateISOTPSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            pass

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            with self.assertRaisesRegex(OSError, 'interface name too long'):
                s.bind(('x' * 1024, 1, 2))

    def testBind(self):
        try:
            with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
                # (interface, rx_addr, tx_addr) triple round-trips through
                # getsockname().
                addr = self.interface, 0x123, 0x456
                s.bind(addr)
                self.assertEqual(s.getsockname(), addr)
        except OSError as e:
            if e.errno == errno.ENODEV:
                self.skipTest('network interface `%s` does not exist' %
                              self.interface)
            else:
                raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
    """Smoke tests for AF_RDS sockets that need no peer."""

    def testCrucialConstants(self):
        socket.AF_RDS
        socket.PF_RDS

    def testCreateSocket(self):
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            pass

    def testSocketBufferSize(self):
        # Setting send/receive buffer sizes must be accepted.
        bufsize = 16384
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
    """Client/server datagram tests over RDS sockets."""

    def __init__(self, methodName='runTest'):
        ThreadedRDSSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        super().setUp()
        self.evt = threading.Event()

    def testSendAndRecv(self):
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
        self.assertEqual(self.cli_addr, addr)

    def _testSendAndRecv(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    def testPeek(self):
        # MSG_PEEK must leave the datagram readable a second time.
        data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
        self.assertEqual(self.data, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testPeek(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    @requireAttrs(socket.socket, 'recvmsg')
    def testSendAndRecvMsg(self):
        data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
        self.assertEqual(self.data, data)

    @requireAttrs(socket.socket, 'sendmsg')
    def _testSendAndRecvMsg(self):
        self.data = b'hello ' * 10
        self.cli.sendmsg([self.data], (), 0, (HOST, self.port))

    def testSendAndRecvMulti(self):
        # Datagrams arrive separately and in order.
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data1, data)

        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data2, data)

    def _testSendAndRecvMulti(self):
        self.data1 = b'bacon'
        self.cli.sendto(self.data1, 0, (HOST, self.port))

        self.data2 = b'egg'
        self.cli.sendto(self.data2, 0, (HOST, self.port))

    def testSelect(self):
        # A pending datagram makes the socket readable for select().
        r, w, x = select.select([self.serv], [], [], 3.0)
        self.assertIn(self.serv, r)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testSelect(self):
        self.data = b'select'
        self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
                     'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
    """Basic create/bind/connect tests for AF_QIPCRTR sockets."""

    def testCrucialConstants(self):
        socket.AF_QIPCRTR

    def testCreateSocket(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            pass

    def testUnbound(self):
        # A fresh socket has no port assigned yet.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertEqual(s.getsockname()[1], 0)

    def testBindSock(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            support.bind_port(s, host=s.getsockname()[0])
            self.assertNotEqual(s.getsockname()[1], 0)

    def testInvalidBindSock(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertRaises(OSError, support.bind_port, s, host=-2)

    def testAutoBindSock(self):
        # connect() implicitly binds the socket to a port.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            s.connect((123, 123))
            self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
                     'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
    """Constant and buffer-size tests for AF_VSOCK sockets."""

    def testCrucialConstants(self):
        socket.AF_VSOCK

    def testVSOCKConstants(self):
        socket.SO_VM_SOCKETS_BUFFER_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
        socket.VMADDR_CID_ANY
        socket.VMADDR_PORT_ANY
        socket.VMADDR_CID_HOST
        socket.VM_SOCKETS_INVALID_VERSION
        socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID

    def testCreateSocket(self):
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            pass

    def testSocketBufferSize(self):
        # Doubling each buffer-size option must round-trip via getsockopt().
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            orig_max = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
            orig = s.getsockopt(socket.AF_VSOCK,
                                socket.SO_VM_SOCKETS_BUFFER_SIZE)
            orig_min = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)

            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)

            self.assertEqual(orig_max * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
            self.assertEqual(orig * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_SIZE))
            self.assertEqual(orig_min * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
    """Basic operations on a connected TCP socket pair.

    Each ``test*`` method runs against ``cli_conn`` while its paired
    ``_test*`` method drives the other endpoint (``serv_conn``) from a
    second thread (pairing provided by the SocketConnectedTest base).
    """

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2

        self.assertEqual(msg, MSG)

    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)

    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2

        self.assertEqual(msg, MSG)

    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)

    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP
        # Read until EOF (empty recv) and compare the accumulated bytes.
        msg = b''
        while 1:
            read = self.cli_conn.recv(1024)
            if not read:
                break
            msg += read

        self.assertEqual(msg, b'f' * 2048)

    def _testSendAll(self):
        big_chunk = b'f' * 2048
        self.serv_conn.sendall(big_chunk)

    def testFromFd(self):
        # Testing fromfd(): a socket rebuilt from the raw FD must be able
        # to receive the message sent to the original connection.
        fd = self.cli_conn.fileno()
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        self.assertIsInstance(sock, socket.socket)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testFromFd(self):
        self.serv_conn.send(MSG)

    def testDup(self):
        # Testing dup(): the duplicate shares the underlying connection.
        sock = self.cli_conn.dup()
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDup(self):
        self.serv_conn.send(MSG)

    def testShutdown(self):
        # Testing shutdown()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
        # wait for _testShutdown to finish: on OS X, when the server
        # closes the connection the client also becomes disconnected,
        # and the client's shutdown call will fail. (Issue #4397.)
        self.done.wait()

    def _testShutdown(self):
        self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)

    # Reuse testShutdown as the server half of the CPython-only overflow
    # variant below (same server-side behaviour, different client half).
    testShutdown_overflow = support.cpython_only(testShutdown)

    @support.cpython_only
    def _testShutdown_overflow(self):
        import _testcapi
        self.serv_conn.send(MSG)
        # Issue 15989
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          2 + (_testcapi.UINT_MAX + 1))
        self.serv_conn.shutdown(2)

    def testDetach(self):
        # Testing detach(): returns the FD and closes the socket object
        # without closing the underlying file descriptor.
        fileno = self.cli_conn.fileno()
        f = self.cli_conn.detach()
        self.assertEqual(f, fileno)
        # cli_conn cannot be used anymore...
        self.assertTrue(self.cli_conn._closed)
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
        self.cli_conn.close()
        # ...but we can create another socket using the (still open)
        # file descriptor
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDetach(self):
        self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
    """Basic sendto()/recv()/recvfrom() behaviour over UDP.

    test* methods receive on the server socket; the paired _test* methods
    send from the client socket in the other thread.
    """

    def __init__(self, methodName='runTest'):
        ThreadedUDPSocketTest.__init__(self, methodName=methodName)

    def testSendtoAndRecv(self):
        # The datagram sent by the client half must arrive intact.
        received = self.serv.recv(len(MSG))
        self.assertEqual(received, MSG)

    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFrom(self):
        # recvfrom() must return the full payload (sender address ignored).
        received, sender = self.serv.recvfrom(len(MSG))
        self.assertEqual(received, MSG)

    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFromNegative(self):
        # Negative lengths passed to recvfrom should give ValueError.
        self.assertRaises(ValueError, self.serv.recvfrom, -1)

    def _testRecvFromNegative(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
    """Shared plumbing for the sendmsg()/recvmsg() test hierarchy.

    Concrete subclasses supply ``cli_sock``/``serv_sock`` (and their
    addresses) and may override the ``msg_flags_*`` attributes and the
    helper methods below to match their socket family and type.
    """

    # Time in seconds to wait before considering a test failed, or
    # None for no timeout.  Not all tests actually set a timeout.
    fail_timeout = 3.0

    def setUp(self):
        # Event used by some tests to synchronise the two threads.
        self.misc_event = threading.Event()
        super().setUp()

    def sendToServer(self, msg):
        # Send msg to the server.
        return self.cli_sock.send(msg)

    # Tuple of alternative default arguments for sendmsg() when called
    # via sendmsgToServer() (e.g. to include a destination address).
    sendmsg_to_server_defaults = ()

    def sendmsgToServer(self, *args):
        # Call sendmsg() on self.cli_sock with the given arguments,
        # filling in any arguments which are not supplied with the
        # corresponding items of self.sendmsg_to_server_defaults, if
        # any.
        return self.cli_sock.sendmsg(
            *(args + self.sendmsg_to_server_defaults[len(args):]))

    def doRecvmsg(self, sock, bufsize, *args):
        # Call recvmsg() on sock with given arguments and return its
        # result.  Should be used for tests which can use either
        # recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
        # this method with one which emulates it using recvmsg_into(),
        # thus allowing the same test to be used for both methods.
        result = sock.recvmsg(bufsize, *args)
        self.registerRecvmsgResult(result)
        return result

    def registerRecvmsgResult(self, result):
        # Called by doRecvmsg() with the return value of recvmsg() or
        # recvmsg_into().  Can be overridden to arrange cleanup based
        # on the returned ancillary data, for instance.
        pass

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer.
        self.assertEqual(addr1, addr2)

    # Flags that are normally unset in msg_flags.  Built at class-creation
    # time; getattr() default of 0 tolerates platforms lacking a flag.
    msg_flags_common_unset = 0
    for name in ("MSG_CTRUNC", "MSG_OOB"):
        msg_flags_common_unset |= getattr(socket, name, 0)

    # Flags that are normally set
    msg_flags_common_set = 0

    # Flags set when a complete record has been received (e.g. MSG_EOR
    # for SCTP)
    msg_flags_eor_indicator = 0

    # Flags set when a complete record has not been received
    # (e.g. MSG_TRUNC for datagram sockets)
    msg_flags_non_eor_indicator = 0

    def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
        # Method to check the value of msg_flags returned by recvmsg[_into]().
        #
        # Checks that all bits in msg_flags_common_set attribute are
        # set in "flags" and all bits in msg_flags_common_unset are
        # unset.
        #
        # The "eor" argument specifies whether the flags should
        # indicate that a full record (or datagram) has been received.
        # If "eor" is None, no checks are done; otherwise, checks
        # that:
        #
        #  * if "eor" is true, all bits in msg_flags_eor_indicator are
        #    set and all bits in msg_flags_non_eor_indicator are unset
        #
        #  * if "eor" is false, all bits in msg_flags_non_eor_indicator
        #    are set and all bits in msg_flags_eor_indicator are unset
        #
        # If "checkset" and/or "checkunset" are supplied, they require
        # the given bits to be set or unset respectively, overriding
        # what the attributes require for those bits.
        #
        # If any bits are set in "ignore", they will not be checked,
        # regardless of the other inputs.
        #
        # Will raise Exception if the inputs require a bit to be both
        # set and unset, and it is not ignored.

        defaultset = self.msg_flags_common_set
        defaultunset = self.msg_flags_common_unset

        if eor:
            defaultset |= self.msg_flags_eor_indicator
            defaultunset |= self.msg_flags_non_eor_indicator
        elif eor is not None:
            defaultset |= self.msg_flags_non_eor_indicator
            defaultunset |= self.msg_flags_eor_indicator

        # Function arguments override defaults
        defaultset &= ~checkunset
        defaultunset &= ~checkset

        # Merge arguments with remaining defaults, and check for conflicts
        checkset |= defaultset
        checkunset |= defaultunset
        inboth = checkset & checkunset & ~ignore
        if inboth:
            raise Exception("contradictory set, unset requirements for flags "
                            "{0:#x}".format(inboth))

        # Compare with given msg_flags value
        mask = (checkset | checkunset) & ~ignore
        self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
    """Mixin that reroutes doRecvmsg() through recvmsg_into().

    Receives into a single bytearray and rebuilds the
    (data, ancdata, flags, addr) shape that recvmsg() would return, so
    every recvmsg() test doubles as a recvmsg_into() test.
    """

    def doRecvmsg(self, sock, bufsize, *args):
        scratch = bytearray(bufsize)
        nbytes, *trailer = sock.recvmsg_into([scratch], *args)
        self.registerRecvmsgResult((nbytes, *trailer))
        # The reported byte count must fit within the supplied buffer.
        self.assertGreaterEqual(nbytes, 0)
        self.assertLessEqual(nbytes, bufsize)
        return (bytes(scratch[:nbytes]), *trailer)
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
    """msg_flags expectations specific to datagram sockets."""

    @property
    def msg_flags_non_eor_indicator(self):
        # A truncated datagram is signalled by MSG_TRUNC.
        return socket.MSG_TRUNC | super().msg_flags_non_eor_indicator
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
    """msg_flags expectations specific to SCTP sockets."""

    @property
    def msg_flags_eor_indicator(self):
        # A complete SCTP record is signalled by MSG_EOR.
        return socket.MSG_EOR | super().msg_flags_eor_indicator
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
    """Maps unconnected sockets onto the generic test interface.

    Users supply sockets on attributes ``cli`` and ``serv``; this class
    exposes them as ``cli_sock``/``serv_sock`` and fills in the server
    address when sending.
    """

    @property
    def serv_sock(self):
        return self.serv

    @property
    def cli_sock(self):
        return self.cli

    @property
    def sendmsg_to_server_defaults(self):
        # (buffers, ancdata, flags, address) - only the address matters.
        return ([], [], 0, self.serv_addr)

    def sendToServer(self, msg):
        # Unconnected sockets need an explicit destination address.
        return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
    """Maps connected sockets onto the generic test interface.

    Users supply sockets on attributes ``serv_conn`` and ``cli_conn``
    (the connections *to* the server and the client); these are exposed
    as ``cli_sock`` and ``serv_sock`` respectively.
    """

    @property
    def serv_sock(self):
        # The server's end is the connection to the client.
        return self.cli_conn

    @property
    def cli_sock(self):
        # The client's end is the connection to the server.
        return self.serv_conn

    def checkRecvmsgAddress(self, addr1, addr2):
        # Address is currently "unspecified" for a connected socket,
        # so we don't examine it
        pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
    # Base class to set a timeout on server's socket.

    def setUp(self):
        """Apply fail_timeout to the server socket so a stalled test
        aborts instead of hanging the run."""
        super().setUp()
        self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
    """Tests for sendmsg() usable with any socket type that do not
    involve recvmsg() or recvmsg_into().

    ``test*`` methods check what the server receives; the paired
    ``_test*`` methods perform the sendmsg() calls on the client socket.
    The b"done" messages act as a handshake so the server half does not
    finish before the client has run all its negative checks.
    """

    def testSendmsg(self):
        # Send a simple message with sendmsg().
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))

    def testSendmsgDataGenerator(self):
        # Send from buffer obtained from a generator (not a sequence).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgDataGenerator(self):
        self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
                         len(MSG))

    def testSendmsgAncillaryGenerator(self):
        # Gather (empty) ancillary data from a generator.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgAncillaryGenerator(self):
        self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
                         len(MSG))

    def testSendmsgArray(self):
        # Send data from an array instead of the usual bytes object.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgArray(self):
        self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
                         len(MSG))

    def testSendmsgGather(self):
        # Send message data from more than one buffer (gather write).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgGather(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))

    def testSendmsgBadArgs(self):
        # Check that sendmsg() rejects invalid arguments.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadArgs(self):
        self.assertRaises(TypeError, self.cli_sock.sendmsg)
        self.assertRaises(TypeError, self.sendmsgToServer,
                          b"not in an iterable")
        self.assertRaises(TypeError, self.sendmsgToServer,
                          object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG, object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], 0, object())
        self.sendToServer(b"done")

    def testSendmsgBadCmsg(self):
        # Check that invalid ancillary data items are rejected.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(object(), 0, b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, object(), b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, object())])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0)])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b"data", 42)])
        self.sendToServer(b"done")

    @requireAttrs(socket, "CMSG_SPACE")
    def testSendmsgBadMultiCmsg(self):
        # Check that invalid ancillary data items are rejected when
        # more than one item is present.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    @testSendmsgBadMultiCmsg.client_skip
    def _testSendmsgBadMultiCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [0, 0, b""])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b""), object()])
        self.sendToServer(b"done")

    def testSendmsgExcessCmsgReject(self):
        # Check that sendmsg() rejects excess ancillary data items
        # when the number that can be sent is limited.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgExcessCmsgReject(self):
        if not hasattr(socket, "CMSG_SPACE"):
            # Can only send one item
            with self.assertRaises(OSError) as cm:
                self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
            self.assertIsNone(cm.exception.errno)
        self.sendToServer(b"done")

    def testSendmsgAfterClose(self):
        # Check that sendmsg() fails on a closed socket.
        pass

    def _testSendmsgAfterClose(self):
        self.cli_sock.close()
        self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
    """Tests for sendmsg() which require a stream socket and do not
    involve recvmsg() or recvmsg_into()."""

    def testSendmsgExplicitNoneAddr(self):
        # Check that peer address can be specified as None.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgExplicitNoneAddr(self):
        self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))

    def testSendmsgTimeout(self):
        # Check that timeout works with sendmsg().
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    def _testSendmsgTimeout(self):
        try:
            self.cli_sock.settimeout(0.03)
            try:
                # Keep sending until the send buffer fills and the
                # timeout fires.
                while True:
                    self.sendmsgToServer([b"a"*512])
            except socket.timeout:
                pass
            except OSError as exc:
                if exc.errno != errno.ENOMEM:
                    raise
                # bpo-33937 the test randomly fails on Travis CI with
                # "OSError: [Errno 12] Cannot allocate memory"
            else:
                self.fail("socket.timeout not raised")
        finally:
            # Unblock the server half regardless of the outcome.
            self.misc_event.set()

    # XXX: would be nice to have more tests for sendmsg flags argument.

    # Linux supports MSG_DONTWAIT when sending, but in general, it
    # only works when receiving.  Could add other platforms if they
    # support it too.
    @skipWithClientIf(sys.platform not in {"linux"},
                      "MSG_DONTWAIT not known to work on this platform when "
                      "sending")
    def testSendmsgDontWait(self):
        # Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @testSendmsgDontWait.client_skip
    def _testSendmsgDontWait(self):
        try:
            with self.assertRaises(OSError) as cm:
                while True:
                    self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
            # bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
            # with "OSError: [Errno 12] Cannot allocate memory"
            self.assertIn(cm.exception.errno,
                          (errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
        finally:
            self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
    """sendmsg() tests needing an unconnected (e.g. datagram) socket
    that do not involve recvmsg() or recvmsg_into()."""

    def testSendmsgNoDestAddr(self):
        # Server side: nothing to check - the client never manages to send.
        pass

    def _testSendmsgNoDestAddr(self):
        # Without a destination address an unconnected socket must refuse
        # to send, whether the address argument is omitted or given as None.
        for extra in ((), ([], 0, None)):
            self.assertRaises(OSError, self.cli_sock.sendmsg, [MSG], *extra)
class RecvmsgGenericTests(SendrecvmsgBase):
    """Tests for recvmsg() which can also be emulated using
    recvmsg_into(), and can use any socket type.

    All receives go through self.doRecvmsg(), which RecvmsgIntoMixin
    overrides, so each test here runs against both APIs.
    """

    def testRecvmsg(self):
        # Receive a simple message with recvmsg[_into]().
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsg(self):
        self.sendToServer(MSG)

    def testRecvmsgExplicitDefaults(self):
        # Test recvmsg[_into]() with default arguments provided explicitly.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgExplicitDefaults(self):
        self.sendToServer(MSG)

    def testRecvmsgShorter(self):
        # Receive a message smaller than buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) + 42)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShorter(self):
        self.sendToServer(MSG)

    def testRecvmsgTrunc(self):
        # Receive part of message, check for truncation indicators.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

    def _testRecvmsgTrunc(self):
        self.sendToServer(MSG)

    def testRecvmsgShortAncillaryBuf(self):
        # Test ancillary data buffer too small to hold any ancillary data.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShortAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgLongAncillaryBuf(self):
        # Test large ancillary data buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgLongAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgAfterClose(self):
        # Check that recvmsg[_into]() fails on a closed socket.
        self.serv_sock.close()
        self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)

    def _testRecvmsgAfterClose(self):
        pass

    def testRecvmsgTimeout(self):
        # Check that timeout works.
        try:
            self.serv_sock.settimeout(0.03)
            self.assertRaises(socket.timeout,
                              self.doRecvmsg, self.serv_sock, len(MSG))
        finally:
            # Let the client half finish even if the assertion failed.
            self.misc_event.set()

    def _testRecvmsgTimeout(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @requireAttrs(socket, "MSG_PEEK")
    def testRecvmsgPeek(self):
        # Check that MSG_PEEK in flags enables examination of pending
        # data without consuming it.

        # Receive part of data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3, 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # Ignoring MSG_TRUNC here (so this test is the same for stream
        # and datagram sockets).  Some wording in POSIX seems to
        # suggest that it needn't be set when peeking, but that may
        # just be a slip.
        self.checkFlags(flags, eor=False,
                        ignore=getattr(socket, "MSG_TRUNC", 0))

        # Receive all data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # Check that the same data can still be received normally.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgPeek.client_skip
    def _testRecvmsgPeek(self):
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    def testRecvmsgFromSendmsg(self):
        # Test receiving with recvmsg[_into]() when message is sent
        # using sendmsg().
        self.serv_sock.settimeout(self.fail_timeout)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgFromSendmsg.client_skip
    def _testRecvmsgFromSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
    """Tests which require a stream socket and can use either recvmsg()
    or recvmsg_into()."""

    def testRecvmsgEOF(self):
        # Receive end-of-stream indicator (b"", peer socket closed).
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.assertEqual(msg, b"")
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=None) # Might not have end-of-record marker

    def _testRecvmsgEOF(self):
        self.cli_sock.close()

    def testRecvmsgOverflow(self):
        # Receive a message in more than one chunk; the first (short)
        # read must not signal end-of-record, the second must.
        seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                    len(MSG) - 3)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

        seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testRecvmsgOverflow(self):
        self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
    """recvmsg()-specific tests usable with any socket type."""

    def testRecvmsgBadArgs(self):
        # recvmsg() must reject missing/negative/wrongly-typed arguments,
        # then still deliver the message the client sent.
        self.assertRaises(TypeError, self.serv_sock.recvmsg)
        bad_calls = [
            (ValueError, (-1, 0, 0)),
            (ValueError, (len(MSG), -1, 0)),
            (TypeError, ([bytearray(10)], 0, 0)),
            (TypeError, (object(), 0, 0)),
            (TypeError, (len(MSG), object(), 0)),
            (TypeError, (len(MSG), 0, object())),
        ]
        for expected_exc, call_args in bad_calls:
            self.assertRaises(expected_exc, self.serv_sock.recvmsg,
                              *call_args)
        msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgBadArgs(self):
        self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
    """Tests for recvmsg_into() which can use any socket type."""

    def testRecvmsgIntoBadArgs(self):
        # Check that recvmsg_into() rejects invalid arguments.
        buf = bytearray(len(MSG))
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          len(MSG), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          buf, 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [object()], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [b"I'm not writable"], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf, object()], 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
                          [buf], -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], 0, object())
        # After all the rejected calls, a valid call must still work.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoBadArgs(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoGenerator(self):
        # Receive into buffer obtained from a generator (not a sequence).
        buf = bytearray(len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            (o for o in [buf]))
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoGenerator(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoArray(self):
        # Receive into an array rather than the usual bytearray.
        buf = array.array("B", [0] * len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf.tobytes(), MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoArray(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter write).  The memoryview
        # slice limits the middle buffer to positions 2..8 inclusive.
        b1 = bytearray(b"----")
        b2 = bytearray(b"0123456789")
        b3 = bytearray(b"--------------")
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            [b1, memoryview(b2)[2:9], b3])
        self.assertEqual(nbytes, len(b"Mary had a little lamb"))
        self.assertEqual(b1, bytearray(b"Mary"))
        self.assertEqual(b2, bytearray(b"01 had a 9"))
        self.assertEqual(b3, bytearray(b"little lamb---"))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoScatter(self):
        self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
    """Test the functions CMSG_LEN() and CMSG_SPACE().

    Tests assumptions used by sendmsg() and recvmsg[_into](), which
    share code with these functions.
    """

    # Match the definition in socketmodule.c: the largest value a
    # socklen_t can hold, capped at INT_MAX when _testcapi is available.
    try:
        import _testcapi
    except ImportError:
        socklen_t_limit = 0x7fffffff
    else:
        socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)

    @requireAttrs(socket, "CMSG_LEN")
    def testCMSG_LEN(self):
        # Test CMSG_LEN() with various valid and invalid values,
        # checking the assumptions used by recvmsg() and sendmsg().
        toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
        values = list(range(257)) + list(range(toobig - 257, toobig))

        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
        for n in values:
            ret = socket.CMSG_LEN(n)
            # This is how recvmsg() calculates the data size
            self.assertEqual(ret - socket.CMSG_LEN(0), n)
            self.assertLessEqual(ret, self.socklen_t_limit)

        self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
        self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)

    @requireAttrs(socket, "CMSG_SPACE")
    def testCMSG_SPACE(self):
        # Test CMSG_SPACE() with various valid and invalid values,
        # checking the assumptions used by sendmsg().
        toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
        values = list(range(257)) + list(range(toobig - 257, toobig))

        last = socket.CMSG_SPACE(0)
        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(last, array.array("i").itemsize * 2)
        for n in values:
            ret = socket.CMSG_SPACE(n)
            # CMSG_SPACE() must be monotonic and at least as large as
            # CMSG_LEN() for the same payload size.
            self.assertGreaterEqual(ret, last)
            self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
            self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
            self.assertLessEqual(ret, self.socklen_t_limit)
            last = ret

        self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
        self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
    def registerRecvmsgResult(self, result):
        # Ensure any FDs received in the ancillary data get closed,
        # even if the test fails before it inspects them.
        self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
    def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
        # Check that constant MSG was received with numfds file
        # descriptors in a maximum of maxcmsgs control messages (which
        # must contain only complete integers).  By default, check
        # that MSG_CTRUNC is unset, but ignore any flags in
        # ignoreflags.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertIsInstance(ancdata, list)
        self.assertLessEqual(len(ancdata), maxcmsgs)
        fds = array.array("i")
        for item in ancdata:
            # Every control message must be a complete SCM_RIGHTS triple.
            self.assertIsInstance(item, tuple)
            cmsg_level, cmsg_type, cmsg_data = item
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertIsInstance(cmsg_data, bytes)
            self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
            fds.frombytes(cmsg_data)

        # All control messages together must carry exactly numfds FDs,
        # each referring to the file newFDs() created for it.
        self.assertEqual(len(fds), numfds)
        self.checkFDs(fds)
def testFDPassSimple(self):
    # Pass a single FD (array read from bytes object).
    # Server half: expect MSG plus exactly one descriptor.
    self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
                                           len(MSG), 10240))
def _testFDPassSimple(self):
    # Client half of testFDPassSimple: send one FD, serialized
    # explicitly via tobytes() rather than as an array object.
    self.assertEqual(
            self.sendmsgToServer(
                [MSG],
                [(socket.SOL_SOCKET,
                  socket.SCM_RIGHTS,
                  array.array("i", self.newFDs(1)).tobytes())]),
            len(MSG))
def testMultipleFDPass(self):
    # Pass multiple FDs in a single array.
    # Server half: expect four descriptors in one control message.
    self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
                                           len(MSG), 10240))
def _testMultipleFDPass(self):
    # Client half: send four descriptors in one SCM_RIGHTS message.
    self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
    # Test using CMSG_SPACE() to calculate ancillary buffer size
    # (includes trailing padding, so no truncation is expected).
    self.checkRecvmsgFDs(
        4, self.doRecvmsg(self.serv_sock, len(MSG),
                          socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
    # Client half: send four descriptors.
    self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
    # Test using CMSG_LEN() to calculate ancillary buffer size.
    self.checkRecvmsgFDs(1,
                         self.doRecvmsg(self.serv_sock, len(MSG),
                                        socket.CMSG_LEN(4 * SIZEOF_INT)),
                         # RFC 3542 says implementations may set
                         # MSG_CTRUNC if there isn't enough space
                         # for trailing padding.
                         ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
    # Client half: send a single descriptor.
    self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
    # Pass two FDs in two separate arrays.  Arrays may be combined
    # into a single control message by the OS, hence maxcmsgs=2
    # rather than an exact count.
    self.checkRecvmsgFDs(2,
                         self.doRecvmsg(self.serv_sock, len(MSG), 10240),
                         maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
    # Client half: one FD per control message, two messages.
    fd0, fd1 = self.newFDs(2)
    self.assertEqual(
        self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                      socket.SCM_RIGHTS,
                                      array.array("i", [fd0])),
                                     (socket.SOL_SOCKET,
                                      socket.SCM_RIGHTS,
                                      array.array("i", [fd1]))]),
        len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
    # Pass two FDs in two separate arrays, receiving them into the
    # minimum space for two arrays (CMSG_SPACE for the first item's
    # padding plus bare CMSG_LEN for the rest).
    num_fds = 2
    self.checkRecvmsgFDs(num_fds,
                         self.doRecvmsg(self.serv_sock, len(MSG),
                                        socket.CMSG_SPACE(SIZEOF_INT) +
                                        socket.CMSG_LEN(SIZEOF_INT * num_fds)),
                         maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
    # Client half: same two-message send as _testFDPassSeparate.
    fd0, fd1 = self.newFDs(2)
    self.assertEqual(
        self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                      socket.SCM_RIGHTS,
                                      array.array("i", [fd0])),
                                     (socket.SOL_SOCKET,
                                      socket.SCM_RIGHTS,
                                      array.array("i", [fd1]))]),
        len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
    """Best-effort send of msg together with ancillary data.

    If the sendmsg() system call rejects the ancillary data, confirm
    the failure was a real OS error (numeric errno) and retry with
    the bare message.  Either way, the full msg must be transmitted.
    """
    try:
        nbytes = self.sendmsgToServer([msg], ancdata)
    except OSError as err:
        # Check that it was the system call that failed.
        self.assertIsInstance(err.errno, int)
        nbytes = self.sendmsgToServer([msg])
    self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
    # Try to pass an empty FD array.  Can receive either no array
    # or an empty array (hence numfds=0 with MSG_CTRUNC ignored).
    self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
                                           len(MSG), 10240),
                         ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
    # Client half: zero-length SCM_RIGHTS payload; some systems may
    # reject the send entirely, which sendAncillaryIfPossible allows.
    self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
                                        socket.SCM_RIGHTS,
                                        b"")])
def testFDPassPartialInt(self):
    # Try to pass a truncated FD array.
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240)
    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
    self.assertLessEqual(len(ancdata), 1)
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        # If anything arrived at all, it must be less than one int.
        self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
    # Client half: send one int's worth of FD data minus the final
    # byte, using self.badfd so nothing real can leak through.
    self.sendAncillaryIfPossible(
        MSG,
        [(socket.SOL_SOCKET,
          socket.SCM_RIGHTS,
          array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
    # Try to pass two FD arrays, the first of which is truncated.
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240)
    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
    self.assertLessEqual(len(ancdata), 2)
    fds = array.array("i")
    # Arrays may have been combined in a single control message
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        # Keep only the complete ints; drop any trailing fragment.
        fds.frombytes(cmsg_data[:
                len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
    self.assertLessEqual(len(fds), 2)
    self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
    # Client half: first message is [fd0, badfd] truncated by one
    # byte; second message carries fd1 intact.
    fd0, fd1 = self.newFDs(2)
    self.sendAncillaryIfPossible(
        MSG,
        [(socket.SOL_SOCKET,
          socket.SCM_RIGHTS,
          array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
         (socket.SOL_SOCKET,
          socket.SCM_RIGHTS,
          array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
    """Assert that truncation inside the cmsghdr drops everything:
    data is MSG, no ancillary items are returned, and MSG_CTRUNC is
    set (unless listed in ignoreflags).
    """
    data, anc_items, recv_flags, peer = result
    self.assertEqual(data, MSG)
    self.assertEqual(anc_items, [])
    self.checkRecvmsgAddress(peer, self.cli_addr)
    self.checkFlags(recv_flags, eor=True, checkset=socket.MSG_CTRUNC,
                    ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
    # Check that no ancillary data is received when no buffer size
    # is specified.
    self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
                              # BSD seems to set MSG_CTRUNC only
                              # if an item has been partially
                              # received.
                              ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
    # Client half: send a single descriptor.
    self.createAndSendFDs(1)
def testCmsgTrunc0(self):
    # Check that no ancillary data is received when buffer size is 0.
    self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
                              ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
    # Client half: send a single descriptor.
    self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.

def testCmsgTrunc1(self):
    # 1-byte ancillary buffer: too small for any cmsg header.
    self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
    # Client half: send a single descriptor.
    self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
    # The cmsghdr structure has at least three members, two of
    # which are ints, so we still shouldn't see any ancillary
    # data.
    self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                             SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
    # Client half: send a single descriptor.
    self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
    # One byte short of a complete (empty-payload) cmsg header.
    self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                             socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
    # Client half: send a single descriptor.
    self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.

def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
    # Check that file descriptor data is truncated to between
    # mindata and maxdata bytes when received with buffer size
    # ancbuf, and that any complete file descriptor numbers are
    # valid.
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), ancbuf)
    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

    # With mindata == 0 the item may be dropped entirely.
    if mindata == 0 and ancdata == []:
        return
    self.assertEqual(len(ancdata), 1)
    cmsg_level, cmsg_type, cmsg_data = ancdata[0]
    self.assertEqual(cmsg_level, socket.SOL_SOCKET)
    self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
    self.assertGreaterEqual(len(cmsg_data), mindata)
    self.assertLessEqual(len(cmsg_data), maxdata)
    # Validate only the complete ints; ignore a trailing fragment.
    fds = array.array("i")
    fds.frombytes(cmsg_data[:
            len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
    self.checkFDs(fds)
def testCmsgTruncLen0(self):
    # Header fits exactly; zero bytes left for FD data.
    self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
    # Client half: send a single descriptor.
    self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
    # Room for at most a single byte of FD data.
    self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
    # Client half: send two descriptors.
    self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
    # Room for exactly one complete int of FD data.
    self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
                             maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
    # Client half: send two descriptors.
    self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
    # One byte short of two complete ints of FD data.
    self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
                             maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
    # Client half: send two descriptors.
    self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
    # Test sendmsg() and recvmsg[_into]() using the ancillary data
    # features of the RFC 3542 Advanced Sockets API for IPv6.
    # Currently we can only handle certain data items (e.g. traffic
    # class, hop limit, MTU discovery and fragmentation settings)
    # without resorting to unportable means such as the struct module,
    # but the tests here are aimed at testing the ancillary data
    # handling in sendmsg() and recvmsg() rather than the IPv6 API
    # itself.
    #
    # Protocol between halves: the server enables the relevant
    # IPV6_RECV* options and then sets misc_event; the client waits on
    # misc_event before sending, since implementations need not buffer
    # ancillary data requested only after the packet arrives.

    # Test value to use when setting hop limit of packet
    hop_limit = 2

    # Test value to use when setting traffic class of packet.
    # -1 means "use kernel default".
    traffic_class = -1

    def ancillaryMapping(self, ancdata):
        # Given ancillary data list ancdata, return a mapping from
        # pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
        # Check that no (level, type) pair appears more than once.
        d = {}
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertNotIn((cmsg_level, cmsg_type), d)
            d[(cmsg_level, cmsg_type)] = cmsg_data
        return d

    def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space.  Check that data is MSG, ancillary data is not
        # truncated (but ignore any flags in ignoreflags), and hop
        # limit is between 0 and maxhop inclusive.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        # Option is set; let the client send now.
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertEqual(len(ancdata), 1)
        self.assertIsInstance(ancdata[0], tuple)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
        self.assertIsInstance(cmsg_data, bytes)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimit(self):
        # Test receiving the packet hop limit as ancillary data.
        self.checkHopLimit(ancbufsize=10240)

    @testRecvHopLimit.client_skip
    def _testRecvHopLimit(self):
        # Need to wait until server has asked to receive ancillary
        # data, as implementations are not required to buffer it
        # otherwise.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimitCMSG_SPACE(self):
        # Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
        self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))

    @testRecvHopLimitCMSG_SPACE.client_skip
    def _testRecvHopLimitCMSG_SPACE(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Could test receiving into buffer sized using CMSG_LEN, but RFC
    # 3542 says portable applications must provide space for trailing
    # padding.  Implementations may set MSG_CTRUNC if there isn't
    # enough space for the padding.

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSetHopLimit(self):
        # Test setting hop limit on outgoing packet and receiving it
        # at the other end.
        self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)

    @testSetHopLimit.client_skip
    def _testSetHopLimit(self):
        # Client half: attach IPV6_HOPLIMIT ancillary data to the send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
                                     ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space.  Check that data is MSG, ancillary
        # data is not truncated (but ignore any flags in ignoreflags),
        # and traffic class and hop limit are in range (hop limit no
        # more than maxhop).
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        # Options are set; let the client send now.
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertEqual(len(ancdata), 2)
        ancmap = self.ancillaryMapping(ancdata)

        tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
        self.assertEqual(len(tcdata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(tcdata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

        hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
        self.assertEqual(len(hldata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(hldata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimit(self):
        # Test receiving traffic class and hop limit as ancillary data.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240)

    @testRecvTrafficClassAndHopLimit.client_skip
    def _testRecvTrafficClassAndHopLimit(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        # Test receiving traffic class and hop limit, using
        # CMSG_SPACE() to calculate buffer size.
        self.checkTrafficClassAndHopLimit(
            ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)

    @testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
    def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSetTrafficClassAndHopLimit(self):
        # Test setting traffic class and hop limit on outgoing packet,
        # and receiving them at the other end.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testSetTrafficClassAndHopLimit.client_skip
    def _testSetTrafficClassAndHopLimit(self):
        # Client half: attach both TCLASS and HOPLIMIT items.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                                   array.array("i", [self.traffic_class])),
                                  (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testOddCmsgSize(self):
        # Try to send ancillary data with first item one byte too
        # long.  Fall back to sending with correct size if this fails,
        # and check that second item was handled correctly.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testOddCmsgSize.client_skip
    def _testOddCmsgSize(self):
        # Client half: first attempt pads the TCLASS payload with an
        # extra byte; on OSError, resend with properly sized items.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        try:
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        except OSError as e:
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class])),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        self.assertEqual(nbytes, len(MSG))

    # Tests for proper handling of truncated ancillary data

    def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space, which should be too small to contain the ancillary
        # data header (if ancbufsize is None, pass no second argument
        # to recvmsg()).  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and no ancillary data is
        # returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        args = () if ancbufsize is None else (ancbufsize,)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), *args)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no ancillary
        # buffer size is provided.
        self.checkHopLimitTruncatedHeader(ancbufsize=None,
                                          # BSD seems to set
                                          # MSG_CTRUNC only if an item
                                          # has been partially
                                          # received.
                                          ignoreflags=socket.MSG_CTRUNC)

    @testCmsgTruncNoBufSize.client_skip
    def _testCmsgTruncNoBufSize(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc0(self):
        # Check that no ancillary data is received when ancillary
        # buffer size is zero.
        self.checkHopLimitTruncatedHeader(ancbufsize=0,
                                          ignoreflags=socket.MSG_CTRUNC)

    @testSingleCmsgTrunc0.client_skip
    def _testSingleCmsgTrunc0(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc1(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=1)

    @testSingleCmsgTrunc1.client_skip
    def _testSingleCmsgTrunc1(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc2Int(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)

    @testSingleCmsgTrunc2Int.client_skip
    def _testSingleCmsgTrunc2Int(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncLen0Minus1(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)

    @testSingleCmsgTruncLen0Minus1.client_skip
    def _testSingleCmsgTruncLen0Minus1(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncInData(self):
        # Test truncation of a control message inside its associated
        # data.  The message may be returned with its data truncated,
        # or not returned at all.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        self.assertLessEqual(len(ancdata), 1)
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    @testSingleCmsgTruncInData.client_skip
    def _testSingleCmsgTruncInData(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space, which should be large enough to
        # contain the first item, but too small to contain the header
        # of the second.  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and only one ancillary
        # data item is returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        # Either item may arrive first, so accept either type.
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

    # Try the above test with various buffer sizes.

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc0(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
                                        ignoreflags=socket.MSG_CTRUNC)

    @testSecondCmsgTrunc0.client_skip
    def _testSecondCmsgTrunc0(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc1(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)

    @testSecondCmsgTrunc1.client_skip
    def _testSecondCmsgTrunc1(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc2Int(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        2 * SIZEOF_INT)

    @testSecondCmsgTrunc2Int.client_skip
    def _testSecondCmsgTrunc2Int(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncLen0Minus1(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        socket.CMSG_LEN(0) - 1)

    @testSecondCmsgTruncLen0Minus1.client_skip
    def _testSecondCmsgTruncLen0Minus1(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecomdCmsgTruncInData(self):
        # NOTE: "Secomd" is a historical typo; the name is kept as-is
        # because it is the public test identifier.
        # Test truncation of the second of two control messages inside
        # its associated data.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG),
            socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}

        # First item must be complete; remove its type from the set so
        # a duplicate in the second item is caught by remove().
        cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        cmsg_types.remove(cmsg_type)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

        # Second item, if present at all, must be truncated.
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            cmsg_types.remove(cmsg_type)
            self.assertLess(len(cmsg_data), SIZEOF_INT)

        self.assertEqual(ancdata, [])

    @testSecomdCmsgTruncInData.client_skip
    def _testSecomdCmsgTruncInData(self):
        # Client half: wait for server, then send.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
# Derive concrete test classes for different socket types.

# IPv4 UDP base: datagram flag checks + connectionless send/recv
# helpers, server running in its own thread.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
                             SendrecvmsgConnectionlessBase,
                             ThreadedSocketTestMixin, UDPTestBase):
    pass
# sendmsg() tests over IPv4 UDP.
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
    pass
# recvmsg() tests over IPv4 UDP.
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
    pass
# recvmsg_into() tests over IPv4 UDP.
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
    pass
# IPv6 UDP base; overrides address comparison to drop the scope ID.
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
                              SendrecvmsgConnectionlessBase,
                              ThreadedSocketTestMixin, UDP6TestBase):

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer, ignoring scope ID (the last tuple element).
        self.assertEqual(addr1[:-1], addr2[:-1])
# sendmsg() tests over IPv6 UDP.
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
    pass
# recvmsg() tests over IPv6 UDP.
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
    pass
# recvmsg_into() tests over IPv6 UDP.
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
    pass
# RFC 3542 ancillary-data tests with recvmsg() over IPv6 UDP.
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
                                      SendrecvmsgUDP6TestBase):
    pass
# Same RFC 3542 tests, but routed through recvmsg_into() via the mixin.
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
                                          RFC3542AncillaryTest,
                                          SendrecvmsgUDP6TestBase):
    pass
# TCP base: connected-socket semantics over an established stream.
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
                             ConnectedStreamTestMixin, TCPTestBase):
    pass
# sendmsg() tests over TCP.
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
    pass
# recvmsg() tests over TCP.
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
                     SendrecvmsgTCPTestBase):
    pass
# recvmsg_into() tests over TCP.
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                         SendrecvmsgTCPTestBase):
    pass
# SCTP stream base: SCTP-specific flag checks over a connected stream.
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
                                    SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, SCTPStreamBase):
    pass
# sendmsg() tests over an SCTP stream.
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
    pass
# recvmsg() tests over an SCTP stream.
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgSCTPStreamTestBase):

    def testRecvmsgEOF(self):
        # Some kernels sporadically report ENOTCONN at EOF on SCTP;
        # treat that as a skip rather than a failure (issue #13876).
        try:
            # Zero-argument super(), consistent with the rest of the file.
            super().testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
# recvmsg_into() tests over an SCTP stream.
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgSCTPStreamTestBase):

    def testRecvmsgEOF(self):
        # Some kernels sporadically report ENOTCONN at EOF on SCTP;
        # treat that as a skip rather than a failure (issue #13876).
        try:
            # Zero-argument super(), consistent with the rest of the file.
            super().testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
# Unix-domain stream base: connected-socket semantics over AF_UNIX.
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, UnixStreamBase):
    pass
# sendmsg() tests over a Unix-domain stream.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
    pass
# recvmsg() tests over a Unix-domain stream.
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgUnixStreamTestBase):
    pass
# recvmsg_into() tests over a Unix-domain stream.
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgUnixStreamTestBase):
    pass
# SCM_RIGHTS descriptor-passing tests with recvmsg() over AF_UNIX.
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
    pass
# SCM_RIGHTS descriptor-passing tests with recvmsg_into() over AF_UNIX.
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
                                     SendrecvmsgUnixStreamTestBase):
    pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set.  These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.

class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs an
    # empty handler for SIGALRM and removes it on teardown, along with
    # any scheduled alarms.

    def setUp(self):
        super().setUp()
        # The handler raises ZeroDivisionError (1/0) so tests can
        # distinguish a signal-interrupted call from other failures.
        orig_alrm_handler = signal.signal(signal.SIGALRM,
                                          lambda signum, frame: 1 / 0)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)

    # Timeout for socket operations
    timeout = 4.0

    # Provide setAlarm() method to schedule delivery of SIGALRM after
    # given number of seconds, or cancel it if zero, and an
    # appropriate time value to use.  Use setitimer() if available.
    if hasattr(signal, "setitimer"):
        alarm_time = 0.05

        def setAlarm(self, seconds):
            signal.setitimer(signal.ITIMER_REAL, seconds)
    else:
        # Old systems may deliver the alarm up to one second early
        alarm_time = 2

        def setAlarm(self, seconds):
            signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
    # Test interrupting the recv*() methods with signals when a
    # timeout is set.

    def setUp(self):
        super().setUp()
        self.serv.settimeout(self.timeout)

    def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError
        # (propagated from the SIGALRM handler installed by the base
        # class) when interrupted by a signal.
        # Note: the previous comment claimed an EINTR OSError, but the
        # assertion below checks for the handler's ZeroDivisionError.
        try:
            self.setAlarm(self.alarm_time)
            with self.assertRaises(ZeroDivisionError):
                func(*args, **kwargs)
        finally:
            # Always cancel any pending alarm.
            self.setAlarm(0)

    def testInterruptedRecvTimeout(self):
        self.checkInterruptedRecv(self.serv.recv, 1024)

    def testInterruptedRecvIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))

    def testInterruptedRecvfromTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom, 1024)

    def testInterruptedRecvfromIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))

    @requireAttrs(socket.socket, "recvmsg")
    def testInterruptedRecvmsgTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg, 1024)

    @requireAttrs(socket.socket, "recvmsg_into")
    def testInterruptedRecvmsgIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
                                 ThreadSafeCleanupTestCase,
                                 SocketListeningTestMixin, TCPTestBase):
    # Test interrupting the interruptible send*() methods with signals
    # when a timeout is set.

    def setUp(self):
        super().setUp()
        self.serv_conn = self.newSocket()
        self.addCleanup(self.serv_conn.close)
        # Use a thread to complete the connection, but wait for it to
        # terminate before running the test, so that there is only one
        # thread to accept the signal.
        cli_thread = threading.Thread(target=self.doConnect)
        cli_thread.start()
        self.cli_conn, addr = self.serv.accept()
        self.addCleanup(self.cli_conn.close)
        cli_thread.join()
        self.serv_conn.settimeout(self.timeout)

    def doConnect(self):
        self.serv_conn.connect(self.serv_addr)

    def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (propagated from the SIGALRM handler
        # installed by the base class) when interrupted by a signal.
        # Note: the previous comment claimed an EINTR OSError, but the
        # assertion below checks for the handler's ZeroDivisionError.
        try:
            with self.assertRaises(ZeroDivisionError):
                while True:
                    self.setAlarm(self.alarm_time)
                    func(*args, **kwargs)
        finally:
            # Always cancel any pending alarm.
            self.setAlarm(0)

    # Issue #12958: The following tests have problems on OS X prior to 10.7
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendTimeout(self):
        self.checkInterruptedSend(self.serv_conn.send, b"a"*512)

    @support.requires_mac_ver(10, 7)
    def testInterruptedSendtoTimeout(self):
        # Passing an actual address here as Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires that the address is ignored since the socket is
        # connection-mode, however.
        self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
                                  self.serv_addr)

    @support.requires_mac_ver(10, 7)
    @requireAttrs(socket.socket, "sendmsg")
    def testInterruptedSendmsgTimeout(self):
        self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
    # Closing one end of a TCP connection must be observable as EOF on
    # the other end, and repeated close() calls must be harmless.

    def testClose(self):
        # Server side: accept the connection and close it immediately.
        conn, addr = self.serv.accept()
        conn.close()

        sd = self.cli
        # The closed connection must show up as readable in select()...
        read, write, err = select.select([sd], [], [], 1.0)
        self.assertEqual(read, [sd])
        # ...and reading from it must yield EOF (b'').
        self.assertEqual(sd.recv(1), b'')

        # Calling close() many times should be safe.
        conn.close()
        conn.close()

    def _testClose(self):
        # Client side: connect, then linger long enough for the server
        # thread to run its checks against the live client socket.
        self.cli.connect((HOST, self.port))
        time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
    # Sanity checks for socket.socketpair(): default family/type/proto
    # and simple send/recv in both directions.

    def __init__(self, methodName='runTest'):
        SocketPairTest.__init__(self, methodName=methodName)

    def _check_defaults(self, sock):
        # socketpair() uses AF_UNIX where available, AF_INET otherwise.
        self.assertIsInstance(sock, socket.socket)
        if hasattr(socket, 'AF_UNIX'):
            self.assertEqual(sock.family, socket.AF_UNIX)
        else:
            self.assertEqual(sock.family, socket.AF_INET)
        self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)

    def _testDefaults(self):
        self._check_defaults(self.cli)

    def testDefaults(self):
        self._check_defaults(self.serv)

    def testRecv(self):
        msg = self.serv.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.cli.send(MSG)

    def testSend(self):
        self.serv.send(MSG)

    def _testSend(self):
        msg = self.cli.recv(1024)
        self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
    # Tests for non-blocking mode: setblocking()/settimeout() semantics,
    # timeout inheritance through accept(), and the behaviour of
    # non-blocking accept() and recv().

    def __init__(self, methodName='runTest'):
        # Event used to synchronise the server and client threads.
        self.event = threading.Event()
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def assert_sock_timeout(self, sock, timeout):
        # Check both the Python-level timeout and, when fcntl is
        # available, the O_NONBLOCK flag on the underlying fd.
        self.assertEqual(self.serv.gettimeout(), timeout)

        blocking = (timeout != 0.0)
        self.assertEqual(sock.getblocking(), blocking)

        if fcntl is not None:
            # When a Python socket has a non-zero timeout, it's switched
            # internally to a non-blocking mode. Later, sock.sendall(),
            # sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EGAIN on all socket operations. That's how
            # timeouts are enforced.
            fd_blocking = (timeout is None)

            flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
            self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)

    def testSetBlocking(self):
        # Test setblocking() and settimeout() methods
        self.serv.setblocking(True)
        self.assert_sock_timeout(self.serv, None)

        self.serv.setblocking(False)
        self.assert_sock_timeout(self.serv, 0.0)

        self.serv.settimeout(None)
        self.assert_sock_timeout(self.serv, None)

        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)

        self.serv.settimeout(10)
        self.assert_sock_timeout(self.serv, 10)

        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)

    def _testSetBlocking(self):
        pass

    @support.cpython_only
    def testSetBlocking_overflow(self):
        # Issue 15989
        import _testcapi
        if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
            self.skipTest('needs UINT_MAX < ULONG_MAX')

        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)

        # A value overflowing an unsigned int must still be treated as
        # "true", i.e. switch the socket back to blocking mode.
        self.serv.setblocking(_testcapi.UINT_MAX + 1)
        self.assertIsNone(self.serv.gettimeout())

    _testSetBlocking_overflow = support.cpython_only(_testSetBlocking)

    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'test needs socket.SOCK_NONBLOCK')
    @support.requires_linux_version(2, 6, 28)
    def testInitNonBlocking(self):
        # create a socket with SOCK_NONBLOCK
        self.serv.close()
        self.serv = socket.socket(socket.AF_INET,
                                  socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
        self.assert_sock_timeout(self.serv, 0)

    def _testInitNonBlocking(self):
        pass

    def testInheritFlagsBlocking(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must be blocking.
        with socket_setdefaulttimeout(None):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertIsNone(conn.gettimeout())

    def _testInheritFlagsBlocking(self):
        self.cli.connect((HOST, self.port))

    def testInheritFlagsTimeout(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must inherit
        # the default timeout.
        default_timeout = 20.0
        with socket_setdefaulttimeout(default_timeout):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertEqual(conn.gettimeout(), default_timeout)

    def _testInheritFlagsTimeout(self):
        self.cli.connect((HOST, self.port))

    def testAccept(self):
        # Testing non-blocking accept
        self.serv.setblocking(0)

        # connect() didn't start: non-blocking accept() fails
        start_time = time.monotonic()
        with self.assertRaises(BlockingIOError):
            conn, addr = self.serv.accept()
        dt = time.monotonic() - start_time
        self.assertLess(dt, 1.0)

        # Let the client thread connect now.
        self.event.set()

        read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
        if self.serv not in read:
            self.fail("Error trying to do accept after select.")

        # connect() completed: non-blocking accept() doesn't block
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        self.assertIsNone(conn.gettimeout())

    def _testAccept(self):
        # don't connect before event is set to check
        # that non-blocking accept() raises BlockingIOError
        self.event.wait()

        self.cli.connect((HOST, self.port))

    def testRecv(self):
        # Testing non-blocking recv
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        conn.setblocking(0)

        # the server didn't send data yet: non-blocking recv() fails
        with self.assertRaises(BlockingIOError):
            msg = conn.recv(len(MSG))

        # Let the client thread send now.
        self.event.set()

        read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
        if conn not in read:
            self.fail("Error during select call to non-blocking socket.")

        # the server sent data yet: non-blocking recv() doesn't block
        msg = conn.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.cli.connect((HOST, self.port))

        # don't send anything before event is set to check
        # that non-blocking recv() raises BlockingIOError
        self.event.wait()

        # send data: recv() will no longer block
        self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
    """Unit tests for the object returned by socket.makefile()

    self.read_file is the io object returned by makefile() on
    the client connection.  You can read from this file to
    get output from the server.

    self.write_file is the io object returned by makefile() on the
    server connection.  You can write to this file to send output
    to the client.
    """

    bufsize = -1 # Use default buffer size
    encoding = 'utf-8'
    errors = 'strict'
    newline = None

    # Subclasses override these to exercise text/binary mode variants.
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'wb'
    write_msg = MSG

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def setUp(self):
        # Four events used to sequence the server and client threads in
        # the tests below.
        self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
            threading.Event() for i in range(4)]
        SocketConnectedTest.setUp(self)
        self.read_file = self.cli_conn.makefile(
            self.read_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)

    def tearDown(self):
        self.serv_finished.set()
        self.read_file.close()
        self.assertTrue(self.read_file.closed)
        self.read_file = None
        SocketConnectedTest.tearDown(self)

    def clientSetUp(self):
        SocketConnectedTest.clientSetUp(self)
        self.write_file = self.serv_conn.makefile(
            self.write_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)

    def clientTearDown(self):
        self.cli_finished.set()
        self.write_file.close()
        self.assertTrue(self.write_file.closed)
        self.write_file = None
        SocketConnectedTest.clientTearDown(self)

    def testReadAfterTimeout(self):
        # Issue #7322: A file object must disallow further reads
        # after a timeout has occurred.
        self.cli_conn.settimeout(1)
        self.read_file.read(3)
        # First read raises a timeout
        self.assertRaises(socket.timeout, self.read_file.read, 1)
        # Second read is disallowed
        with self.assertRaises(OSError) as ctx:
            self.read_file.read(1)
        self.assertIn("cannot read from timed out object", str(ctx.exception))

    def _testReadAfterTimeout(self):
        # Send only part of the message so the server's second read
        # times out, then wait until the server side is finished.
        self.write_file.write(self.write_msg[0:3])
        self.write_file.flush()
        self.serv_finished.wait()

    def testSmallRead(self):
        # Performing small file read test
        first_seg = self.read_file.read(len(self.read_msg)-3)
        second_seg = self.read_file.read(3)
        msg = first_seg + second_seg
        self.assertEqual(msg, self.read_msg)

    def _testSmallRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testFullRead(self):
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)

    def _testFullRead(self):
        self.write_file.write(self.write_msg)
        # Closing the writer signals EOF to the reading side.
        self.write_file.close()

    def testUnbufferedRead(self):
        # Performing unbuffered file read test
        buf = type(self.read_msg)()
        while 1:
            char = self.read_file.read(1)
            if not char:
                break
            buf += char
        self.assertEqual(buf, self.read_msg)

    def _testUnbufferedRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testReadline(self):
        # Performing file readline test
        line = self.read_file.readline()
        self.assertEqual(line, self.read_msg)

    def _testReadline(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testCloseAfterMakefile(self):
        # The file returned by makefile should keep the socket open.
        self.cli_conn.close()
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)

    def _testCloseAfterMakefile(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testMakefileAfterMakefileClose(self):
        # Closing the file object must not close the socket; recv()
        # still works on the underlying connection.
        self.read_file.close()
        msg = self.cli_conn.recv(len(MSG))
        if isinstance(self.read_msg, str):
            msg = msg.decode()
        self.assertEqual(msg, self.read_msg)

    def _testMakefileAfterMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testClosedAttr(self):
        self.assertTrue(not self.read_file.closed)

    def _testClosedAttr(self):
        self.assertTrue(not self.write_file.closed)

    def testAttributes(self):
        self.assertEqual(self.read_file.mode, self.read_mode)
        self.assertEqual(self.read_file.name, self.cli_conn.fileno())

    def _testAttributes(self):
        self.assertEqual(self.write_file.mode, self.write_mode)
        self.assertEqual(self.write_file.name, self.serv_conn.fileno())

    def testRealClose(self):
        # After closing both the file and the socket, further use of
        # either must raise.
        self.read_file.close()
        self.assertRaises(ValueError, self.read_file.fileno)
        self.cli_conn.close()
        self.assertRaises(OSError, self.cli_conn.getsockname)

    def _testRealClose(self):
        pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):

    """Repeat the tests from FileObjectClassTestCase with bufsize==0.

    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer.  Note that http.client relies on this
    when reading multiple requests from the same socket."""

    bufsize = 0 # Use unbuffered mode

    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.read_file.readline() # first line
        self.assertEqual(line, b"A. " + self.write_msg) # first line
        self.read_file = self.cli_conn.makefile('rb', 0)
        line = self.read_file.readline() # second line
        self.assertEqual(line, b"B. " + self.write_msg) # second line

    def _testUnbufferedReadline(self):
        self.write_file.write(b"A. " + self.write_msg)
        self.write_file.write(b"B. " + self.write_msg)
        self.write_file.flush()

    def testMakefileClose(self):
        # The file returned by makefile should keep the socket open...
        self.cli_conn.close()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, self.read_msg)
        # ...until the file is itself closed
        self.read_file.close()
        self.assertRaises(OSError, self.cli_conn.recv, 1024)

    def _testMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testMakefileCloseSocketDestroy(self):
        # Closing the file object must drop exactly one reference to
        # the socket object.
        refcount_before = sys.getrefcount(self.cli_conn)
        self.read_file.close()
        refcount_after = sys.getrefcount(self.cli_conn)
        self.assertEqual(refcount_before - 1, refcount_after)

    def _testMakefileCloseSocketDestroy(self):
        pass

    # Non-blocking ops
    # NOTE: to set `read_file` as non-blocking, we must call
    # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).

    def testSmallReadNonBlocking(self):
        self.cli_conn.setblocking(False)
        # No data yet: non-blocking reads return None instead of blocking.
        self.assertEqual(self.read_file.readinto(bytearray(10)), None)
        self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
        self.evt1.set()
        self.evt2.wait(1.0)
        first_seg = self.read_file.read(len(self.read_msg) - 3)
        if first_seg is None:
            # Data not arrived (can happen under Windows), wait a bit
            time.sleep(0.5)
            first_seg = self.read_file.read(len(self.read_msg) - 3)
        buf = bytearray(10)
        n = self.read_file.readinto(buf)
        self.assertEqual(n, 3)
        msg = first_seg + buf[:n]
        self.assertEqual(msg, self.read_msg)
        # Everything has been consumed: reads return None again.
        self.assertEqual(self.read_file.readinto(bytearray(16)), None)
        self.assertEqual(self.read_file.read(1), None)

    def _testSmallReadNonBlocking(self):
        self.evt1.wait(1.0)
        self.write_file.write(self.write_msg)
        self.write_file.flush()
        self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise system recv() will return 0 instead of EWOULDBLOCK.
        self.serv_finished.wait(5.0)

    def testWriteNonBlocking(self):
        self.cli_finished.wait(5.0)
        # The client thread can't skip directly - the SkipTest exception
        # would appear as a failure.
        if self.serv_skipped:
            self.skipTest(self.serv_skipped)

    def _testWriteNonBlocking(self):
        self.serv_skipped = None
        self.serv_conn.setblocking(False)
        # Try to saturate the socket buffer pipe with repeated large writes.
        BIG = b"x" * support.SOCK_MAX_SIZE
        LIMIT = 10
        # The first write() succeeds since a chunk of data can be buffered
        n = self.write_file.write(BIG)
        self.assertGreater(n, 0)
        for i in range(LIMIT):
            n = self.write_file.write(BIG)
            if n is None:
                # Succeeded
                break
            self.assertGreater(n, 0)
        else:
            # Let us know that this test didn't manage to establish
            # the expected conditions. This is not a failure in itself but,
            # if it happens repeatedly, the test should be fixed.
            self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Re-run the makefile() tests with bufsize=1: line-buffered for
    writing, default-buffered for reading."""
    bufsize = 1
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Re-run the makefile() tests with a tiny buffer to exercise the
    buffering code paths."""
    bufsize = 2
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
    """socket.makefile() tests with a text-mode reader and a binary
    writer."""
    newline = ''
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'wb'
    write_msg = MSG
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """socket.makefile() tests with a binary reader and a text-mode
    writer."""
    newline = ''
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """socket.makefile() tests with text mode on both sides."""
    newline = ''
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
class NetworkConnectionTest(object):
    """Prove network connection."""

    def clientSetUp(self):
        # We're inherited below by BasicTCPTest2, which also inherits
        # BasicTCPTest, which defines self.port referenced below.
        self.cli = socket.create_connection((HOST, self.port))
        # Alias the client socket under the name the mixed-in tests use.
        self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
    """Tests that NetworkConnection does not break existing TCP functionality.
    """
    # Runs the whole BasicTCPTest suite over a client socket created by
    # socket.create_connection() (see NetworkConnectionTest.clientSetUp).
class NetworkConnectionNoServer(unittest.TestCase):
    """Behaviour of connection attempts when no server is listening."""

    class MockSocket(socket.socket):
        # A socket class whose connect() always pretends to time out.
        def connect(self, *args):
            raise socket.timeout('timed out')

    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Return a socket which times out on connect"""
        real_socket = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            socket.socket = real_socket

    def test_connect(self):
        # connect() to an unused port must fail with ECONNREFUSED.
        port = support.find_unused_port()
        cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(cli.close)
        with self.assertRaises(OSError) as ctx:
            cli.connect((HOST, port))
        self.assertEqual(ctx.exception.errno, errno.ECONNREFUSED)

    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = support.find_unused_port()
        with self.assertRaises(OSError) as ctx:
            socket.create_connection((HOST, port))

        # Issue #16257: create_connection() calls getaddrinfo() against
        # 'localhost'.  This may return both an IPv4 and an IPv6 address;
        # create_connection() tries them all and propagates the last
        # exception it encountered.
        #
        # On Solaris, ENETUNREACH is returned in this circumstance
        # instead of ECONNREFUSED; and bpo-31910: create_connection()
        # fails randomly with EADDRNOTAVAIL on Travis CI.  Accept those
        # errnos too, where the platform defines them.
        expected_errnos = [errno.ECONNREFUSED]
        for name in ('ENETUNREACH', 'EADDRNOTAVAIL'):
            if hasattr(errno, name):
                expected_errnos.append(getattr(errno, name))
        self.assertIn(ctx.exception.errno, expected_errnos)

    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            with self.assertRaises(socket.timeout):
                socket.create_connection((HOST, 1234))
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
    # Check the attributes (family, source address, timeout) of sockets
    # returned by socket.create_connection().

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.source_port = support.find_unused_port()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def _justAccept(self):
        # Minimal server side: accept one connection and drop it.
        conn, addr = self.serv.accept()
        conn.close()

    testFamily = _justAccept
    def _testFamily(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.family, 2)

    testSourceAddress = _justAccept
    def _testSourceAddress(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30,
                source_address=('', self.source_port))
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.getsockname()[1], self.source_port)
        # The port number being used is sufficient to show that the bind()
        # call happened.

    testTimeoutDefault = _justAccept
    def _testTimeoutDefault(self):
        # passing no explicit timeout uses socket's global default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(42)
        try:
            self.cli = socket.create_connection((HOST, self.port))
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), 42)

    testTimeoutNone = _justAccept
    def _testTimeoutNone(self):
        # None timeout means the same as sock.settimeout(None)
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            self.cli = socket.create_connection((HOST, self.port), timeout=None)
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), None)

    testTimeoutValueNamed = _justAccept
    def _testTimeoutValueNamed(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        # Register the cleanup for consistency with the sibling client
        # tests (this was the only one missing it; clientTearDown()
        # would close the socket anyway and double close is safe).
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)

    testTimeoutValueNonamed = _justAccept
    def _testTimeoutValueNonamed(self):
        self.cli = socket.create_connection((HOST, self.port), 30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
    # Runtime behaviour of sockets returned by create_connection(),
    # in particular whether a timeout fires or not.

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def testInsideTimeout(self):
        # Server side: accept, wait 3 seconds, then reply.  The same
        # server code serves both client variants below (no timeout vs
        # a 1-second timeout).
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        time.sleep(3)
        conn.send(b"done!")
    testOutsideTimeout = testInsideTimeout

    def _testInsideTimeout(self):
        # No timeout: recv() blocks until the server's delayed reply.
        self.cli = sock = socket.create_connection((HOST, self.port))
        data = sock.recv(5)
        self.assertEqual(data, b"done!")

    def _testOutsideTimeout(self):
        # 1-second timeout: the server's 3-second delay must trigger it.
        self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
        self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
    # Timeout behaviour of blocking TCP accept().

    def testTCPTimeout(self):
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.accept()
        # No client ever connects, so accept() must raise socket.timeout.
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (TCP)")

    def testTimeoutZero(self):
        # With a zero timeout, accept() must fail immediately with a
        # plain OSError, not socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.accept()
        except socket.timeout:
            self.fail("caught timeout instead of error (TCP)")
        except OSError:
            ok = True
        except:
            self.fail("caught unexpected exception (TCP)")
        if not ok:
            self.fail("accept() returned success when we did not expect it")

    @unittest.skipUnless(hasattr(signal, 'alarm'),
                         'test needs signal.alarm()')
    def testInterruptedTimeout(self):
        # XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
        # the bug should have existed on all platforms.
        self.serv.settimeout(5.0)   # must be longer than alarm

        class Alarm(Exception):
            pass

        def alarm_handler(signal, frame):
            raise Alarm

        old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
        try:
            try:
                # The alarm must interrupt accept() before its timeout
                # expires; exactly Alarm must propagate out.
                signal.alarm(2)    # POSIX allows alarm to be up to 1 second early
                foo = self.serv.accept()
            except socket.timeout:
                self.fail("caught timeout instead of Alarm")
            except Alarm:
                pass
            except:
                self.fail("caught other exception instead of Alarm:"
                          " %s(%s):\n%s" %
                          (sys.exc_info()[:2] + (traceback.format_exc(),)))
            else:
                self.fail("nothing caught")
            finally:
                signal.alarm(0)         # shut off alarm
        except Alarm:
            self.fail("got Alarm in wrong place")
        finally:
            # no alarm can be pending.  Safe to restore old handler.
            signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
    # Timeout behaviour of blocking UDP recv().

    def testUDPTimeout(self):
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.recv(1024)
        # Nothing is ever sent, so recv() must raise socket.timeout.
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (UDP)")

    def testTimeoutZero(self):
        # With a zero timeout, recv() must fail immediately with a
        # plain OSError, not socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.recv(1024)
        except socket.timeout:
            self.fail("caught timeout instead of error (UDP)")
        except OSError:
            ok = True
        except:
            self.fail("caught unexpected exception (UDP)")
        if not ok:
            self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
    """Checks on the socket exception hierarchy and on error
    propagation from an invalid file descriptor."""

    def testExceptionTree(self):
        # Every socket-specific exception must derive from OSError,
        # which in turn derives from Exception.
        self.assertTrue(issubclass(OSError, Exception))
        for exc in (socket.herror, socket.gaierror, socket.timeout):
            self.assertTrue(issubclass(exc, OSError))

    def test_setblocking_invalidfd(self):
        # Regression test for issue #28471
        donor = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        sock = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM, 0, donor.fileno())
        donor.close()
        self.addCleanup(sock.detach)

        # The fd now belongs to a closed socket, so setblocking() must
        # raise OSError rather than misbehave.
        with self.assertRaises(OSError):
            sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
    """AF_UNIX sockets bound in Linux's abstract namespace (addresses
    that start with a NUL byte and do not live in the filesystem)."""

    UNIX_PATH_MAX = 108

    def testLinuxAbstractNamespace(self):
        address = b"\x00python-test-hello\x00\xff"
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as listener:
            listener.bind(address)
            listener.listen()
            with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client:
                client.connect(listener.getsockname())
                with listener.accept()[0] as conn:
                    self.assertEqual(listener.getsockname(), address)
                    self.assertEqual(client.getpeername(), address)

    def testMaxName(self):
        # The longest representable abstract name: one NUL byte plus
        # UNIX_PATH_MAX - 1 payload bytes.
        address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            sock.bind(address)
            self.assertEqual(sock.getsockname(), address)

    def testNameOverflow(self):
        # One byte longer than the maximum must be rejected by bind().
        address = "\x00" + "h" * self.UNIX_PATH_MAX
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            self.assertRaises(OSError, sock.bind, address)

    def testStrName(self):
        # Check that an abstract name can be passed as a string.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            sock.bind("\x00python\x00test\x00")
            self.assertEqual(sock.getsockname(), b"\x00python\x00test\x00")

    def testBytearrayName(self):
        # Check that an abstract name can be passed as a bytearray.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            sock.bind(bytearray(b"\x00python\x00test\x00"))
            self.assertEqual(sock.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
    # Binding AF_UNIX sockets to filesystem pathnames, including
    # non-ASCII and surrogateescape-encoded ones.

    def setUp(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def encoded(self, path):
        # Return the given path encoded in the file system encoding,
        # or skip the test if this is not possible.
        try:
            return os.fsencode(path)
        except UnicodeEncodeError:
            self.skipTest(
                "Pathname {0!a} cannot be represented in file "
                "system encoding {1!r}".format(
                    path, sys.getfilesystemencoding()))

    def bind(self, sock, path):
        # Bind the socket
        try:
            support.bind_unix_socket(sock, path)
        except OSError as e:
            # Skip (rather than fail) when the path is simply too long
            # for sockaddr_un on this platform.
            if str(e) == "AF_UNIX path too long":
                self.skipTest(
                    "Pathname {0!a} is too long to serve as an AF_UNIX path"
                    .format(path))
            else:
                raise

    def testUnbound(self):
        # Issue #30205 (note getsockname() can return None on OS X)
        self.assertIn(self.sock.getsockname(), ('', None))

    def testStrAddr(self):
        # Test binding to and retrieving a normal string pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testBytesAddr(self):
        # Test binding to a bytes pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, self.encoded(path))
        self.addCleanup(support.unlink, path)
        # getsockname() is expected to compare equal to the str path
        # even though the bind used bytes.
        self.assertEqual(self.sock.getsockname(), path)

    def testSurrogateescapeBind(self):
        # Test binding to a valid non-ASCII pathname, with the
        # non-ASCII bytes supplied using surrogateescape encoding.
        path = os.path.abspath(support.TESTFN_UNICODE)
        b = self.encoded(path)
        self.bind(self.sock, b.decode("ascii", "surrogateescape"))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testUnencodableAddr(self):
        # Test binding to a pathname that cannot be encoded in the
        # file system encoding.
        if support.TESTFN_UNENCODABLE is None:
            self.skipTest("No unencodable filename available")
        path = os.path.abspath(support.TESTFN_UNENCODABLE)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
    """
    Test the buffer versions of socket.recv() and socket.send().
    """
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecvIntoArray(self):
        # Receive into an array.array buffer and compare to MSG.
        buf = array.array("B", [0] * len(MSG))
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvIntoBytearray(self):
        # Receive into a bytearray buffer.
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvIntoBytearray = _testRecvIntoArray

    def testRecvIntoMemoryview(self):
        # Receive into a memoryview wrapping a bytearray.
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvIntoMemoryview = _testRecvIntoArray

    def testRecvFromIntoArray(self):
        # Same checks for recvfrom_into().
        buf = array.array("B", [0] * len(MSG))
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvFromIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvFromIntoBytearray(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvFromIntoBytearray = _testRecvFromIntoArray

    def testRecvFromIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvFromIntoMemoryview = _testRecvFromIntoArray

    def testRecvFromIntoSmallBuffer(self):
        # See issue #20246.
        # Asking for more bytes than the buffer can hold must raise
        # ValueError rather than overflow the buffer.
        buf = bytearray(8)
        self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)

    def _testRecvFromIntoSmallBuffer(self):
        self.serv_conn.send(MSG)

    def testRecvFromIntoEmptyBuffer(self):
        # recvfrom_into() must accept an empty buffer, with and without
        # an explicit zero size.
        buf = bytearray()
        self.cli_conn.recvfrom_into(buf)
        self.cli_conn.recvfrom_into(buf, 0)

    _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
# Parameters for the TIPC (Transparent Inter-Process Communication)
# tests below: an arbitrary service type plus the lower/upper bounds of
# the name sequence range the server publishes.
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
    """Return True if the TIPC kernel module is loaded.

    The TIPC module is not loaded automatically on Ubuntu and probably
    other Linux distros.
    """
    if not hasattr(socket, "AF_TIPC"):
        return False

    try:
        modules = open("/proc/modules")
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory or if we
        # have not the permission to read it.
        return False
    with modules:
        return any(line.startswith("tipc ") for line in modules)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
    # Datagram (SOCK_RDM) round-trip over AF_TIPC.

    def testRDM(self):
        srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        self.addCleanup(srv.close)
        self.addCleanup(cli.close)

        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Publish a name sequence covering [TIPC_LOWER, TIPC_UPPER].
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        srv.bind(srvaddr)

        # Send to a single name in the middle of the published range.
        sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        cli.sendto(MSG, sendaddr)

        msg, recvaddr = srv.recvfrom(1024)

        self.assertEqual(cli.getsockname(), recvaddr)
        self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
    # Stream (SOCK_STREAM) connection over AF_TIPC, using the threaded
    # client/server harness.

    def __init__(self, methodName = 'runTest'):
        unittest.TestCase.__init__(self, methodName = methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.srv.close)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Publish a name sequence covering [TIPC_LOWER, TIPC_UPPER].
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        self.srv.bind(srvaddr)
        self.srv.listen()
        self.serverExplicitReady()
        self.conn, self.connaddr = self.srv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
        # accept() call; sleep a little while to avoid it, otherwise
        # we could get an exception
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        # Connect to a single name in the middle of the published range.
        addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        self.cli.connect(addr)
        self.cliaddr = self.cli.getsockname()

    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
        self.assertEqual(self.cliaddr, self.connaddr)

    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
    """Tests for socket objects and create_connection() used as context
    managers: leaving the ``with`` block must close the socket."""

    def _testSocketClass(self):
        # base test
        with socket.socket() as sock:
            self.assertFalse(sock._closed)
        self.assertTrue(sock._closed)
        # close inside with block
        with socket.socket() as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # exception inside with block
        with socket.socket() as sock:
            # Unconnected socket: sendall must fail, and the socket must
            # still be closed on exit.
            self.assertRaises(OSError, sock.sendall, b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionBase(self):
        # Server side: echo back whatever the client sent.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionBase(self):
        # Client side: connected socket stays open inside the block and is
        # closed afterwards.
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            self.assertFalse(sock._closed)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionClose(self):
        # Server side: echo (the client closes before sending, so recv
        # returns b'' and sendall echoes nothing).
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionClose(self):
        # Client side: closing inside the block is allowed; exit is a no-op
        # and later use raises.
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            sock.close()
        self.assertTrue(sock._closed)
        self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
    """Check that sockets are non-inheritable (close-on-exec) by default and
    that get/set_inheritable() round-trips with the FD_CLOEXEC flag."""

    @unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
                         "SOCK_CLOEXEC not defined")
    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_CLOEXEC(self):
        # SOCK_CLOEXEC must be masked out of s.type and make the socket
        # non-inheritable.
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertFalse(s.get_inheritable())

    def test_default_inheritable(self):
        # PEP 446: file descriptors are non-inheritable by default.
        sock = socket.socket()
        with sock:
            self.assertEqual(sock.get_inheritable(), False)

    def test_dup(self):
        # A dup()'ed socket must also be non-inheritable.
        sock = socket.socket()
        with sock:
            newsock = sock.dup()
            sock.close()
            with newsock:
                self.assertEqual(newsock.get_inheritable(), False)

    def test_set_inheritable(self):
        # set_inheritable() toggles and get_inheritable() reflects it.
        sock = socket.socket()
        with sock:
            sock.set_inheritable(True)
            self.assertEqual(sock.get_inheritable(), True)

            sock.set_inheritable(False)
            self.assertEqual(sock.get_inheritable(), False)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        # Clearing FD_CLOEXEC via fcntl directly must be visible through
        # get_inheritable().
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(sock.get_inheritable(), False)

            # clear FD_CLOEXEC flag
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags &= ~fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)

            self.assertEqual(sock.get_inheritable(), True)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        # set_inheritable(True) must clear FD_CLOEXEC at the fcntl level.
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             fcntl.FD_CLOEXEC)

            sock.set_inheritable(True)
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             0)

    def test_socketpair(self):
        # Both ends of a socketpair default to non-inheritable.
        s1, s2 = socket.socketpair()
        self.addCleanup(s1.close)
        self.addCleanup(s2.close)
        self.assertEqual(s1.get_inheritable(), False)
        self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
                     "SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
    """Check the interaction of SOCK_NONBLOCK, setblocking(), settimeout()
    and setdefaulttimeout() with the socket's reported timeout/blocking
    state and the O_NONBLOCK fcntl flag."""

    def checkNonblock(self, s, nonblock=True, timeout=0.0):
        # Assert the socket's Python-level timeout and the fd-level
        # O_NONBLOCK flag are consistent with the expected mode.
        if nonblock:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), timeout)
            self.assertTrue(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            if timeout == 0:
                # timeout == 0: means that getblocking() must be False.
                self.assertFalse(s.getblocking())
            else:
                # If timeout > 0, the socket will be in a "blocking" mode
                # from the standpoint of the Python API.  For Python socket
                # object, "blocking" means that operations like 'sock.recv()'
                # will block.  Internally, file descriptors for
                # "blocking" Python sockets *with timeouts* are in a
                # *non-blocking* mode, and 'sock.recv()' uses 'select()'
                # and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
                self.assertTrue(s.getblocking())
        else:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), None)
            self.assertFalse(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            self.assertTrue(s.getblocking())

    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_NONBLOCK(self):
        # a lot of it seems silly and redundant, but I wanted to test that
        # changing back and forth worked ok
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
            self.checkNonblock(s)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
            s.setblocking(0)
            self.checkNonblock(s)
            s.settimeout(None)
            self.checkNonblock(s, nonblock=False)
            s.settimeout(2.0)
            self.checkNonblock(s, timeout=2.0)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
        # defaulttimeout
        # New sockets must pick up the process-wide default timeout; save
        # and restore it so other tests are unaffected.
        t = socket.getdefaulttimeout()
        socket.setdefaulttimeout(0.0)
        with socket.socket() as s:
            self.checkNonblock(s)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(2.0)
        with socket.socket() as s:
            self.checkNonblock(s, timeout=2.0)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
    """Windows-only tests for socket.share() / socket.fromshare(), which
    serialize a socket so another process can recreate and use it."""

    # This must be classmethod and not staticmethod or multiprocessing
    # won't be able to bootstrap it.
    @classmethod
    def remoteProcessServer(cls, q):
        # Recreate socket from shared data
        sdata = q.get()
        message = q.get()

        s = socket.fromshare(sdata)
        s2, c = s.accept()

        # Send the message
        s2.sendall(message)
        s2.close()
        s.close()

    def testShare(self):
        # Transfer the listening server socket to another process
        # and service it from there.

        # Create process:
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
        p.start()

        # Get the shared socket data
        data = self.serv.share(p.pid)

        # Pass the shared socket to the other process
        addr = self.serv.getsockname()
        self.serv.close()
        q.put(data)

        # The data that the server will send us
        message = b"slapmahfro"
        q.put(message)

        # Connect
        s = socket.create_connection(addr)

        #  listen for the data
        m = []
        while True:
            data = s.recv(100)
            if not data:
                break
            m.append(data)
        s.close()
        received = b"".join(m)
        self.assertEqual(received, message)
        p.join()

    def testShareLength(self):
        # fromshare() must reject truncated or over-long share blobs.
        data = self.serv.share(os.getpid())
        self.assertRaises(ValueError, socket.fromshare, data[:-1])
        self.assertRaises(ValueError, socket.fromshare, data+b"foo")

    def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking socket
        # since the internal python timeout value isn't transferred.
        self.assertEqual(org.gettimeout(), None)
        self.assertEqual(org.gettimeout(), other.gettimeout())

        self.assertEqual(org.family, other.family)
        self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then
        # internally windows will have picked the correct value.
        # Python introspection on the socket however will still return
        # 0.  For the shared socket, the python value is recreated
        # from the actual value, so it may not compare correctly.
        if org.proto != 0:
            self.assertEqual(org.proto, other.proto)

    def testShareLocal(self):
        # Sharing back into the same process still round-trips the socket.
        data = self.serv.share(os.getpid())
        s = socket.fromshare(data)
        try:
            self.compareSockets(self.serv, s)
        finally:
            s.close()

    def testTypes(self):
        # Every supported family/type combination must survive a
        # share()/fromshare() round trip.
        families = [socket.AF_INET, socket.AF_INET6]
        types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
        for f in families:
            for t in types:
                try:
                    source = socket.socket(f, t)
                except OSError:
                    continue # This combination is not supported
                try:
                    data = source.share(os.getpid())
                    shared = socket.fromshare(data)
                    try:
                        self.compareSockets(source, shared)
                    finally:
                        shared.close()
                finally:
                    source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
    """
    Test the send() implementation of socket.sendfile().

    Each scenario is a pair of methods run by the threaded harness:
    test* runs server-side (accept + receive), _test* runs client-side
    (connect + sendfile).  Subclasses override meth_from_sock() to
    exercise the os.sendfile()-based implementation instead.
    """
    FILESIZE = (10 * 1024 * 1024)  # 10 MiB
    BUFSIZE = 8192
    FILEDATA = b""
    TIMEOUT = 2

    @classmethod
    def setUpClass(cls):
        # Build a FILESIZE-byte test file of repeated random-letter chunks
        # and keep a copy of its contents for comparison.
        def chunks(total, step):
            assert total >= step
            while total > step:
                yield step
                total -= step
            if total:
                yield total

        chunk = b"".join([random.choice(string.ascii_letters).encode()
                          for i in range(cls.BUFSIZE)])
        with open(support.TESTFN, 'wb') as f:
            for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
                f.write(chunk)
        with open(support.TESTFN, 'rb') as f:
            cls.FILEDATA = f.read()
            assert len(cls.FILEDATA) == cls.FILESIZE

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)

    def accept_conn(self):
        # Accept with a timeout so a wedged client can't hang the suite.
        self.serv.settimeout(self.TIMEOUT)
        conn, addr = self.serv.accept()
        conn.settimeout(self.TIMEOUT)
        self.addCleanup(conn.close)
        return conn

    def recv_data(self, conn):
        # Drain the connection until EOF and return everything received.
        received = []
        while True:
            chunk = conn.recv(self.BUFSIZE)
            if not chunk:
                break
            received.append(chunk)
        return b''.join(received)

    def meth_from_sock(self, sock):
        # Depending on the mixin class being run return either send()
        # or sendfile() method implementation.
        return getattr(sock, "_sendfile_use_send")

    # regular file

    def _testRegularFile(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)
            # The file position must be left at EOF.
            self.assertEqual(file.tell(), self.FILESIZE)

    def testRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # non regular file

    def _testNonRegularFile(self):
        # BytesIO has no fileno(), so sendfile() must transparently fall
        # back to the send() implementation, while calling the
        # os.sendfile()-based path directly must give up.
        address = self.serv.getsockname()
        file = io.BytesIO(self.FILEDATA)
        with socket.create_connection(address) as sock, file as file:
            sent = sock.sendfile(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)
            self.assertRaises(socket._GiveupOnSendfile,
                              sock._sendfile_use_sendfile, file)

    def testNonRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # empty file

    def _testEmptyFileSend(self):
        # Sending an empty file transfers zero bytes and leaves the file
        # position at 0.
        address = self.serv.getsockname()
        filename = support.TESTFN + "2"
        with open(filename, 'wb'):
            self.addCleanup(support.unlink, filename)
        file = open(filename, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, 0)
            self.assertEqual(file.tell(), 0)

    def testEmptyFileSend(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(data, b"")

    # offset

    def _testOffset(self):
        # offset=5000 skips the first 5000 bytes of the file.
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=5000)
            self.assertEqual(sent, self.FILESIZE - 5000)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testOffset(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE - 5000)
        self.assertEqual(data, self.FILEDATA[5000:])

    # count

    def _testCount(self):
        # count caps the number of bytes sent; the file position stops
        # exactly there.
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 5000007
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCount(self):
        count = 5000007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count small

    def _testCountSmall(self):
        # Degenerate case: count=1 sends a single byte.
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 1
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCountSmall(self):
        count = 1
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count + offset

    def _testCountWithOffset(self):
        # count bytes starting at offset; position ends at offset + count.
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 100007
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=2007, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count + 2007)

    def testCountWithOffset(self):
        count = 100007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[2007:count+2007])

    # non blocking sockets are not supposed to work

    def _testNonBlocking(self):
        # sendfile() refuses non-blocking sockets outright.
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            sock.setblocking(False)
            meth = self.meth_from_sock(sock)
            self.assertRaises(ValueError, meth, file)
            self.assertRaises(ValueError, sock.sendfile, file)

    def testNonBlocking(self):
        conn = self.accept_conn()
        if conn.recv(8192):
            self.fail('was not supposed to receive any data')

    # timeout (non-triggered)

    def _testWithTimeout(self):
        # A generous timeout should not interfere with a full transfer.
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)

    def testWithTimeout(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # timeout (triggered)

    def _testWithTimeoutTriggeredSend(self):
        # With a tiny timeout and a server that barely reads, the send
        # buffer fills and socket.timeout must be raised.
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=0.01) as sock, \
                file as file:
            meth = self.meth_from_sock(sock)
            self.assertRaises(socket.timeout, meth, file)

    def testWithTimeoutTriggeredSend(self):
        conn = self.accept_conn()
        # Read only one buffer so the client's send eventually blocks and
        # times out.  NOTE(review): 88192 looks like a typo for 8192, but
        # any single bounded read works here.
        conn.recv(88192)

    # errors

    def _test_errors(self):
        pass

    def test_errors(self):
        # Argument validation: wrong socket type, text-mode file, and
        # non-positive or non-integer counts must all be rejected.
        with open(support.TESTFN, 'rb') as file:
            with socket.socket(type=socket.SOCK_DGRAM) as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "SOCK_STREAM", meth, file)
        with open(support.TESTFN, 'rt') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "binary mode", meth, file)
        with open(support.TESTFN, 'rb') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count='2')
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count=0.1)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=0)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
                     'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
    """
    Test the sendfile() implementation of socket.sendfile().
    """
    def meth_from_sock(self, sock):
        # Select the os.sendfile()-backed implementation instead of the
        # plain send() fallback exercised by the parent class.
        return sock._sendfile_use_sendfile
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
    # tests for AF_ALG
    def create_alg(self, typ, name):
        """Return an AF_ALG socket bound to (typ, name), or skip the test
        if the kernel does not provide that algorithm."""
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        try:
            sock.bind((typ, name))
        except FileNotFoundError as e:
            # type / algorithm is not available
            sock.close()
            raise unittest.SkipTest(str(e), typ, name)
        else:
            return sock

    # bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
    # at least on ppc64le architecture
    @support.requires_linux_version(4, 5)
    def test_sha256(self):
        # NIST test vector for SHA-256("abc").
        expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
                                 "177a9cb410ff61f20015ad")
        with self.create_alg('hash', 'sha256') as algo:
            op, _ = algo.accept()
            with op:
                op.sendall(b"abc")
                self.assertEqual(op.recv(512), expected)

            op, _ = algo.accept()
            with op:
                # Same digest when fed byte-by-byte with MSG_MORE.
                op.send(b'a', socket.MSG_MORE)
                op.send(b'b', socket.MSG_MORE)
                op.send(b'c', socket.MSG_MORE)
                op.send(b'')
                self.assertEqual(op.recv(512), expected)

    def test_hmac_sha1(self):
        # RFC 2202 test case 2: key "Jefe", data "what do ya want ...".
        expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
        with self.create_alg('hash', 'hmac(sha1)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
            op, _ = algo.accept()
            with op:
                op.sendall(b"what do ya want for nothing?")
                self.assertEqual(op.recv(512), expected)

    # Although it should work with 3.19 and newer the test blocks on
    # Ubuntu 15.10 with Kernel 4.2.0-19.
    @support.requires_linux_version(4, 3)
    def test_aes_cbc(self):
        # RFC 3602 test vector #1 for AES-128-CBC.
        key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
        iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
        msg = b"Single block msg"
        ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
        msglen = len(msg)
        with self.create_alg('skcipher', 'cbc(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            op, _ = algo.accept()
            with op:
                # Encrypt: pass op/iv as ancillary data, payload separately.
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 flags=socket.MSG_MORE)
                op.sendall(msg)
                self.assertEqual(op.recv(msglen), ciphertext)

            op, _ = algo.accept()
            with op:
                # Decrypt back to the plaintext.
                op.sendmsg_afalg([ciphertext],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                self.assertEqual(op.recv(msglen), msg)

            # long message
            multiplier = 1024
            longmsg = [msg] * multiplier
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(longmsg,
                                 op=socket.ALG_OP_ENCRYPT, iv=iv)
                enc = op.recv(msglen * multiplier)
            self.assertEqual(len(enc), msglen * multiplier)
            # CBC of a repeated block is not repeated ciphertext, but the
            # first block must match the single-block vector.
            self.assertEqual(enc[:msglen], ciphertext)

            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([enc],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                dec = op.recv(msglen * multiplier)
            self.assertEqual(len(dec), msglen * multiplier)
            self.assertEqual(dec, msg * multiplier)

    @support.requires_linux_version(4, 9)  # see issue29324
    def test_aead_aes_gcm(self):
        # AES-GCM with associated data; expected ciphertext and tag are
        # fixed test vectors.
        key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
        iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
        plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
        assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
        expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
        expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')

        taglen = len(expected_tag)
        assoclen = len(assoc)

        with self.create_alg('aead', 'gcm(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
                            None, taglen)

            # send assoc, plain and tag buffer in separate steps
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen, flags=socket.MSG_MORE)
                op.sendall(assoc, socket.MSG_MORE)
                op.sendall(plain)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # now with msg
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # create anc data manually
            # Same operation, but building the cmsg headers by hand via
            # plain sendmsg() instead of sendmsg_afalg().
            pack_uint32 = struct.Struct('I').pack
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg(
                    [msg],
                    ([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
                     [socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
                     [socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
                    )
                )
                res = op.recv(len(msg) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # decrypt and verify
            op, _ = algo.accept()
            with op:
                msg = assoc + expected_ct + expected_tag
                op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(len(msg) - taglen)
                self.assertEqual(plain, res[assoclen:])

    @support.requires_linux_version(4, 3)  # see test_aes_cbc
    def test_drbg_pr_sha256(self):
        # deterministic random bit generator, prediction resistance, sha256
        with self.create_alg('rng', 'drbg_pr_sha256') as algo:
            extra_seed = os.urandom(32)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
            op, _ = algo.accept()
            with op:
                rn = op.recv(32)
                self.assertEqual(len(rn), 32)

    def test_sendmsg_afalg_args(self):
        # Argument validation for sendmsg_afalg(): op is mandatory and
        # keyword-only; assoclen must be a non-negative int.
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        with sock:
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg()

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=None)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(1)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)

    def test_length_restriction(self):
        # bpo-35050, off-by-one error in length check
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(sock.close)

        # salg_type[14]
        # 13 chars fit (FileNotFoundError means the bind was attempted);
        # 14 chars must be rejected before reaching the kernel.
        with self.assertRaises(FileNotFoundError):
            sock.bind(("t" * 13, "name"))
        with self.assertRaisesRegex(ValueError, "type too long"):
            sock.bind(("t" * 14, "name"))

        # salg_name[64]
        with self.assertRaises(FileNotFoundError):
            sock.bind(("type", "n" * 63))
        with self.assertRaisesRegex(ValueError, "name too long"):
            sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
    """Guard against new TCP_* constants appearing on Windows without the
    test suite being told about them (see bpo-32394)."""

    knownTCPFlags = {
        # available since long time ago
        'TCP_MAXSEG',
        'TCP_NODELAY',
        # available starting with Windows 10 1607
        'TCP_FASTOPEN',
        # available starting with Windows 10 1703
        'TCP_KEEPCNT',
        # available starting with Windows 10 1709
        'TCP_KEEPIDLE',
        'TCP_KEEPINTVL'
    }

    def test_new_tcp_flags(self):
        # Any TCP* name exposed by the socket module must already be in
        # the known set above.
        unknown = [name for name in dir(socket)
                   if name.startswith('TCP') and name not in self.knownTCPFlags]

        self.assertEqual([], unknown,
            "New TCP flags were discovered. See bpo-32394 for more information")
def test_main():
    """Assemble the full list of test cases and run them with threading
    bookkeeping (setup before, leak check after)."""
    tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
             TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]

    tests.extend([
        NonBlockingTCPTests,
        FileObjectClassTestCase,
        UnbufferedFileObjectClassTestCase,
        LineBufferedFileObjectClassTestCase,
        SmallBufferedFileObjectClassTestCase,
        UnicodeReadFileObjectClassTestCase,
        UnicodeWriteFileObjectClassTestCase,
        UnicodeReadWriteFileObjectClassTestCase,
        NetworkConnectionNoServer,
        NetworkConnectionAttributesTest,
        NetworkConnectionBehaviourTest,
        ContextManagersTest,
        InheritanceTest,
        NonblockConstantTest
    ])
    tests.append(BasicSocketPairTest)
    tests.append(TestUnixDomain)
    tests.append(TestLinuxAbstractNamespace)
    tests.extend([TIPCTest, TIPCThreadableTest])
    tests.extend([BasicCANTest, CANTest])
    tests.extend([BasicRDSTest, RDSTest])
    tests.append(LinuxKernelCryptoAPI)
    tests.append(BasicQIPCRTRTest)
    tests.extend([
        BasicVSOCKTest,
        ThreadedVSOCKSocketStreamTest,
    ])
    tests.extend([
        CmsgMacroTests,
        SendmsgUDPTest,
        RecvmsgUDPTest,
        RecvmsgIntoUDPTest,
        SendmsgUDP6Test,
        RecvmsgUDP6Test,
        RecvmsgRFC3542AncillaryUDP6Test,
        RecvmsgIntoRFC3542AncillaryUDP6Test,
        RecvmsgIntoUDP6Test,
        SendmsgTCPTest,
        RecvmsgTCPTest,
        RecvmsgIntoTCPTest,
        SendmsgSCTPStreamTest,
        RecvmsgSCTPStreamTest,
        RecvmsgIntoSCTPStreamTest,
        SendmsgUnixStreamTest,
        RecvmsgUnixStreamTest,
        RecvmsgIntoUnixStreamTest,
        RecvmsgSCMRightsStreamTest,
        RecvmsgIntoSCMRightsStreamTest,
        # These are slow when setitimer() is not available
        InterruptedRecvTimeoutTest,
        InterruptedSendTimeoutTest,
        TestSocketSharing,
        SendfileUsingSendTest,
        SendfileUsingSendfileTest,
    ])
    tests.append(TestMSWindowsTCPFlags)

    # Record the live-thread baseline so leaked test threads are detected.
    thread_info = support.threading_setup()
    support.run_unittest(*tests)
    support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
# coding=utf-8
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
# Copyright 2012-2015, Elsie Powell, http://embolalia.com
#
# Licensed under the Eiffel Forum License 2.
from __future__ import unicode_literals, absolute_import, print_function, division
import collections
import os
import re
import sys
import threading
import time
from sopel import tools
from sopel import irc
from sopel.db import SopelDB
from sopel.tools import stderr, Identifier
import sopel.tools.jobs
from sopel.trigger import Trigger
from sopel.module import NOLIMIT
from sopel.logger import get_logger
import sopel.loader
LOGGER = get_logger(__name__)
# Python 2/3 compatibility shim: expose the Python-2 names `unicode` and
# `basestring` on Python 3 so the rest of the module can use them freely.
py3 = sys.version_info.major >= 3
if py3:
    unicode = str
    basestring = str
class _CapReq(object):
def __init__(self, prefix, module, failure=None, arg=None, success=None):
def nop(bot, cap):
pass
# TODO at some point, reorder those args to be sane
self.prefix = prefix
self.module = module
self.arg = arg
self.failure = failure or nop
self.success = success or nop
class Sopel(irc.Bot):
    def __init__(self, config, daemon=False):
        """Initialize the bot: connection state, callable registries, the
        database, shared memory, and the job scheduler; then load modules
        via :meth:`setup`."""
        irc.Bot.__init__(self, config)
        self._daemon = daemon  # Used for iPython. TODO something saner here
        # `re.compile('.*') is re.compile('.*')` because of caching, so we need
        # to associate a list with each regex, since they are unexpectedly
        # indistinct.
        self._callables = {
            'high': collections.defaultdict(list),
            'medium': collections.defaultdict(list),
            'low': collections.defaultdict(list)
        }
        self.config = config
        """The :class:`sopel.config.Config` for the current Sopel instance."""
        self.doc = {}
        """
        A dictionary of command names to their docstring and example, if
        declared. The first item in a callable's commands list is used as the
        key in version *3.2* onward. Prior to *3.2*, the name of the function
        as declared in the source code was used.
        """
        self._command_groups = collections.defaultdict(list)
        """A mapping of module names to a list of commands in it."""
        self.stats = {}  # deprecated, remove in 7.0
        self._times = {}
        """
        A dictionary mapping lower-case'd nicks to dictionaries which map
        function names to the time which they were last used by that nick.
        """
        self.server_capabilities = {}
        """A dict mapping supported IRCv3 capabilities to their options.

        For example, if the server specifies the capability ``sasl=EXTERNAL``,
        it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
        without any options will have ``None`` as the value.

        For servers that do not support IRCv3, this will be an empty set."""
        self.enabled_capabilities = set()
        """A set containing the IRCv3 capabilities that the bot has enabled."""
        self._cap_reqs = dict()
        """A dictionary of capability names to a list of requests"""
        self.privileges = dict()
        """A dictionary of channels to their users and privilege levels

        The value associated with each channel is a dictionary of
        :class:`sopel.tools.Identifier`\s to
        a bitwise integer value, determined by combining the appropriate
        constants from :mod:`sopel.module`.

        .. deprecated:: 6.2.0
            Use :attr:`channels` instead.
        """
        self.channels = tools.SopelMemory()  # name to chan obj
        """A map of the channels that Sopel is in.

        The keys are Identifiers of the channel names, and map to
        :class:`sopel.tools.target.Channel` objects which contain the users in
        the channel and their permissions.
        """
        self.users = tools.SopelMemory()  # name to user obj
        """A map of the users that Sopel is aware of.

        The keys are Identifiers of the nicknames, and map to
        :class:`sopel.tools.target.User` instances. In order for Sopel to be
        aware of a user, it must be in at least one channel which they are also
        in.
        """
        self.db = SopelDB(config)
        """The bot's database, as a :class:`sopel.db.SopelDB` instance."""
        self.memory = tools.SopelMemory()
        """
        A thread-safe dict for storage of runtime data to be shared between
        modules. See :class:`sopel.tools.Sopel.SopelMemory`
        """
        self.shutdown_methods = []
        """List of methods to call on shutdown"""
        self.scheduler = sopel.tools.jobs.JobScheduler(self)
        self.scheduler.start()

        # Set up block lists
        # Default to empty
        if not self.config.core.nick_blocks:
            self.config.core.nick_blocks = []
        if not self.config.core.host_blocks:
            self.config.core.host_blocks = []
        self.setup()
# Backwards-compatibility aliases to attributes made private in 6.2. Remove
# these in 7.0
times = property(lambda self: getattr(self, '_times'))
command_groups = property(lambda self: getattr(self, '_command_groups'))
    def write(self, args, text=None):  # Shim this in here for autodocs
        """Send a command to the server.

        ``args`` is an iterable of strings, which are joined by spaces.
        ``text`` is treated as though it were the final item in ``args``, but
        is preceded by a ``:``. This is a special case which means that
        ``text``, unlike the items in ``args`` may contain spaces (though this
        constraint is not checked by ``write``).

        In other words, both ``sopel.write(('PRIVMSG',), 'Hello, world!')``
        and ``sopel.write(('PRIVMSG', ':Hello, world!'))`` will send
        ``PRIVMSG :Hello, world!`` to the server.

        Newlines and carriage returns ('\\n' and '\\r') are removed before
        sending. Additionally, if the message (after joining) is longer than
        than 510 characters, any remaining characters will not be sent.
        """
        # Pure delegation; the shim exists only so autodocs pick up the
        # docstring on this class.
        irc.Bot.write(self, args, text=text)
    def setup(self):
        """Enumerate, load and register all configured modules.

        Errors during import and errors during a module's own ``setup()``
        are counted and reported separately; a failing module never stops
        the others from loading.
        """
        stderr("\nWelcome to Sopel. Loading modules...\n\n")

        modules = sopel.loader.enumerate_modules(self.config)

        error_count = 0
        success_count = 0
        for name in modules:
            path, type_ = modules[name]

            try:
                module, _ = sopel.loader.load_module(name, path, type_)
            except Exception as e:
                # Import failed: report where the exception was raised and
                # carry on with the next module.
                error_count = error_count + 1
                filename, lineno = tools.get_raising_file_and_line()
                rel_path = os.path.relpath(filename, os.path.dirname(__file__))
                raising_stmt = "%s:%d" % (rel_path, lineno)
                stderr("Error loading %s: %s (%s)" % (name, e, raising_stmt))
            else:
                try:
                    if hasattr(module, 'setup'):
                        module.setup(self)
                    relevant_parts = sopel.loader.clean_module(
                        module, self.config)
                except Exception as e:
                    # Module imported but its setup()/clean failed.
                    error_count = error_count + 1
                    filename, lineno = tools.get_raising_file_and_line()
                    rel_path = os.path.relpath(
                        filename, os.path.dirname(__file__)
                    )
                    raising_stmt = "%s:%d" % (rel_path, lineno)
                    stderr("Error in %s setup procedure: %s (%s)"
                           % (name, e, raising_stmt))
                else:
                    self.register(*relevant_parts)
                    success_count += 1

        if len(modules) > 1:  # coretasks is counted
            stderr('\n\nRegistered %d modules,' % (success_count - 1))
            stderr('%d modules failed to load\n\n' % error_count)
        else:
            stderr("Warning: Couldn't load any modules")
    def unregister(self, obj):
        """Remove a previously registered callable from the bot.

        Non-callables are ignored. Rules, scheduled jobs and shutdown
        methods associated with ``obj`` are removed.
        """
        if not callable(obj):
            return
        if hasattr(obj, 'rule'):  # commands and intents have it added
            for rule in obj.rule:
                callb_list = self._callables[obj.priority][rule]
                if obj in callb_list:
                    callb_list.remove(obj)
        if hasattr(obj, 'interval'):
            # TODO this should somehow find the right job to remove, rather than
            # clearing the entire queue. Issue #831
            self.scheduler.clear_jobs()
        if (getattr(obj, '__name__', None) == 'shutdown' and
                obj in self.shutdown_methods):
            self.shutdown_methods.remove(obj)
    def register(self, callables, jobs, shutdowns, urls):
        """Register a module's callables, scheduled jobs, shutdown methods
        and URL callbacks with the bot."""
        # Append module's shutdown function to the bot's list of functions to
        # call on shutdown
        self.shutdown_methods += shutdowns
        for callbl in callables:
            # Index the callable under each of its trigger rules, bucketed
            # by priority.
            for rule in callbl.rule:
                self._callables[callbl.priority][rule].append(callbl)
            if hasattr(callbl, 'commands'):
                module_name = callbl.__module__.rsplit('.', 1)[-1]
                # TODO doc and make decorator for this. Not sure if this is how
                # it should work yet, so not making it public for 6.0.
                category = getattr(callbl, 'category', module_name)
                self._command_groups[category].append(callbl.commands[0])
            for command, docs in callbl._docs.items():
                self.doc[command] = docs
        for func in jobs:
            # One Job per declared interval.
            for interval in func.interval:
                job = sopel.tools.jobs.Job(interval, func)
                self.scheduler.add_job(job)
        if not self.memory.contains('url_callbacks'):
            self.memory['url_callbacks'] = tools.SopelMemory()
        for func in urls:
            self.memory['url_callbacks'][func.url_regex] = func
    def part(self, channel, msg=None):
        """Part a channel.

        ``msg`` is an optional part message forwarded to the write layer.
        """
        self.write(['PART', channel], msg)
    def join(self, channel, password=None):
        """Join a channel
        If `channel` contains a space, and no `password` is given, the space is
        assumed to split the argument into the channel to join and its
        password. `channel` should not contain a space if `password` is given.
        """
        if password is None:
            # NOTE(review): a space-embedded password rides along inside
            # ``channel`` here; this relies on write() emitting the argument
            # verbatim — confirm in the write layer.
            self.write(('JOIN', channel))
        else:
            self.write(['JOIN', channel, password])
    def msg(self, recipient, text, max_messages=1):
        """Deprecated alias for :func:`say` with swapped argument order."""
        # Deprecated, but way too much of a pain to remove.
        self.say(text, recipient, max_messages)
    def say(self, text, recipient, max_messages=1):
        """Send ``text`` as a PRIVMSG to ``recipient``.
        In the context of a triggered callable, the ``recipient`` defaults to
        the channel (or nickname, if a private message) from which the message
        was received.
        By default, this will attempt to send the entire ``text`` in one
        message. If the text is too long for the server, it may be truncated.
        If ``max_messages`` is given, the ``text`` will be split into at most
        that many messages, each no more than 400 bytes. The split is made at
        the last space character before the 400th byte, or at the 400th byte if
        no such space exists. If the ``text`` is too long to fit into the
        specified number of messages using the above splitting, the final
        message will contain the entire remainder, which may be truncated by
        the server.
        """
        # We're arbitrarily saying that the max is 400 bytes of text when
        # messages will be split. Otherwise, we'd have to account for the bot's
        # hostmask, which is hard.
        max_text_length = 400
        # Encode to bytes, for proper length calculation
        if isinstance(text, unicode):
            encoded_text = text.encode('utf-8')
        else:
            encoded_text = text
        excess = ''
        if max_messages > 1 and len(encoded_text) > max_text_length:
            # Split at the last space within the limit; hard cut if the first
            # 400 bytes contain no space at all.
            last_space = encoded_text.rfind(' '.encode('utf-8'), 0, max_text_length)
            if last_space == -1:
                excess = encoded_text[max_text_length:]
                encoded_text = encoded_text[:max_text_length]
            else:
                excess = encoded_text[last_space + 1:]
                encoded_text = encoded_text[:last_space]
            # We'll then send the excess at the end
        # Back to unicode again, so we don't screw things up later.
        text = encoded_text.decode('utf-8')
        try:
            self.sending.acquire()
            # No messages within the last 3 seconds? Go ahead!
            # Otherwise, wait so it's been at least 0.8 seconds + penalty
            recipient_id = Identifier(recipient)
            if recipient_id not in self.stack:
                self.stack[recipient_id] = []
            elif self.stack[recipient_id]:
                elapsed = time.time() - self.stack[recipient_id][-1][0]
                if elapsed < 3:
                    # Longer messages pay a proportionally longer delay.
                    penalty = float(max(0, len(text) - 40)) / 70
                    wait = 0.8 + penalty
                    if elapsed < wait:
                        time.sleep(wait - elapsed)
                # Loop detection
                messages = [m[1] for m in self.stack[recipient_id][-8:]]
                # If what we about to send repeated at least 5 times in the
                # last 2 minutes, replace with '...'
                if messages.count(text) >= 5 and elapsed < 120:
                    text = '...'
                    if messages.count('...') >= 3:
                        # If we said '...' 3 times, discard message
                        return
            self.write(('PRIVMSG', recipient), text)
            # Keep only the 10 most recent messages per recipient.
            self.stack[recipient_id].append((time.time(), self.safe(text)))
            self.stack[recipient_id] = self.stack[recipient_id][-10:]
        finally:
            self.sending.release()
        # Now that we've sent the first part, we need to send the rest. Doing
        # this recursively seems easier to me than iteratively
        if excess:
            self.msg(recipient, excess, max_messages - 1)
    def notice(self, text, dest):
        """Send an IRC NOTICE to a user or a channel.
        Within the context of a triggered callable, ``dest`` will default to
        the channel (or nickname, if a private message), in which the trigger
        happened.

        ``text`` is written out directly; no splitting or loop detection is
        applied here (unlike :func:`say`).
        """
        self.write(('NOTICE', dest), text)
    def action(self, text, dest):
        """Send ``text`` as a CTCP ACTION PRIVMSG to ``dest``.
        The same loop detection and length restrictions apply as with
        :func:`say`, though automatic message splitting is not available.
        Within the context of a triggered callable, ``dest`` will default to
        the channel (or nickname, if a private message), in which the trigger
        happened.
        """
        # \001 delimiters mark a CTCP message; delivery goes through say(),
        # which is why say()'s rate limiting applies.
        self.say('\001ACTION {}\001'.format(text), dest)
def reply(self, text, dest, reply_to, notice=False):
"""Prepend ``reply_to`` to ``text``, and send as a PRIVMSG to ``dest``.
If ``notice`` is ``True``, send a NOTICE rather than a PRIVMSG.
The same loop detection and length restrictions apply as with
:func:`say`, though automatic message splitting is not available.
Within the context of a triggered callable, ``reply_to`` will default to
the nickname of the user who triggered the call, and ``dest`` to the
channel (or nickname, if a private message), in which the trigger
happened.
"""
text = '%s: %s' % (reply_to, text)
if notice:
self.notice(text, dest)
else:
self.say(text, dest)
class SopelWrapper(object):
    """Thin proxy around a bot instance, bound to a single trigger.

    Attribute reads and writes fall through to the wrapped bot; the
    messaging helpers default their destination (and ``reply_to``) to the
    triggering channel/nick.
    """
    def __init__(self, sopel, trigger):
        # Our __setattr__ forwards to the wrapped bot, so store these two
        # attributes directly on the wrapper via object.__setattr__.
        object.__setattr__(self, '_bot', sopel)
        object.__setattr__(self, '_trigger', trigger)

    def __dir__(self):
        classattrs = [attr for attr in self.__class__.__dict__
                      if not attr.startswith('__')]
        return list(self.__dict__) + classattrs + dir(self._bot)

    def __getattr__(self, attr):
        # Unknown attributes resolve on the wrapped bot.
        return getattr(self._bot, attr)

    def __setattr__(self, attr, value):
        # Writes land on the wrapped bot, not on the wrapper.
        return setattr(self._bot, attr, value)

    def say(self, message, destination=None, max_messages=1):
        target = self._trigger.sender if destination is None else destination
        self._bot.say(message, target, max_messages)

    def action(self, message, destination=None):
        target = self._trigger.sender if destination is None else destination
        self._bot.action(message, target)

    def notice(self, message, destination=None):
        target = self._trigger.sender if destination is None else destination
        self._bot.notice(message, target)

    def reply(self, message, destination=None, reply_to=None, notice=False):
        target = self._trigger.sender if destination is None else destination
        who = self._trigger.nick if reply_to is None else reply_to
        self._bot.reply(message, target, who, notice)
    def call(self, func, sopel, trigger):
        """Invoke a triggered callable, enforcing per-user, global, and
        per-channel rate limits, and record the invocation time on success.
        """
        nick = trigger.nick
        current_time = time.time()
        # Lazily create the per-key timestamp tables.
        if nick not in self._times:
            self._times[nick] = dict()
        if self.nick not in self._times:
            self._times[self.nick] = dict()
        if not trigger.is_privmsg and trigger.sender not in self._times:
            self._times[trigger.sender] = dict()
        # Admins and unblockable callables bypass all rate limiting.
        if not trigger.admin and not func.unblockable:
            if func in self._times[nick]:
                usertimediff = current_time - self._times[nick][func]
                if func.rate > 0 and usertimediff < func.rate:
                    #self._times[nick][func] = current_time
                    LOGGER.info(
                        "%s prevented from using %s in %s due to user limit: %d < %d",
                        trigger.nick, func.__name__, trigger.sender, usertimediff,
                        func.rate
                    )
                    return
            # Global limit is keyed on the bot's own nick.
            if func in self._times[self.nick]:
                globaltimediff = current_time - self._times[self.nick][func]
                if func.global_rate > 0 and globaltimediff < func.global_rate:
                    #self._times[self.nick][func] = current_time
                    LOGGER.info(
                        "%s prevented from using %s in %s due to global limit: %d < %d",
                        trigger.nick, func.__name__, trigger.sender, globaltimediff,
                        func.global_rate
                    )
                    return
            if not trigger.is_privmsg and func in self._times[trigger.sender]:
                chantimediff = current_time - self._times[trigger.sender][func]
                if func.channel_rate > 0 and chantimediff < func.channel_rate:
                    #self._times[trigger.sender][func] = current_time
                    LOGGER.info(
                        "%s prevented from using %s in %s due to channel limit: %d < %d",
                        trigger.nick, func.__name__, trigger.sender, chantimediff,
                        func.channel_rate
                    )
                    return
        try:
            exit_code = func(sopel, trigger)
        except Exception:  # TODO: Be specific
            exit_code = None
            self.error(trigger)
        # NOLIMIT means "do not count this invocation against rate limits".
        if exit_code != NOLIMIT:
            self._times[nick][func] = current_time
            self._times[self.nick][func] = current_time
            if not trigger.is_privmsg:
                self._times[trigger.sender][func] = current_time
    def dispatch(self, pretrigger):
        """Match an incoming IRC event against every registered callable and
        run (or thread off) each one that applies, honoring block lists.
        """
        args = pretrigger.args
        event, args, text = pretrigger.event, args, args[-1] if args else ''
        # Only compute block status when blocking is configured at all.
        if self.config.core.nick_blocks or self.config.core.host_blocks:
            nick_blocked = self._nick_blocked(pretrigger.nick)
            host_blocked = self._host_blocked(pretrigger.host)
        else:
            nick_blocked = host_blocked = None
        list_of_blocked_functions = []
        for priority in ('high', 'medium', 'low'):
            items = self._callables[priority].items()
            for regexp, funcs in items:
                match = regexp.match(text)
                if not match:
                    continue
                user_obj = self.users.get(pretrigger.nick)
                account = user_obj.account if user_obj else None
                trigger = Trigger(self.config, pretrigger, match, account)
                wrapper = self.SopelWrapper(self, trigger)
                for func in funcs:
                    # Blocked users may only run unblockable callables.
                    if (not trigger.admin and
                            not func.unblockable and
                            (nick_blocked or host_blocked)):
                        function_name = "%s.%s" % (
                            func.__module__, func.__name__
                        )
                        list_of_blocked_functions.append(function_name)
                        continue
                    if event not in func.event:
                        continue
                    # Intent-restricted callables only fire on matching tags.
                    if (hasattr(func, 'intents') and
                            trigger.tags.get('intent') not in func.intents):
                        continue
                    if func.thread:
                        targs = (func, wrapper, trigger)
                        t = threading.Thread(target=self.call, args=targs)
                        t.start()
                    else:
                        self.call(func, wrapper, trigger)
        if list_of_blocked_functions:
            if nick_blocked and host_blocked:
                block_type = 'both'
            elif nick_blocked:
                block_type = 'nick'
            else:
                block_type = 'host'
            LOGGER.info(
                "[%s]%s prevented from using %s.",
                block_type,
                trigger.nick,
                ', '.join(list_of_blocked_functions)
            )
def _host_blocked(self, host):
bad_masks = self.config.core.host_blocks
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
bad_nicks = self.config.core.nick_blocks
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
    def _shutdown(self):
        """Run every registered module shutdown method, reporting failures
        without letting one module's error stop the others.
        """
        stderr(
            'Calling shutdown for %d modules.' % (len(self.shutdown_methods),)
        )
        for shutdown_method in self.shutdown_methods:
            try:
                stderr(
                    "calling %s.%s" % (
                        shutdown_method.__module__, shutdown_method.__name__,
                    )
                )
                shutdown_method(self)
            except Exception as e:
                # Log and continue so remaining modules still shut down.
                stderr(
                    "Error calling shutdown method for module %s:%s" % (
                        shutdown_method.__module__, e
                    )
                )
    def cap_req(self, module_name, capability, arg=None, failure_callback=None,
                success_callback=None):
        """Tell Sopel to request a capability when it starts.
        By prefixing the capability with `-`, it will be ensured that the
        capability is not enabled. Similarly, by prefixing the capability with
        `=`, it will be ensured that the capability is enabled. Requiring and
        disabling is "first come, first served"; if one module requires a
        capability, and another prohibits it, this function will raise an
        exception in whichever module loads second. An exception will also be
        raised if the module is being loaded after the bot has already started,
        and the request would change the set of enabled capabilities.
        If the capability is not prefixed, and no other module prohibits it, it
        will be requested. Otherwise, it will not be requested. Since
        capability requests that are not mandatory may be rejected by the
        server, as well as by other modules, a module which makes such a
        request should account for that possibility.
        The actual capability request to the server is handled after the
        completion of this function. In the event that the server denies a
        request, the `failure_callback` function will be called, if provided.
        The arguments will be a `Sopel` object, and the capability which was
        rejected. This can be used to disable callables which rely on the
        capability. It will be called either if the server NAKs the request,
        or if the server enabled it and later DELs it.
        The `success_callback` function will be called upon acknowledgement of
        the capability from the server, whether during the initial capability
        negotiation, or later.
        If ``arg`` is given, and does not exactly match what the server
        provides or what other modules have requested for that capability, it is
        considered a conflict.
        """
        # TODO raise better exceptions
        cap = capability[1:]
        prefix = capability[0]
        entry = self._cap_reqs.get(cap, [])
        # Differing args for the same capability are always a conflict.
        if any((ent.arg != arg for ent in entry)):
            raise Exception('Capability conflict')
        if prefix == '-':
            if self.connection_registered and cap in self.enabled_capabilities:
                raise Exception('Can not change capabilities after server '
                                'connection has been completed.')
            if any((ent.prefix != '-' for ent in entry)):
                raise Exception('Capability conflict')
            entry.append(_CapReq(prefix, module_name, failure_callback, arg,
                                 success_callback))
            self._cap_reqs[cap] = entry
        else:
            # Unprefixed requests keep the raw capability string as the key.
            if prefix != '=':
                cap = capability
                prefix = ''
            if self.connection_registered and (cap not in
                                               self.enabled_capabilities):
                raise Exception('Can not change capabilities after server '
                                'connection has been completed.')
            # Non-mandatory will callback at the same time as if the server
            # rejected it.
            if any((ent.prefix == '-' for ent in entry)) and prefix == '=':
                raise Exception('Capability conflict')
            entry.append(_CapReq(prefix, module_name, failure_callback, arg,
                                 success_callback))
            self._cap_reqs[cap] = entry
|
ManyHellosServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from ManyHellos.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-server-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Path of the KBase deployment config file from the environment
    (the DEPLOY variable name), or None when unset.
    """
    return environ.get(DEPLOY)
def get_service_name():
    """Service name from the environment (the SERVICE variable name),
    or None when unset.
    """
    return environ.get(SERVICE)
def get_config():
    """Parse this service's section of the deployment config into a dict.

    Returns None when no config file is configured via the environment.
    """
    cfg_path = get_config_file()
    if not cfg_path:
        return None
    parser = ConfigParser()
    parser.read(cfg_path)
    section = get_service_name() or 'ManyHellos'
    # items() yields (name, value) pairs for the section.
    return dict(parser.items(section))
# Parse the deployment configuration once at import time; the implementation
# object below is built from it and shared by every request.
config = get_config()
from ManyHellos.ManyHellosImpl import ManyHellos  # noqa @IgnorePep8
impl_ManyHellos = ManyHellos(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that also serializes sets, frozensets, and any object
    exposing a ``toJSONable()`` method.
    """
    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Defer to the base class, which raises TypeError for unknowns.
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService subclass that threads a MethodContext through every
    method call and serializes results with JSONObjectEncoder.
    """

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        ctx -- MethodContext forwarded as the first argument of the method
        jsondata -- remote method call in jsonrpc format (already decoded)
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            # NOTE(review): e.message is Python 2-only; a Python 3 port must
            # switch this to str(e) or e.args.
            newerr.data = e.message
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # 'in' replaces the Python 2-only dict.has_key(), matching the 'in'
        # usage elsewhere in this module.
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Dict of per-call metadata (client IP, user, method, provenance, ...)
    that also proxies logging to the service logger.
    """
    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Values accepted verbatim by log_debug; anything else must coerce
        # to an int in [1, 3].
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger
    def log_err(self, message):
        self._log(log.ERR, message)
    def log_info(self, message):
        self._log(log.INFO, message)
    def log_debug(self, message, level=1):
        """Log a debug message; numeric ``level`` 1-3 maps to levels 7-9."""
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)
    def set_log_level(self, level):
        self._logger.set_log_level(level)
    def get_log_level(self):
        return self._logger.get_log_level()
    def clear_log_level(self):
        self._logger.clear_user_log_level()
    def _log(self, level, message):
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])
    def provenance(self):
        """Fetch provenance from the SDK callback server when one is
        configured via SDK_CALLBACK_URL; otherwise return the locally
        recorded 'provenance' entry.
        """
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # 500s may carry a structured JSON-RPC error body.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message or ''
        # 'data' is the JSON-RPC 2.0 field name; 'error' the 1.1 one.
        self.data = data or error or ''

    def __str__(self):
        return '%s: %s. %s\n%s' % (self.name, str(self.code), self.message,
                                   self.data)
def getIPAddress(environ):
    """Best-effort client IP from a WSGI environ, honoring proxy headers
    unless the config sets dont_trust_x_ip_headers to the string 'true'.
    """
    forwarded = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    trust_proxy_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_proxy_headers:
        if forwarded:
            # X-Forwarded-For may be a chain; the first hop is the client.
            return forwarded.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over
    """WSGI application serving the ManyHellos JSON-RPC methods, with
    per-method authentication and file logging.
    """

    def logcallback(self):
        # Keep the server log pointed at the same file as the user log
        # whenever the latter is changed/rotated.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        submod = get_service_name() or 'ManyHellos'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        self.method_authentication = dict()
        # Register every implementation method plus its auth requirement.
        self.rpc_service.add(impl_ManyHellos.manyHellos,
                             name='ManyHellos.manyHellos',
                             types=[dict])
        self.method_authentication['ManyHellos.manyHellos'] = 'required'  # noqa
        self.rpc_service.add(impl_ManyHellos.manyHellos_prepare,
                             name='ManyHellos.manyHellos_prepare',
                             types=[dict])
        self.method_authentication['ManyHellos.manyHellos_prepare'] = 'required'  # noqa
        self.rpc_service.add(impl_ManyHellos.manyHellos_runEach,
                             name='ManyHellos.manyHellos_runEach',
                             types=[dict])
        self.method_authentication['ManyHellos.manyHellos_runEach'] = 'required'  # noqa
        self.rpc_service.add(impl_ManyHellos.manyHellos_collect,
                             name='ManyHellos.manyHellos_collect',
                             types=[dict])
        self.method_authentication['ManyHellos.manyHellos_collect'] = 'required'  # noqa
        self.rpc_service.add(impl_ManyHellos.hi,
                             name='ManyHellos.hi',
                             types=[basestring])
        self.method_authentication['ManyHellos.hi'] = 'required'  # noqa
        self.rpc_service.add(impl_ManyHellos.status,
                             name='ManyHellos.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'
        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except ValueError:
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'ManyHellos ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            # 'as' form for consistency with the other
                            # handlers in this file (py2.6+/py3 compatible).
                            except Exception as e:
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
        # print 'Request method was %s\n' % environ['REQUEST_METHOD']
        # print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
        # print 'Request body was: %s' % request_body
        # print 'Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result)
        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''
        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body]

    def process_error(self, error, context, request, trace=None):
        """Log ``trace`` and shape ``error`` for the caller's JSON-RPC
        version (1.0 / 1.1 / 2.0); returns the serialized error body.
        """
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        """Local time as an ISO-8601 string with a UTC-offset suffix."""
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        # NOTE(review): config values are strings, so any non-empty value
        # (even 'false') enables monkeypatching -- confirm this is intended.
        # Parenthesized print works identically on Python 2 and is valid
        # syntax on Python 3.
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass

# Handle to the child process hosting the server when started with
# start_server(newprocess=True).
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # port=0 lets the OS assign one; read back the real port.
    port = httpd.server_address[1]
    # Parenthesized print: same output on Python 2, valid on Python 3.
    print("Listening on port %s" % port)
    if newprocess:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    else:
        httpd.serve_forever()
    return port
def stop_server():
    """Terminate a server previously started with newprocess=True.

    Raises AttributeError if no such server is running (_proc is None).
    """
    global _proc
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC request read from ``input_file_path`` (async
    job CLI mode) and write the JSON response to ``output_file_path``.

    Returns 0 on success, 500 if the response carries an error.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in defaults the CLI caller may omit.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # CLI async-job mode: script <input.json> <output.json> [token|tokenfile]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Otherwise: standalone server mode with --host/--port options.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        # (parenthesized prints work on both Python 2 and 3)
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print "Listening on port %s" % port
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
app.py | from tkinter import Tk
from multiprocessing import Process
from application import Application
from config import Config
from upnp.upnp import Upnp
import asyncio
import websockets
import concurrent.futures
import json
async def open_port_register():
    """Refresh the UPnP mapping for the tensorflow port, register this node
    with the maneframe web server, then print pushed messages forever.

    Relies on module globals set in __main__: tensorflow_port,
    maneframe_web_uri, register_payload.
    """
    upnp=Upnp()
    # Drop any stale mapping before re-adding one for this host.
    upnp.delete_port_mapping(tensorflow_port)
    upnp.add_port_mapping(upnp.get_ip_address())
    async with websockets.connect(maneframe_web_uri) as websocket:
        await websocket.send(json.dumps(register_payload))
        print("> {}".format(json.dumps(register_payload)))
        while True:
            greeting = await websocket.recv()
            print("< {}".format(greeting))
def start_web():
    """Run the UPnP/registration coroutine to completion.

    Executed in a child process (see __main__); asyncio.run creates and
    closes a fresh event loop, replacing the deprecated implicit
    get_event_loop()/run_until_complete pattern.
    """
    asyncio.run(open_port_register())
if __name__ == '__main__':
    # Get all config values
    config = Config()
    tensorflow_port=config.cfg['tensorflow']['port']
    webserver_port=config.cfg['webserver']['port']
    maneframe_web_uri=config.cfg['maneframe']['web']['uri']
    uuid = config.uuid_str
    register_payload={'ip':config.external_ip,'tensorflow_port':tensorflow_port, 'uuid': uuid}
    # Open ports and register code
    # The UPnP/registration loop runs in a child process so the Tk main
    # loop below can own the main thread.
    p = Process(target=start_web)
    p.start()
    # Visual components start
    root = Tk()
    app = Application(master=root)
    app.mainloop()
    root.destroy()
    # Stop the registration process once the GUI closes.
    p.terminate()
|
balance_server.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import select
import struct
import errno
import json
import threading
import sys
from six.moves.queue import Queue
import time
class Server(object):
    """Single-threaded epoll TCP server speaking a framed JSON protocol.

    Frame layout: HEAD_SIZE bytes of header packed as HEAD_FORMAT — the
    4-byte magic CRC_CODE followed by a big-endian int32 giving the total
    frame length (header included) — then a UTF-8 JSON payload.  Complete
    frames are decoded on a daemon worker thread which calls
    _process_msg(); subclasses override that hook.
    """
    _READ = select.EPOLLIN | select.EPOLLHUP | select.EPOLLERR
    _WRITE = select.EPOLLOUT | select.EPOLLHUP | select.EPOLLERR
    RECV_SIZE = 4096
    HEAD_SIZE = 8  # 8bytes, 64bit
    HEAD_FORMAT = '!4si'
    CRC_CODE = b'\xCB\xEF\x00\x00'  # magic marking the start of a valid frame

    def __init__(self, ip, port):
        self._ip = ip
        self._port = port
        self._clients = {}  # fd -> accepted client socket
        # request & response message
        self._requests = {}  # fd -> inbound byte buffer (partial frames)
        self._responses = {}  # fd -> outbound byte buffer awaiting send
        self._request_queue = Queue()
        # NOTE(review): _response_queue is never consumed — responses flow
        # through self._responses instead; looks vestigial.
        self._response_queue = Queue()
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setblocking(False)
        server.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((self._ip, self._port))
        server.listen(5)
        fd = server.fileno()
        self._server = server
        self._fd = fd
        # create epoll
        epoll = select.epoll()
        epoll.register(fd, self._READ)
        self._epoll = epoll

    def _process_msg(self, fd, msg):
        """Handle one decoded JSON message from client *fd* (subclass hook)."""
        pass

    def _handle_requests(self):
        """Worker loop: pop complete frames off the request queue, validate
        the declared length, decode the JSON payload and dispatch it.
        Runs forever on a daemon thread started by _start()."""
        while True:
            fd, msg = self._request_queue.get()
            crc_code, length = struct.unpack_from(self.HEAD_FORMAT, msg)
            if length != len(msg):
                sys.stderr.write('length={} != msg_length={}, close\n'.format(
                    length, len(msg)))
                self.close_conn(fd)
                continue
            msg = json.loads(msg[self.HEAD_SIZE:].decode())
            self._process_msg(fd, msg)

    def _enqueue_request(self, fd):
        """Slice every complete frame out of fd's receive buffer onto the
        request queue; close the connection on a bad magic header."""
        msg = self._requests[fd]
        if len(msg) < self.HEAD_SIZE:
            return
        while True:
            crc_code, length = struct.unpack_from(self.HEAD_FORMAT, msg)
            if crc_code != self.CRC_CODE:
                # connection error
                # self._epoll.modify(fd, select.EPOLLERR)
                self.close_conn(fd)
                return
            if len(msg) < length:
                # frame not fully received yet; wait for more bytes
                return
            request = msg[:length]
            self._request_queue.put((fd, request))
            self._requests[fd] = msg[length:]
            msg = self._requests[fd]
            if len(msg) < self.HEAD_SIZE:
                return

    def _enqueue_response(self, fd, msg):
        """Serialize *msg* to a framed payload, append it to fd's outbound
        buffer and switch the fd to write-interest."""
        msg = json.dumps(msg).encode()
        size = self.HEAD_SIZE + len(msg)
        msg = struct.pack(self.HEAD_FORMAT, self.CRC_CODE, size) + msg
        assert len(msg) == size, 'Error with response msg'
        # self._response_queue.put((fd, msg))
        # Fixme. response multi msg?
        # assert len(self._responses[fd]) == 0
        self._responses[fd] += msg
        self._epoll.modify(fd, self._WRITE)

    def _init_conn(self):
        """Accept a new client, make it non-blocking and register for reads."""
        client, addr = self._server.accept()
        # sys.stderr.write('addr={} conn\n'.format(addr))
        # client.getpeername()
        client.setblocking(False)
        fd = client.fileno()
        self._epoll.register(fd, self._READ)
        self._clients[fd] = client
        self._requests[fd] = b''
        self._responses[fd] = b''

    def _handle_in(self, fd):
        """Read available bytes from *fd*; close on hard error or EOF."""
        try:
            data = self._clients[fd].recv(self.RECV_SIZE)
        except socket.error as e:
            eno = e.args[0]
            if eno not in (errno.EINTR, errno.EWOULDBLOCK, errno.EAGAIN):
                # connection error
                # self._epoll.modify(fd, select.EPOLLERR)
                self.close_conn(fd)
            return
        if not data:
            # connection close
            # self._epoll.modify(fd, select.EPOLLHUP)
            self.close_conn(fd)
            return
        else:
            self._requests[fd] += data
            self._enqueue_request(fd)

    def _handle_out(self, fd):
        """Flush as much of fd's outbound buffer as the socket accepts;
        revert to read-interest once the buffer drains."""
        response = self._responses[fd]
        size = len(response)  # NOTE(review): unused local
        try:
            send_size = self._clients[fd].send(response)
        except socket.error as e:
            eno = e.args[0]
            if eno not in (errno.EINTR, errno.EWOULDBLOCK, errno.EAGAIN):
                # connection error
                # self._epoll.modify(fd, select.EPOLLERR)
                self.close_conn(fd)
            return
        if send_size == 0:
            # connection close
            # self._epoll.modify(fd, select.EPOLLHUP)
            self.close_conn(fd)
            return
        else:
            self._responses[fd] = response[send_size:]
            if len(self._responses[fd]) == 0:
                self._epoll.modify(fd, self._READ)

    def close_conn(self, fd):
        """Unregister and close *fd*, then drop all of its buffers."""
        try:
            ip, port = self._clients[fd].getpeername()
            sys.stderr.write('close conn={}\n'.format(ip + ':' + str(port)))
            self._epoll.unregister(fd)
            self._clients[fd].close()
        except Exception as e:
            sys.stderr.write('Exception when close fd={}\n'.format(fd))
            sys.stderr.write(str(e) + '\n')
        del self._clients[fd]
        del self._requests[fd]
        del self._responses[fd]

    def _start(self):
        """Spawn the request worker, then run the epoll event loop forever."""
        request_thread = threading.Thread(target=self._handle_requests)
        request_thread.daemon = True
        request_thread.start()
        # Todo. Add response thread?
        while True:
            for fd, event in self._epoll.poll(timeout=1):
                if fd == self._fd:
                    self._init_conn()
                elif (event & select.EPOLLHUP) or (event & select.EPOLLERR):
                    self.close_conn(fd)
                elif event & select.EPOLLIN:
                    self._handle_in(fd)
                elif event & select.EPOLLOUT:
                    self._handle_out(fd)

    def server_forever(self):
        """Run the server loop; always release epoll and the socket on exit."""
        try:
            self._start()
        finally:
            self._epoll.unregister(self._fd)
            self._epoll.close()
            self._server.close()
class BalanceServer(Server):
    """Discovery server with load balance: hands out backend server lists
    to registering clients and keeps them fresh via heartbeats.

    *table* is a ServiceTable-like object tracking available servers per
    service name; it is consulted on every register/heartbeat message.
    """

    def __init__(self, ip='127.0.0.1', port=9379, table=None):
        super(BalanceServer, self).__init__(ip, port)
        self._table = table
        # Dispatch table: message 'type' field -> handler method.
        self._handle_func = {
            'register': self._handle_register,
            'heartbeat': self._handle_heartbeat
        }

    def _handle_register(self, fd, msg):
        """Register client *fd* for msg['service_name'] and reply with up
        to msg['num'] servers (seq is echoed back incremented by one)."""
        # Todo
        # store.set_client()
        require_num = int(msg['num'])
        self._table.add_service_name(fd, msg['service_name'], require_num)
        servers = self._table.get_servers(fd, require_num)
        client = self._clients[fd]
        ip, port = client.getpeername()
        sys.stderr.write('register addr={} service_name={} num={}\n'.format(
            ip + ':' + str(port), msg['service_name'], require_num))
        # response
        msg = {
            'type': 'register',
            'seq': int(msg['seq']) + 1,
            'servers': servers,
            'num': len(servers)
        }
        self._enqueue_response(fd, msg)

    def _handle_heartbeat(self, fd, msg):
        """Answer a heartbeat; push a fresh server list when it changed
        since the client's reported version."""
        version = 0
        try:
            version = int(msg['version'])
        except KeyError:
            # compatible old client
            pass
        new_version, servers = self._table.is_servers_update(fd, version)
        if new_version > version:
            msg = {
                'type': 'servers_change',
                'servers': servers,
                'version': new_version
            }
        else:
            msg = {'type': 'heartbeat'}
        self._enqueue_response(fd, msg)

    def _process_msg(self, fd, msg):
        """Route a decoded message to its handler by 'type'."""
        # Renamed from `type` to avoid shadowing the builtin.
        msg_type = msg['type']
        func = self._handle_func[msg_type]
        func(fd, msg)

    def close_conn(self, fd):
        """Close the connection and drop its service-table registration."""
        super(BalanceServer, self).close_conn(fd)
        self._table.rm_service_name(fd)

    def server_forever(self):
        """Start the service table, then run the base event loop forever."""
        self._table.start()
        super(BalanceServer, self).server_forever()
if __name__ == '__main__':
    # NOTE(review): a relative import normally fails when this file is run
    # directly as a script (no parent package) — presumably it is launched
    # with `python -m <package>.balance_server`; confirm.
    from .service_table import ServiceTable
    import argparse
    parser = argparse.ArgumentParser(
        description='Discovery server with balance')
    parser.add_argument(
        '--server',
        type=str,
        default='0.0.0.0:7001',
        help='endpoint of the server, e.g. 127.0.0.1:8888 [default: %(default)s]'
    )
    parser.add_argument(
        '--worker_num',
        type=int,
        default=1,
        help='worker num of server [default: %(default)s]')
    parser.add_argument(
        '--db_endpoints',
        type=str,
        default='127.0.0.1:6379',
        help='database endpoints, e.g. 127.0.0.1:2379,127.0.0.1:2380 [default: %(default)s]'
    )
    parser.add_argument(
        '--db_passwd',
        type=str,
        default=None,
        help='detabase password [default: %(default)s]')
    parser.add_argument(
        '--db_type',
        type=str,
        default='redis',
        help='database type, only support redis for now [default: %(default)s]')
    args = parser.parse_args()
    server = args.server
    worker_num = args.worker_num  # NOTE(review): parsed but never used below
    db_endpoints = args.db_endpoints.split(',')
    # Only the first database endpoint is used.
    redis_ip_port = db_endpoints[0].split(':')
    server_ip_port = server.split(':')
    table = ServiceTable(redis_ip_port[0],
                         int(redis_ip_port[1]))  # connect redis ip:port
    balance_server = BalanceServer(server_ip_port[0],
                                   int(server_ip_port[1]), table)  # listen
    balance_server.server_forever()
|
terminal.py | import sublime
import os
import time
import base64
import logging
import tempfile
import threading
from queue import Queue, Empty
from .ptty import TerminalPtyProcess, TerminalScreen, TerminalStream
from .utils import responsive, intermission
from .view import panel_window, view_size
from .key import get_key_code
from .image import get_image_info, image_resize
# Zero-width marker sequence (ZWSP + ZWNJ + ZWSP) used to tag continuation
# of wrapped terminal lines without being visible in the view.
CONTINUATION = "\u200b\u200c\u200b"
# HTML template for inline image phantoms; doubled braces survive str.format.
IMAGE = """
<style>
body {{
margin: 1px;
}}
</style>
<img src="data:image/{what};base64,{data}" width="{width}" height="{height}"/>
"""
logger = logging.getLogger('Terminus')
class Terminal:
    """One terminal session: the pty process, the emulated screen/stream,
    and the Sublime Text view (or panel) that renders it.

    Rendering runs on two daemon-style threads started by
    _start_rendering(): a reader that pulls pty output into a shared
    buffer, and a renderer that feeds that buffer to the screen and asks
    the view to redraw.  ``self.lock`` guards the shared state.
    """
    _terminals = {}            # view id -> Terminal
    _detached_terminals = []   # live terminals not currently bound to a view

    def __init__(self, view=None):
        self._title = ""
        self.view = view
        self._cached_cursor = [0, 0]
        self._cached_cursor_is_hidden = [True]
        self.image_count = 0
        self.images = {}  # phantom id -> temp image file path
        self._strings = Queue()
        # One-element lists act as mutable flags shared with worker threads.
        self._pending_to_send_string = [False]
        self._pending_to_clear_scrollback = [False]
        self._pending_to_reset = [None]
        self.lock = threading.Lock()

    @classmethod
    def from_id(cls, vid):
        """Return the terminal attached to view id *vid*, or None."""
        if vid not in cls._terminals:
            return None
        return cls._terminals[vid]

    @classmethod
    def from_tag(cls, tag):
        """Return the first attached terminal created with *tag*, or None."""
        for terminal in cls._terminals.values():
            if terminal.tag == tag:
                return terminal
        return None

    @classmethod
    def cull_terminals(cls):
        """Close every terminal whose hosting window/panel has gone away."""
        terminals_to_close = []
        for terminal in cls._terminals.values():
            if not terminal.is_hosted():
                terminals_to_close.append(terminal)
        for terminal in terminals_to_close:
            terminal.close()

    def attach_view(self, view, offset=None):
        """Bind this (possibly detached) terminal to *view* and force a
        full re-render."""
        with self.lock:
            self.view = view
            self.detached = False
            Terminal._terminals[view.id()] = self
            if self in Terminal._detached_terminals:
                Terminal._detached_terminals.remove(self)
            self.view.settings().erase("terminus_view.detached")
            # allow screen to be rerendered
            self.screen.dirty.update(range(self.screen.lines))
            self.set_offset(offset)

    def detach_view(self):
        """Unbind from the current view while keeping the session alive."""
        with self.lock:
            self.detached = True
            Terminal._detached_terminals.append(self)
            if self.view.id() in Terminal._terminals:
                del Terminal._terminals[self.view.id()]
            self.view.settings().set("terminus_view.detached", True)
            self.view = None

    @responsive(period=1, default=True)
    def is_hosted(self):
        """Whether the terminal's view/panel still lives in some window."""
        if self.detached:
            # irrelevant if terminal is detached
            return True
        if self.panel_name:
            return panel_window(self.view) or False
        else:
            return self.view.window() or False

    def _need_to_render(self):
        """Return True when the screen or cursor changed since the last
        render; refreshes the cached cursor state as a side effect."""
        flag = False
        if self.screen.dirty:
            flag = True
        elif self.screen.cursor.x != self._cached_cursor[0] or \
                self.screen.cursor.y != self._cached_cursor[1]:
            flag = True
        elif self.screen.cursor.hidden != self._cached_cursor_is_hidden[0]:
            flag = True
        if flag:
            self._cached_cursor[0] = self.screen.cursor.x
            self._cached_cursor[1] = self.screen.cursor.y
            self._cached_cursor_is_hidden[0] = self.screen.cursor.hidden
        return flag

    def _start_rendering(self):
        """Start the reader and renderer threads for this session."""
        data = [""]   # shared pty output buffer (guarded by self.lock)
        done = [False]  # either side sets this to stop the other

        @responsive(period=1, default=False)
        def was_resized():
            size = view_size(self.view)
            return self.screen.lines != size[0] or self.screen.columns != size[1]

        def reader():
            # Pull pty output into data[0] until EOF or shutdown.
            while True:
                try:
                    temp = self.process.read(1024)
                except EOFError:
                    break
                with self.lock:
                    data[0] += temp
                if done[0] or not self.is_hosted():
                    logger.debug("reader breaks")
                    break
            done[0] = True

        threading.Thread(target=reader).start()

        def renderer():
            def feed_data():
                if len(data[0]) > 0:
                    logger.debug("receieved: {}".format(data[0]))
                    self.stream.feed(data[0])
                    data[0] = ""

            # Poll ~every 30ms: feed buffered output, react to resizes and
            # redraw the view when the screen changed.
            while True:
                with intermission(period=0.03), self.lock:
                    feed_data()
                    if not self.detached:
                        if was_resized():
                            self.handle_resize()
                            self.view.run_command("terminus_show_cursor")
                        if self._need_to_render():
                            self.view.run_command("terminus_render")
                            self.screen.dirty.clear()
                    if done[0] or not self.is_hosted():
                        logger.debug("renderer breaks")
                        break
            # Flush anything the reader buffered after the loop exited.
            feed_data()
            done[0] = True
            sublime.set_timeout(lambda: self.cleanup())

        threading.Thread(target=renderer).start()

    def set_offset(self, offset=None):
        """Record the view row where terminal output starts rendering."""
        if offset is not None:
            self.offset = offset
        else:
            if self.view and self.view.size() > 0:
                view = self.view
                self.offset = view.rowcol(view.size())[0] + 1
            else:
                self.offset = 0
        logger.debug("activating with offset %s", self.offset)

    def activate(
            self, config_name, cmd, cwd=None, env=None, title=None,
            panel_name=None, tag=None, auto_close=True, cancellable=False, timeit=False):
        """Spawn the pty process for *cmd* and start rendering.

        *env* is overlaid on a copy of os.environ; *auto_close* closes the
        view on clean exit; *timeit* appends the elapsed time on finish.
        """
        view = self.view
        if view:
            self.detached = False
            Terminal._terminals[view.id()] = self
        else:
            Terminal._detached_terminals.append(self)
            self.detached = True
        self.config_name = config_name
        self.panel_name = panel_name
        self.tag = tag
        self.auto_close = auto_close
        self.cancellable = cancellable
        self.timeit = timeit
        if timeit:
            self.start_time = time.time()
        self.default_title = view.name() if view.name() else title
        if view:
            self.title = title
        self.set_offset()
        size = view_size(view or sublime.active_window().active_view(), (40, 80))
        logger.debug("view size: {}".format(str(size)))
        _env = os.environ.copy()
        _env.update(env)
        self.process = TerminalPtyProcess.spawn(cmd, cwd=cwd, env=_env, dimensions=size)
        self.screen = TerminalScreen(
            size[1], size[0], process=self.process, history=10000,
            clear_callback=self.clear_callback, reset_callback=self.reset_callback)
        self.stream = TerminalStream(self.screen)
        self.screen.set_show_image_callback(self.show_image)
        self._start_rendering()

    def close(self):
        """Terminate the process and forget the view binding."""
        logger.debug("close")
        self.process.terminate()
        vid = self.view.id()
        if vid in self._terminals:
            del self._terminals[vid]

    def cleanup(self, by_user=False):
        """Finalize the view after the process ends: render the last frame,
        append an exit banner and mark the view closed."""
        logger.debug("cleanup")
        if not self.view or self.view.id() not in self._terminals:
            return
        if self.view.settings().get("terminus_view.closed"):
            return
        self.view.run_command("terminus_render")
        # process might became orphan, make sure the process is terminated
        # however, we do not immediately from it from _terminals to allow
        # copy, paste etc to be functional
        self.process.terminate()
        if self.process.exitstatus == 0 and self.auto_close:
            self.view.run_command("terminus_close")
        self.view.run_command("terminus_trim_trailing_lines")
        if by_user:
            self.view.run_command("append", {"characters": "[Cancelled]"})
        elif self.timeit:
            if self.process.exitstatus == 0:
                self.view.run_command(
                    "append",
                    {"characters": "[Finished in {:0.2f}s]".format(time.time() - self.start_time)})
            else:
                self.view.run_command(
                    "append",
                    {"characters": "[Finished in {:0.2f}s with exit code {}]".format(
                        time.time() - self.start_time, self.process.exitstatus)})
        elif self.process.exitstatus is not None:
            self.view.run_command(
                "append",
                {"characters": "process is terminated with return code {}.".format(
                    self.process.exitstatus)})
        self.view.sel().clear()
        if not self.panel_name and self.view.settings().get("result_file_regex"):
            # if it is a tab based build, we will to refocus to enable next_result
            window = self.view.window()
            if window:
                active_view = window.active_view()
                self.view.window().focus_view(self.view)
                if active_view:
                    self.view.window().focus_view(active_view)
        # to avoid being reactivated
        self.view.settings().set("terminus_view.closed", True)

    def handle_resize(self):
        """Propagate the view's new size to the pty and emulated screen."""
        size = view_size(self.view)
        logger.debug("handle resize {} {} -> {} {}".format(
            self.screen.lines, self.screen.columns, size[0], size[1]))
        try:
            # pywinpty will rasie an runtime error
            self.process.setwinsize(*size)
            self.screen.resize(*size)
        except RuntimeError:
            pass

    @property
    def title(self):
        return self._title

    @title.setter
    def title(self, value):
        # Falls back to the config name; only touches the view when attached.
        if not self.detached:
            value = value if value else self.config_name
            self._title = value
            self.view.set_name(value)

    def clear_callback(self):
        """Screen hook: mark the scrollback for clearing on next render."""
        self._pending_to_clear_scrollback[0] = True

    def reset_callback(self):
        """Screen hook: first call arms the flag, later calls request reset."""
        if self._pending_to_reset[0] is None:
            self._pending_to_reset[0] = False
        else:
            self._pending_to_reset[0] = True

    def send_key(self, *args, **kwargs):
        """Translate a key press to its escape sequence and send it raw."""
        kwargs["application_mode"] = self.application_mode_enabled()
        kwargs["new_line_mode"] = self.new_line_mode_enabled()
        self.send_string(get_key_code(*args, **kwargs), normalized=False)

    def send_string(self, string, normalized=True):
        """Write *string* to the pty; long or queued input is chunked to
        512 bytes and drained on a background thread."""
        if normalized:
            # normalize CR and CRLF to CR (or CRLF if LNM)
            string = string.replace("\r\n", "\n")
            if self.new_line_mode_enabled():
                string = string.replace("\n", "\r\n")
            else:
                string = string.replace("\n", "\r")
        no_queue = not self._pending_to_send_string[0]
        if no_queue and len(string) <= 512:
            logger.debug("sent: {}".format(string[0:64] if len(string) > 64 else string))
            self.process.write(string)
        else:
            for i in range(0, len(string), 512):
                self._strings.put(string[i:i+512])
            if no_queue:
                self._pending_to_send_string[0] = True
                threading.Thread(target=self.process_send_string).start()

    def process_send_string(self):
        """Drain the queued chunks, pacing writes by 0.1s per chunk."""
        while True:
            try:
                string = self._strings.get(False)
                logger.debug("sent: {}".format(string[0:64] if len(string) > 64 else string))
                self.process.write(string)
            except Empty:
                self._pending_to_send_string[0] = False
                return
            else:
                time.sleep(0.1)

    def bracketed_paste_mode_enabled(self):
        return (2004 << 5) in self.screen.mode

    def new_line_mode_enabled(self):
        return (20 << 5) in self.screen.mode

    def application_mode_enabled(self):
        return (1 << 5) in self.screen.mode

    def find_image(self, pt):
        """Return the id of an image phantom ending exactly at *pt*, or None."""
        view = self.view
        for pid in self.images:
            region = view.query_phantom(pid)[0]
            if region.end() == pt:
                return pid
        return None

    def show_image(self, data, args, cr=None):
        """Render a base64 image payload as an inline phantom at the cursor.

        *data* is base64 text, *args* carries inline/width/height options;
        the decoded bytes are kept in a temp file tracked in self.images.
        """
        view = self.view
        if "inline" not in args or not args["inline"]:
            return
        cursor = self.screen.cursor
        pt = view.text_point(self.offset + cursor.y, cursor.x)
        databytes = base64.decodebytes(data.encode())
        image_info = get_image_info(databytes)
        if not image_info:
            logger.error("cannot get image info")
            return
        what, width, height = image_info
        _, image_path = tempfile.mkstemp(suffix="." + what)
        with open(image_path, "wb") as f:
            f.write(databytes)
        width, height = image_resize(
            width,
            height,
            args["width"] if "width" in args else None,
            args["height"] if "height" in args else None,
            view.em_width(),
            view.viewport_extent()[0] - 3 * view.em_width(),
            args["preserveAspectRatio"] if "preserveAspectRatio" in args else 1
        )
        if self.find_image(pt):
            # another image already ends here: pad with a space so the
            # two phantoms do not collide at the same point
            self.view.run_command("terminus_insert", {"point": pt, "character": " "})
            pt += 1
        self.image_count += 1
        p = view.add_phantom(
            "terminus_image#{}".format(self.image_count),
            sublime.Region(pt, pt),
            IMAGE.format(
                what=what,
                data=data,
                width=width,
                height=height,
                count=self.image_count),
            sublime.LAYOUT_INLINE,
        )
        self.images[p] = image_path
        if cr:
            self.screen.index()

    def clean_images(self):
        """Erase orphaned image phantoms and delete their temp files."""
        view = self.view
        for pid in list(self.images.keys()):
            region = view.query_phantom(pid)[0]
            if region.empty() and region.begin() == 0:
                view.erase_phantom_by_id(pid)
                if pid in self.images:
                    try:
                        os.remove(self.images[pid])
                    except Exception:
                        pass
                    del self.images[pid]

    def __del__(self):
        # make sure the process is terminated
        self.process.terminate(force=True)
        # remove images
        for image_path in list(self.images.values()):
            try:
                os.remove(image_path)
            except Exception:
                pass
        if self.process.isalive():
            logger.debug("process becomes orphaned")
        else:
            logger.debug("process is terminated")
|
bot_base.py | '''
@ Harris Christiansen (Harris@HarrisChristiansen.com)
January 2016
Generals.io Automated Client - https://github.com/harrischristiansen/generals-bot
Generals Bot: Base Bot Class
'''
import logging
from Queue import PriorityQueue
import random
import threading
import time
from client import generals
from viewer import GeneralsViewer
# Opponent Type Definitions
# Escalating target priority used by find_primary_target (higher = juicier).
OPP_EMPTY = 0
OPP_ARMY = 1
OPP_CITY = 2
OPP_GENERAL = 3
# Orthogonal move deltas; consumers unpack entries as (dy, dx) — see _neighbors.
DIRECTIONS = [(1, 0), (-1, 0), (0, 1), (0, -1)]
class GeneralsBot(object):
	"""Base bot for generals.io (Python 2 code: raw_input, Queue module).

	Drives a game session on background threads while the viewer runs on
	the main thread; *updateMethod* is called once per server update with
	(bot, update) and is expected to issue moves via place_move().
	"""
	def __init__(self, updateMethod, name="PurdueBot", gameType="private", privateRoomID="PurdueBot"):
		# Save Config
		self._updateMethod = updateMethod
		self._name = name
		self._gameType = gameType
		self._privateRoomID = privateRoomID
		# Start Game Loop
		_create_thread(self._start_game_loop)
		# Start Game Viewer (blocks on the main thread)
		window_title = "%s (%s)" % (self._name, self._gameType)
		self._viewer = GeneralsViewer(window_title)
		self._viewer.mainViewerLoop()

	def _start_game_loop(self):
		"""Connect to the chosen game mode, then relay console chat."""
		# Create Game
		if (self._gameType == "ffa"): # FFA
			self._game = generals.Generals(self._name, self._name, 'ffa')
		elif (self._gameType == "1v1"): # 1v1
			self._game = generals.Generals(self._name, self._name, '1v1')
		else: # private
			self._game = generals.Generals(self._name, self._name, 'private', gameid=self._privateRoomID)
		# Start Game Update Loop
		self._running = True
		_create_thread(self._start_update_loop)
		# Main-loop of this thread: forward stdin lines as chat messages.
		while (self._running):
			msg = str(raw_input('Send Msg:'))
			self._game.send_chat(msg)
			time.sleep(0.7)

	######################### Handle Updates From Server #########################

	def _start_update_loop(self):
		"""Consume server updates: record state, move, refresh the viewer."""
		for update in self._game.get_updates():
			self._set_update(update)
			if (not self._running):
				return
			self._make_move()
			# Update GeneralsViewer Grid
			if '_viewer' in dir(self):
				if '_path' in dir(self):
					self._update.path = self._path
				if '_collect_path' in dir(self):
					self._update.collect_path = self._collect_path
				self._viewer.updateGrid(self._update)

	def _set_update(self, update):
		"""Store the latest update; stop the bot when the game is over."""
		if (update.complete):
			print("!!!! Game Complete. Result = " + str(update.result) + " !!!!")
			self._running = False
			return
		self._update = update

	######################### Move Generation #########################

	def _make_move(self):
		# Delegate the actual strategy to the injected callback.
		self._updateMethod(self, self._update)

	######################### Tile Finding #########################

	def find_largest_tile(self, ofType=None, notInPath=[], includeGeneral=False): # ofType = Integer, notInPath = [Tile], includeGeneral = False|True|Int Acceptable Largest|0.1->0.9 Ratio
		"""Return the largest-army tile owned by *ofType* (default: us),
		excluding *notInPath*; *includeGeneral* optionally allows the
		general as a candidate (bool, absolute cap, or 0.1-0.9 ratio)."""
		if (ofType == None):
			ofType = self._update.player_index
		general = self._update.generals[ofType]
		largest = None
		for x in range(self._update.cols): # Check Each Square
			for y in range(self._update.rows):
				tile = self._update.grid[y][x]
				if (tile.tile == ofType and (largest == None or largest.army < tile.army)): # New Largest
					if ((tile not in notInPath) and tile != general): # Exclude Path and General
						largest = tile
		if (includeGeneral > 0 and general not in notInPath): # Handle includeGeneral
			if (includeGeneral < 1):
				# Ratio form: scale by the general's army, floor at 6.
				includeGeneral = general.army * includeGeneral
				if (includeGeneral < 6):
					includeGeneral = 6
			if (largest == None):
				largest = general
			elif (includeGeneral == True and largest.army < general.army):
				largest = general
			elif (includeGeneral > True and largest.army < general.army and largest.army <= includeGeneral):
				largest = general
		return largest

	def find_city(self, ofType=None, notOfType=None, notInPath=[], findLargest=True, includeGeneral=False): # ofType = Integer, notOfType = Integer, notInPath = [Tile], findLargest = Boolean
		"""Return a city matching the owner filters; largest or smallest
		per *findLargest*, optionally preferring the general."""
		if (ofType == None and notOfType == None):
			ofType = self._update.player_index
		found_city = None
		for city in self._update.cities: # Check Each City
			if (city in notInPath):
				continue
			if (city.tile == ofType or (notOfType != None and city.tile != notOfType)):
				if (found_city == None):
					found_city = city
				if (findLargest and found_city.army < city.army) or (not findLargest and city.army < found_city.army):
					found_city = city
		if includeGeneral:
			general = self._update.generals[ofType]
			if (found_city == None):
				return general
			if (general != None and ((findLargest and general.army > found_city.army) or (not findLargest and general.army < found_city.army))):
				return general
		return found_city

	def find_closest_in_path(self, tile, path):
		"""Return the tile in *path* with minimum Manhattan distance to *tile*."""
		closest = None
		closest_distance = 9999
		for x in range(self._update.cols): # Check Each Square
			for y in range(self._update.rows):
				dest = self._update.grid[y][x]
				if (dest in path):
					distance = self.distance(tile, dest)
					if (distance < closest_distance):
						closest = dest
						closest_distance = distance
		return closest

	def find_closest_target(self, source):
		"""Pick the nearest attackable tile, with heuristic distance scaling:
		generals look much closer, cities somewhat closer, empties further."""
		max_target_army = source.army * 2 + 14
		closest = None
		closest_distance = 9999
		for x in range(self._update.cols): # Check Each Square
			for y in range(self._update.rows):
				dest = self._update.grid[y][x]
				if (dest.tile < generals.map.TILE_EMPTY or dest.tile == self._update.player_index or dest.army > max_target_army): # Non Target Tiles
					continue
				distance = self.distance(source, dest)
				if (dest in self._update.generals): # Generals appear closer
					distance = distance * 0.17
				elif (dest in self._update.cities): # Cities vary distance based on size, but appear closer
					distance = distance * sorted((0.22, (dest.army / (1.4*source.army)), 4))[1]
				elif (dest.tile == generals.map.TILE_EMPTY): # Empties appear further away
					distance = distance * 3.9
				if (dest.army > source.army): # Larger targets appear further away
					distance = distance * (1.4*dest.army/source.army)
				if (distance < closest_distance and self._validTarget(dest)):
					closest = dest
					closest_distance = distance
		return closest

	def find_primary_target(self, target=None):
		"""Choose (or keep) the main objective, in descending priority:
		general, then city, then army, then empty square."""
		target_type = OPP_EMPTY - 1
		if (target != None and target in self._update.generals):
			target_type = OPP_GENERAL
		elif (target != None and target in self._update.cities):
			target_type = OPP_CITY
		elif (target != None and target.army > 0):
			target_type = OPP_ARMY
		elif (target != None):
			target_type = OPP_EMPTY
		if (target != None and target.tile == self._update.player_index): # Acquired Target
			target = None
			target_type = OPP_EMPTY - 1
		# Determine Max Target Size
		largest = self.find_largest_tile(includeGeneral=True)
		max_target_size = largest.army * 1.25
		for x in _shuffle(range(self._update.cols)): # Check Each Square
			for y in _shuffle(range(self._update.rows)):
				source = self._update.grid[y][x]
				if (not self._validTarget(source)):
					continue
				if (target_type <= OPP_GENERAL): # Search for Generals
					if (source.tile >= 0 and source.tile != self._update.player_index and source in self._update.generals and source.army < largest.army):
						return source
				if (target_type <= OPP_CITY): # Search for Smallest Cities
					if (source.tile != self._update.player_index and source.army < max_target_size and source in self._update.cities):
						if (target_type < OPP_CITY or source.army < target.army):
							target = source
							target_type = OPP_CITY
							newTarget = True  # NOTE(review): never read — looks vestigial
				if (target_type <= OPP_ARMY): # Search for Largest Opponent Armies
					if (source.tile >= 0 and source.tile != self._update.player_index and (target == None or source.army > target.army) and source not in self._update.cities):
						target = source
						target_type = OPP_ARMY
				if (target_type < OPP_EMPTY): # Search for Empty Squares
					if (source.tile == generals.map.TILE_EMPTY and source.army < max_target_size):
						return source
		return target

	######################### Pathfinding #########################

	def find_path(self, source=None, dest=None):
		"""A*-style search from *source* (default: our general) to *dest*
		(default: primary target), costed by armies along the way."""
		# Verify Source and Dest
		if (source == None): # No Source, Use General
			source = self._update.generals[self._update.player_index]
		if (dest == None): # No Dest, Use Primary Target
			dest = self.find_primary_target()
		# Current Player Largest Army
		largest = self.find_largest_tile(includeGeneral=True)
		# Determine Path To Destination
		frontier = PriorityQueue()
		# NOTE(review): Queue.PriorityQueue.put's second positional arg is
		# `block`, not a priority — the computed priority is effectively
		# ignored and items are ordered by their own comparison; confirm.
		frontier.put(source, largest.army - source.army)
		came_from = {}
		cost_so_far = {}
		came_from[source] = None
		cost_so_far[source] = largest.army - source.army
		while not frontier.empty():
			current = frontier.get()
			if current == dest: # Found Destination
				break
			for next in self._neighbors(current):
				# Calculate New Cost
				new_cost = next.army
				if (next.tile == self._update.player_index):
					new_cost = 0 - new_cost
				new_cost = cost_so_far[current] + largest.army + new_cost + 1
				# Add to frontier
				if next not in cost_so_far or (new_cost < cost_so_far[next] and next not in self._path_reconstruct(came_from, current)):
					cost_so_far[next] = new_cost
					# Calculate Priority
					priority = new_cost + (self.distance(next, dest)**2)
					if (next.tile != self._update.player_index and (next in self._update.cities or next in self._update.generals)): # Increase Priority of New Cities
						priority -= largest.army
						cost_so_far[next] -= source.army
					frontier.put(next, priority)
					came_from[next] = current
		# Create Path List
		path = self._path_reconstruct(came_from, dest)
		return path

	def _path_reconstruct(self, came_from, dest):
		"""Walk the came_from chain back from *dest* and return the path
		source-first; tolerates an unreached dest (partial chain)."""
		current = dest
		path = [current]
		try:
			while came_from[current] != None:
				current = came_from[current]
				path.append(current)
		except KeyError:
			None
		path.reverse()
		return path

	def _neighbors(self, source):
		"""Return passable orthogonal neighbors (cities/generals allowed
		even when shown as obstacles)."""
		x = source.x
		y = source.y
		neighbors = []
		for dy, dx in DIRECTIONS:
			if (self.validPosition(x+dx, y+dy)):
				current = self._update.grid[y+dy][x+dx]
				if (current.tile != generals.map.TILE_OBSTACLE or current in self._update.cities or current in self._update.generals):
					neighbors.append(current)
		return neighbors

	######################### Movement Helpers #########################

	def path_forward_moves(self, path):
		"""Return (largest_own_tile_in_path, its_successor) or (None, None)."""
		if (len(path) < 2):
			return (None, None)
		# Find largest tile in path to move forward
		largest = path[0]
		largest_index = 0
		for i, tile in enumerate(path):
			if (tile == path[-1]):
				break
			# NOTE(review): `tile > largest` compares Tile objects directly;
			# presumably `tile.army > largest.army` was intended — confirm
			# how Tile defines ordering.
			if (tile.tile == path[0].tile and tile > largest):
				largest = tile
				largest_index = i
		dest = path[largest_index+1]
		return (largest, dest)

	def toward_dest_moves(self, source, dest=None):
		"""Return 4 (dy, dx) moves: the two toward *dest* first (random
		order), then the two away (random order)."""
		# Determine Destination
		if (dest == None):
			dest = self.find_primary_target()
		# Compute X/Y Directions
		dir_y = 1
		if source.y > dest.y:
			dir_y = -1
		dir_x = 1
		if source.x > dest.x:
			dir_x = -1
		# Return List of Moves
		moves = random.sample([(0, dir_x), (dir_y, 0)],2)
		moves.extend(random.sample([(0, -dir_x), (-dir_y, 0)],2))
		return moves

	def away_king_moves(self, source):
		"""Return 4 (dy, dx) moves biased away from our own general."""
		general = self._update.generals[self._update.player_index]
		if (source.y == general.y and source.x == general.x): # Moving from General
			return self.moves_random()
		dir_y = 1
		if source.y < general.y:
			dir_y = -1
		dir_x = 1
		if source.x < general.x:
			dir_x = -1
		moves = random.sample([(0, dir_x), (dir_y, 0)],2)
		moves.extend(random.sample([(0, -dir_x), (-dir_y, 0)],2))
		return moves

	def moves_random(self):
		"""Return the four direction deltas in random order."""
		return random.sample(DIRECTIONS, 4)

	def distance(self, source, dest):
		"""Manhattan distance between two tiles."""
		return abs(source.x - dest.x) + abs(source.y - dest.y)

	def place_move(self, source, dest, move_half=False):
		"""Issue a move to the server; returns True when dest is on-board."""
		if (self.validPosition(dest.x, dest.y)):
			self._game.move(source.y, source.x, dest.y, dest.x, move_half)
			return True
		return False

	def validPosition(self, x, y):
		"""True when (x, y) is on the board and not a mountain."""
		return 0 <= y < self._update.rows and 0 <= x < self._update.cols and self._update._tile_grid[y][x] != generals.map.TILE_MOUNTAIN

	def _validTarget(self, target): # Check target to verify reachable
		"""True when *target* has at least one passable neighbor."""
		for dy, dx in self.moves_random():
			if (self.validPosition(target.x+dx, target.y+dy)):
				tile = self._update.grid[target.y+dy][target.x+dx]
				if (tile.tile != generals.map.TILE_OBSTACLE or tile in self._update.cities or tile in self._update.generals):
					return True
		return False
######################### Global Helpers #########################
def _create_thread(f):
    """Run *f* on a daemon thread so it never keeps the process alive."""
    worker = threading.Thread(target=f)
    worker.daemon = True
    worker.start()
def _shuffle(seq):
    """Return an iterator over the items of *seq* in random order."""
    items = list(seq)
    random.shuffle(items)
    return iter(items)
|
client.py | import argparse
import json
import requests
from threading import Thread
from server import create_server
class KeyValueClient:
    """Thin HTTP client for the key-value server.

    All methods return parsed JSON (or a success boolean) and treat any
    non-200 status as failure.
    """

    def __init__(self, host, port):
        self.url = f"http://{host}:{port}"
        self.session = requests.Session()

    def get_keys(self, keys):
        """Fetch the values for *keys*; None when the server rejects them."""
        response = self.session.get(self.url, params={"key": keys})
        return response.json() if response.status_code == 200 else None

    def get_all_keys(self):
        """List every key stored on the server; None on failure."""
        response = self.session.get(f"{self.url}/keys")
        return response.json() if response.status_code == 200 else None

    def set_keys(self, data):
        """Store the *data* mapping; True on success."""
        response = self.session.post(self.url, data=json.dumps(data))
        return response.status_code == 200

    def delete_keys(self, keys):
        """Remove *keys* from the server; True on success."""
        payload = json.dumps({"key": keys})
        return self.session.delete(self.url, data=payload).status_code == 200
def init_server(host=None, port=None):
    """Start the key-value server on a background thread.

    Returns ``(host, port, server, thread)``; host/port are read back from
    the bound socket, so they are valid even when defaults were used.
    """
    server = create_server(host, port) if host and port else create_server()
    thread = Thread(target=server.serve_forever)
    thread.start()
    host, port = server.server_address
    return host, port, server, thread
def main(host=None, port=None, noserver=False):
    """Interactive console REPL driving a KeyValueClient against host:port.

    Single-letter commands (g/l/s/d/e) as printed below; exits on 'e',
    Ctrl-C or Ctrl-D.
    """
    client = KeyValueClient(host, port)
    print(
        f"Client for key-value server (http://{host}:{port}):\n"
        "g <key, ...> - Get keys\n"
        "l - Get all keys\n"
        "s <key=value, ...> - Set keys\n"
        "d <key, ...> - Delete keys\n"
        "e - Exit (also Ctrl-C and Ctrl-D)"
    )
    while True:
        try:
            command, whitespace, data = input("\n").partition(" ")
        except (KeyboardInterrupt, EOFError):
            break
        # Every command except 'l' and 'e' needs an argument part.
        if not whitespace and command not in ("l", "e"):
            continue
        if command == "g":
            keys = [item.strip() for item in data.split(",")]
            result = client.get_keys(keys)
            print(result or f"Error! No keys: {keys}")
        if command == "l":
            result = client.get_all_keys()
            print(result if result is not None else "Error!")
        elif command == "s":
            pairs = [chunk.strip().partition("=") for chunk in data.split(",")]
            if not all(sep for _, sep, _ in pairs):
                print("Error! Can't parse data")
                continue
            payload = {key: value for key, _, value in pairs}
            print("OK" if client.set_keys(payload) else "Error!")
        elif command == "d":
            keys = [item.strip() for item in data.split(",")]
            print("OK" if client.delete_keys(keys) else "Error!")
        elif command == "e":
            break
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", help="Host for client")
    parser.add_argument("--port", help="Port for client", type=int)
    parser.add_argument(
        "--noserver",
        help="No initialize server (You must set host and port)",
        action="store_true",
    )
    args = parser.parse_args()
    server = None
    thread = None
    host = args.host
    port = args.port
    if not args.noserver:
        # Spin up an in-process server when none was requested externally.
        host, port, server, thread = init_server(host, port)
    elif host is None and port is None:
        # NOTE(review): only rejects when BOTH are missing; a lone --host
        # or --port slips through to the client — confirm intended.
        raise ValueError("You must set host and port if no server (See --help)")
    try:
        main(host, port)
    except requests.exceptions.ConnectionError:
        print("Server is offline. Try later")
    # Shut the embedded server down cleanly before exiting.
    if server and thread:
        server.shutdown()
        thread.join()
|
gameserver.py | import board as b
import chatroom
import threading
import time
import sys, traceback
import gameserverstatus as gss
import warcode as wc
# menu options: single-character codes the client sends for each main-menu choice
SINGLE_PLAYER_OPTION = "1"
MULTI_PLAYER_OPTION = "2"
JOIN_PLAYER_OPTION = "3"
QUIT_OPTION = "4"
class TheGameServer:
    """Threaded battleship-style game server.

    Central state lives in ``self.games`` (a ``gss.ServerStatus``), which
    tracks players, games, teams and turn order.  Two background threads
    run for the server's lifetime:

    * MODERATOR -- watches for INACTIVE players and drops each one into a
      threaded main menu.
    * POLICE    -- watches active games and clears deadlocked ones.

    A ``chatroom.ChatRoom`` shares the same structure to route chat
    messages (public, per game, per team, private).
    """
    def __init__(self, max,udp_socket):
        """Set up server state, background threads and the chat room.

        max        -- capacity forwarded to ServerStatus (presumably the
                      maximum number of players/games -- confirm in gss)
        udp_socket -- socket handed to the chat room for messaging
        """
        self.log("Initializing the game server")
        # special structure: GAMES
        # this is the main data structure(variable) of the game server
        # details inside ServerStatus object
        self.games = gss.ServerStatus(max)
        self.board = 0  # monotonically increasing board-id counter (see new_board_id)
        self.server_quitting = False # used to gracefully stop the server
        self.threads = [] # all the threads will be enlisted here
        self.code = wc.WarCode() # war codes translator (the wire protocol)
        self.lock = threading.Lock() # to lock my threads
        # prepare moderator process
        traffic_t = threading.Thread(target=self.moderator)
        self.add_thread(traffic_t)
        # prepare police process
        police_t = threading.Thread(target=self.police)
        self.add_thread(police_t)
        # prepare the chat room
        # chat room uses the GAMES structure to direct messages among users
        # messages can be: public, per game, per team, private
        self.chat_room = chatroom.ChatRoom(self.games,udp_socket)

    # add a new thread to the threads list
    def add_thread(self,t):
        """Start `t` as a daemon thread and record it so quit() can join it."""
        t.daemon = True
        t.start()
        self.threads.append(t)

    # stops the server gracefully, joins all threads
    def quit(self):
        """Stop the chat room, signal every loop to exit, join all threads."""
        self.log("Closing chat room")
        self.chat_room.quit()
        self.log("Closing game server")
        self.server_quitting = True  # moderator/police loops poll this flag
        for t in self.threads:
            t.join()

    # Threaded processes
    """
    the moderator the background process that will be running while the game_server is active
    it will check for the INACTIVE status of every connected player
    if it is INACTIVE the player is sent to a new threaded menu
    """
    def moderator(self):
        self.log("\tMODERATOR: TID = " + str(threading.current_thread()))
        while not self.server_quitting:
            player = self.games.detect_inactive_player()
            if (player):
                self.log("MODERATOR: Player "+ player.name + " was taken to the menu")
                player.take_to_menu() # The player is not playing but in the menu (voids MODERATOR actions)
                # each menu interaction runs on its own thread
                menu_t = threading.Thread(target=self.menu, args=(player,))
                self.add_thread(menu_t)
        self.log("\tMODERATOR: thread finished")

    """
    the police is a background process that checks the status of every active game
    if the game is in a deadlock it will unlock the game
    """
    def police(self):
        self.log("\tPOLICE: TID = " + str(threading.current_thread()))
        while not self.server_quitting:
            #self.games.detect_fix_finished_games(self.code.game_won("You (or your team) won the game"))
            self.games.detect_fix_deadLocks()
        self.log("\tPOLICE: thread finished")

    # log in the system report
    def log(self,msg):
        """Write `msg` to stdout (file logging not implemented yet)."""
        print(msg)
        #### save to a file

    # main menu
    def menu(self,player):
        """Show the main menu to `player` and dispatch the chosen option.

        Runs on its own thread (spawned by the MODERATOR).  On an invalid
        option the except branch asks the player to retry; the MODERATOR
        will pick the (still inactive) player up again later.
        """
        self.log("Menu: TID = " + str(threading.current_thread()))
        player.send(self.code.main_menu())
        self.code.translate(player.receive())
        try:
            if (self.code.is_single_player_option): # single player
                self.single_player_game(player,self.new_game_id())
                return
            if (self.code.is_multiplayer_option): # multi-player
                self.multiplayer_game(player,self.new_game_id())
                return
            if (self.code.is_join_option): # join a game (sub-menu)
                self.join_game(player)
                return
            if (self.code.is_medal_option): # medals
                self.medals(player)
                player.deactivate()
                return
            if (self.code.is_help_option): # help
                self.help(player)
                player.deactivate()
                return
            # if the player reaches here, then the player selected quit
            player.send(self.code.acknowledgement("Bye")) # send last message
            player.quit() # finish player
        except Exception as e:
            # NOTE(review): catching bare Exception reports every failure as
            # "invalid option", which can hide real errors -- consider narrowing.
            player.send("Server: Please enter a valid option")

    """
    regular methods
    """
    """
    adds a new player to the list of players
    this player will be found later by the moderator thread
    and will be guided to the main menu
    """
    def add_player(self,player):
        self.games.add_player(player)

    """
    Update game_id in only one place
    used locks to make sure every id is unique
    """
    def new_game_id(self):
        self.lock.acquire()
        g_id = self.games.new_game_id()
        self.lock.release()
        return str(g_id)

    # one id for every board
    def new_board_id(self):
        """Return a unique, lock-protected board id as a string."""
        self.lock.acquire()
        self.board += 1
        board_id = str(self.board)
        self.lock.release()
        return board_id

    """
    This is the single player game method that wont be in deadlock as soon as the
    user select quit game will be taken back to the main menu
    """
    def single_player_game(self,player,this_game_id):
        """Run one human-vs-computer game until win, loss or quit."""
        won = False
        quit = False
        lost = False
        player.game = int(this_game_id)
        player_game = b.Board(self.new_board_id()) # new game
        self.games.add_singleplayer_game(this_game_id,player)
        computer_game = b.Board(self.new_board_id(),True) # automatic player
        while not self.server_quitting and \
              not won and not lost and not quit:
            # send both boards (friendly view) to the player each turn
            player_tuple = player_game.board_id, player_game.serialize(True)
            computer_tuple = computer_game.board_id, computer_game.serialize(True)
            player.send(self.code.boards_msg([player_tuple],[computer_tuple]))
            msg = player.receive()
            self.code.translate(msg)
            # quit game
            if(self.code.is_quitting_option):
                quit = True
                continue
            if(self.code.is_a_shoot): # this verifies if the CODE is a shoot
                _,x,y = self.code.shot() # if it is extracts the x,y
                if(computer_game.valid(x,y)): # verify range of the coordinates
                    won = computer_game.shoot(x,y) # player's turn
                    if(not won):
                        x,y = computer_game.generate_coordinates() # computer's turn
                        lost = player_game.shoot(x,y)
                else:
                    player.send(self.code.invalid_shot("Enter a valid shot"))
        player.send(self.get_result(player,won,lost,quit)) # prepares and send message to the player
        self.games.set_deadlock(this_game_id) # POLICE process is in charge from now on

    # prepares message to be sent to the player after a game is finished
    def get_result(self,player,won,lost,quit):
        """Build the end-of-game message and update the player's record."""
        if(self.server_quitting):
            msg = self.code.server_down("Server is down")
        elif(won):
            msg = self.code.game_won("Congratulations, you won the game")
            player.won_game()
        elif(lost):
            msg = self.code.game_lost("You lost, try next time")
            player.lost_game()
        else:
            msg = self.code.game_quitting("I knew you gonna quit!!")
            player.quit_game()
        return msg

    """
    This the multi-player game.
    Every NEW multi-player game will be running in a separated thread
    this method will wait for new players to REGISTER to this game
    these new player must have choose the join option in the main menu
    when the list of players is full , the game is started
    if the player(the creator of the game) decides to abort
    the game is marked as a DEADLOCK then
    the main loop of this method wont start and
    after the main loop: all the REGISTERED players to this game are marked as INACTIVE
    once the team is formed
    """
    def multiplayer_game(self,player,this_game_id):
        # ask the creator for the player count and a team choice
        player.send(self.code.players_count())
        self.code.translate(player.receive())
        players_count = self.code.players
        player.send(self.code.teams())
        msg = player.receive()
        self.code.translate(msg)
        team = self.code.team
        self.games.add_multiplayer_game(this_game_id,player,players_count,team) # update server status
        wait = self.wait_for_players(this_game_id,player)
        if(wait):
            players = self.games.player_game_players(player) # get every player in this game
            for p in players:
                p.board = b.Board(self.new_board_id()) # is assigned a board for each player
                p.send(self.code.serve_board(p.board.serialize(True))) # every player is served
                p.receive() # this answer is an ACK automatic generated by the client
            while not self.server_quitting and not self.games.is_in_deadlock(this_game_id) \
                  and not self.games.is_finished(this_game_id):
                friends = self.games.get_friends(this_game_id,player)
                enemies = self.games.get_enemies(this_game_id,player)
                # get all the boards in all versions: friendly and not friendly for enemies and friends
                friendly = True
                f_for_f = [(f.board.board_id,f.board.serialize(friendly)) for f in friends] # friends boards to be sent to the friends
                f_for_e = [(f.board.board_id,f.board.serialize(not friendly)) for f in friends] # friends boards to be sent to enemies
                e_for_f = [(e.board.board_id,e.board.serialize(not friendly)) for e in enemies] # enemies boards to be sent to friends
                e_for_e = [(e.board.board_id,e.board.serialize(friendly)) for e in enemies] # enemies boards to be sent to enemies
                self.inform_teams(friends,f_for_f,e_for_f,player) # send boards to the team mates
                self.inform_teams(enemies,e_for_e,f_for_e) # send boards to enemies
                valid_shot = False
                while not valid_shot:
                    self.code.translate(player.receive())
                    valid_shot = self.code.is_a_shoot
                if (valid_shot):
                    board_index,x,y = self.code.shot() # retrieve the board and shot coordinates
                    # NOTE(review): `next(..., None)` returns None for an unknown
                    # board id, which would crash on `enemy.board` below --
                    # confirm the client can never send a bad id.
                    enemy = next((e for e in enemies if e.board.board_id == str(board_index)), None)
                    if(enemy.board.valid(x,y)): # if it is a valid shoot
                        this_player_lost = enemy.board.shoot(x,y) # shoot the board
                        self.lock.acquire()
                        if (this_player_lost):
                            enemy.send(self.code.game_lost("Game lost, good luck the next one"))# last communication with the player
                            enemy.lost_game()
                            self.games.remove_player(this_game_id,enemy) # remove the player
                            if (self.games.is_finished(this_game_id)):
                                self.games.inform_players(this_game_id,self.code.game_won("You or your team won the game"))
                                self.games.set_deadlock(this_game_id)
                                self.lock.release()
                                return
                        self.lock.release()
                        self.games.next_player(this_game_id) # move to next player
                        player = self.games.get_player_in_turn(this_game_id) # get the player
                    else:
                        self.log("Not valid shot")
                else:
                    # NOTE(review): appears unreachable -- the loop above only
                    # exits once valid_shot is True.
                    player.send(self.code.invalid_shot("Enter a valid shot"))

    # informs to every single player about the game boards
    # * player : the player to be informed
    # * boards_from_friends : boards from team mates
    # * boards_from_enemies : boards from enemies
    def inform_teams(self,players,boards_from_friends,boards_from_enemies,current_player=None):
        """Broadcast board states; the player in turn gets an 'in turn' frame."""
        for player in players:
            if(player != current_player):
                player.send(self.code.boards_msg(boards_from_friends,boards_from_enemies))
                player.receive() # acknowledgement received
            else:
                player.send(self.code.in_turn(self.code.boards_msg(boards_from_friends,boards_from_enemies)))

    # this is the lobby where the players wait for others to connect, max wait one minute
    def wait_for_players(self,this_game_id,player):
        """Block until the game fills; return False if the wait times out."""
        while self.games.is_game_open(this_game_id): # repeat while game is open
            if (self.games.waiting_too_long(this_game_id,time.time())): # if waiting too much (> 1 minute)
                players = self.games.player_game_players(player) # all the player in this game
                for p in players:
                    p.send(self.code.waiting_termination()) # inform all that the waiting is over
                    p.receive()
                self.games.set_deadlock(this_game_id) # send the POLICE to fix it
                return False
        return True

    # join an existing game
    def join_game(self,player):
        """Let `player` pick and register into an open multi-player game."""
        available_games = self.games.open_games()
        if (len(available_games)== 0): # sorry not available games
            player.send(self.code.no_open_games()) # msg = "no game"
            player.deactivate() # send back to the main menu (MODERATOR)
            return
        player.send(self.code.open_games(available_games)) # there are available games
        self.code.translate(player.receive())
        if (not self.code.is_quitting_option):
            game_selected = self.code.game_to_join
            player.send(self.code.teams())
            msg = player.receive()
            self.code.translate(msg)
            player.set_initial_time()
            self.games.update_game_player(game_selected, player,self.code.team)
            return # wait for the game to start
        player.deactivate()

    # medals option
    def medals(self,player):
        """Send the player's won/lost/quit record."""
        player.send(self.code.medals(player.games_won ,
                                     player.games_lost,
                                     player.games_quit))

    # help
    def help(self,player):
        """Placeholder help: just sends an acknowledgement."""
        player.send(self.code.acknowledgement())
|
runtime.py | from concurrent.futures import ThreadPoolExecutor
from functools import lru_cache, partial, wraps
import inspect
import threading
import uuid
import sublime
import sublime_plugin
MYPY = False
if MYPY:
from typing import Any, Callable, Dict, Iterator, Literal, Optional, Tuple, TypeVar
T = TypeVar('T')
F = TypeVar('F', bound=Callable[..., Any])
Callback = Tuple[Callable, Tuple[Any, ...], Dict[str, Any]]
ReturnValue = Any
# Single-threaded executor: serializes "savvy" tasks in submission order.
savvy_executor = ThreadPoolExecutor(max_workers=1)
# `enqueue_on_*` functions emphasize that we run two queues and
# just put tasks on it. In contrast to `set_timeout_*` which
# emphasizes that we delay or defer something. (In particular
# `set_timeout_async` is somewhat a misnomer because both calls
# return immediately.)
# Both functions have the standard python callable interface
# `(f, *a, *kw)`, which is used in e.g. `partial` or
# `executor.submit`. This has the advantage that we can swap
# the functions to change the behavior without changing the
# arguments.
def enqueue_on_ui(fn, *args, **kwargs):
    # type: (Callable, Any, Any) -> None
    """Put `fn(*args, **kwargs)` on Sublime's UI-thread queue."""
    callback = partial(fn, *args, **kwargs)
    sublime.set_timeout(callback)
def enqueue_on_worker(fn, *args, **kwargs):
    # type: (Callable, Any, Any) -> None
    """Put `fn(*args, **kwargs)` on Sublime's async worker queue."""
    callback = partial(fn, *args, **kwargs)
    sublime.set_timeout_async(callback)
def enqueue_on_savvy(fn, *args, **kwargs):
    # type: (Callable, Any, Any) -> None
    """Put `fn(*args, **kwargs)` on the single-worker savvy executor."""
    task = partial(fn, *args, **kwargs)
    savvy_executor.submit(task)
def run_on_new_thread(fn, *args, **kwargs):
    # type: (Callable, Any, Any) -> None
    """Start a fresh (non-daemon) thread running `fn(*args, **kwargs)`."""
    worker = threading.Thread(target=fn, args=args, kwargs=kwargs)
    worker.start()
def on_new_thread(fn):
    """Decorator: every call to `fn` is dispatched onto a fresh thread."""
    @wraps(fn)
    def threaded(*args, **kwargs):
        run_on_new_thread(fn, *args, **kwargs)
    return threaded
def run_or_timeout(fn, timeout):
    """Run `fn` on a new thread and wait at most `timeout` seconds.

    Returns `fn`'s return value, re-raises any exception it raised, or
    raises TimeoutError if it does not finish in time (in which case the
    worker thread keeps running in the background).
    """
    cond = threading.Condition()
    result = None
    exc = None
    finished = False

    def program():
        nonlocal exc, result, finished
        try:
            result = fn()
        except Exception as e:
            exc = e
        finally:
            with cond:
                finished = True
                cond.notify_all()

    with cond:
        run_on_new_thread(program)
        # BUG FIX: `cond.wait(timeout)` can return True on a spurious
        # wakeup before `program` is done; `wait_for` re-checks the
        # `finished` predicate, so completion cannot be mis-detected.
        if not cond.wait_for(lambda: finished, timeout):
            raise TimeoutError()
    if exc:
        raise exc
    else:
        return result
# `lock` guards the COMMANDS/RESULTS hand-off between `run_as_text_command`
# and `gs_generic_text_cmd`; both dicts are keyed by a uuid token.
lock = threading.Lock()
COMMANDS = {}  # type: Dict[str, Callback]
RESULTS = {}  # type: Dict[str, ReturnValue]
def run_as_text_command(fn, view, *args, **kwargs):
    # type: (Callable[..., T], sublime.View, Any, Any) -> Optional[T]
    """Run `fn(view, *args, **kwargs)` wrapped in a Sublime text command.

    Registers the callback under a unique token, triggers
    `gs_generic_text_cmd` (which pops and executes it inside a valid edit
    context), then collects the return value the command stored.
    """
    token = uuid.uuid4().hex
    with lock:
        COMMANDS[token] = (fn, (view, ) + args, kwargs)
    view.run_command('gs_generic_text_cmd', {'token': token})
    with lock:
        # If the view has been closed, Sublime will not run
        # text commands on it anymore (but also not throw).
        # For now, we stay close, don't raise and just return
        # `None`.
        rv = RESULTS.pop(token, None)
    return rv
def text_command(fn):
    # type: (F) -> F
    """Decorator: run the wrapped callable as a Sublime text command."""
    @wraps(fn)
    def wrapper(view, *args, **kwargs):
        # type: (sublime.View, Any, Any) -> Optional[T]
        return run_as_text_command(fn, view, *args, **kwargs)
    return wrapper  # type: ignore[return-value]
@lru_cache()
def wants_edit_object(fn):
    """Return True if `fn` declares an `edit` parameter in its signature."""
    parameters = inspect.signature(fn).parameters
    return 'edit' in parameters
class gs_generic_text_cmd(sublime_plugin.TextCommand):
    """Trampoline TextCommand executing callbacks registered in COMMANDS.

    `run_as_text_command` stores `(fn, args, kwargs)` under a token; this
    command pops the entry, runs it inside a valid edit context, and
    publishes the return value in RESULTS under the same token.
    """
    def run_(self, edit_token, cmd_args):
        cmd_args = self.filter_args(cmd_args)
        token = cmd_args['token']
        with lock:
            # Any user can "redo" text commands, but we don't want that.
            try:
                fn, args, kwargs = COMMANDS.pop(token)
            except KeyError:
                return
        edit = self.view.begin_edit(edit_token, self.name(), cmd_args)
        try:
            if wants_edit_object(fn):
                # args[0] is the view; splice the edit object in right after it.
                return self.run(token, fn, args[0], edit, *args[1:], **kwargs)
            else:
                return self.run(token, fn, *args, **kwargs)
        finally:
            self.view.end_edit(edit)

    def run(self, token, fn, *args, **kwargs):
        # Execute the callback and publish its result for the caller.
        rv = fn(*args, **kwargs)
        with lock:
            RESULTS[token] = rv
# Latest scheduled action per key; a `throttled` task only fires if it is
# still the newest entry for its key when it finally runs.
THROTTLED_CACHE = {}
THROTTLED_LOCK = threading.Lock()
def throttled(fn, *args, **kwargs):
    # type: (...) -> Callable[[], None]
    """Return a task that runs `fn(*args, **kwargs)` only if no newer
    throttled call for the same `fn` has been created since."""
    key = (fn,)
    bound_call = partial(fn, *args, **kwargs)
    with THROTTLED_LOCK:
        THROTTLED_CACHE[key] = bound_call

    def task():
        with THROTTLED_LOCK:
            still_current = THROTTLED_CACHE[key] == bound_call
        if still_current:
            bound_call()

    return task
# Sentinels yielded by cooperative functions (see `cooperative_thread_hopper`)
# to request continuation on the UI thread or the async worker thread.
AWAIT_UI_THREAD = 'AWAIT_UI_THREAD'  # type: Literal["AWAIT_UI_THREAD"]
AWAIT_WORKER = 'AWAIT_WORKER'  # type: Literal["AWAIT_WORKER"]
if MYPY:
    HopperR = Iterator[Literal["AWAIT_UI_THREAD", "AWAIT_WORKER"]]
    HopperFn = Callable[..., HopperR]
def cooperative_thread_hopper(fn):
    # type: (HopperFn) -> Callable[..., None]
    """Mark given function as cooperative.

    `fn` must return `HopperR` t.i. it must yield AWAIT_UI_THREAD
    or AWAIT_WORKER at some point.

    When calling `fn` it will run on the same thread as the caller
    until the function yields.  It then schedules a task on the
    desired thread which will continue executing the function.

    It is thus cooperative in the sense that all other tasks
    already queued will get a chance to run before we continue.
    It is "async" in the sense that the function does not run
    from start to end in a blocking manner but can be suspended.

    However, it is sync till the first yield (but you could of
    course yield on the first line!), only then execution returns
    to the call site.

    Be aware that, if the call site and the thread you request are
    _not_ the same, you can get concurrent execution afterwards!
    """
    def tick(gen, send_value=None):
        # Advance the generator one step, then re-schedule this trampoline
        # on whichever thread the generator asked for.
        try:
            rv = gen.send(send_value)
        except StopIteration:
            return
        except Exception as ex:
            # `from None` drops the implicit exception context for a
            # cleaner traceback at the failure site.
            raise ex from None
        if rv == AWAIT_UI_THREAD:
            enqueue_on_ui(tick, gen)
        elif rv == AWAIT_WORKER:
            enqueue_on_worker(tick, gen)

    def decorated(*args, **kwargs):
        gen = fn(*args, **kwargs)
        # A non-generator return means `fn` never yielded; nothing to drive.
        if inspect.isgenerator(gen):
            tick(gen)

    return decorated
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.